/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorFinish()
 *	ExecutorEnd()
 *
 *	These four procedures are the external interface to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart must be called at the beginning of execution of any
 *	query plan and ExecutorEnd must always be called at the end of
 *	execution of a plan (unless it is aborted due to error).
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.  In some cases ExecutorRun may be called multiple times to
 *	process all the tuples for a plan.  It is also acceptable to stop short
 *	of executing the whole plan (but only if it is a SELECT).
 *
 *	ExecutorFinish must be called after the final ExecutorRun call and
 *	before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
 *	which should also omit ExecutorRun.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */
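/*
 * Illustrative call sequence (hypothetical caller, not part of this file):
 * a typical top-level caller such as a portal drives one execution cycle of
 * a QueryDesc as follows, with count = 0 meaning "run to completion":
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0);
 *		ExecutorFinish(queryDesc);
 *		ExecutorEnd(queryDesc);
 */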
#include "postgres.h"

#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/partition.h"
#include "commands/matview.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "foreign/fdwapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteManip.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rls.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"
/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
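/*
 * Sketch of how a loadable plugin might install one of these hooks
 * (hypothetical extension code, for illustration only; prev_ExecutorStart
 * and my_ExecutorStart are invented names, but pg_stat_statements uses this
 * same save-and-chain pattern):
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 */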
/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
						  Bitmapset *modifiedCols,
						  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
				  Plan *planTree);

/*
 * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 */
#define GetInsertedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
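/*
 * Usage sketch (illustrative; mirrors the calls made in ExecConstraints()
 * and ExecUpdateLockMode() below): the set of columns an UPDATE assigned to
 * is fetched from the result relation's range-table entry as
 *
 *		updatedCols = GetUpdatedColumns(resultRelInfo, estate);
 */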
/* end of local decls */
/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
 * only because some places use QueryDescs for utility commands).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	if (ExecutorStart_hook)
		(*ExecutorStart_hook) (queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 *
	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
	 * would require (a) storing the combocid hash in shared memory, rather
	 * than synchronizing it just once at the start of parallelism, and (b) an
	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
	 * INSERT may have no such troubles, but we forbid it to simplify the
	 * checks.
	 *
	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
	 * against performing unsafe operations in parallel mode, but this gives a
	 * more user-friendly error message.
	 */
	if ((XactReadOnly || IsInParallelMode()) &&
		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:

			/*
			 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
			 * tuples
			 */
			if (queryDesc->plannedstmt->rowMarks != NIL ||
				queryDesc->plannedstmt->hasModifyingCTE)
				estate->es_output_cid = GetCurrentCommandId(true);

			/*
			 * A SELECT without modifying CTEs can't possibly queue triggers,
			 * so force skip-triggers mode.  This is just a marginal efficiency
			 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
			 * all that expensive, but we might as well do it.
			 */
			if (!queryDesc->plannedstmt->hasModifyingCTE)
				eflags |= EXEC_FLAG_SKIP_TRIGGERS;
			break;

		case CMD_INSERT:
		case CMD_UPDATE:
		case CMD_DELETE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	MemoryContextSwitchTo(oldcontext);
}
/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module.  It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.  Also note that the count limit is only applied to
 *		retrieved tuples, not for instance to those inserted/updated/deleted
 *		by a ModifyTable plan node.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, uint64 count)
{
	if (ExecutorRun_hook)
		(*ExecutorRun_hook) (queryDesc, direction, count);
	else
		standard_ExecutorRun(queryDesc, direction, count);
}
void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, uint64 count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* extract information from the query descriptor */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/* startup tuple receiver, if we will be emitting tuples */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/* run plan */
	if (!ScanDirectionIsNoMovement(direction))
		ExecutePlan(estate,
					queryDesc->planstate,
					queryDesc->plannedstmt->parallelModeNeeded,
					operation,
					sendTuples,
					count,
					direction,
					dest);

	/* shutdown tuple receiver, if we started it */
	if (sendTuples)
		(*dest->rShutdown) (dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}
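/*
 * Illustration (hypothetical caller): because the count limit only stops
 * tuple retrieval, a cursor-style caller can fetch a SELECT's results in
 * batches by calling ExecutorRun repeatedly on the same QueryDesc:
 *
 *		ExecutorRun(queryDesc, ForwardScanDirection, 100);	-- first 100 rows
 *		ExecutorRun(queryDesc, ForwardScanDirection, 100);	-- next 100 rows
 *
 * Each call resumes where the previous one stopped; note that es_processed
 * is reset to zero at the top of each call, so it counts only the tuples of
 * that call.
 */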
/* ----------------------------------------------------------------
 *		ExecutorFinish
 *
 *		This routine must be called after the last ExecutorRun call.
 *		It performs cleanup such as firing AFTER triggers.  It is
 *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
 *		include these actions in the total runtime.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorFinish is called.  Such a plugin would
 *		normally call standard_ExecutorFinish().
 * ----------------------------------------------------------------
 */
void
ExecutorFinish(QueryDesc *queryDesc)
{
	if (ExecutorFinish_hook)
		(*ExecutorFinish_hook) (queryDesc);
	else
		standard_ExecutorFinish(queryDesc);
}

void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	estate->es_finished = true;
}
/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	if (ExecutorEnd_hook)
		(*ExecutorEnd_hook) (queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* Switch into per-query memory context to run ExecEndPlan */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/* Must switch out of context before destroying it */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}
/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* rescan plan */
	ExecReScan(queryDesc->planstate);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  Otherwise, throws an appropriate
 * error if ereport_on_violation is true, or simply returns false otherwise.
 *
 * Note that this does NOT address row-level security policies (aka: RLS).  If
 * rows will be returned to the user as a result of this permission check
 * passing, then RLS also needs to be consulted (and check_enable_rls()).
 * See rewrite/rowsecurity.c.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *l;
	bool		result = true;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		result = ExecCheckRTEPerms(rte);
		if (!result)
		{
			Assert(rte->rtekind == RTE_RELATION);
			if (ereport_on_violation)
				aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
							   get_rel_name(rte->relid));
			return false;
		}
	}

	if (ExecutorCheckPerms_hook)
		result = (*ExecutorCheckPerms_hook) (rangeTable,
											 ereport_on_violation);
	return result;
}
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/* No work if requiredPerms is empty. */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		int			col = -1;

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
			{
				/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
				AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

				if (attno == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, attno, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
		}

		/*
		 * Basically the same for the mod columns, for both INSERT and UPDATE
		 * privilege as specified by remainingPerms.
		 */
		if (remainingPerms & ACL_INSERT &&
			!ExecCheckRTEPermsModified(relOid, userid, rte->insertedCols,
									   ACL_INSERT))
			return false;

		if (remainingPerms & ACL_UPDATE &&
			!ExecCheckRTEPermsModified(relOid, userid, rte->updatedCols,
									   ACL_UPDATE))
			return false;
	}
	return true;
}
/*
 * ExecCheckRTEPermsModified
 *		Check INSERT or UPDATE access permissions for a single RTE (these
 *		are processed uniformly).
 */
static bool
ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
						  AclMode requiredPerms)
{
	int			col = -1;

	/*
	 * When the query doesn't explicitly update any columns, allow the query
	 * if we have permission on any column of the rel.  This is to handle
	 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
	 */
	if (bms_is_empty(modifiedCols))
	{
		if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
									  ACLMASK_ANY) != ACLCHECK_OK)
			return false;
	}

	while ((col = bms_next_member(modifiedCols, col)) >= 0)
	{
		/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
		AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

		if (attno == InvalidAttrNumber)
		{
			/* whole-row reference can't happen here */
			elog(ERROR, "whole-row update is not implemented");
		}
		else
		{
			if (pg_attribute_aclcheck(relOid, attno, userid,
									  requiredPerms) != ACLCHECK_OK)
				return false;
		}
	}
	return true;
}
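/*
 * Example of the attribute-number encoding used in the loops above:
 * FirstLowInvalidHeapAttributeNumber is a small negative constant (defined
 * in access/sysattr.h), so a user column with attnum N is stored in the
 * bitmapset as N - FirstLowInvalidHeapAttributeNumber.  This shifts system
 * attributes (negative attnums) and the whole-row reference (attnum 0,
 * i.e. InvalidAttrNumber) into the non-negative range a Bitmapset can hold.
 */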
/*
 * Check that the query does not imply any writes to non-temp tables;
 * unless we're in parallel mode, in which case don't even allow writes
 * to temp tables.
 *
 * Note: in a Hot Standby slave this would need to reject writes to temp
 * tables just as we do in parallel mode; but an HS slave can't have created
 * any temp tables in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * Fail if write permissions are requested in parallel mode for any table
	 * (temp or non-temp); otherwise fail only for non-temp tables.
	 */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
	}

	if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
		PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
}
/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/* Do permissions checks */
	ExecCheckRTPerms(rangeTable, true);

	/* initialize the node's execution state */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);

			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  NULL,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;
	}
	else
	{
		/* if no result relation, then set state appropriately */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}

	/*
	 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* get relation's OID (will produce InvalidOid if subquery) */
		relid = getrelid(rc->rti, rangeTable);

		/*
		 * If you change the conditions under which rel locks are acquired
		 * here, be sure to adjust ExecOpenScanRelation to match.
		 */
		switch (rc->markType)
		{
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_NOKEYEXCLUSIVE:
			case ROW_MARK_SHARE:
			case ROW_MARK_KEYSHARE:
				relation = heap_open(relid, RowShareLock);
				break;
			case ROW_MARK_REFERENCE:
				relation = heap_open(relid, AccessShareLock);
				break;
			case ROW_MARK_COPY:
				/* no physical table access is required */
				relation = NULL;
				break;
			default:
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */
				break;
		}

		/* Check that relation is a legal target for marking */
		if (relation)
			CheckValidRowMarkRel(relation, rc->markType);

		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->relid = relid;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->rowmarkId = rc->rowmarkId;
		erm->markType = rc->markType;
		erm->strength = rc->strength;
		erm->waitPolicy = rc->waitPolicy;
		erm->ermActive = false;
		ItemPointerSetInvalid(&(erm->curCtid));
		erm->ermExtra = NULL;
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/* Initialize the executor's tuple table to empty. */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.  If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/* Get the tuple descriptor describing the type of tuples to return. */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   tupType->tdhasoid,
								   ExecInitExtraTupleSlot(estate));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
/*
 * CheckValidResultRel
 *		Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(Relation resultRel, CmdType operation)
{
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView, except
			 * that we omit errdetail because we haven't got the information
			 * handy (and given that we really shouldn't get here anyway, it's
			 * not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(resultRel, false);
			switch (operation)
			{
				case CMD_INSERT:
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
/*
 * Check that a proposed rowmark target relation is a legal target
 *
 * In most cases parser and/or planner should have noticed this already, but
 * they don't cover all cases.
 */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}
/*
 * InitResultRelInfo
 *		Initialize ResultRelInfo data for one result relation
 *
 * Caution: before Postgres 9.1, this function included the relkind checking
 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
 * appropriate.  Be sure callers cover those needs.
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  Relation partition_root,
				  int instrument_options)
{
	List	   *partition_check = NIL;

	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (List **)
			palloc0(n * sizeof(List *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;

	/*
	 * If partition_root has been specified, that means we are building the
	 * ResultRelInfo for one of its leaf partitions.  In that case, we need
	 * *not* initialize the leaf partition's constraint, but rather the
	 * partition_root's (if any).  We must do that explicitly like this,
	 * because implicit partition constraints are not inherited like user-
	 * defined constraints and would fail to be enforced by ExecConstraints()
	 * after a tuple is routed to a leaf partition.
	 */
	if (partition_root)
	{
		/*
		 * Root table itself may or may not be a partition; partition_check
		 * would be NIL in the latter case.
		 */
		partition_check = RelationGetPartitionQual(partition_root);

		/*
		 * This is not our own partition constraint, but rather an ancestor's.
		 * So any Vars in it bear the ancestor's attribute numbers.  We must
		 * switch them to our own.
		 */
		if (partition_check != NIL)
			partition_check = map_partition_varattnos(partition_check,
													  resultRelationDesc,
													  partition_root);
	}
	else
		partition_check = RelationGetPartitionQual(resultRelationDesc);

	resultRelInfo->ri_PartitionCheck = partition_check;
	resultRelInfo->ri_PartitionRoot = partition_root;
}
/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
	ResultRelInfo *rInfo;
	int			nr;
	ListCell   *l;
	Relation	rel;
	MemoryContext oldcontext;

	/* First, search through the query result relations */
	rInfo = estate->es_result_relations;
	nr = estate->es_num_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}
	/* Nope, but maybe we already made an extra ResultRelInfo for it */
	foreach(l, estate->es_trig_target_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, so we need a new one */

	/*
	 * Open the target relation's relcache entry.  We assume that an
	 * appropriate lock is still held by the backend from whenever the trigger
	 * event got queued, so we need take no new lock here.  Also, we need not
	 * recheck the relkind, so no need for CheckValidResultRel.
	 */
	rel = heap_open(relid, NoLock);

	/* Make the new entry in the right context. */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
	rInfo = makeNode(ResultRelInfo);
	InitResultRelInfo(rInfo,
					  rel,
					  0,		/* dummy rangetable index */
					  NULL,
					  estate->es_instrument);
	estate->es_trig_target_relations =
		lappend(estate->es_trig_target_relations, rInfo);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Currently, we don't need any index information in ResultRelInfos used
	 * only for triggers, so no need to call ExecOpenIndices.
	 */

	return rInfo;
}
/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * CREATE TABLE AS is even uglier, because we don't have the target relation's
 * descriptor available when this code runs; we have to look aside at the
 * flags passed to ExecutorStart().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	ResultRelInfo *ri = planstate->state->es_result_relation_info;

	if (ri != NULL)
	{
		Relation	rel = ri->ri_RelationDesc;

		if (rel != NULL)
		{
			*hasoids = rel->rd_rel->relhasoids;
			return true;
		}
	}

	if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
	{
		*hasoids = true;
		return true;
	}
	if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
	{
		*hasoids = false;
		return true;
	}

	return false;
}
/* ----------------------------------------------------------------
 *		ExecPostprocessPlan
 *
 *		Give plan nodes a final chance to execute before shutdown
 * ----------------------------------------------------------------
 */
static void
ExecPostprocessPlan(EState *estate)
{
	ListCell   *lc;

	/* Make sure nodes run forward. */
	estate->es_direction = ForwardScanDirection;

	/*
	 * Run any secondary ModifyTable nodes to completion, in case the main
	 * query did not fetch all rows from them.  (We do this to ensure that
	 * such nodes have predictable results.)
	 */
	foreach(lc, estate->es_auxmodifytables)
	{
		PlanState  *ps = (PlanState *) lfirst(lc);

		for (;;)
		{
			TupleTableSlot *slot;

			/* Reset the per-output-tuple exprcontext each time */
			ResetPerTupleExprContext(estate);

			slot = ExecProcNode(ps);

			if (TupIsNull(slot))
				break;
		}
	}
}
/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/* shut down the node-type-specific query processing */
	ExecEndNode(planstate);

	/* for subplans too */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor's tuple table.  Actually we only care about
	 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
	 * the TupleTableSlots, since the containing memory context is about to go
	 * away anyway.
	 */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/* likewise close any trigger target relations */
	foreach(l, estate->es_trig_target_relations)
	{
		resultRelInfo = (ResultRelInfo *) lfirst(l);
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/*
	 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
	 * locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = (ExecRowMark *) lfirst(l);

		if (erm->relation)
			heap_close(erm->relation, NoLock);
	}
}
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have retrieved 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	TupleTableSlot *slot;
	uint64		current_tuple_count;

	/* initialize local variables */
	current_tuple_count = 0;

	/* Set the direction. */
	estate->es_direction = direction;

	/*
	 * If a tuple count was supplied, we must force the plan to run without
	 * parallelism, because we might exit early.  Also disable parallelism
	 * when writing into a relation, because no database changes are allowed
	 * in parallel mode.
	 */
	if (numberTuples || dest->mydest == DestIntoRel)
		use_parallel_mode = false;
	if (use_parallel_mode)
		EnterParallelMode();

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/* Execute the plan and obtain a tuple */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
		{
			/* Allow nodes to release or shut down resources. */
			(void) ExecShutdownNode(planstate);
			break;
		}

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
		{
			/*
			 * If we are not able to send the tuple, we assume the destination
			 * has closed and no more tuples can be sent. If that's the case,
			 * end the loop.
			 */
			if (!((*dest->receiveSlot) (slot, dest)))
				break;
		}

		/*
		 * Count tuples processed, if this is a SELECT.  (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	if (use_parallel_mode)
		ExitParallelMode();
}
/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 *
 * Returns NULL if OK, else name of failed check constraint
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL specifies that a NULL result from a constraint expression
		 * is not to be treated as a failure.  Therefore, tell ExecQual to
		 * return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
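/*
 * For example (standard SQL semantics, shown for illustration): given
 *
 *		CREATE TABLE t (price numeric CHECK (price > 0));
 *		INSERT INTO t VALUES (NULL);
 *
 * the INSERT succeeds, because the CHECK expression evaluates to NULL and a
 * NULL constraint result is treated as success, per the note above.
 */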
/*
 * ExecPartitionCheck --- check that tuple meets the partition constraint.
 *
 * Note: This is called *iff* resultRelInfo is the main target table.
 */
static bool
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
				   EState *estate)
{
	ExprContext *econtext;

	/*
	 * If first time through, build expression state tree for the partition
	 * check expression.  Keep it in the per-query memory context so it will
	 * survive throughout the query.
	 */
	if (resultRelInfo->ri_PartitionCheckExpr == NULL)
	{
		List	   *qual = resultRelInfo->ri_PartitionCheck;

		resultRelInfo->ri_PartitionCheckExpr = (List *)
			ExecPrepareExpr((Expr *) qual, estate);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/*
	 * As in case of the catalogued constraints, we treat a NULL result as
	 * success here, not a failure.
	 */
	return ExecQual(resultRelInfo->ri_PartitionCheckExpr, econtext, true);
}
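/*
 * For instance (illustrative SQL): for a list partition created with
 *
 *		CREATE TABLE p1 PARTITION OF p FOR VALUES IN (1, 2);
 *
 * the implicit partition constraint requires the partition key to be 1 or 2;
 * a tuple failing that test is rejected by ExecConstraints() below.
 */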
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints, as well as
 * the partition constraint, if any.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing,
 * while 'orig_slot' contains the original tuple to be shown in the message,
 * if an error occurs.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, TupleTableSlot *orig_slot,
				EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	TupleConstr *constr = tupdesc->constr;
	Bitmapset  *modifiedCols;
	Bitmapset  *insertedCols;
	Bitmapset  *updatedCols;

	Assert(constr || resultRelInfo->ri_PartitionCheck);

	if (constr && constr->has_not_null)
	{
		int			natts = tupdesc->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			if (tupdesc->attrs[attrChk - 1]->attnotnull &&
				slot_attisnull(slot, attrChk))
			{
				char	   *val_desc;
				Relation	orig_rel = rel;
				TupleDesc	orig_tupdesc = tupdesc;

				/*
				 * choose the correct relation to build val_desc from the
				 * tuple contained in orig_slot
				 */
				if (resultRelInfo->ri_PartitionRoot)
				{
					rel = resultRelInfo->ri_PartitionRoot;
					tupdesc = RelationGetDescr(rel);
				}

				insertedCols = GetInsertedColumns(resultRelInfo, estate);
				updatedCols = GetUpdatedColumns(resultRelInfo, estate);
				modifiedCols = bms_union(insertedCols, updatedCols);
				val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
														 orig_slot,
														 tupdesc,
														 modifiedCols,
														 64);

				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
								NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)),
						 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
						 errtablecol(orig_rel, attrChk)));
			}
		}
	}

	if (constr && constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
		{
			char	   *val_desc;
			Relation	orig_rel = rel;

			/* See the comment above. */
			if (resultRelInfo->ri_PartitionRoot)
			{
				rel = resultRelInfo->ri_PartitionRoot;
				tupdesc = RelationGetDescr(rel);
			}

			insertedCols = GetInsertedColumns(resultRelInfo, estate);
			updatedCols = GetUpdatedColumns(resultRelInfo, estate);
			modifiedCols = bms_union(insertedCols, updatedCols);
			val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
													 orig_slot,
													 tupdesc,
													 modifiedCols,
													 64);
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(orig_rel), failed),
					 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
					 errtableconstraint(orig_rel, failed)));
		}
	}

	if (resultRelInfo->ri_PartitionCheck &&
		!ExecPartitionCheck(resultRelInfo, slot, estate))
	{
		char	   *val_desc;
		Relation	orig_rel = rel;

		/* See the comment above. */
		if (resultRelInfo->ri_PartitionRoot)
		{
			rel = resultRelInfo->ri_PartitionRoot;
			tupdesc = RelationGetDescr(rel);
		}

		insertedCols = GetInsertedColumns(resultRelInfo, estate);
		updatedCols = GetUpdatedColumns(resultRelInfo, estate);
		modifiedCols = bms_union(insertedCols, updatedCols);
		val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
												 orig_slot,
												 tupdesc,
												 modifiedCols,
												 64);
		ereport(ERROR,
				(errcode(ERRCODE_CHECK_VIOLATION),
				 errmsg("new row for relation \"%s\" violates partition constraint",
						RelationGetRelationName(orig_rel)),
				 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
	}
}
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row-level security policies).  See ExecInsert()
 * and ExecUpdate().
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* Check each of the constraints */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.  We
		 * need ExecQual to return FALSE for NULL to handle the view case (the
		 * opposite of what we do above for CHECK constraints).
		 */
		if (!ExecQual((List *) wcoExpr, econtext, false))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;
			Bitmapset  *insertedCols;
			Bitmapset  *updatedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple, as that depends on
					 * the USING policy.
					 */
				case WCO_VIEW_CHECK:
					insertedCols = GetInsertedColumns(resultRelInfo, estate);
					updatedCols = GetUpdatedColumns(resultRelInfo, estate);
					modifiedCols = bms_union(insertedCols, updatedCols);
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
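/*
 * Example of the view case (ordinary SQL, for illustration):
 *
 *		CREATE VIEW pos AS SELECT * FROM t WHERE a > 0 WITH CHECK OPTION;
 *		INSERT INTO pos VALUES (-1);
 *
 * The new row would not be visible through the view, so its qual evaluates
 * to false here and a WCO_VIEW_CHECK violation is reported.
 */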
2028 * ExecBuildSlotValueDescription -- construct a string representing a tuple
2030 * This is intentionally very similar to BuildIndexValueDescription, but
2031 * unlike that function, we truncate long field values (to at most maxfieldlen
2032 * bytes). That seems necessary here since heap field values could be very
2033 * long, whereas index entries typically aren't so wide.
2035 * Also, unlike the case with index entries, we need to be prepared to ignore
2036 * dropped columns. We used to use the slot's tuple descriptor to decode the
2037 * data, but the slot's descriptor doesn't identify dropped columns, so we
2038 * now need to be passed the relation's descriptor.
2040 * Note that, like BuildIndexValueDescription, if the user does not have
2041 * permission to view any of the columns involved, a NULL is returned. Unlike
2042 * BuildIndexValueDescription, if the user has access to view a subset of the
2043 * column involved, that subset will be returned with a key identifying which
2047 ExecBuildSlotValueDescription(Oid reloid,
2048 TupleTableSlot *slot,
2050 Bitmapset *modifiedCols,
2054 StringInfoData collist;
2055 bool write_comma = false;
2056 bool write_comma_collist = false;
2058 AclResult aclresult;
2059 bool table_perm = false;
2060 bool any_perm = false;
2063 * Check if RLS is enabled and should be active for the relation; if so,
 * then don't return anything.  Otherwise, go through normal permission
 * checks.
2067 if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2070 initStringInfo(&buf);
2072 appendStringInfoChar(&buf, '(');
2075 * Check if the user has permissions to see the row. Table-level SELECT
2076 * allows access to all columns. If the user does not have table-level
2077 * SELECT then we check each column and include those the user has SELECT
 * rights on.  Additionally, we always include columns the user provided
 * data for.
2081 aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2082 if (aclresult != ACLCHECK_OK)
2084 /* Set up the buffer for the column list */
2085 initStringInfo(&collist);
2086 appendStringInfoChar(&collist, '(');
2089 table_perm = any_perm = true;
2091 /* Make sure the tuple is fully deconstructed */
2092 slot_getallattrs(slot);
2094 for (i = 0; i < tupdesc->natts; i++)
2096 bool column_perm = false;
2100 /* ignore dropped columns */
if (tupdesc->attrs[i]->attisdropped)
	continue;

/*
 * No table-level SELECT, so we need to make sure they either have
 * SELECT rights on the column or that they have provided the data
 * for the column.  If not, omit this column from the error message.
 */
2112 aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
2113 GetUserId(), ACL_SELECT);
2114 if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
2115 modifiedCols) || aclresult == ACLCHECK_OK)
2117 column_perm = any_perm = true;
2119 if (write_comma_collist)
2120 appendStringInfoString(&collist, ", ");
2122 write_comma_collist = true;
2124 appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
2128 if (table_perm || column_perm)
2130 if (slot->tts_isnull[i])
2137 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
2138 &foutoid, &typisvarlena);
2139 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2143 appendStringInfoString(&buf, ", ");
2147 /* truncate if needed */
2148 vallen = strlen(val);
2149 if (vallen <= maxfieldlen)
2150 appendStringInfoString(&buf, val);
2153 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2154 appendBinaryStringInfo(&buf, val, vallen);
2155 appendStringInfoString(&buf, "...");
/* If we end up with zero columns being returned, then return NULL. */
if (!any_perm)
	return NULL;

appendStringInfoChar(&buf, ')');
2168 appendStringInfoString(&collist, ") = ");
2169 appendStringInfoString(&collist, buf.data);
2171 return collist.data;
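/*
 * Hedged usage sketch (assumed local names; the message text is only a
 * placeholder): callers build the description once and guard errdetail
 * on NULL, since the user may lack permission to see any of the columns.
 */
#if 0
val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
										 slot,
										 RelationGetDescr(rel),
										 modifiedCols,
										 64);
ereport(ERROR,
		(errcode(ERRCODE_CHECK_VIOLATION),
		 errmsg("some constraint was violated"),
		 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
#endif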
2179 * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2180 * given ResultRelInfo
2183 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2186 Bitmapset *updatedCols;
2189 * Compute lock mode to use. If columns that are part of the key have not
 * been modified, then we can use a weaker lock, allowing for better
 * concurrency.
2193 updatedCols = GetUpdatedColumns(relinfo, estate);
2194 keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2195 INDEX_ATTR_BITMAP_KEY);
2197 if (bms_overlap(keyCols, updatedCols))
2198 return LockTupleExclusive;
2200 return LockTupleNoKeyExclusive;
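/*
 * Illustrative sketch (assumed caller locals, cf. ExecUpdate): the lock
 * mode computed above is what gets passed down to heap_lock_tuple when
 * locking the target row.
 */
#if 0
LockTupleMode lockmode = ExecUpdateLockMode(estate, resultRelInfo);
HTSU_Result test;

test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
					   lockmode, LockWaitBlock, false, &buffer, &hufd);
#endif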
2204 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
 * If there is no such struct, either return NULL or throw an error,
 * depending on missing_ok.
2209 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2213 foreach(lc, estate->es_rowMarks)
2215 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
if (erm->rti == rti)
	return erm;
2221 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2226 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2228 * Inputs are the underlying ExecRowMark struct and the targetlist of the
2229 * input plan node (not planstate node!). We need the latter to find out
2230 * the column numbers of the resjunk columns.
2233 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2235 ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2238 aerm->rowmark = erm;
2240 /* Look up the resjunk columns associated with this rowmark */
2241 if (erm->markType != ROW_MARK_COPY)
2243 /* need ctid for all methods other than COPY */
2244 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist, resname);
2247 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2248 elog(ERROR, "could not find junk %s column", resname);
2252 /* need wholerow if COPY */
2253 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist, resname);
2256 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2257 elog(ERROR, "could not find junk %s column", resname);
2260 /* if child rel, need tableoid */
2261 if (erm->rti != erm->prti)
2263 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist, resname);
2266 if (!AttributeNumberIsValid(aerm->toidAttNo))
2267 elog(ERROR, "could not find junk %s column", resname);
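/*
 * Sketch of the assumed caller pattern (cf. nodeModifyTable.c): find the
 * ExecRowMark for a rangetable index, then build the aux struct from the
 * subplan's targetlist so the junk columns can be fetched at runtime.
 * The names rc and subplan are illustrative.
 */
#if 0
ExecRowMark *erm = ExecFindRowMark(estate, rc->rti, false);
ExecAuxRowMark *aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
#endif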
2275 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2276 * process the updated version under READ COMMITTED rules.
2278 * See backend/executor/README for some info about how this works.
2283 * Check a modified tuple to see if we want to process its updated version
2284 * under READ COMMITTED rules.
2286 * estate - outer executor state data
2287 * epqstate - state for EvalPlanQual rechecking
2288 * relation - table containing tuple
2289 * rti - rangetable index of table containing tuple
2290 * lockmode - requested tuple lock mode
2291 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2292 * priorXmax - t_xmax from the outdated tuple
2294 * *tid is also an output parameter: it's modified to hold the TID of the
2295 * latest version of the tuple (note this may be changed even on failure)
2297 * Returns a slot containing the new candidate update/delete tuple, or
2298 * NULL if we determine we shouldn't process the row.
2300 * Note: properly, lockmode should be declared as enum LockTupleMode,
2301 * but we use "int" to avoid having to include heapam.h in executor.h.
2304 EvalPlanQual(EState *estate, EPQState *epqstate,
2305 Relation relation, Index rti, int lockmode,
2306 ItemPointer tid, TransactionId priorXmax)
2308 TupleTableSlot *slot;
2309 HeapTuple copyTuple;
 * Get and lock the updated version of the row; if that fails, return NULL.
 */
copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
							  tid, priorXmax);

if (copyTuple == NULL)
	return NULL;

/*
 * For UPDATE/DELETE we have to return the tid of the actual row we're
 * executing EPQ for.
 */
2326 *tid = copyTuple->t_self;
2329 * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2331 EvalPlanQualBegin(epqstate, estate);
 * Free old test tuple, if any, and store new tuple where relation's scan
 * node will see it.
2337 EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2340 * Fetch any non-locked source rows
2342 EvalPlanQualFetchRowMarks(epqstate);
2345 * Run the EPQ query. We assume it will return at most one tuple.
2347 slot = EvalPlanQualNext(epqstate);
2350 * If we got a tuple, force the slot to materialize the tuple so that it
2351 * is not dependent on any local state in the EPQ query (in particular,
2352 * it's highly likely that the slot contains references to any pass-by-ref
2353 * datums that may be present in copyTuple). As with the next step, this
2354 * is to guard against early re-use of the EPQ query.
2356 if (!TupIsNull(slot))
2357 (void) ExecMaterializeSlot(slot);
2360 * Clear out the test tuple. This is needed in case the EPQ query is
2361 * re-used to test a tuple for a different relation. (Not clear that can
2362 * really happen, but let's be safe.)
2364 EvalPlanQualSetTuple(epqstate, rti, NULL);
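/*
 * Sketch of the READ COMMITTED retry pattern (assumed names, roughly as
 * in ExecUpdate): when heap_update reports HeapTupleUpdated, recheck the
 * newest row version and, if it still qualifies, loop back and retry.
 */
#if 0
epqslot = EvalPlanQual(estate, epqstate, relation,
					   resultRelInfo->ri_RangeTableIndex, lockmode,
					   &hufd.ctid, hufd.xmax);
if (!TupIsNull(epqslot))
{
	*tupleid = hufd.ctid;		/* retry against the surviving version */
	slot = ExecFilterJunk(junkfilter, epqslot);
	goto lreplace;
}
/* otherwise the row is gone or no longer qualifies; skip it */
#endif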
2370 * Fetch a copy of the newest version of an outdated tuple
2372 * estate - executor state data
2373 * relation - table containing tuple
2374 * lockmode - requested tuple lock mode
2375 * wait_policy - requested lock wait policy
2376 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2377 * priorXmax - t_xmax from the outdated tuple
2379 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
 * that there is no newest version (ie, the row was deleted, not updated).
 * We also return NULL if the tuple is locked and the wait policy is to skip
 * such tuples.
2384 * If successful, we have locked the newest tuple version, so caller does not
2385 * need to worry about it changing anymore.
2387 * Note: properly, lockmode should be declared as enum LockTupleMode,
2388 * but we use "int" to avoid having to include heapam.h in executor.h.
2391 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2392 LockWaitPolicy wait_policy,
2393 ItemPointer tid, TransactionId priorXmax)
2395 HeapTuple copyTuple = NULL;
2396 HeapTupleData tuple;
2397 SnapshotData SnapshotDirty;
2400 * fetch target tuple
2402 * Loop here to deal with updated or busy tuples
2404 InitDirtySnapshot(SnapshotDirty);
2405 tuple.t_self = *tid;
2410 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2413 HeapUpdateFailureData hufd;
2416 * If xmin isn't what we're expecting, the slot must have been
2417 * recycled and reused for an unrelated tuple. This implies that
2418 * the latest version of the row was deleted, so we need do
2419 * nothing. (Should be safe to examine xmin without getting
2420 * buffer's content lock. We assume reading a TransactionId to be
2421 * atomic, and Xmin never changes in an existing tuple, except to
2422 * invalid or frozen, and neither of those can match priorXmax.)
if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
						 priorXmax))
{
	ReleaseBuffer(buffer);
	return NULL;
}
2431 /* otherwise xmin should not be dirty... */
2432 if (TransactionIdIsValid(SnapshotDirty.xmin))
2433 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
 * If the tuple is being updated by another transaction, then we have to
2437 * wait for its commit/abort, or die trying.
2439 if (TransactionIdIsValid(SnapshotDirty.xmax))
2441 ReleaseBuffer(buffer);
2442 switch (wait_policy)
2445 XactLockTableWait(SnapshotDirty.xmax,
relation, &tuple.t_self,
XLTW_FetchUpdated);
2450 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2451 return NULL; /* skip instead of waiting */
2454 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
ereport(ERROR,
		(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2457 errmsg("could not obtain lock on row in relation \"%s\"",
2458 RelationGetRelationName(relation))));
2461 continue; /* loop back to repeat heap_fetch */
2465 * If tuple was inserted by our own transaction, we have to check
2466 * cmin against es_output_cid: cmin >= current CID means our
2467 * command cannot see the tuple, so we should ignore it. Otherwise
2468 * heap_lock_tuple() will throw an error, and so would any later
2469 * attempt to update or delete the tuple. (We need not check cmax
2470 * because HeapTupleSatisfiesDirty will consider a tuple deleted
2471 * by our transaction dead, regardless of cmax.) We just checked
2472 * that priorXmax == xmin, so we can test that variable instead of
2473 * doing HeapTupleHeaderGetXmin again.
2475 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2476 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
{
	ReleaseBuffer(buffer);
	return NULL;
}
2483 * This is a live tuple, so now try to lock it.
2485 test = heap_lock_tuple(relation, &tuple,
2486 estate->es_output_cid,
2487 lockmode, wait_policy,
2488 false, &buffer, &hufd);
2489 /* We now have two pins on the buffer, get rid of one */
2490 ReleaseBuffer(buffer);
2494 case HeapTupleSelfUpdated:
2497 * The target tuple was already updated or deleted by the
2498 * current command, or by a later command in the current
2499 * transaction. We *must* ignore the tuple in the former
2500 * case, so as to avoid the "Halloween problem" of
2501 * repeated update attempts. In the latter case it might
2502 * be sensible to fetch the updated tuple instead, but
2503 * doing so would require changing heap_update and
2504 * heap_delete to not complain about updating "invisible"
2505 * tuples, which seems pretty scary (heap_lock_tuple will
2506 * not complain, but few callers expect
2507 * HeapTupleInvisible, and we're not one of them). So for
2508 * now, treat the tuple as deleted and do not process.
ReleaseBuffer(buffer);
return NULL;
2513 case HeapTupleMayBeUpdated:
2514 /* successfully locked */
2517 case HeapTupleUpdated:
2518 ReleaseBuffer(buffer);
2519 if (IsolationUsesXactSnapshot())
2521 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2522 errmsg("could not serialize access due to concurrent update")));
2524 /* Should not encounter speculative tuple on recheck */
2525 Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2526 if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2528 /* it was updated, so look at the updated version */
2529 tuple.t_self = hufd.ctid;
2530 /* updated row should have xmin matching this xmax */
2531 priorXmax = hufd.xmax;
/* tuple was deleted, so give up */
return NULL;
2537 case HeapTupleWouldBlock:
ReleaseBuffer(buffer);
return NULL;
2541 case HeapTupleInvisible:
2542 elog(ERROR, "attempted to lock invisible tuple");
default:
	ReleaseBuffer(buffer);
	elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2548 return NULL; /* keep compiler quiet */
2552 * We got tuple - now copy it for use by recheck query.
2554 copyTuple = heap_copytuple(&tuple);
2555 ReleaseBuffer(buffer);
2560 * If the referenced slot was actually empty, the latest version of
2561 * the row must have been deleted, so we need do nothing.
if (tuple.t_data == NULL)
{
	ReleaseBuffer(buffer);
	return NULL;
}
2570 * As above, if xmin isn't what we're expecting, do nothing.
if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
						 priorXmax))
{
	ReleaseBuffer(buffer);
	return NULL;
}
2580 * If we get here, the tuple was found but failed SnapshotDirty.
2581 * Assuming the xmin is either a committed xact or our own xact (as it
2582 * certainly should be if we're trying to modify the tuple), this must
2583 * mean that the row was updated or deleted by either a committed xact
2584 * or our own xact. If it was deleted, we can ignore it; if it was
 * updated then chain up to the next version and repeat the whole
 * process.
2588 * As above, it should be safe to examine xmax and t_ctid without the
2589 * buffer content lock, because they can't be changing.
if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
{
	/* deleted, so forget about it */
	ReleaseBuffer(buffer);
	return NULL;
}
2598 /* updated, so look at the updated row */
2599 tuple.t_self = tuple.t_data->t_ctid;
2600 /* updated row should have xmin matching this xmax */
2601 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2602 ReleaseBuffer(buffer);
2603 /* loop back to fetch next in chain */
 * Return the copied tuple
 */
return copyTuple;
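/*
 * Sketch (an assumption for illustration; newtup/oldtup are hypothetical
 * names): the t_ctid chain invariant the loop above relies on -- each
 * outdated version points at its successor, whose xmin must match the
 * predecessor's update xmax, or the chain is considered broken.
 */
#if 0
Assert(TransactionIdEquals(HeapTupleHeaderGetXmin(newtup->t_data),
						   HeapTupleHeaderGetUpdateXid(oldtup->t_data)));
#endif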
2613 * EvalPlanQualInit -- initialize during creation of a plan state node
2614 * that might need to invoke EPQ processing.
2616 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2617 * with EvalPlanQualSetPlan.
2620 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2621 Plan *subplan, List *auxrowmarks, int epqParam)
2623 /* Mark the EPQ state inactive */
2624 epqstate->estate = NULL;
2625 epqstate->planstate = NULL;
2626 epqstate->origslot = NULL;
2627 /* ... and remember data that EvalPlanQualBegin will need */
2628 epqstate->plan = subplan;
2629 epqstate->arowMarks = auxrowmarks;
2630 epqstate->epqParam = epqParam;
2634 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2636 * We need this so that ModifyTable can deal with multiple subplans.
2639 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2641 /* If we have a live EPQ query, shut it down */
2642 EvalPlanQualEnd(epqstate);
2643 /* And set/change the plan pointer */
2644 epqstate->plan = subplan;
2645 /* The rowmarks depend on the plan, too */
2646 epqstate->arowMarks = auxrowmarks;
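/*
 * Sketch (assumed field names, cf. ExecModifyTable): when a ModifyTable
 * node advances to its next subplan, it repoints the shared EPQ state at
 * that subplan's plan tree and rowmarks.
 */
#if 0
EvalPlanQualSetPlan(&node->mt_epqstate, subplan->plan,
					node->mt_arowmarks[node->mt_whichplan]);
#endif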
2650 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2652 * NB: passed tuple must be palloc'd; it may get freed later
2655 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2657 EState *estate = epqstate->estate;
 * free old test tuple, if any, and store new tuple where relation's scan
 * node will see it
2665 if (estate->es_epqTuple[rti - 1] != NULL)
2666 heap_freetuple(estate->es_epqTuple[rti - 1]);
2667 estate->es_epqTuple[rti - 1] = tuple;
2668 estate->es_epqTupleSet[rti - 1] = true;
2672 * Fetch back the current test tuple (if any) for the specified RTI
2675 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2677 EState *estate = epqstate->estate;
2681 return estate->es_epqTuple[rti - 1];
2685 * Fetch the current row values for any non-locked relations that need
2686 * to be scanned by an EvalPlanQual operation. origslot must have been set
2687 * to contain the current result row (top-level row) that we need to recheck.
2690 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2694 Assert(epqstate->origslot != NULL);
2696 foreach(l, epqstate->arowMarks)
2698 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2699 ExecRowMark *erm = aerm->rowmark;
2702 HeapTupleData tuple;
2704 if (RowMarkRequiresRowShareLock(erm->markType))
2705 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2707 /* clear any leftover test tuple for this rel */
2708 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2710 /* if child rel, must check whether it produced this row */
2711 if (erm->rti != erm->prti)
datum = ExecGetJunkAttribute(epqstate->origslot,
							 aerm->toidAttNo,
							 &isNull);
2718 /* non-locked rels could be on the inside of outer joins */
2721 tableoid = DatumGetObjectId(datum);
2723 Assert(OidIsValid(erm->relid));
2724 if (tableoid != erm->relid)
2726 /* this child is inactive right now */
2731 if (erm->markType == ROW_MARK_REFERENCE)
2733 HeapTuple copyTuple;
2735 Assert(erm->relation != NULL);
2737 /* fetch the tuple's ctid */
datum = ExecGetJunkAttribute(epqstate->origslot,
							 aerm->ctidAttNo,
							 &isNull);
2741 /* non-locked rels could be on the inside of outer joins */
2745 /* fetch requests on foreign tables must be passed to their FDW */
2746 if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2748 FdwRoutine *fdwroutine;
2749 bool updated = false;
2751 fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2752 /* this should have been checked already, but let's be safe */
2753 if (fdwroutine->RefetchForeignRow == NULL)
ereport(ERROR,
		(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2756 errmsg("cannot lock rows in foreign table \"%s\"",
2757 RelationGetRelationName(erm->relation))));
copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
										  erm,
										  datum,
										  &updated);
2762 if (copyTuple == NULL)
2763 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2766 * Ideally we'd insist on updated == false here, but that
2767 * assumes that FDWs can track that exactly, which they might
2768 * not be able to. So just ignore the flag.
2773 /* ordinary table, fetch the tuple */
2776 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
				false, NULL))
2779 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2781 /* successful, copy tuple */
2782 copyTuple = heap_copytuple(&tuple);
2783 ReleaseBuffer(buffer);
2787 EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
2793 Assert(erm->markType == ROW_MARK_COPY);
2795 /* fetch the whole-row Var for the relation */
datum = ExecGetJunkAttribute(epqstate->origslot,
							 aerm->wholeAttNo,
							 &isNull);
2799 /* non-locked rels could be on the inside of outer joins */
2802 td = DatumGetHeapTupleHeader(datum);
2804 /* build a temporary HeapTuple control structure */
tuple.t_len = HeapTupleHeaderGetDatumLength(td);
tuple.t_data = td;
2807 /* relation might be a foreign table, if so provide tableoid */
2808 tuple.t_tableOid = erm->relid;
2809 /* also copy t_ctid in case there's valid data there */
2810 tuple.t_self = td->t_ctid;
2812 /* copy and store tuple */
2813 EvalPlanQualSetTuple(epqstate, erm->rti,
2814 heap_copytuple(&tuple));
2820 * Fetch the next row (if any) from EvalPlanQual testing
2822 * (In practice, there should never be more than one row...)
2825 EvalPlanQualNext(EPQState *epqstate)
2827 MemoryContext oldcontext;
2828 TupleTableSlot *slot;
2830 oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2831 slot = ExecProcNode(epqstate->planstate);
MemoryContextSwitchTo(oldcontext);

return slot;
2838 * Initialize or reset an EvalPlanQual state tree
2841 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2843 EState *estate = epqstate->estate;
2847 /* First time through, so create a child EState */
2848 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2853 * We already have a suitable child EPQ tree, so just reset it.
2855 int rtsize = list_length(parentestate->es_range_table);
2856 PlanState *planstate = epqstate->planstate;
2858 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2860 /* Recopy current values of parent parameters */
2861 if (parentestate->es_plannedstmt->nParamExec > 0)
2863 int i = parentestate->es_plannedstmt->nParamExec;
2867 /* copy value if any, but not execPlan link */
2868 estate->es_param_exec_vals[i].value =
2869 parentestate->es_param_exec_vals[i].value;
2870 estate->es_param_exec_vals[i].isnull =
2871 parentestate->es_param_exec_vals[i].isnull;
2876 * Mark child plan tree as needing rescan at all scan nodes. The
2877 * first ExecProcNode will take care of actually doing the rescan.
2879 planstate->chgParam = bms_add_member(planstate->chgParam,
2880 epqstate->epqParam);
2885 * Start execution of an EvalPlanQual plan tree.
2887 * This is a cut-down version of ExecutorStart(): we copy some state from
2888 * the top-level estate rather than initializing it fresh.
2891 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2895 MemoryContext oldcontext;
2898 rtsize = list_length(parentestate->es_range_table);
2900 epqstate->estate = estate = CreateExecutorState();
2902 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2905 * Child EPQ EStates share the parent's copy of unchanging state such as
2906 * the snapshot, rangetable, result-rel info, and external Param info.
2907 * They need their own copies of local state, including a tuple table,
2908 * es_param_exec_vals, etc.
2910 * The ResultRelInfo array management is trickier than it looks. We
2911 * create a fresh array for the child but copy all the content from the
2912 * parent. This is because it's okay for the child to share any
2913 * per-relation state the parent has already created --- but if the child
2914 * sets up any ResultRelInfo fields, such as its own junkfilter, that
2915 * state must *not* propagate back to the parent. (For one thing, the
2916 * pointed-to data is in a memory context that won't last long enough.)
2918 estate->es_direction = ForwardScanDirection;
2919 estate->es_snapshot = parentestate->es_snapshot;
2920 estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2921 estate->es_range_table = parentestate->es_range_table;
2922 estate->es_plannedstmt = parentestate->es_plannedstmt;
2923 estate->es_junkFilter = parentestate->es_junkFilter;
2924 estate->es_output_cid = parentestate->es_output_cid;
2925 if (parentestate->es_num_result_relations > 0)
2927 int numResultRelations = parentestate->es_num_result_relations;
2928 ResultRelInfo *resultRelInfos;
2930 resultRelInfos = (ResultRelInfo *)
2931 palloc(numResultRelations * sizeof(ResultRelInfo));
2932 memcpy(resultRelInfos, parentestate->es_result_relations,
2933 numResultRelations * sizeof(ResultRelInfo));
2934 estate->es_result_relations = resultRelInfos;
2935 estate->es_num_result_relations = numResultRelations;
2937 /* es_result_relation_info must NOT be copied */
2938 /* es_trig_target_relations must NOT be copied */
2939 estate->es_rowMarks = parentestate->es_rowMarks;
2940 estate->es_top_eflags = parentestate->es_top_eflags;
2941 estate->es_instrument = parentestate->es_instrument;
2942 /* es_auxmodifytables must NOT be copied */
2945 * The external param list is simply shared from parent. The internal
2946 * param workspace has to be local state, but we copy the initial values
2947 * from the parent, so as to have access to any param values that were
2948 * already set from other parts of the parent's plan tree.
2950 estate->es_param_list_info = parentestate->es_param_list_info;
2951 if (parentestate->es_plannedstmt->nParamExec > 0)
2953 int i = parentestate->es_plannedstmt->nParamExec;
2955 estate->es_param_exec_vals = (ParamExecData *)
2956 palloc0(i * sizeof(ParamExecData));
2959 /* copy value if any, but not execPlan link */
2960 estate->es_param_exec_vals[i].value =
2961 parentestate->es_param_exec_vals[i].value;
2962 estate->es_param_exec_vals[i].isnull =
2963 parentestate->es_param_exec_vals[i].isnull;
2968 * Each EState must have its own es_epqScanDone state, but if we have
2969 * nested EPQ checks they should share es_epqTuple arrays. This allows
2970 * sub-rechecks to inherit the values being examined by an outer recheck.
2972 estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2973 if (parentestate->es_epqTuple != NULL)
2975 estate->es_epqTuple = parentestate->es_epqTuple;
2976 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
2980 estate->es_epqTuple = (HeapTuple *)
2981 palloc0(rtsize * sizeof(HeapTuple));
2982 estate->es_epqTupleSet = (bool *)
2983 palloc0(rtsize * sizeof(bool));
2987 * Each estate also has its own tuple table.
2989 estate->es_tupleTable = NIL;
2992 * Initialize private state information for each SubPlan. We must do this
2993 * before running ExecInitNode on the main query tree, since
2994 * ExecInitSubPlan expects to be able to find these entries. Some of the
2995 * SubPlans might not be used in the part of the plan tree we intend to
 * run, but since it's not easy to tell which, we just initialize them
 * all.
2999 Assert(estate->es_subplanstates == NIL);
3000 foreach(l, parentestate->es_plannedstmt->subplans)
3002 Plan *subplan = (Plan *) lfirst(l);
3003 PlanState *subplanstate;
3005 subplanstate = ExecInitNode(subplan, estate, 0);
estate->es_subplanstates = lappend(estate->es_subplanstates,
								   subplanstate);
3011 * Initialize the private state information for all the nodes in the part
3012 * of the plan tree we need to run. This opens files, allocates storage
3013 * and leaves us ready to start processing tuples.
3015 epqstate->planstate = ExecInitNode(planTree, estate, 0);
3017 MemoryContextSwitchTo(oldcontext);
3021 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3022 * or if we are done with the current EPQ child.
3024 * This is a cut-down version of ExecutorEnd(); basically we want to do most
3025 * of the normal cleanup, but *not* close result relations (which we are
3026 * just sharing from the outer query). We do, however, have to close any
3027 * trigger target relations that got opened, since those are not shared.
3028 * (There probably shouldn't be any of the latter, but just in case...)
3031 EvalPlanQualEnd(EPQState *epqstate)
3033 EState *estate = epqstate->estate;
3034 MemoryContext oldcontext;
3038 return; /* idle, so nothing to do */
3040 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3042 ExecEndNode(epqstate->planstate);
3044 foreach(l, estate->es_subplanstates)
3046 PlanState *subplanstate = (PlanState *) lfirst(l);
3048 ExecEndNode(subplanstate);
3051 /* throw away the per-estate tuple table */
3052 ExecResetTupleTable(estate->es_tupleTable, false);
3054 /* close any trigger target relations attached to this EState */
3055 foreach(l, estate->es_trig_target_relations)
3057 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
3059 /* Close indices and then the relation itself */
3060 ExecCloseIndices(resultRelInfo);
3061 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
3064 MemoryContextSwitchTo(oldcontext);
3066 FreeExecutorState(estate);
3068 /* Mark EPQState idle */
3069 epqstate->estate = NULL;
3070 epqstate->planstate = NULL;
3071 epqstate->origslot = NULL;
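/*
 * Hedged lifecycle summary (illustrative only; variable names assumed):
 * the order in which a DML node is expected to drive the EPQ API above.
 */
#if 0
EvalPlanQualInit(&epqstate, estate, subplan, auxrowmarks, epqParam);

/* on a concurrent update, recheck the newest row version: */
slot = EvalPlanQual(estate, &epqstate, relation, rti, lockmode,
					&tid, priorXmax);

/* at plan shutdown: */
EvalPlanQualEnd(&epqstate);
#endif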
3075 * ExecSetupPartitionTupleRouting - set up information needed during
3076 * tuple routing for partitioned tables
3079 * 'pd' receives an array of PartitionDispatch objects with one entry for
3080 * every partitioned table in the partition tree
3081 * 'partitions' receives an array of ResultRelInfo objects with one entry for
3082 * every leaf partition in the partition tree
3083 * 'tup_conv_maps' receives an array of TupleConversionMap objects with one
 * entry for every leaf partition (required to convert an input tuple
 * based on the root table's rowtype to a leaf partition's rowtype after
 * tuple routing is done)
3087 * 'partition_tuple_slot' receives a standalone TupleTableSlot to be used
3088 * to manipulate any given leaf partition's rowtype after that partition
3089 * is chosen by tuple-routing.
3090 * 'num_parted' receives the number of partitioned tables in the partition
3091 * tree (= the number of entries in the 'pd' output array)
3092 * 'num_partitions' receives the number of leaf partitions in the partition
 * tree (= the number of entries in the 'partitions' and 'tup_conv_maps'
 * output arrays)
3096 * Note that all the relations in the partition tree are locked using the
3097 * RowExclusiveLock mode upon return from this function.
3100 ExecSetupPartitionTupleRouting(Relation rel,
3101 PartitionDispatch **pd,
3102 ResultRelInfo **partitions,
3103 TupleConversionMap ***tup_conv_maps,
3104 TupleTableSlot **partition_tuple_slot,
3105 int *num_parted, int *num_partitions)
3107 TupleDesc tupDesc = RelationGetDescr(rel);
3111 ResultRelInfo *leaf_part_rri;
3113 /* Get the tuple-routing information and lock partitions */
*pd = RelationGetPartitionDispatchInfo(rel, RowExclusiveLock, num_parted,
									   &leaf_parts);
3116 *num_partitions = list_length(leaf_parts);
3117 *partitions = (ResultRelInfo *) palloc(*num_partitions *
3118 sizeof(ResultRelInfo));
3119 *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions *
3120 sizeof(TupleConversionMap *));
3123 * Initialize an empty slot that will be used to manipulate tuples of any
3124 * given partition's rowtype. It is attached to the caller-specified node
 * (such as ModifyTableState) and released when the node finishes
 * processing.
3128 *partition_tuple_slot = MakeTupleTableSlot();
3130 leaf_part_rri = *partitions;
3132 foreach(cell, leaf_parts)
3135 TupleDesc part_tupdesc;
3138 * We locked all the partitions above including the leaf partitions.
 * Note that each of the relations in *partitions is eventually
3140 * closed by the caller.
3142 partrel = heap_open(lfirst_oid(cell), NoLock);
3143 part_tupdesc = RelationGetDescr(partrel);
3146 * Verify result relation is a valid target for the current operation.
3148 CheckValidResultRel(partrel, CMD_INSERT);
3151 * Save a tuple conversion map to convert a tuple routed to this
3152 * partition from the parent's type to the partition's.
3154 (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc,
3155 gettext_noop("could not convert row type"));
3157 InitResultRelInfo(leaf_part_rri,
3164 * Open partition indices (remember we do not support ON CONFLICT in
3165 * case of partitioned tables, so we do not need support information
3166 * for speculative insertion)
3168 if (leaf_part_rri->ri_RelationDesc->rd_rel->relhasindex &&
3169 leaf_part_rri->ri_IndexRelationDescs == NULL)
3170 ExecOpenIndices(leaf_part_rri, false);
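/*
 * Sketch of the assumed caller flow (cf. ExecInitModifyTable / CopyFrom):
 * set up routing once per statement, then locate the target partition for
 * each incoming row.  leaf_index and the surrounding locals are
 * illustrative names.
 */
#if 0
ExecSetupPartitionTupleRouting(rel, &pd, &partitions, &tup_conv_maps,
							   &partition_tuple_slot,
							   &num_parted, &num_partitions);

/* then, for each row to be inserted: */
leaf_index = ExecFindPartition(resultRelInfo, pd, slot, estate);
leaf_part_rri = &partitions[leaf_index];
#endif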
3178 * ExecFindPartition -- Find a leaf partition in the partition tree rooted
3179 * at parent, for the heap tuple contained in *slot
 * estate must be non-NULL; we'll need it to compute any expressions in the
 * partition key(s)
3184 * If no leaf partition is found, this routine errors out with the appropriate
3185 * error message, else it returns the leaf partition sequence number returned
3186 * by get_partition_for_tuple() unchanged.
3189 ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
3190 TupleTableSlot *slot, EState *estate)
3194 ExprContext *econtext = GetPerTupleExprContext(estate);
3196 econtext->ecxt_scantuple = slot;
3197 result = get_partition_for_tuple(pd, slot, estate, &failed_at);
3200 Relation rel = resultRelInfo->ri_RelationDesc;
3202 Bitmapset *insertedCols,
3205 TupleDesc tupDesc = RelationGetDescr(rel);
3207 insertedCols = GetInsertedColumns(resultRelInfo, estate);
3208 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
3209 modifiedCols = bms_union(insertedCols, updatedCols);
3210 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
3215 Assert(OidIsValid(failed_at));
ereport(ERROR,
		(errcode(ERRCODE_CHECK_VIOLATION),
3218 errmsg("no partition of relation \"%s\" found for row",
3219 get_rel_name(failed_at)),
3220 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));