1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
12 * These four procedures are the external interface to the executor.
13 * In each case, the query descriptor is required as an argument.
15 * ExecutorStart must be called at the beginning of execution of any
16 * query plan and ExecutorEnd must always be called at the end of
17 * execution of a plan (unless it is aborted due to error).
19 * ExecutorRun accepts direction and count arguments that specify whether
20 * the plan is to be executed forwards or backwards, and for how many tuples.
21 * In some cases ExecutorRun may be called multiple times to process all
22 * the tuples for a plan. It is also acceptable to stop short of executing
23 * the whole plan (but only if it is a SELECT).
25 * ExecutorFinish must be called after the final ExecutorRun call and
26 * before ExecutorEnd. This can be omitted only in the case of EXPLAIN,
27 * which should also omit ExecutorRun.
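 *
 *	A minimal sketch of the expected calling sequence (hypothetical caller,
 *	error-recovery paths omitted):
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0, true);
 *		ExecutorFinish(queryDesc);
 *		ExecutorEnd(queryDesc);
 *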
29 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
30 * Portions Copyright (c) 1994, Regents of the University of California
34 * src/backend/executor/execMain.c
36 *-------------------------------------------------------------------------
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "catalog/partition.h"
46 #include "catalog/pg_publication.h"
47 #include "commands/matview.h"
48 #include "commands/trigger.h"
49 #include "executor/execdebug.h"
50 #include "foreign/fdwapi.h"
51 #include "mb/pg_wchar.h"
52 #include "miscadmin.h"
53 #include "optimizer/clauses.h"
54 #include "parser/parsetree.h"
55 #include "rewrite/rewriteManip.h"
56 #include "storage/bufmgr.h"
57 #include "storage/lmgr.h"
58 #include "tcop/utility.h"
59 #include "utils/acl.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/rls.h"
63 #include "utils/ruleutils.h"
64 #include "utils/snapmgr.h"
65 #include "utils/tqual.h"
68 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
69 ExecutorStart_hook_type ExecutorStart_hook = NULL;
70 ExecutorRun_hook_type ExecutorRun_hook = NULL;
71 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
72 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
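
/*
 * A minimal sketch (hypothetical extension code, not part of this file) of
 * how a plugin such as pg_stat_statements installs one of these hooks from
 * its _PG_init(), chaining to any previously installed hook:
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 *
 * The Run, Finish and End hooks follow the same pattern with their own
 * argument lists; "my_ExecutorStart" is a made-up name.
 */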
74 /* Hook for plugin to get control in ExecCheckRTPerms() */
75 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
77 /* decls for local routines only used within this module */
78 static void InitPlan(QueryDesc *queryDesc, int eflags);
79 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
80 static void ExecPostprocessPlan(EState *estate);
81 static void ExecEndPlan(PlanState *planstate, EState *estate);
82 static void ExecutePlan(EState *estate, PlanState *planstate,
83 bool use_parallel_mode,
87 ScanDirection direction,
90 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
91 static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
92 Bitmapset *modifiedCols,
93 AclMode requiredPerms);
94 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
95 static char *ExecBuildSlotValueDescription(Oid reloid,
98 Bitmapset *modifiedCols,
100 static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
104 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
106 static void ExecPartitionCheck(ResultRelInfo *resultRelInfo,
107 TupleTableSlot *slot, EState *estate);
110 * Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
111 * not appear to be any good header to put it into, given the structures that
112 * it uses, so we let them be duplicated. Be sure to update both if one needs
113 * to be changed, however.
115 #define GetInsertedColumns(relinfo, estate) \
116 (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
117 #define GetUpdatedColumns(relinfo, estate) \
118 (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
120 /* end of local decls */
123 /* ----------------------------------------------------------------
126 * This routine must be called at the beginning of any execution of any
129 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
130 * only because some places use QueryDescs for utility commands). The tupDesc
131 * field of the QueryDesc is filled in to describe the tuples that will be
132 * returned, and the internal fields (estate and planstate) are set up.
134 * eflags contains flag bits as described in executor.h.
136 * NB: the CurrentMemoryContext when this is called will become the parent
137 * of the per-query context used for this Executor invocation.
139 * We provide a function hook variable that lets loadable plugins
140 * get control when ExecutorStart is called. Such a plugin would
141 * normally call standard_ExecutorStart().
143 * ----------------------------------------------------------------
146 ExecutorStart(QueryDesc *queryDesc, int eflags)
148 if (ExecutorStart_hook)
149 (*ExecutorStart_hook) (queryDesc, eflags);
151 standard_ExecutorStart(queryDesc, eflags);
155 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
158 MemoryContext oldcontext;
160 /* sanity checks: queryDesc must not be started already */
161 Assert(queryDesc != NULL);
162 Assert(queryDesc->estate == NULL);
165 * If the transaction is read-only, we need to check if any writes are
166 * planned to non-temporary tables. EXPLAIN is considered read-only.
168 * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
169 * would require (a) storing the combocid hash in shared memory, rather
170 * than synchronizing it just once at the start of parallelism, and (b) an
171 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
172 * INSERT may have no such troubles, but we forbid it to simplify the
175 * We have lower-level defenses in CommandCounterIncrement and elsewhere
176 * against performing unsafe operations in parallel mode, but this gives a
177 * more user-friendly error message.
179 if ((XactReadOnly || IsInParallelMode()) &&
180 !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
181 ExecCheckXactReadOnly(queryDesc->plannedstmt);
184 * Build EState, switch into per-query memory context for startup.
186 estate = CreateExecutorState();
187 queryDesc->estate = estate;
189 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
192 * Fill in external parameters, if any, from queryDesc; and allocate
193 * workspace for internal parameters
195 estate->es_param_list_info = queryDesc->params;
197 if (queryDesc->plannedstmt->nParamExec > 0)
198 estate->es_param_exec_vals = (ParamExecData *)
199 palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
201 estate->es_sourceText = queryDesc->sourceText;
204 * Fill in the query environment, if any, from queryDesc.
206 estate->es_queryEnv = queryDesc->queryEnv;
209 * If non-read-only query, set the command ID to mark output tuples with
211 switch (queryDesc->operation)
216 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
219 if (queryDesc->plannedstmt->rowMarks != NIL ||
220 queryDesc->plannedstmt->hasModifyingCTE)
221 estate->es_output_cid = GetCurrentCommandId(true);
224 * A SELECT without modifying CTEs can't possibly queue triggers,
225 * so force skip-triggers mode. This is just a marginal efficiency
226 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
227 * all that expensive, but we might as well do it.
229 if (!queryDesc->plannedstmt->hasModifyingCTE)
230 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
236 estate->es_output_cid = GetCurrentCommandId(true);
240 elog(ERROR, "unrecognized operation code: %d",
241 (int) queryDesc->operation);
246 * Copy other important information into the EState
248 estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
249 estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
250 estate->es_top_eflags = eflags;
251 estate->es_instrument = queryDesc->instrument_options;
254 * Initialize the plan state tree
256 InitPlan(queryDesc, eflags);
259 * Set up an AFTER-trigger statement context, unless told not to, or
260 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
262 if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
263 AfterTriggerBeginQuery();
265 MemoryContextSwitchTo(oldcontext);
268 /* ----------------------------------------------------------------
271 * This is the main routine of the executor module. It accepts
272 * the query descriptor from the traffic cop and executes the
275 * ExecutorStart must have been called already.
277 * If direction is NoMovementScanDirection then nothing is done
278 * except to start up/shut down the destination. Otherwise,
279 * we retrieve up to 'count' tuples in the specified direction.
281 * Note: count = 0 is interpreted as no portal limit, i.e., run to
282 * completion. Also note that the count limit is only applied to
283 * retrieved tuples, not for instance to those inserted/updated/deleted
284 * by a ModifyTable plan node.
286 * There is no return value, but output tuples (if any) are sent to
287 * the destination receiver specified in the QueryDesc; and the number
288 * of tuples processed at the top level can be found in
289 * estate->es_processed.
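 *
 * For illustration only (hypothetical caller): a portal-style loop that
 * wants the first ten tuples now and the rest later might issue
 *
 *		ExecutorRun(queryDesc, ForwardScanDirection, 10, false);
 *		...
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0, false);
 *
 * passing execute_once = false precisely because it intends to call
 * ExecutorRun more than once.
 *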
291 * We provide a function hook variable that lets loadable plugins
292 * get control when ExecutorRun is called. Such a plugin would
293 * normally call standard_ExecutorRun().
295 * ----------------------------------------------------------------
298 ExecutorRun(QueryDesc *queryDesc,
299 ScanDirection direction, uint64 count,
302 if (ExecutorRun_hook)
303 (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
305 standard_ExecutorRun(queryDesc, direction, count, execute_once);
309 standard_ExecutorRun(QueryDesc *queryDesc,
310 ScanDirection direction, uint64 count, bool execute_once)
316 MemoryContext oldcontext;
319 Assert(queryDesc != NULL);
321 estate = queryDesc->estate;
323 Assert(estate != NULL);
324 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
327 * Switch into per-query memory context
329 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
331 /* Allow instrumentation of Executor overall runtime */
332 if (queryDesc->totaltime)
333 InstrStartNode(queryDesc->totaltime);
336 * extract information from the query descriptor.
338 operation = queryDesc->operation;
339 dest = queryDesc->dest;
342 * startup tuple receiver, if we will be emitting tuples
344 estate->es_processed = 0;
345 estate->es_lastoid = InvalidOid;
347 sendTuples = (operation == CMD_SELECT ||
348 queryDesc->plannedstmt->hasReturning);
351 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
356 if (!ScanDirectionIsNoMovement(direction))
358 if (execute_once && queryDesc->already_executed)
359 elog(ERROR, "can't re-execute query flagged for single execution");
360 queryDesc->already_executed = true;
363 queryDesc->planstate,
364 queryDesc->plannedstmt->parallelModeNeeded,
374 * shutdown tuple receiver, if we started it
377 (*dest->rShutdown) (dest);
379 if (queryDesc->totaltime)
380 InstrStopNode(queryDesc->totaltime, estate->es_processed);
382 MemoryContextSwitchTo(oldcontext);
385 /* ----------------------------------------------------------------
388 * This routine must be called after the last ExecutorRun call.
389 * It performs cleanup such as firing AFTER triggers. It is
390 * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
391 * include these actions in the total runtime.
393 * We provide a function hook variable that lets loadable plugins
394 * get control when ExecutorFinish is called. Such a plugin would
395 * normally call standard_ExecutorFinish().
397 * ----------------------------------------------------------------
400 ExecutorFinish(QueryDesc *queryDesc)
402 if (ExecutorFinish_hook)
403 (*ExecutorFinish_hook) (queryDesc);
405 standard_ExecutorFinish(queryDesc);
409 standard_ExecutorFinish(QueryDesc *queryDesc)
412 MemoryContext oldcontext;
415 Assert(queryDesc != NULL);
417 estate = queryDesc->estate;
419 Assert(estate != NULL);
420 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
422 /* This should be run once and only once per Executor instance */
423 Assert(!estate->es_finished);
425 /* Switch into per-query memory context */
426 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
428 /* Allow instrumentation of Executor overall runtime */
429 if (queryDesc->totaltime)
430 InstrStartNode(queryDesc->totaltime);
432 /* Run ModifyTable nodes to completion */
433 ExecPostprocessPlan(estate);
435 /* Execute queued AFTER triggers, unless told not to */
436 if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
437 AfterTriggerEndQuery(estate);
439 if (queryDesc->totaltime)
440 InstrStopNode(queryDesc->totaltime, 0);
442 MemoryContextSwitchTo(oldcontext);
444 estate->es_finished = true;
447 /* ----------------------------------------------------------------
450 * This routine must be called at the end of execution of any
453 * We provide a function hook variable that lets loadable plugins
454 * get control when ExecutorEnd is called. Such a plugin would
455 * normally call standard_ExecutorEnd().
457 * ----------------------------------------------------------------
460 ExecutorEnd(QueryDesc *queryDesc)
462 if (ExecutorEnd_hook)
463 (*ExecutorEnd_hook) (queryDesc);
465 standard_ExecutorEnd(queryDesc);
469 standard_ExecutorEnd(QueryDesc *queryDesc)
472 MemoryContext oldcontext;
475 Assert(queryDesc != NULL);
477 estate = queryDesc->estate;
479 Assert(estate != NULL);
482 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
483 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
484 * might forget to call it.
486 Assert(estate->es_finished ||
487 (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
490 * Switch into per-query memory context to run ExecEndPlan
492 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
494 ExecEndPlan(queryDesc->planstate, estate);
496 /* do away with our snapshots */
497 UnregisterSnapshot(estate->es_snapshot);
498 UnregisterSnapshot(estate->es_crosscheck_snapshot);
501 * Must switch out of context before destroying it
503 MemoryContextSwitchTo(oldcontext);
506 * Release EState and per-query memory context. This should release
507 * everything the executor has allocated.
509 FreeExecutorState(estate);
511 /* Reset queryDesc fields that no longer point to anything */
512 queryDesc->tupDesc = NULL;
513 queryDesc->estate = NULL;
514 queryDesc->planstate = NULL;
515 queryDesc->totaltime = NULL;
518 /* ----------------------------------------------------------------
521 * This routine may be called on an open queryDesc to rewind it
523 * ----------------------------------------------------------------
526 ExecutorRewind(QueryDesc *queryDesc)
529 MemoryContext oldcontext;
532 Assert(queryDesc != NULL);
534 estate = queryDesc->estate;
536 Assert(estate != NULL);
538 /* It's probably not sensible to rescan updating queries */
539 Assert(queryDesc->operation == CMD_SELECT);
542 * Switch into per-query memory context
544 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
549 ExecReScan(queryDesc->planstate);
551 MemoryContextSwitchTo(oldcontext);
557 * Check access permissions for all relations listed in a range table.
559 * Returns true if permissions are adequate. Otherwise, throws an appropriate
560 * error if ereport_on_violation is true, or else simply returns false.
562 * Note that this does NOT address row level security policies (aka: RLS). If
563 * rows will be returned to the user as a result of this permission check
564 * passing, then RLS also needs to be consulted (and check_enable_rls()).
566 * See rewrite/rowsecurity.c.
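 *
 * For illustration only (hypothetical plugin code): a module that wants to
 * impose extra restrictions could set ExecutorCheckPerms_hook to a function
 * shaped like
 *
 *		static bool
 *		my_check_perms(List *rangeTable, bool ereport_on_violation)
 *		{
 *			return true;
 *		}
 *
 * returning false (or raising an error itself when ereport_on_violation is
 * true) to deny access; it is consulted in addition to the built-in checks
 * below.  "my_check_perms" is a made-up name.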
569 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
574 foreach(l, rangeTable)
576 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
578 result = ExecCheckRTEPerms(rte);
581 Assert(rte->rtekind == RTE_RELATION);
582 if (ereport_on_violation)
583 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
584 get_rel_name(rte->relid));
589 if (ExecutorCheckPerms_hook)
590 result = (*ExecutorCheckPerms_hook) (rangeTable,
591 ereport_on_violation);
597 * Check access permissions for a single RTE.
600 ExecCheckRTEPerms(RangeTblEntry *rte)
602 AclMode requiredPerms;
604 AclMode remainingPerms;
609 * Only plain-relation RTEs need to be checked here. Function RTEs are
610 * checked when the function is prepared for execution. Join, subquery,
611 * and special RTEs need no checks.
613 if (rte->rtekind != RTE_RELATION)
617 * No work if requiredPerms is empty.
619 requiredPerms = rte->requiredPerms;
620 if (requiredPerms == 0)
626 * userid to check as: current user unless we have a setuid indication.
628 * Note: GetUserId() is presently fast enough that there's no harm in
629 * calling it separately for each RTE. If that stops being true, we could
630 * call it once in ExecCheckRTPerms and pass the userid down from there.
631 * But for now, no need for the extra clutter.
633 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
636 * We must have *all* the requiredPerms bits, but some of the bits can be
637 * satisfied from column-level rather than relation-level permissions.
638 * First, remove any bits that are satisfied by relation permissions.
640 relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
641 remainingPerms = requiredPerms & ~relPerms;
642 if (remainingPerms != 0)
647 * If we lack any permissions that exist only as relation permissions,
648 * we can fail straight away.
650 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
654 * Check to see if we have the needed privileges at column level.
656 * Note: failures just report a table-level error; it would be nicer
657 * to report a column-level error if we have some but not all of the
660 if (remainingPerms & ACL_SELECT)
663 * When the query doesn't explicitly reference any columns (for
664 * example, SELECT COUNT(*) FROM table), allow the query if we
665 * have SELECT on any column of the rel, as per SQL spec.
667 if (bms_is_empty(rte->selectedCols))
669 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
670 ACLMASK_ANY) != ACLCHECK_OK)
674 while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
676 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
677 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
679 if (attno == InvalidAttrNumber)
681 /* Whole-row reference, must have priv on all cols */
682 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
683 ACLMASK_ALL) != ACLCHECK_OK)
688 if (pg_attribute_aclcheck(relOid, attno, userid,
689 ACL_SELECT) != ACLCHECK_OK)
696 * Basically the same for the mod columns, for both INSERT and UPDATE
697 * privilege as specified by remainingPerms.
699 if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
705 if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
715 * ExecCheckRTEPermsModified
716 * Check INSERT or UPDATE access permissions for a single RTE (these
717 * are processed uniformly).
720 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
721 AclMode requiredPerms)
726 * When the query doesn't explicitly update any columns, allow the query
727 * if we have permission on any column of the rel. This is to handle
728 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
730 if (bms_is_empty(modifiedCols))
732 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
733 ACLMASK_ANY) != ACLCHECK_OK)
737 while ((col = bms_next_member(modifiedCols, col)) >= 0)
739 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
740 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
742 if (attno == InvalidAttrNumber)
744 /* whole-row reference can't happen here */
745 elog(ERROR, "whole-row update is not implemented");
749 if (pg_attribute_aclcheck(relOid, attno, userid,
750 requiredPerms) != ACLCHECK_OK)
758 * Check that the query does not imply any writes to non-temp tables;
759 * unless we're in parallel mode, in which case don't even allow writes
762 * Note: in a Hot Standby slave this would need to reject writes to temp
763 * tables just as we do in parallel mode; but an HS slave can't have created
764 * any temp tables in the first place, so no need to check that.
767 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
772 * In parallel mode, fail if write permissions are requested for any table
773 * (temp or non-temp); otherwise fail only for non-temp tables.
775 foreach(l, plannedstmt->rtable)
777 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
779 if (rte->rtekind != RTE_RELATION)
782 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
785 if (isTempNamespace(get_rel_namespace(rte->relid)))
788 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
791 if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
792 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
796 /* ----------------------------------------------------------------
799 * Initializes the query plan: open files, allocate storage
800 * and start up the rule manager
801 * ----------------------------------------------------------------
804 InitPlan(QueryDesc *queryDesc, int eflags)
806 CmdType operation = queryDesc->operation;
807 PlannedStmt *plannedstmt = queryDesc->plannedstmt;
808 Plan *plan = plannedstmt->planTree;
809 List *rangeTable = plannedstmt->rtable;
810 EState *estate = queryDesc->estate;
811 PlanState *planstate;
817 * Do permissions checks
819 ExecCheckRTPerms(rangeTable, true);
822 * initialize the node's execution state
824 estate->es_range_table = rangeTable;
825 estate->es_plannedstmt = plannedstmt;
828 * initialize result relation stuff, and open/lock the result rels.
830 * We must do this before initializing the plan tree, else we might try to
831 * do a lock upgrade if a result rel is also a source rel.
833 if (plannedstmt->resultRelations)
835 List *resultRelations = plannedstmt->resultRelations;
836 int numResultRelations = list_length(resultRelations);
837 ResultRelInfo *resultRelInfos;
838 ResultRelInfo *resultRelInfo;
840 resultRelInfos = (ResultRelInfo *)
841 palloc(numResultRelations * sizeof(ResultRelInfo));
842 resultRelInfo = resultRelInfos;
843 foreach(l, resultRelations)
845 Index resultRelationIndex = lfirst_int(l);
846 Oid resultRelationOid;
847 Relation resultRelation;
849 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
850 resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
852 InitResultRelInfo(resultRelInfo,
856 estate->es_instrument);
859 estate->es_result_relations = resultRelInfos;
860 estate->es_num_result_relations = numResultRelations;
861 /* es_result_relation_info is NULL except when within ModifyTable */
862 estate->es_result_relation_info = NULL;
865 * In the partitioned result relation case, lock the non-leaf result
866 * relations too. A subset of these are the roots of respective
867 * partitioned tables, for which we also allocate ResultRelInfos.
869 estate->es_root_result_relations = NULL;
870 estate->es_num_root_result_relations = 0;
871 if (plannedstmt->nonleafResultRelations)
873 int num_roots = list_length(plannedstmt->rootResultRelations);
876 * Firstly, build ResultRelInfos for all the partitioned table
877 * roots, because we will need them to fire the statement-level
880 resultRelInfos = (ResultRelInfo *)
881 palloc(num_roots * sizeof(ResultRelInfo));
882 resultRelInfo = resultRelInfos;
883 foreach(l, plannedstmt->rootResultRelations)
885 Index resultRelIndex = lfirst_int(l);
887 Relation resultRelDesc;
889 resultRelOid = getrelid(resultRelIndex, rangeTable);
890 resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
891 InitResultRelInfo(resultRelInfo,
895 estate->es_instrument);
899 estate->es_root_result_relations = resultRelInfos;
900 estate->es_num_root_result_relations = num_roots;
902 /* Simply lock the rest of them. */
903 foreach(l, plannedstmt->nonleafResultRelations)
905 Index resultRelIndex = lfirst_int(l);
907 /* We locked the roots above. */
908 if (!list_member_int(plannedstmt->rootResultRelations,
910 LockRelationOid(getrelid(resultRelIndex, rangeTable),
918 * if no result relation, then set state appropriately
920 estate->es_result_relations = NULL;
921 estate->es_num_result_relations = 0;
922 estate->es_result_relation_info = NULL;
923 estate->es_root_result_relations = NULL;
924 estate->es_num_root_result_relations = 0;
928 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
929 * before we initialize the plan tree, else we'd be risking lock upgrades.
930 * While we are at it, build the ExecRowMark list. Any partitioned child
931 * tables are ignored here (because isParent=true) and will be locked by
932 * the first Append or MergeAppend node that references them. (Note that
933 * the RowMarks corresponding to partitioned child tables are present in
934 * the same list as the rest, i.e., plannedstmt->rowMarks.)
936 estate->es_rowMarks = NIL;
937 foreach(l, plannedstmt->rowMarks)
939 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
944 /* ignore "parent" rowmarks; they are irrelevant at runtime */
948 /* get relation's OID (will produce InvalidOid if subquery) */
949 relid = getrelid(rc->rti, rangeTable);
952 * If you change the conditions under which rel locks are acquired
953 * here, be sure to adjust ExecOpenScanRelation to match.
955 switch (rc->markType)
957 case ROW_MARK_EXCLUSIVE:
958 case ROW_MARK_NOKEYEXCLUSIVE:
960 case ROW_MARK_KEYSHARE:
961 relation = heap_open(relid, RowShareLock);
963 case ROW_MARK_REFERENCE:
964 relation = heap_open(relid, AccessShareLock);
967 /* no physical table access is required */
971 elog(ERROR, "unrecognized markType: %d", rc->markType);
972 relation = NULL; /* keep compiler quiet */
976 /* Check that relation is a legal target for marking */
978 CheckValidRowMarkRel(relation, rc->markType);
980 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
981 erm->relation = relation;
984 erm->prti = rc->prti;
985 erm->rowmarkId = rc->rowmarkId;
986 erm->markType = rc->markType;
987 erm->strength = rc->strength;
988 erm->waitPolicy = rc->waitPolicy;
989 erm->ermActive = false;
990 ItemPointerSetInvalid(&(erm->curCtid));
991 erm->ermExtra = NULL;
992 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
996 * Initialize the executor's tuple table to empty.
998 estate->es_tupleTable = NIL;
999 estate->es_trig_tuple_slot = NULL;
1000 estate->es_trig_oldtup_slot = NULL;
1001 estate->es_trig_newtup_slot = NULL;
1003 /* mark EvalPlanQual not active */
1004 estate->es_epqTuple = NULL;
1005 estate->es_epqTupleSet = NULL;
1006 estate->es_epqScanDone = NULL;
1009 * Initialize private state information for each SubPlan. We must do this
1010 * before running ExecInitNode on the main query tree, since
1011 * ExecInitSubPlan expects to be able to find these entries.
1013 Assert(estate->es_subplanstates == NIL);
1014 i = 1; /* subplan indices count from 1 */
1015 foreach(l, plannedstmt->subplans)
1017 Plan *subplan = (Plan *) lfirst(l);
1018 PlanState *subplanstate;
1022 * A subplan will never need to do BACKWARD scan or MARK/RESTORE. If
1023 * it is a parameterless subplan (not initplan), we suggest that it be
1024 * prepared to handle REWIND efficiently; otherwise there is no need.
1027 & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
1028 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
1029 sp_eflags |= EXEC_FLAG_REWIND;
1031 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
1033 estate->es_subplanstates = lappend(estate->es_subplanstates,
1040 * Initialize the private state information for all the nodes in the query
1041 * tree. This opens files, allocates storage and leaves us ready to start
1042 * processing tuples.
1044 planstate = ExecInitNode(plan, estate, eflags);
1047 * Get the tuple descriptor describing the type of tuples to return.
1049 tupType = ExecGetResultType(planstate);
1052 * Initialize the junk filter if needed. SELECT queries need a filter if
1053 * there are any junk attrs in the top-level tlist.
1055 if (operation == CMD_SELECT)
1057 bool junk_filter_needed = false;
1060 foreach(tlist, plan->targetlist)
1062 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1066 junk_filter_needed = true;
1071 if (junk_filter_needed)
1075 j = ExecInitJunkFilter(planstate->plan->targetlist,
1077 ExecInitExtraTupleSlot(estate));
1078 estate->es_junkFilter = j;
1080 /* Want to return the cleaned tuple type */
1081 tupType = j->jf_cleanTupType;
1085 queryDesc->tupDesc = tupType;
1086 queryDesc->planstate = planstate;
1090 * Check that a proposed result relation is a legal target for the operation
1092 * Generally the parser and/or planner should have noticed any such mistake
1093 * already, but let's make sure.
1095 * Note: when changing this function, you probably also need to look at
1096 * CheckValidRowMarkRel.
1099 CheckValidResultRel(Relation resultRel, CmdType operation)
1101 TriggerDesc *trigDesc = resultRel->trigdesc;
1102 FdwRoutine *fdwroutine;
1104 switch (resultRel->rd_rel->relkind)
1106 case RELKIND_RELATION:
1107 case RELKIND_PARTITIONED_TABLE:
1108 CheckCmdReplicaIdentity(resultRel, operation);
1110 case RELKIND_SEQUENCE:
1112 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1113 errmsg("cannot change sequence \"%s\"",
1114 RelationGetRelationName(resultRel))));
1116 case RELKIND_TOASTVALUE:
1118 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1119 errmsg("cannot change TOAST relation \"%s\"",
1120 RelationGetRelationName(resultRel))));
1125 * Okay only if there's a suitable INSTEAD OF trigger. Messages
1126 * here should match rewriteHandler.c's rewriteTargetView, except
1127 * that we omit errdetail because we haven't got the information
1128 * handy (and given that we really shouldn't get here anyway, it's
1129 * not worth great exertion to get).
1134 if (!trigDesc || !trigDesc->trig_insert_instead_row)
1136 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1137 errmsg("cannot insert into view \"%s\"",
1138 RelationGetRelationName(resultRel)),
1139 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1142 if (!trigDesc || !trigDesc->trig_update_instead_row)
1144 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1145 errmsg("cannot update view \"%s\"",
1146 RelationGetRelationName(resultRel)),
1147 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1150 if (!trigDesc || !trigDesc->trig_delete_instead_row)
1152 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1153 errmsg("cannot delete from view \"%s\"",
1154 RelationGetRelationName(resultRel)),
1155 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1158 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1162 case RELKIND_MATVIEW:
1163 if (!MatViewIncrementalMaintenanceIsEnabled())
1165 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1166 errmsg("cannot change materialized view \"%s\"",
1167 RelationGetRelationName(resultRel))));
1169 case RELKIND_FOREIGN_TABLE:
1170 /* Okay only if the FDW supports it */
1171 fdwroutine = GetFdwRoutineForRelation(resultRel, false);
1175 if (fdwroutine->ExecForeignInsert == NULL)
1177 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1178 errmsg("cannot insert into foreign table \"%s\"",
1179 RelationGetRelationName(resultRel))));
1180 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1181 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1183 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1184 errmsg("foreign table \"%s\" does not allow inserts",
1185 RelationGetRelationName(resultRel))));
1188 if (fdwroutine->ExecForeignUpdate == NULL)
1190 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1191 errmsg("cannot update foreign table \"%s\"",
1192 RelationGetRelationName(resultRel))));
1193 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1194 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1196 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1197 errmsg("foreign table \"%s\" does not allow updates",
1198 RelationGetRelationName(resultRel))));
1201 if (fdwroutine->ExecForeignDelete == NULL)
1203 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1204 errmsg("cannot delete from foreign table \"%s\"",
1205 RelationGetRelationName(resultRel))));
1206 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1207 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1209 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1210 errmsg("foreign table \"%s\" does not allow deletes",
1211 RelationGetRelationName(resultRel))));
1214 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1220 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1221 errmsg("cannot change relation \"%s\"",
1222 RelationGetRelationName(resultRel))));
1228 * Check that a proposed rowmark target relation is a legal target
1230 * In most cases parser and/or planner should have noticed this already, but
1231 * they don't cover all cases.
1234 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1236 FdwRoutine *fdwroutine;
1238 switch (rel->rd_rel->relkind)
1240 case RELKIND_RELATION:
1241 case RELKIND_PARTITIONED_TABLE:
1244 case RELKIND_SEQUENCE:
1245 /* Must disallow this because we don't vacuum sequences */
1247 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1248 errmsg("cannot lock rows in sequence \"%s\"",
1249 RelationGetRelationName(rel))));
1251 case RELKIND_TOASTVALUE:
1252 /* We could allow this, but there seems no good reason to */
1254 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1255 errmsg("cannot lock rows in TOAST relation \"%s\"",
1256 RelationGetRelationName(rel))));
1259 /* Should not get here; planner should have expanded the view */
1261 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1262 errmsg("cannot lock rows in view \"%s\"",
1263 RelationGetRelationName(rel))));
1265 case RELKIND_MATVIEW:
1266 /* Allow referencing a matview, but not actual locking clauses */
1267 if (markType != ROW_MARK_REFERENCE)
1269 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1270 errmsg("cannot lock rows in materialized view \"%s\"",
1271 RelationGetRelationName(rel))));
1273 case RELKIND_FOREIGN_TABLE:
1274 /* Okay only if the FDW supports it */
1275 fdwroutine = GetFdwRoutineForRelation(rel, false);
1276 if (fdwroutine->RefetchForeignRow == NULL)
1278 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1279 errmsg("cannot lock rows in foreign table \"%s\"",
1280 RelationGetRelationName(rel))));
1284 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1285 errmsg("cannot lock rows in relation \"%s\"",
1286 RelationGetRelationName(rel))));
1292 * Initialize ResultRelInfo data for one result relation
1294 * Caution: before Postgres 9.1, this function included the relkind checking
1295 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1296 * appropriate. Be sure callers cover those needs.
1299 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1300 Relation resultRelationDesc,
1301 Index resultRelationIndex,
1302 Relation partition_root,
1303 int instrument_options)
1305 List *partition_check = NIL;
1307 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1308 resultRelInfo->type = T_ResultRelInfo;
1309 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1310 resultRelInfo->ri_RelationDesc = resultRelationDesc;
1311 resultRelInfo->ri_NumIndices = 0;
1312 resultRelInfo->ri_IndexRelationDescs = NULL;
1313 resultRelInfo->ri_IndexRelationInfo = NULL;
1314 /* make a copy so as not to depend on relcache info not changing... */
1315 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1316 if (resultRelInfo->ri_TrigDesc)
1318 int n = resultRelInfo->ri_TrigDesc->numtriggers;
1320 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1321 palloc0(n * sizeof(FmgrInfo));
1322 resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1323 palloc0(n * sizeof(ExprState *));
1324 if (instrument_options)
1325 resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1329 resultRelInfo->ri_TrigFunctions = NULL;
1330 resultRelInfo->ri_TrigWhenExprs = NULL;
1331 resultRelInfo->ri_TrigInstrument = NULL;
1333 if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1334 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1336 resultRelInfo->ri_FdwRoutine = NULL;
1337 resultRelInfo->ri_FdwState = NULL;
1338 resultRelInfo->ri_usesFdwDirectModify = false;
1339 resultRelInfo->ri_ConstraintExprs = NULL;
1340 resultRelInfo->ri_junkFilter = NULL;
1341 resultRelInfo->ri_projectReturning = NULL;
1344 * Partition constraint, which also includes the partition constraint of
1345 * all the ancestors that are partitions. Note that it will be checked
1346 * even in the case of tuple-routing where this table is the target leaf
1347 * partition, if there are any BR triggers defined on the table. Although
1348 * tuple-routing implicitly preserves the partition constraint of the
1349 * target partition for a given row, the BR triggers may change the row
1350 * such that the constraint is no longer satisfied, which we must fail for
1351 * by checking it explicitly.
1353 * If this is a partitioned table, the partition constraint (if any) of a
1354 * given row will be checked just before performing tuple-routing.
1356 partition_check = RelationGetPartitionQual(resultRelationDesc);
1358 resultRelInfo->ri_PartitionCheck = partition_check;
1359 resultRelInfo->ri_PartitionRoot = partition_root;
1363 * ExecGetTriggerResultRel
1365 * Get a ResultRelInfo for a trigger target relation. Most of the time,
1366 * triggers are fired on one of the result relations of the query, and so
1367 * we can just return a member of the es_result_relations array. (Note: in
1368 * self-join situations there might be multiple members with the same OID;
1369 * if so it doesn't matter which one we pick.) However, it is sometimes
1370 * necessary to fire triggers on other relations; this happens mainly when an
1371 * RI update trigger queues additional triggers on other relations, which will
1372 * be processed in the context of the outer query. For efficiency's sake,
1373 * we want to have a ResultRelInfo for those triggers too; that can avoid
1374 * repeated re-opening of the relation. (It also provides a way for EXPLAIN
1375 * ANALYZE to report the runtimes of such triggers.) So we make additional
1376 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1379 ExecGetTriggerResultRel(EState *estate, Oid relid)
1381 ResultRelInfo *rInfo;
1385 MemoryContext oldcontext;
1387 /* First, search through the query result relations */
1388 rInfo = estate->es_result_relations;
1389 nr = estate->es_num_result_relations;
1392 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1397 /* Nope, but maybe we already made an extra ResultRelInfo for it */
1398 foreach(l, estate->es_trig_target_relations)
1400 rInfo = (ResultRelInfo *) lfirst(l);
1401 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1404 /* Nope, so we need a new one */
1407 * Open the target relation's relcache entry. We assume that an
1408 * appropriate lock is still held by the backend from whenever the trigger
1409 * event got queued, so we need take no new lock here. Also, we need not
1410 * recheck the relkind, so no need for CheckValidResultRel.
1412 rel = heap_open(relid, NoLock);
1415 * Make the new entry in the right context.
1417 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1418 rInfo = makeNode(ResultRelInfo);
1419 InitResultRelInfo(rInfo,
1421 0, /* dummy rangetable index */
1423 estate->es_instrument);
1424 estate->es_trig_target_relations =
1425 lappend(estate->es_trig_target_relations, rInfo);
1426 MemoryContextSwitchTo(oldcontext);
1429 * Currently, we don't need any index information in ResultRelInfos used
1430 * only for triggers, so no need to call ExecOpenIndices.
1437 * Close any relations that have been opened by ExecGetTriggerResultRel().
1440 ExecCleanUpTriggerState(EState *estate)
1444 foreach(l, estate->es_trig_target_relations)
1446 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1448 /* Close indices and then the relation itself */
1449 ExecCloseIndices(resultRelInfo);
1450 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1455 * ExecContextForcesOids
1457 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1458 * we need to ensure that result tuples have space for an OID iff they are
1459 * going to be stored into a relation that has OIDs. In other contexts
1460 * we are free to choose whether to leave space for OIDs in result tuples
1461 * (we generally don't want to, but we do if a physical-tlist optimization
1462 * is possible). This routine checks the plan context and returns TRUE if the
1463 * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1464 * *hasoids is set to the required value.
1466 * One reason this is ugly is that all plan nodes in the plan tree will emit
1467 * tuples with space for an OID, though we really only need the topmost node
1468 * to do so. However, node types like Sort don't project new tuples but just
1469 * return their inputs, and in those cases the requirement propagates down
1470 * to the input node. Eventually we might make this code smart enough to
1471 * recognize how far down the requirement really goes, but for now we just
1472 * make all plan nodes do the same thing if the top level forces the choice.
1474 * We assume that if we are generating tuples for INSERT or UPDATE,
1475 * estate->es_result_relation_info is already set up to describe the target
1476 * relation. Note that in an UPDATE that spans an inheritance tree, some of
1477 * the target relations may have OIDs and some not. We have to make the
1478 * decisions on a per-relation basis as we initialize each of the subplans of
1479 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1480 * while initializing each subplan.
1482 * CREATE TABLE AS is even uglier, because we don't have the target relation's
1483 * descriptor available when this code runs; we have to look aside at the
1484 * flags passed to ExecutorStart().
1487 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1489 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1493 Relation rel = ri->ri_RelationDesc;
1497 *hasoids = rel->rd_rel->relhasoids;
1502 if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1507 if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1516 /* ----------------------------------------------------------------
1517 * ExecPostprocessPlan
1519 * Give plan nodes a final chance to execute before shutdown
1520 * ----------------------------------------------------------------
1523 ExecPostprocessPlan(EState *estate)
1528 * Make sure nodes run forward.
1530 estate->es_direction = ForwardScanDirection;
1533 * Run any secondary ModifyTable nodes to completion, in case the main
1534 * query did not fetch all rows from them. (We do this to ensure that
1535 * such nodes have predictable results.)
1537 foreach(lc, estate->es_auxmodifytables)
1539 PlanState *ps = (PlanState *) lfirst(lc);
1543 TupleTableSlot *slot;
1545 /* Reset the per-output-tuple exprcontext each time */
1546 ResetPerTupleExprContext(estate);
1548 slot = ExecProcNode(ps);
1550 if (TupIsNull(slot))
1556 /* ----------------------------------------------------------------
1559 * Cleans up the query plan -- closes files and frees up storage
1561 * NOTE: we are no longer very worried about freeing storage per se
1562 * in this code; FreeExecutorState should be guaranteed to release all
1563 * memory that needs to be released. What we are worried about doing
1564 * is closing relations and dropping buffer pins. Thus, for example,
1565 * tuple tables must be cleared or dropped to ensure pins are released.
1566 * ----------------------------------------------------------------
1569 ExecEndPlan(PlanState *planstate, EState *estate)
1571 ResultRelInfo *resultRelInfo;
1576 * shut down the node-type-specific query processing
1578 ExecEndNode(planstate);
1583 foreach(l, estate->es_subplanstates)
1585 PlanState *subplanstate = (PlanState *) lfirst(l);
1587 ExecEndNode(subplanstate);
1591 * destroy the executor's tuple table. Actually we only care about
1592 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1593 * the TupleTableSlots, since the containing memory context is about to go
1596 ExecResetTupleTable(estate->es_tupleTable, false);
1599 * close the result relation(s) if any, but hold locks until xact commit.
1601 resultRelInfo = estate->es_result_relations;
1602 for (i = estate->es_num_result_relations; i > 0; i--)
1604 /* Close indices and then the relation itself */
1605 ExecCloseIndices(resultRelInfo);
1606 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1610 /* Close the root target relation(s). */
1611 resultRelInfo = estate->es_root_result_relations;
1612 for (i = estate->es_num_root_result_relations; i > 0; i--)
1614 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1618 /* likewise close any trigger target relations */
1619 ExecCleanUpTriggerState(estate);
1622 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1625 foreach(l, estate->es_rowMarks)
1627 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1630 heap_close(erm->relation, NoLock);
1634 /* ----------------------------------------------------------------
1637 * Processes the query plan until we have retrieved 'numberTuples' tuples,
1638 * moving in the specified direction.
1640 * Runs to completion if numberTuples is 0
1642 * Note: the ctid attribute is a 'junk' attribute that is removed before the
1644 * ----------------------------------------------------------------
1647 ExecutePlan(EState *estate,
1648 PlanState *planstate,
1649 bool use_parallel_mode,
1652 uint64 numberTuples,
1653 ScanDirection direction,
1657 TupleTableSlot *slot;
1658 uint64 current_tuple_count;
1661 * initialize local variables
1663 current_tuple_count = 0;
1666 * Set the direction.
1668 estate->es_direction = direction;
1671 * If the plan might potentially be executed multiple times, we must force
1672 * it to run without parallelism, because we might exit early. Also
1673 * disable parallelism when writing into a relation, because no database
1674 * changes are allowed in parallel mode.
1676 if (!execute_once || dest->mydest == DestIntoRel)
1677 use_parallel_mode = false;
1679 if (use_parallel_mode)
1680 EnterParallelMode();
1683 * Loop until we've processed the proper number of tuples from the plan.
1687 /* Reset the per-output-tuple exprcontext */
1688 ResetPerTupleExprContext(estate);
1691 * Execute the plan and obtain a tuple
1693 slot = ExecProcNode(planstate);
1696 * if the tuple is null, then we assume there is nothing more to
1697 * process so we just end the loop...
1699 if (TupIsNull(slot))
1701 /* Allow nodes to release or shut down resources. */
1702 (void) ExecShutdownNode(planstate);
1707 * If we have a junk filter, then project a new tuple with the junk
1710 * Store this new "clean" tuple in the junkfilter's resultSlot.
1711 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1712 * because that tuple slot has the wrong descriptor.)
1714 if (estate->es_junkFilter != NULL)
1715 slot = ExecFilterJunk(estate->es_junkFilter, slot);
1718 * If we are supposed to send the tuple somewhere, do so. (In
1719 * practice, this is probably always the case at this point.)
1724 * If we are not able to send the tuple, we assume the destination
1725 * has closed and no more tuples can be sent. If that's the case,
1728 if (!((*dest->receiveSlot) (slot, dest)))
1733 * Count tuples processed, if this is a SELECT. (For other operation
1734 * types, the ModifyTable plan node must count the appropriate
1737 if (operation == CMD_SELECT)
1738 (estate->es_processed)++;
1741 * check our tuple count. If we've processed the proper number then
1742 * quit, else loop again and process more tuples. Zero numberTuples
1745 current_tuple_count++;
1746 if (numberTuples && numberTuples == current_tuple_count)
1748 /* Allow nodes to release or shut down resources. */
1749 (void) ExecShutdownNode(planstate);
1754 if (use_parallel_mode)
1760 * ExecRelCheck --- check that tuple meets constraints for result relation
1762 * Returns NULL if OK, else name of failed check constraint
1765 ExecRelCheck(ResultRelInfo *resultRelInfo,
1766 TupleTableSlot *slot, EState *estate)
1768 Relation rel = resultRelInfo->ri_RelationDesc;
1769 int ncheck = rel->rd_att->constr->num_check;
1770 ConstrCheck *check = rel->rd_att->constr->check;
1771 ExprContext *econtext;
1772 MemoryContext oldContext;
1776 * If first time through for this result relation, build expression
1777 * nodetrees for rel's constraint expressions. Keep them in the per-query
1778 * memory context so they'll survive throughout the query.
1780 if (resultRelInfo->ri_ConstraintExprs == NULL)
1782 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1783 resultRelInfo->ri_ConstraintExprs =
1784 (ExprState **) palloc(ncheck * sizeof(ExprState *));
1785 for (i = 0; i < ncheck; i++)
1789 checkconstr = stringToNode(check[i].ccbin);
1790 resultRelInfo->ri_ConstraintExprs[i] =
1791 ExecPrepareExpr(checkconstr, estate);
1793 MemoryContextSwitchTo(oldContext);
1797 * We will use the EState's per-tuple context for evaluating constraint
1798 * expressions (creating it if it's not already there).
1800 econtext = GetPerTupleExprContext(estate);
1802 /* Arrange for econtext's scan tuple to be the tuple under test */
1803 econtext->ecxt_scantuple = slot;
1805 /* And evaluate the constraints */
1806 for (i = 0; i < ncheck; i++)
1808 ExprState *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1811 * NOTE: SQL specifies that a NULL result from a constraint expression
1812 * is not to be treated as a failure. Therefore, use ExecCheck not
1815 if (!ExecCheck(checkconstr, econtext))
1816 return check[i].ccname;
1819 /* NULL result means no error */
1824 * ExecPartitionCheck --- check that tuple meets the partition constraint.
1827 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1830 Relation rel = resultRelInfo->ri_RelationDesc;
1831 TupleDesc tupdesc = RelationGetDescr(rel);
1832 Bitmapset *modifiedCols;
1833 Bitmapset *insertedCols;
1834 Bitmapset *updatedCols;
1835 ExprContext *econtext;
1838 * If first time through, build expression state tree for the partition
1839 * check expression. Keep it in the per-query memory context so it will
1840 * survive throughout the query.
1842 if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1844 List *qual = resultRelInfo->ri_PartitionCheck;
1846 resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1850 * We will use the EState's per-tuple context for evaluating constraint
1851 * expressions (creating it if it's not already there).
1853 econtext = GetPerTupleExprContext(estate);
1855 /* Arrange for econtext's scan tuple to be the tuple under test */
1856 econtext->ecxt_scantuple = slot;
1859 * As in the case of the catalogued constraints, we treat a NULL result as
1860 * success here, not a failure.
1862 if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext))
1865 Relation orig_rel = rel;
1867 /* See the comment above. */
1868 if (resultRelInfo->ri_PartitionRoot)
1870 HeapTuple tuple = ExecFetchSlotTuple(slot);
1871 TupleDesc old_tupdesc = RelationGetDescr(rel);
1872 TupleConversionMap *map;
1874 rel = resultRelInfo->ri_PartitionRoot;
1875 tupdesc = RelationGetDescr(rel);
1877 map = convert_tuples_by_name(old_tupdesc, tupdesc,
1878 gettext_noop("could not convert row type"));
1881 tuple = do_convert_tuple(tuple, map);
1882 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1886 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1887 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1888 modifiedCols = bms_union(insertedCols, updatedCols);
1889 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1895 (errcode(ERRCODE_CHECK_VIOLATION),
1896 errmsg("new row for relation \"%s\" violates partition constraint",
1897 RelationGetRelationName(orig_rel)),
1898 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1903 * ExecConstraints - check constraints of the tuple in 'slot'
1905 * This checks the traditional NOT NULL and check constraints, as well as
1906 * the partition constraint, if any.
1908 * Note: 'slot' contains the tuple to check the constraints of, which may
1909 * have been converted from the original input tuple after tuple routing.
1910 * 'resultRelInfo' is the original result relation, before tuple routing.
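 *
 * A hedged sketch of a typical call site (assumed, not verified against the
 * actual callers): an INSERT/UPDATE path would invoke this only when there
 * is something to check, along the lines of
 *
 *		if (resultRelInfo->ri_RelationDesc->rd_att->constr ||
 *			resultRelInfo->ri_PartitionCheck)
 *			ExecConstraints(resultRelInfo, slot, estate);
 *
 * which matches the Assert at the top of the function.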
1913 ExecConstraints(ResultRelInfo *resultRelInfo,
1914 TupleTableSlot *slot, EState *estate)
1916 Relation rel = resultRelInfo->ri_RelationDesc;
1917 TupleDesc tupdesc = RelationGetDescr(rel);
1918 TupleConstr *constr = tupdesc->constr;
1919 Bitmapset *modifiedCols;
1920 Bitmapset *insertedCols;
1921 Bitmapset *updatedCols;
1923 Assert(constr || resultRelInfo->ri_PartitionCheck);
1925 if (constr && constr->has_not_null)
1927 int natts = tupdesc->natts;
1930 for (attrChk = 1; attrChk <= natts; attrChk++)
1932 if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1933 slot_attisnull(slot, attrChk))
1936 Relation orig_rel = rel;
1937 TupleDesc orig_tupdesc = RelationGetDescr(rel);
1940 * If the tuple has been routed, it's been converted to the
1941 * partition's rowtype, which might differ from the root
1942 * table's. We must convert it back to the root table's
1943 * rowtype so that the val_desc shown in the error message matches the
1946 if (resultRelInfo->ri_PartitionRoot)
1948 HeapTuple tuple = ExecFetchSlotTuple(slot);
1949 TupleConversionMap *map;
1951 rel = resultRelInfo->ri_PartitionRoot;
1952 tupdesc = RelationGetDescr(rel);
1954 map = convert_tuples_by_name(orig_tupdesc, tupdesc,
1955 gettext_noop("could not convert row type"));
1958 tuple = do_convert_tuple(tuple, map);
1959 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1963 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1964 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1965 modifiedCols = bms_union(insertedCols, updatedCols);
1966 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1973 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1974 errmsg("null value in column \"%s\" violates not-null constraint",
1975 NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)),
1976 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1977 errtablecol(orig_rel, attrChk)));
1982 if (constr && constr->num_check > 0)
1986 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1989 Relation orig_rel = rel;
1991 /* See the comment above. */
1992 if (resultRelInfo->ri_PartitionRoot)
1994 HeapTuple tuple = ExecFetchSlotTuple(slot);
1995 TupleDesc old_tupdesc = RelationGetDescr(rel);
1996 TupleConversionMap *map;
1998 rel = resultRelInfo->ri_PartitionRoot;
1999 tupdesc = RelationGetDescr(rel);
2001 map = convert_tuples_by_name(old_tupdesc, tupdesc,
2002 gettext_noop("could not convert row type"));
2005 tuple = do_convert_tuple(tuple, map);
2006 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2010 insertedCols = GetInsertedColumns(resultRelInfo, estate);
2011 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2012 modifiedCols = bms_union(insertedCols, updatedCols);
2013 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2019 (errcode(ERRCODE_CHECK_VIOLATION),
2020 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2021 RelationGetRelationName(orig_rel), failed),
2022 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2023 errtableconstraint(orig_rel, failed)));
2027 if (resultRelInfo->ri_PartitionCheck)
2028 ExecPartitionCheck(resultRelInfo, slot, estate);
2033 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2034 * of the specified kind.
2036 * Note that this needs to be called multiple times to ensure that all kinds of
2037 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2038 * CHECK OPTION set and from row level security policies). See ExecInsert()
2042 ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
2043 TupleTableSlot *slot, EState *estate)
2045 Relation rel = resultRelInfo->ri_RelationDesc;
2046 TupleDesc tupdesc = RelationGetDescr(rel);
2047 ExprContext *econtext;
2052 * We will use the EState's per-tuple context for evaluating constraint
2053 * expressions (creating it if it's not already there).
2055 econtext = GetPerTupleExprContext(estate);
2057 /* Arrange for econtext's scan tuple to be the tuple under test */
2058 econtext->ecxt_scantuple = slot;
2060 /* Check each of the constraints */
2061 forboth(l1, resultRelInfo->ri_WithCheckOptions,
2062 l2, resultRelInfo->ri_WithCheckOptionExprs)
2064 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2065 ExprState *wcoExpr = (ExprState *) lfirst(l2);
2068 * Skip any WCOs which are not the kind we are looking for at this time.
2071 if (wco->kind != kind)
2075 * WITH CHECK OPTION checks are intended to ensure that the new tuple
2076 * is visible (in the case of a view) or that it passes the
2077 * 'with-check' policy (in the case of row security). If the qual
2078 * evaluates to NULL or FALSE, then the new tuple won't be included in
2079 * the view or doesn't pass the 'with-check' policy for the table.
2081 if (!ExecQual(wcoExpr, econtext))
2084 Bitmapset *modifiedCols;
2085 Bitmapset *insertedCols;
2086 Bitmapset *updatedCols;
2091 * For WITH CHECK OPTIONs coming from views, we might be
2092 * able to provide the details on the row, depending on
2093 * the permissions on the relation (that is, if the user
2094 * could view it directly anyway). For RLS violations, we
2095 * don't include the data since we don't know if the user
2096 * should be able to view the tuple, as that depends on the USING policy.
2099 case WCO_VIEW_CHECK:
2100 insertedCols = GetInsertedColumns(resultRelInfo, estate);
2101 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2102 modifiedCols = bms_union(insertedCols, updatedCols);
2103 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2110 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2111 errmsg("new row violates check option for view \"%s\"",
2113 val_desc ? errdetail("Failing row contains %s.",
2116 case WCO_RLS_INSERT_CHECK:
2117 case WCO_RLS_UPDATE_CHECK:
2118 if (wco->polname != NULL)
2120 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2121 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2122 wco->polname, wco->relname)));
2125 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2126 errmsg("new row violates row-level security policy for table \"%s\"",
2129 case WCO_RLS_CONFLICT_CHECK:
2130 if (wco->polname != NULL)
2132 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2133 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2134 wco->polname, wco->relname)));
2137 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2138 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2142 elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
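/*
 * Illustrative sketch, not part of the original file: since each call checks
 * only one WCOKind, a caller such as ExecInsert() is expected to call this
 * function once per applicable kind, roughly:
 *
 *		if (resultRelInfo->ri_WithCheckOptions != NIL)
 *			ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
 *								 resultRelInfo, slot, estate);
 *		... perform the insertion ...
 *		if (resultRelInfo->ri_WithCheckOptions != NIL)
 *			ExecWithCheckOptions(WCO_VIEW_CHECK,
 *								 resultRelInfo, slot, estate);
 *
 * The exact placement of these calls belongs to nodeModifyTable.c; this only
 * shows the calling convention.
 */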
2150 * ExecBuildSlotValueDescription -- construct a string representing a tuple
2152 * This is intentionally very similar to BuildIndexValueDescription, but
2153 * unlike that function, we truncate long field values (to at most maxfieldlen
2154 * bytes). That seems necessary here since heap field values could be very
2155 * long, whereas index entries typically aren't so wide.
2157 * Also, unlike the case with index entries, we need to be prepared to ignore
2158 * dropped columns. We used to use the slot's tuple descriptor to decode the
2159 * data, but the slot's descriptor doesn't identify dropped columns, so we
2160 * now need to be passed the relation's descriptor.
2162 * Note that, like BuildIndexValueDescription, if the user does not have
2163 * permission to view any of the columns involved, a NULL is returned. Unlike
2164 * BuildIndexValueDescription, if the user has access to view a subset of the
2165 * columns involved, that subset will be returned with a key identifying which columns they are.
2169 ExecBuildSlotValueDescription(Oid reloid,
2170 TupleTableSlot *slot,
2172 Bitmapset *modifiedCols,
2176 StringInfoData collist;
2177 bool write_comma = false;
2178 bool write_comma_collist = false;
2180 AclResult aclresult;
2181 bool table_perm = false;
2182 bool any_perm = false;
2185 * Check if RLS is enabled and should be active for the relation; if so,
2186 * then don't return anything. Otherwise, go through normal permission checks.
2189 if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2192 initStringInfo(&buf);
2194 appendStringInfoChar(&buf, '(');
2197 * Check if the user has permissions to see the row. Table-level SELECT
2198 * allows access to all columns. If the user does not have table-level
2199 * SELECT then we check each column and include those the user has SELECT
2200 * rights on. Additionally, we always include columns the user provided data for.
2203 aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2204 if (aclresult != ACLCHECK_OK)
2206 /* Set up the buffer for the column list */
2207 initStringInfo(&collist);
2208 appendStringInfoChar(&collist, '(');
2211 table_perm = any_perm = true;
2213 /* Make sure the tuple is fully deconstructed */
2214 slot_getallattrs(slot);
2216 for (i = 0; i < tupdesc->natts; i++)
2218 bool column_perm = false;
2222 /* ignore dropped columns */
2223 if (tupdesc->attrs[i]->attisdropped)
2229 * No table-level SELECT, so need to make sure they either have
2230 * SELECT rights on the column or that they have provided the data
2231 * for the column. If not, omit this column from the error message.
2234 aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
2235 GetUserId(), ACL_SELECT);
2236 if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
2237 modifiedCols) || aclresult == ACLCHECK_OK)
2239 column_perm = any_perm = true;
2241 if (write_comma_collist)
2242 appendStringInfoString(&collist, ", ");
2244 write_comma_collist = true;
2246 appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
2250 if (table_perm || column_perm)
2252 if (slot->tts_isnull[i])
2259 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
2260 &foutoid, &typisvarlena);
2261 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2265 appendStringInfoString(&buf, ", ");
2269 /* truncate if needed */
2270 vallen = strlen(val);
2271 if (vallen <= maxfieldlen)
2272 appendStringInfoString(&buf, val);
2275 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2276 appendBinaryStringInfo(&buf, val, vallen);
2277 appendStringInfoString(&buf, "...");
2282 /* If we end up with zero columns being returned, then return NULL. */
2286 appendStringInfoChar(&buf, ')');
2290 appendStringInfoString(&collist, ") = ");
2291 appendStringInfoString(&collist, buf.data);
2293 return collist.data;
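/*
 * Worked example of the strings built above (illustrative, with hypothetical
 * column names): with table-level SELECT the caller gets just the value list,
 *
 *		(42, foo, null)
 *
 * while with SELECT rights on only a subset of the columns the visible subset
 * is keyed by its column list,
 *
 *		(id, name) = (42, foo)
 *
 * Values longer than maxfieldlen bytes are clipped and terminated with "...".
 */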
2301 * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2302 * given ResultRelInfo
2305 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2308 Bitmapset *updatedCols;
2311 * Compute lock mode to use. If columns that are part of the key have not
2312 * been modified, then we can use a weaker lock, allowing for better concurrency.
2315 updatedCols = GetUpdatedColumns(relinfo, estate);
2316 keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2317 INDEX_ATTR_BITMAP_KEY);
2319 if (bms_overlap(keyCols, updatedCols))
2320 return LockTupleExclusive;
2322 return LockTupleNoKeyExclusive;
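/*
 * Illustrative sketch, not part of the original file: a caller that must lock
 * an existing row before updating it (for example the ON CONFLICT DO UPDATE
 * path) is expected to combine this with heap_lock_tuple() roughly as:
 *
 *		LockTupleMode lockmode = ExecUpdateLockMode(estate, resultRelInfo);
 *
 *		test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
 *							   lockmode, LockWaitBlock, false,
 *							   &buffer, &hufd);
 *
 * "test", "buffer" and "hufd" are the caller's locals; this is only a sketch
 * of how the computed lock mode is consumed.
 */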
2326 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2328 * If no such struct exists, either return NULL or throw an error, depending on missing_ok
2331 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2335 foreach(lc, estate->es_rowMarks)
2337 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2339 if (erm->rti == rti)
2343 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2348 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2350 * Inputs are the underlying ExecRowMark struct and the targetlist of the
2351 * input plan node (not planstate node!). We need the latter to find out
2352 * the column numbers of the resjunk columns.
2355 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2357 ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2360 aerm->rowmark = erm;
2362 /* Look up the resjunk columns associated with this rowmark */
2363 if (erm->markType != ROW_MARK_COPY)
2365 /* need ctid for all methods other than COPY */
2366 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2367 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2369 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2370 elog(ERROR, "could not find junk %s column", resname);
2374 /* need wholerow if COPY */
2375 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2376 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2378 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2379 elog(ERROR, "could not find junk %s column", resname);
2382 /* if child rel, need tableoid */
2383 if (erm->rti != erm->prti)
2385 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2386 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2388 if (!AttributeNumberIsValid(aerm->toidAttNo))
2389 elog(ERROR, "could not find junk %s column", resname);
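/*
 * Illustrative sketch, not part of the original file: node initialization
 * code (for example ExecInitLockRows() or ExecInitModifyTable()) is expected
 * to pair ExecFindRowMark() and ExecBuildAuxRowMark() per PlanRowMark,
 * roughly:
 *
 *		ExecRowMark *erm = ExecFindRowMark(estate, rc->rti, false);
 *		ExecAuxRowMark *aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
 *
 * where "rc" is the PlanRowMark being processed and "outerPlan" is the node's
 * input plan; both names are caller conventions, not part of this API.
 */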
2397 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2398 * process the updated version under READ COMMITTED rules.
2400 * See backend/executor/README for some info about how this works.
2405 * Check a modified tuple to see if we want to process its updated version
2406 * under READ COMMITTED rules.
2408 * estate - outer executor state data
2409 * epqstate - state for EvalPlanQual rechecking
2410 * relation - table containing tuple
2411 * rti - rangetable index of table containing tuple
2412 * lockmode - requested tuple lock mode
2413 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2414 * priorXmax - t_xmax from the outdated tuple
2416 * *tid is also an output parameter: it's modified to hold the TID of the
2417 * latest version of the tuple (note this may be changed even on failure)
2419 * Returns a slot containing the new candidate update/delete tuple, or
2420 * NULL if we determine we shouldn't process the row.
2422 * Note: properly, lockmode should be declared as enum LockTupleMode,
2423 * but we use "int" to avoid having to include heapam.h in executor.h.
2426 EvalPlanQual(EState *estate, EPQState *epqstate,
2427 Relation relation, Index rti, int lockmode,
2428 ItemPointer tid, TransactionId priorXmax)
2430 TupleTableSlot *slot;
2431 HeapTuple copyTuple;
2436 * Get and lock the updated version of the row; if that fails, return NULL.
2438 copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2441 if (copyTuple == NULL)
2445 * For UPDATE/DELETE we have to return the tid of the actual row we're executing EPQ for.
2448 *tid = copyTuple->t_self;
2451 * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2453 EvalPlanQualBegin(epqstate, estate);
2456 * Free old test tuple, if any, and store new tuple where relation's scan node will see it.
2459 EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2462 * Fetch any non-locked source rows
2464 EvalPlanQualFetchRowMarks(epqstate);
2467 * Run the EPQ query. We assume it will return at most one tuple.
2469 slot = EvalPlanQualNext(epqstate);
2472 * If we got a tuple, force the slot to materialize the tuple so that it
2473 * is not dependent on any local state in the EPQ query (in particular,
2474 * it's highly likely that the slot contains references to any pass-by-ref
2475 * datums that may be present in copyTuple). As with the next step, this
2476 * is to guard against early re-use of the EPQ query.
2478 if (!TupIsNull(slot))
2479 (void) ExecMaterializeSlot(slot);
2482 * Clear out the test tuple. This is needed in case the EPQ query is
2483 * re-used to test a tuple for a different relation. (Not clear that can
2484 * really happen, but let's be safe.)
2486 EvalPlanQualSetTuple(epqstate, rti, NULL);
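/*
 * Illustrative sketch, not part of the original file: under READ COMMITTED,
 * when heap_update()/heap_delete() reports HeapTupleUpdated, the ModifyTable
 * code is expected to recheck the new row version roughly like this:
 *
 *		epqslot = EvalPlanQual(estate, epqstate,
 *							   resultRelationDesc,
 *							   resultRelInfo->ri_RangeTableIndex,
 *							   lockmode, &hufd.ctid, hufd.xmax);
 *		if (TupIsNull(epqslot))
 *			return NULL;
 *			(row no longer satisfies the quals, so it is skipped)
 *		(otherwise redo the update/delete using the values in epqslot)
 *
 * "hufd" is the HeapUpdateFailureData returned by the heap call; the control
 * flow around this snippet belongs to nodeModifyTable.c.
 */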
2492 * Fetch a copy of the newest version of an outdated tuple
2494 * estate - executor state data
2495 * relation - table containing tuple
2496 * lockmode - requested tuple lock mode
2497 * wait_policy - requested lock wait policy
2498 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2499 * priorXmax - t_xmax from the outdated tuple
2501 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2502 * that there is no newest version (ie, the row was deleted, not updated).
2503 * We also return NULL if the tuple is locked and the wait policy is to skip such tuples.
2506 * If successful, we have locked the newest tuple version, so caller does not
2507 * need to worry about it changing anymore.
2509 * Note: properly, lockmode should be declared as enum LockTupleMode,
2510 * but we use "int" to avoid having to include heapam.h in executor.h.
2513 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2514 LockWaitPolicy wait_policy,
2515 ItemPointer tid, TransactionId priorXmax)
2517 HeapTuple copyTuple = NULL;
2518 HeapTupleData tuple;
2519 SnapshotData SnapshotDirty;
2522 * fetch target tuple
2524 * Loop here to deal with updated or busy tuples
2526 InitDirtySnapshot(SnapshotDirty);
2527 tuple.t_self = *tid;
2532 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2535 HeapUpdateFailureData hufd;
2538 * If xmin isn't what we're expecting, the slot must have been
2539 * recycled and reused for an unrelated tuple. This implies that
2540 * the latest version of the row was deleted, so we need do
2541 * nothing. (Should be safe to examine xmin without getting
2542 * buffer's content lock. We assume reading a TransactionId to be
2543 * atomic, and Xmin never changes in an existing tuple, except to
2544 * invalid or frozen, and neither of those can match priorXmax.)
2546 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2549 ReleaseBuffer(buffer);
2553 /* otherwise xmin should not be dirty... */
2554 if (TransactionIdIsValid(SnapshotDirty.xmin))
2555 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2558 * If tuple is being updated by another transaction then we have to
2559 * wait for its commit/abort, or die trying.
2561 if (TransactionIdIsValid(SnapshotDirty.xmax))
2563 ReleaseBuffer(buffer);
2564 switch (wait_policy)
2567 XactLockTableWait(SnapshotDirty.xmax,
2568 relation, &tuple.t_self,
2572 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2573 return NULL; /* skip instead of waiting */
2576 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2578 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2579 errmsg("could not obtain lock on row in relation \"%s\"",
2580 RelationGetRelationName(relation))));
2583 continue; /* loop back to repeat heap_fetch */
2587 * If tuple was inserted by our own transaction, we have to check
2588 * cmin against es_output_cid: cmin >= current CID means our
2589 * command cannot see the tuple, so we should ignore it. Otherwise
2590 * heap_lock_tuple() will throw an error, and so would any later
2591 * attempt to update or delete the tuple. (We need not check cmax
2592 * because HeapTupleSatisfiesDirty will consider a tuple deleted
2593 * by our transaction dead, regardless of cmax.) We just checked
2594 * that priorXmax == xmin, so we can test that variable instead of
2595 * doing HeapTupleHeaderGetXmin again.
2597 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2598 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2600 ReleaseBuffer(buffer);
2605 * This is a live tuple, so now try to lock it.
2607 test = heap_lock_tuple(relation, &tuple,
2608 estate->es_output_cid,
2609 lockmode, wait_policy,
2610 false, &buffer, &hufd);
2611 /* We now have two pins on the buffer, get rid of one */
2612 ReleaseBuffer(buffer);
2616 case HeapTupleSelfUpdated:
2619 * The target tuple was already updated or deleted by the
2620 * current command, or by a later command in the current
2621 * transaction. We *must* ignore the tuple in the former
2622 * case, so as to avoid the "Halloween problem" of
2623 * repeated update attempts. In the latter case it might
2624 * be sensible to fetch the updated tuple instead, but
2625 * doing so would require changing heap_update and
2626 * heap_delete to not complain about updating "invisible"
2627 * tuples, which seems pretty scary (heap_lock_tuple will
2628 * not complain, but few callers expect
2629 * HeapTupleInvisible, and we're not one of them). So for
2630 * now, treat the tuple as deleted and do not process.
2632 ReleaseBuffer(buffer);
2635 case HeapTupleMayBeUpdated:
2636 /* successfully locked */
2639 case HeapTupleUpdated:
2640 ReleaseBuffer(buffer);
2641 if (IsolationUsesXactSnapshot())
2643 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2644 errmsg("could not serialize access due to concurrent update")));
2646 /* Should not encounter speculative tuple on recheck */
2647 Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2648 if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2650 /* it was updated, so look at the updated version */
2651 tuple.t_self = hufd.ctid;
2652 /* updated row should have xmin matching this xmax */
2653 priorXmax = hufd.xmax;
2656 /* tuple was deleted, so give up */
2659 case HeapTupleWouldBlock:
2660 ReleaseBuffer(buffer);
2663 case HeapTupleInvisible:
2664 elog(ERROR, "attempted to lock invisible tuple");
2667 ReleaseBuffer(buffer);
2668 elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2670 return NULL; /* keep compiler quiet */
2674 * We got tuple - now copy it for use by recheck query.
2676 copyTuple = heap_copytuple(&tuple);
2677 ReleaseBuffer(buffer);
2682 * If the referenced slot was actually empty, the latest version of
2683 * the row must have been deleted, so we need do nothing.
2685 if (tuple.t_data == NULL)
2687 ReleaseBuffer(buffer);
2692 * As above, if xmin isn't what we're expecting, do nothing.
2694 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2697 ReleaseBuffer(buffer);
2702 * If we get here, the tuple was found but failed SnapshotDirty.
2703 * Assuming the xmin is either a committed xact or our own xact (as it
2704 * certainly should be if we're trying to modify the tuple), this must
2705 * mean that the row was updated or deleted by either a committed xact
2706 * or our own xact. If it was deleted, we can ignore it; if it was
2707 * updated then chain up to the next version and repeat the whole
2710 * As above, it should be safe to examine xmax and t_ctid without the
2711 * buffer content lock, because they can't be changing.
2713 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2715 /* deleted, so forget about it */
2716 ReleaseBuffer(buffer);
2720 /* updated, so look at the updated row */
2721 tuple.t_self = tuple.t_data->t_ctid;
2722 /* updated row should have xmin matching this xmax */
2723 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2724 ReleaseBuffer(buffer);
2725 /* loop back to fetch next in chain */
2729 * Return the copied tuple
2735 * EvalPlanQualInit -- initialize during creation of a plan state node
2736 * that might need to invoke EPQ processing.
2738 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2739 * with EvalPlanQualSetPlan.
2742 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2743 Plan *subplan, List *auxrowmarks, int epqParam)
2745 /* Mark the EPQ state inactive */
2746 epqstate->estate = NULL;
2747 epqstate->planstate = NULL;
2748 epqstate->origslot = NULL;
2749 /* ... and remember data that EvalPlanQualBegin will need */
2750 epqstate->plan = subplan;
2751 epqstate->arowMarks = auxrowmarks;
2752 epqstate->epqParam = epqParam;
2756 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2758 * We need this so that ModifyTable can deal with multiple subplans.
2761 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2763 /* If we have a live EPQ query, shut it down */
2764 EvalPlanQualEnd(epqstate);
2765 /* And set/change the plan pointer */
2766 epqstate->plan = subplan;
2767 /* The rowmarks depend on the plan, too */
2768 epqstate->arowMarks = auxrowmarks;
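/*
 * Illustrative sketch, not part of the original file: ExecInitModifyTable()
 * is expected to create the EPQ state with placeholder plan data and install
 * the real subplan later through EvalPlanQualSetPlan(), roughly:
 *
 *		EvalPlanQualInit(&mtstate->mt_epqstate, estate,
 *						 NULL, NIL, node->epqParam);
 *		...
 *		EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplanstate->plan,
 *							mtstate->mt_arowmarks[whichplan]);
 *
 * The mt_* field names follow ModifyTableState conventions and are shown only
 * to make the calling pattern concrete.
 */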
2772 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2774 * NB: passed tuple must be palloc'd; it may get freed later
2777 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2779 EState *estate = epqstate->estate;
2784 * free old test tuple, if any, and store new tuple where relation's scan node will see it
2787 if (estate->es_epqTuple[rti - 1] != NULL)
2788 heap_freetuple(estate->es_epqTuple[rti - 1]);
2789 estate->es_epqTuple[rti - 1] = tuple;
2790 estate->es_epqTupleSet[rti - 1] = true;
2794 * Fetch back the current test tuple (if any) for the specified RTI
2797 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2799 EState *estate = epqstate->estate;
2803 return estate->es_epqTuple[rti - 1];
2807 * Fetch the current row values for any non-locked relations that need
2808 * to be scanned by an EvalPlanQual operation. origslot must have been set
2809 * to contain the current result row (top-level row) that we need to recheck.
2812 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2816 Assert(epqstate->origslot != NULL);
2818 foreach(l, epqstate->arowMarks)
2820 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2821 ExecRowMark *erm = aerm->rowmark;
2824 HeapTupleData tuple;
2826 if (RowMarkRequiresRowShareLock(erm->markType))
2827 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2829 /* clear any leftover test tuple for this rel */
2830 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2832 /* if child rel, must check whether it produced this row */
2833 if (erm->rti != erm->prti)
2837 datum = ExecGetJunkAttribute(epqstate->origslot,
2840 /* non-locked rels could be on the inside of outer joins */
2843 tableoid = DatumGetObjectId(datum);
2845 Assert(OidIsValid(erm->relid));
2846 if (tableoid != erm->relid)
2848 /* this child is inactive right now */
2853 if (erm->markType == ROW_MARK_REFERENCE)
2855 HeapTuple copyTuple;
2857 Assert(erm->relation != NULL);
2859 /* fetch the tuple's ctid */
2860 datum = ExecGetJunkAttribute(epqstate->origslot,
2863 /* non-locked rels could be on the inside of outer joins */
2867 /* fetch requests on foreign tables must be passed to their FDW */
2868 if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2870 FdwRoutine *fdwroutine;
2871 bool updated = false;
2873 fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2874 /* this should have been checked already, but let's be safe */
2875 if (fdwroutine->RefetchForeignRow == NULL)
2877 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2878 errmsg("cannot lock rows in foreign table \"%s\"",
2879 RelationGetRelationName(erm->relation))));
2880 copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
2884 if (copyTuple == NULL)
2885 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2888 * Ideally we'd insist on updated == false here, but that
2889 * assumes that FDWs can track that exactly, which they might
2890 * not be able to. So just ignore the flag.
2895 /* ordinary table, fetch the tuple */
2898 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2899 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2901 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2903 /* successful, copy tuple */
2904 copyTuple = heap_copytuple(&tuple);
2905 ReleaseBuffer(buffer);
2909 EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
2915 Assert(erm->markType == ROW_MARK_COPY);
2917 /* fetch the whole-row Var for the relation */
2918 datum = ExecGetJunkAttribute(epqstate->origslot,
2921 /* non-locked rels could be on the inside of outer joins */
2924 td = DatumGetHeapTupleHeader(datum);
2926 /* build a temporary HeapTuple control structure */
2927 tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2929 /* relation might be a foreign table, if so provide tableoid */
2930 tuple.t_tableOid = erm->relid;
2931 /* also copy t_ctid in case there's valid data there */
2932 tuple.t_self = td->t_ctid;
2934 /* copy and store tuple */
2935 EvalPlanQualSetTuple(epqstate, erm->rti,
2936 heap_copytuple(&tuple));
2942 * Fetch the next row (if any) from EvalPlanQual testing
2944 * (In practice, there should never be more than one row...)
2947 EvalPlanQualNext(EPQState *epqstate)
2949 MemoryContext oldcontext;
2950 TupleTableSlot *slot;
2952 oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2953 slot = ExecProcNode(epqstate->planstate);
2954 MemoryContextSwitchTo(oldcontext);
2960 * Initialize or reset an EvalPlanQual state tree
2963 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2965 EState *estate = epqstate->estate;
2969 /* First time through, so create a child EState */
2970 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2975 * We already have a suitable child EPQ tree, so just reset it.
2977 int rtsize = list_length(parentestate->es_range_table);
2978 PlanState *planstate = epqstate->planstate;
2980 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2982 /* Recopy current values of parent parameters */
2983 if (parentestate->es_plannedstmt->nParamExec > 0)
2985 int i = parentestate->es_plannedstmt->nParamExec;
2989 /* copy value if any, but not execPlan link */
2990 estate->es_param_exec_vals[i].value =
2991 parentestate->es_param_exec_vals[i].value;
2992 estate->es_param_exec_vals[i].isnull =
2993 parentestate->es_param_exec_vals[i].isnull;
2998 * Mark child plan tree as needing rescan at all scan nodes. The
2999 * first ExecProcNode will take care of actually doing the rescan.
3001 planstate->chgParam = bms_add_member(planstate->chgParam,
3002 epqstate->epqParam);
3007 * Start execution of an EvalPlanQual plan tree.
3009 * This is a cut-down version of ExecutorStart(): we copy some state from
3010 * the top-level estate rather than initializing it fresh.
3013 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
3017 MemoryContext oldcontext;
3020 rtsize = list_length(parentestate->es_range_table);
3022 epqstate->estate = estate = CreateExecutorState();
3024 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3027 * Child EPQ EStates share the parent's copy of unchanging state such as
3028 * the snapshot, rangetable, result-rel info, and external Param info.
3029 * They need their own copies of local state, including a tuple table,
3030 * es_param_exec_vals, etc.
3032 * The ResultRelInfo array management is trickier than it looks. We
3033 * create a fresh array for the child but copy all the content from the
3034 * parent. This is because it's okay for the child to share any
3035 * per-relation state the parent has already created --- but if the child
3036 * sets up any ResultRelInfo fields, such as its own junkfilter, that
3037 * state must *not* propagate back to the parent. (For one thing, the
3038 * pointed-to data is in a memory context that won't last long enough.)
3040 estate->es_direction = ForwardScanDirection;
3041 estate->es_snapshot = parentestate->es_snapshot;
3042 estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
3043 estate->es_range_table = parentestate->es_range_table;
3044 estate->es_plannedstmt = parentestate->es_plannedstmt;
3045 estate->es_junkFilter = parentestate->es_junkFilter;
3046 estate->es_output_cid = parentestate->es_output_cid;
3047 if (parentestate->es_num_result_relations > 0)
3049 int numResultRelations = parentestate->es_num_result_relations;
3050 ResultRelInfo *resultRelInfos;
3052 resultRelInfos = (ResultRelInfo *)
3053 palloc(numResultRelations * sizeof(ResultRelInfo));
3054 memcpy(resultRelInfos, parentestate->es_result_relations,
3055 numResultRelations * sizeof(ResultRelInfo));
3056 estate->es_result_relations = resultRelInfos;
3057 estate->es_num_result_relations = numResultRelations;
3059 /* es_result_relation_info must NOT be copied */
3060 /* es_trig_target_relations must NOT be copied */
3061 estate->es_rowMarks = parentestate->es_rowMarks;
3062 estate->es_top_eflags = parentestate->es_top_eflags;
3063 estate->es_instrument = parentestate->es_instrument;
3064 /* es_auxmodifytables must NOT be copied */
3067 * The external param list is simply shared from parent. The internal
3068 * param workspace has to be local state, but we copy the initial values
3069 * from the parent, so as to have access to any param values that were
3070 * already set from other parts of the parent's plan tree.
3072 estate->es_param_list_info = parentestate->es_param_list_info;
3073 if (parentestate->es_plannedstmt->nParamExec > 0)
3075 int i = parentestate->es_plannedstmt->nParamExec;
3077 estate->es_param_exec_vals = (ParamExecData *)
3078 palloc0(i * sizeof(ParamExecData));
3081 /* copy value if any, but not execPlan link */
3082 estate->es_param_exec_vals[i].value =
3083 parentestate->es_param_exec_vals[i].value;
3084 estate->es_param_exec_vals[i].isnull =
3085 parentestate->es_param_exec_vals[i].isnull;
3090 * Each EState must have its own es_epqScanDone state, but if we have
3091 * nested EPQ checks they should share es_epqTuple arrays. This allows
3092 * sub-rechecks to inherit the values being examined by an outer recheck.
3094 estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
3095 if (parentestate->es_epqTuple != NULL)
3097 estate->es_epqTuple = parentestate->es_epqTuple;
3098 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
3102 estate->es_epqTuple = (HeapTuple *)
3103 palloc0(rtsize * sizeof(HeapTuple));
3104 estate->es_epqTupleSet = (bool *)
3105 palloc0(rtsize * sizeof(bool));
3109 * Each estate also has its own tuple table.
3111 estate->es_tupleTable = NIL;
3114 * Initialize private state information for each SubPlan. We must do this
3115 * before running ExecInitNode on the main query tree, since
3116 * ExecInitSubPlan expects to be able to find these entries. Some of the
3117 * SubPlans might not be used in the part of the plan tree we intend to
3118 * run, but since it's not easy to tell which, we just initialize them all.
3121 Assert(estate->es_subplanstates == NIL);
3122 foreach(l, parentestate->es_plannedstmt->subplans)
3124 Plan *subplan = (Plan *) lfirst(l);
3125 PlanState *subplanstate;
3127 subplanstate = ExecInitNode(subplan, estate, 0);
3128 estate->es_subplanstates = lappend(estate->es_subplanstates,
3133 * Initialize the private state information for all the nodes in the part
3134 * of the plan tree we need to run. This opens files, allocates storage
3135 * and leaves us ready to start processing tuples.
3137 epqstate->planstate = ExecInitNode(planTree, estate, 0);
3139 MemoryContextSwitchTo(oldcontext);
3143 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3144 * or if we are done with the current EPQ child.
3146 * This is a cut-down version of ExecutorEnd(); basically we want to do most
3147 * of the normal cleanup, but *not* close result relations (which we are
3148 * just sharing from the outer query). We do, however, have to close any
3149 * trigger target relations that got opened, since those are not shared.
3150 * (There probably shouldn't be any of the latter, but just in case...)
3153 EvalPlanQualEnd(EPQState *epqstate)
3155 EState *estate = epqstate->estate;
3156 MemoryContext oldcontext;
3160 return; /* idle, so nothing to do */
3162 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3164 ExecEndNode(epqstate->planstate);
3166 foreach(l, estate->es_subplanstates)
3168 PlanState *subplanstate = (PlanState *) lfirst(l);
3170 ExecEndNode(subplanstate);
3173 /* throw away the per-estate tuple table */
3174 ExecResetTupleTable(estate->es_tupleTable, false);
3176 /* close any trigger target relations attached to this EState */
3177 ExecCleanUpTriggerState(estate);
3179 MemoryContextSwitchTo(oldcontext);
3181 FreeExecutorState(estate);
3183 /* Mark EPQState idle */
3184 epqstate->estate = NULL;
3185 epqstate->planstate = NULL;
3186 epqstate->origslot = NULL;
3190 * ExecSetupPartitionTupleRouting - set up information needed during
3191 * tuple routing for partitioned tables
3194 * 'pd' receives an array of PartitionDispatch objects with one entry for
3195 * every partitioned table in the partition tree
3196 * 'partitions' receives an array of ResultRelInfo objects with one entry for
3197 * every leaf partition in the partition tree
3198 * 'tup_conv_maps' receives an array of TupleConversionMap objects with one
3199 * entry for every leaf partition (required to convert input tuple based
3200 * on the root table's rowtype to a leaf partition's rowtype after tuple routing is done)
3202 * 'partition_tuple_slot' receives a standalone TupleTableSlot to be used
3203 * to manipulate any given leaf partition's rowtype after that partition
3204 * is chosen by tuple-routing.
3205 * 'num_parted' receives the number of partitioned tables in the partition
3206 * tree (= the number of entries in the 'pd' output array)
3207 * 'num_partitions' receives the number of leaf partitions in the partition
3208 * tree (= the number of entries in the 'partitions' and 'tup_conv_maps' output arrays)
3211 * Note that all the relations in the partition tree are locked using the
3212 * RowExclusiveLock mode upon return from this function.
3215 ExecSetupPartitionTupleRouting(Relation rel,
3216 PartitionDispatch **pd,
3217 ResultRelInfo **partitions,
3218 TupleConversionMap ***tup_conv_maps,
3219 TupleTableSlot **partition_tuple_slot,
3220 int *num_parted, int *num_partitions)
3222 TupleDesc tupDesc = RelationGetDescr(rel);
3226 ResultRelInfo *leaf_part_rri;
3228 /* Get the tuple-routing information and lock partitions */
3229 *pd = RelationGetPartitionDispatchInfo(rel, RowExclusiveLock, num_parted,
3231 *num_partitions = list_length(leaf_parts);
3232 *partitions = (ResultRelInfo *) palloc(*num_partitions *
3233 sizeof(ResultRelInfo));
3234 *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions *
3235 sizeof(TupleConversionMap *));
3238 * Initialize an empty slot that will be used to manipulate tuples of any
3239 * given partition's rowtype. It is attached to the caller-specified node
3240 * (such as ModifyTableState) and released when the node finishes processing.
3243 *partition_tuple_slot = MakeTupleTableSlot();
3245 leaf_part_rri = *partitions;
3247 foreach(cell, leaf_parts)
3250 TupleDesc part_tupdesc;
3253 * We locked all the partitions above including the leaf partitions.
3254 * Note that each of the relations in *partitions is eventually
3255 * closed by the caller.
3257 partrel = heap_open(lfirst_oid(cell), NoLock);
3258 part_tupdesc = RelationGetDescr(partrel);
3261 * Verify result relation is a valid target for the current operation.
3263 CheckValidResultRel(partrel, CMD_INSERT);
3266 * Save a tuple conversion map to convert a tuple routed to this
3267 * partition from the parent's type to the partition's.
3269 (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc,
3270 gettext_noop("could not convert row type"));
3272 InitResultRelInfo(leaf_part_rri,
3279 * Open partition indices (remember we do not support ON CONFLICT in
3280 * case of partitioned tables, so we do not need support information
3281 * for speculative insertion)
3283 if (leaf_part_rri->ri_RelationDesc->rd_rel->relhasindex &&
3284 leaf_part_rri->ri_IndexRelationDescs == NULL)
3285 ExecOpenIndices(leaf_part_rri, false);
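/*
 * Illustrative sketch, not part of the original file: callers that route
 * inserted tuples (for example ExecInitModifyTable() or CopyFrom()) are
 * expected to set up the routing state once, roughly:
 *
 *		PartitionDispatch *pd;
 *		ResultRelInfo *partitions;
 *		TupleConversionMap **tup_conv_maps;
 *		TupleTableSlot *partition_tuple_slot;
 *		int			num_parted,
 *					num_partitions;
 *
 *		ExecSetupPartitionTupleRouting(rel, &pd, &partitions, &tup_conv_maps,
 *									   &partition_tuple_slot,
 *									   &num_parted, &num_partitions);
 *
 * and to retain the arrays for per-tuple use by ExecFindPartition() below.
 */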
3293 * ExecFindPartition -- Find a leaf partition in the partition tree rooted
3294 * at parent, for the heap tuple contained in *slot
3296 * estate must be non-NULL; we'll need it to compute any expressions in the partition key(s).
3299 * If no leaf partition is found, this routine errors out with the appropriate
3300 * error message, else it returns the leaf partition sequence number returned
3301 * by get_partition_for_tuple() unchanged.
3304 ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
3305 TupleTableSlot *slot, EState *estate)
3308 PartitionDispatchData *failed_at;
3309 TupleTableSlot *failed_slot;
3312 * First check the root table's partition constraint, if any. No point in
3313 * routing the tuple if it doesn't belong in the root table itself.
3315 if (resultRelInfo->ri_PartitionCheck)
3316 ExecPartitionCheck(resultRelInfo, slot, estate);
3318 result = get_partition_for_tuple(pd, slot, estate,
3319 &failed_at, &failed_slot);
3322 Relation failed_rel;
3323 Datum key_values[PARTITION_MAX_KEYS];
3324 bool key_isnull[PARTITION_MAX_KEYS];
3326 ExprContext *ecxt = GetPerTupleExprContext(estate);
3328 failed_rel = failed_at->reldesc;
3329 ecxt->ecxt_scantuple = failed_slot;
3330 FormPartitionKeyDatum(failed_at, failed_slot, estate,
3331 key_values, key_isnull);
3332 val_desc = ExecBuildSlotPartitionKeyDescription(failed_rel,
3336 Assert(OidIsValid(RelationGetRelid(failed_rel)));
3338 (errcode(ERRCODE_CHECK_VIOLATION),
3339 errmsg("no partition of relation \"%s\" found for row",
3340 RelationGetRelationName(failed_rel)),
3341 val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
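/*
 * Illustrative sketch, not part of the original file: per-tuple routing in a
 * caller such as ExecInsert() is expected to look roughly like this, reusing
 * the arrays built by ExecSetupPartitionTupleRouting():
 *
 *		leaf_part_index = ExecFindPartition(resultRelInfo, pd, slot, estate);
 *		leaf_rri = &partitions[leaf_part_index];
 *		map = tup_conv_maps[leaf_part_index];
 *		if (map != NULL)
 *			tuple = do_convert_tuple(tuple, map);
 *
 * Variable names mirror the output parameters described above; the real
 * callers must also handle the slot bookkeeping this sketch omits.
 */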
3348 * ExecBuildSlotPartitionKeyDescription
3350 * This works very much like BuildIndexValueDescription() and is currently
3351 * used for building error messages when ExecFindPartition() fails to find
3352 * a partition for a row.
3355 ExecBuildSlotPartitionKeyDescription(Relation rel,
3361 PartitionKey key = RelationGetPartitionKey(rel);
3362 int partnatts = get_partition_natts(key);
3364 Oid relid = RelationGetRelid(rel);
3365 AclResult aclresult;
3367 if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
3370 /* If the user has table-level access, just go build the description. */
3371 aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
3372 if (aclresult != ACLCHECK_OK)
3375 * Step through the columns of the partition key and make sure the
3376 * user has SELECT rights on all of them.
3378 for (i = 0; i < partnatts; i++)
3380 AttrNumber attnum = get_partition_col_attnum(key, i);
3383 * If this partition key column is an expression, we return no
3384 * detail rather than try to figure out what column(s) the
3385 * expression includes and if the user has SELECT rights on them.
3387 if (attnum == InvalidAttrNumber ||
3388 pg_attribute_aclcheck(relid, attnum, GetUserId(),
3389 ACL_SELECT) != ACLCHECK_OK)
3394 initStringInfo(&buf);
3395 appendStringInfo(&buf, "(%s) = (",
3396 pg_get_partkeydef_columns(relid, true));
3398 for (i = 0; i < partnatts; i++)
3410 getTypeOutputInfo(get_partition_col_typid(key, i),
3411 &foutoid, &typisvarlena);
3412 val = OidOutputFunctionCall(foutoid, values[i]);
3416 appendStringInfoString(&buf, ", ");
3418 /* truncate if needed */
3419 vallen = strlen(val);
3420 if (vallen <= maxfieldlen)
3421 appendStringInfoString(&buf, val);
3424 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
3425 appendBinaryStringInfo(&buf, val, vallen);
3426 appendStringInfoString(&buf, "...");
3430 appendStringInfoChar(&buf, ')');