/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd().
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan, and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.334 2009/10/26 02:26:29 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"

/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
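
/*
 * Hedged usage sketch (not part of this module): a loadable plugin that
 * wants control at executor start would normally save the prior hook value
 * and chain to it, falling back to standard_ExecutorStart().  The names
 * prev_ExecutorStart and my_ExecutorStart below are hypothetical.
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 */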

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                  Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */

/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan.
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not entirely
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    if (ExecutorStart_hook)
        (*ExecutorStart_hook) (queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in external parameters, if any, from queryDesc; and allocate
     * workspace for internal parameters
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:
            /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
            if (queryDesc->plannedstmt->intoClause != NULL ||
                queryDesc->plannedstmt->rowMarks != NIL)
                estate->es_output_cid = GetCurrentCommandId(true);
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_instrument = queryDesc->doInstrument;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}
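
/*
 * Illustrative call sequence (a sketch, not code from this module): a
 * caller such as a portal runs a planned query roughly like
 *
 *		queryDesc = CreateQueryDesc(plannedstmt, sourceText, snapshot,
 *									InvalidSnapshot, dest, params, false);
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *		FreeQueryDesc(queryDesc);
 *
 * The CreateQueryDesc argument list shown is from memory and may not match
 * this source tree exactly.
 */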

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module.  It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count);
    else
        standard_ExecutorRun(queryDesc, direction, count);
}
void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of ExecutorRun overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, estate->es_processed);

    MemoryContextSwitchTo(oldcontext);
}
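
/*
 * Count semantics, illustrated (a sketch, not code from this file): a
 * portal fetching in batches may call ExecutorRun repeatedly on the same
 * QueryDesc, since all execution state lives in the EState between calls.
 *
 *		ExecutorRun(queryDesc, ForwardScanDirection, 10L);	-- first 10 tuples
 *		ExecutorRun(queryDesc, ForwardScanDirection, 10L);	-- next 10
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);	-- all the rest
 */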

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorEnd is called.  Such a plugin would
 * normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    if (ExecutorEnd_hook)
        (*ExecutorEnd_hook) (queryDesc);
    else
        standard_ExecutorEnd(queryDesc);
}
void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
    queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate, NULL);

    MemoryContextSwitchTo(oldcontext);
}
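
/*
 * Illustrative use (hypothetical caller): re-reading a finished scan from
 * the start, as a held cursor might.
 *
 *		ExecutorRewind(queryDesc);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 */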

/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
    }
}

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    AclMode     relPerms;
    AclMode     remainingPerms;
    Oid         relOid;
    Oid         userid;
    Bitmapset  *tmpset;
    int         col;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we
     * could call it once in ExecCheckRTPerms and pass the userid down from
     * there.  But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, but some of the bits can be
     * satisfied from column-level rather than relation-level permissions.
     * First, remove any bits that are satisfied by relation permissions.
     */
    relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
    remainingPerms = requiredPerms & ~relPerms;
    if (remainingPerms != 0)
    {
        /*
         * If we lack any permissions that exist only as relation
         * permissions, we can fail straight away.
         */
        if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
            aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                           get_rel_name(relOid));

        /*
         * Check to see if we have the needed privileges at column level.
         *
         * Note: failures just report a table-level error; it would be nicer
         * to report a column-level error if we have some but not all of the
         * column privileges.
         */
        if (remainingPerms & ACL_SELECT)
        {
            /*
             * When the query doesn't explicitly reference any columns (for
             * example, SELECT COUNT(*) FROM table), allow the query if we
             * have SELECT on any column of the rel, as per SQL spec.
             */
            if (bms_is_empty(rte->selectedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->selectedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* Whole-row reference, must have priv on all cols */
                    if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                  ACLMASK_ALL) != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }

        /*
         * Basically the same for the mod columns, with either INSERT or
         * UPDATE privilege as specified by remainingPerms.
         */
        remainingPerms &= ~ACL_SELECT;
        if (remainingPerms != 0)
        {
            /*
             * When the query doesn't explicitly change any columns, allow
             * the query if we have permission on any column of the rel.
             * This is to handle SELECT FOR UPDATE as well as possible
             * corner cases in INSERT and UPDATE.
             */
            if (bms_is_empty(rte->modifiedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->modifiedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* whole-row reference can't happen here */
                    elog(ERROR, "whole-row update is not implemented");
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }
    }
}
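
/*
 * Worked example (hypothetical query): for "UPDATE t SET a = 1 WHERE b > 0"
 * the planner marks t's RTE with requiredPerms = ACL_UPDATE | ACL_SELECT,
 * puts b's attribute number (offset by FirstLowInvalidHeapAttributeNumber)
 * in selectedCols, and a's in modifiedCols.  A user holding only the
 * column-level privileges SELECT(b) and UPDATE(a) therefore passes this
 * check even without any table-level privilege.
 */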

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?
     */
    if (plannedstmt->intoClause != NULL)
        goto fail;

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        goto fail;
    }

    return;

fail:
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("transaction is read-only")));
}

/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;
    estate->es_plannedstmt = plannedstmt;

    /*
     * initialize result relation stuff, and open/lock the result rels.
     *
     * We must do this before initializing the plan tree, else we might
     * try to do a lock upgrade if a result rel is also a source rel.
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* es_result_relation_info is NULL except when within ModifyTable */
        estate->es_result_relation_info = NULL;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
     * before we initialize the plan tree, else we'd be risking lock
     * upgrades.  While we are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        Oid         relid;
        Relation    relation;
        ExecRowMark *erm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        switch (rc->markType)
        {
            case ROW_MARK_EXCLUSIVE:
            case ROW_MARK_SHARE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, RowShareLock);
                break;
            case ROW_MARK_REFERENCE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, AccessShareLock);
                break;
            case ROW_MARK_COPY:
                /* there's no real table here ... */
                relation = NULL;
                break;
            default:
                elog(ERROR, "unrecognized markType: %d", rc->markType);
                relation = NULL;    /* keep compiler quiet */
                break;
        }

        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->prti = rc->prti;
        erm->markType = rc->markType;
        erm->noWait = rc->noWait;
        erm->ctidAttNo = rc->ctidAttNo;
        erm->toidAttNo = rc->toidAttNo;
        erm->wholeAttNo = rc->wholeAttNo;
        ItemPointerSetInvalid(&(erm->curCtid));
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Initialize the executor's tuple table to empty.
     */
    estate->es_tupleTable = NIL;
    estate->es_trig_tuple_slot = NULL;

    /* mark EvalPlanQual not active */
    estate->es_epqTuple = NULL;
    estate->es_epqTupleSet = NULL;
    estate->es_epqScanDone = NULL;

    /*
     * Initialize private state information for each SubPlan.  We must do
     * this before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.
         * If it is a parameterless subplan (not initplan), we suggest that
         * it be prepared to handle REWIND efficiently; otherwise there is
         * no need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the
     * query tree.  This opens files, allocates storage and leaves us ready
     * to start processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return.
     * (this is especially important if we are creating a relation with
     * "SELECT INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT queries need a
     * filter if there are any junk attrs in the top-level tlist.
     */
    if (operation == CMD_SELECT)
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        foreach(tlist, plan->targetlist)
        {
            TargetEntry *tle = (TargetEntry *) lfirst(tlist);

            if (tle->resjunk)
            {
                junk_filter_needed = true;
                break;
            }
        }

        if (junk_filter_needed)
        {
            JunkFilter *j;

            j = ExecInitJunkFilter(planstate->plan->targetlist,
                                   tupType->tdhasoid,
                                   ExecInitExtraTupleSlot(estate));
            estate->es_junkFilter = j;

            /* Want to return the cleaned tuple type */
            tupType = j->jf_cleanTupType;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  bool doInstrument)
{
    /*
     * Check valid relkind ... parser and/or planner should have noticed
     * this already, but let's make sure.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change view \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        if (doInstrument)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
        else
            resultRelInfo->ri_TrigInstrument = NULL;
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new
     * index entries for the tuples we add/update.  We need not do this
     * for a DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}

/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when
 * an RI update trigger queues additional triggers on other relations,
 * which will be processed in the context of the outer query.  For
 * efficiency's sake, we want to have a ResultRelInfo for those triggers
 * too; that can avoid repeated re-opening of the relation.  (It also
 * provides a way for EXPLAIN ANALYZE to report the runtimes of such
 * triggers.)  So we make additional ResultRelInfo's as needed, and save
 * them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the
     * trigger event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need
     * any index information in ResultRelInfos used only for triggers, so
     * tell InitResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    InitResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}

/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO, we
 * need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts we
 * are free to choose whether to leave space for OIDs in result tuples (we
 * generally don't want to, but we do if a physical-tlist optimization is
 * possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans
 * of the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    ResultRelInfo *ri = planstate->state->es_result_relation_info;

    if (ri != NULL)
    {
        Relation    rel = ri->ri_RelationDesc;

        if (rel != NULL)
        {
            *hasoids = rel->rd_rel->relhasoids;
            return true;
        }
    }

    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }

    return false;
}
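
/*
 * Typical use, sketched from memory of ExecAssignResultTypeFromTL (so the
 * details are illustrative, not authoritative): a plan node computing its
 * result descriptor lets a forced context override the default choice.
 *
 *		bool	hasoid;
 *
 *		if (!ExecContextForcesOids(planstate, &hasoid))
 *			hasoid = false;		-- given free choice, omit OID space
 *		tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
 */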

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor's tuple table.  Actually we only care about
     * releasing buffer pins and tupdesc refcounts; there's no need to
     * pfree the TupleTableSlots, since the containing memory context
     * is about to go away anyway.
     */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(l);

        if (erm->relation)
            heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples'
 *		tuples, moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    TupleTableSlot *slot;
    long        current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
        slot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(slot))
            break;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * attributes removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is
         * WRONG because that tuple slot has the wrong descriptor.)
         */
        if (estate->es_junkFilter != NULL)
            slot = ExecFilterJunk(estate->es_junkFilter, slot);

        /*
         * If we are supposed to send the tuple somewhere, do so.
         * (In practice, this is probably always the case at this point.)
         */
        if (sendTuples)
            (*dest->receiveSlot) (slot, dest);

        /*
         * Count tuples processed, if this is a SELECT.  (For other operation
         * types, the ModifyTable plan node must count the appropriate
         * events.)
         */
        if (operation == CMD_SELECT)
            (estate->es_processed)++;

        /*
         * check our tuple count.. if we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }
}

/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
             TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    int         ncheck = rel->rd_att->constr->num_check;
    ConstrCheck *check = rel->rd_att->constr->check;
    ExprContext *econtext;
    MemoryContext oldContext;
    List       *qual;
    int         i;

    /*
     * If first time through for this result relation, build expression
     * nodetrees for rel's constraint expressions.  Keep them in the
     * per-query memory context so they'll survive throughout the query.
     */
    if (resultRelInfo->ri_ConstraintExprs == NULL)
    {
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_ConstraintExprs =
            (List **) palloc(ncheck * sizeof(List *));
        for (i = 0; i < ncheck; i++)
        {
            /* ExecQual wants implicit-AND form */
            qual = make_ands_implicit(stringToNode(check[i].ccbin));
            resultRelInfo->ri_ConstraintExprs[i] = (List *)
                ExecPrepareExpr((Expr *) qual, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the constraints */
    for (i = 0; i < ncheck; i++)
    {
        qual = resultRelInfo->ri_ConstraintExprs[i];

        /*
         * NOTE: SQL92 specifies that a NULL result from a constraint
         * expression is not to be treated as a failure.  Therefore, tell
         * ExecQual to return TRUE for NULL.
         */
        if (!ExecQual(qual, econtext, true))
            return check[i].ccname;
    }

    /* NULL result means no error */
    return NULL;
}
void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleConstr *constr = rel->rd_att->constr;

    Assert(constr);

    if (constr->has_not_null)
    {
        int         natts = rel->rd_att->natts;
        int         attrChk;

        for (attrChk = 1; attrChk <= natts; attrChk++)
        {
            if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
                slot_attisnull(slot, attrChk))
                ereport(ERROR,
                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                         errmsg("null value in column \"%s\" violates not-null constraint",
                                NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
        }
    }

    if (constr->num_check > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(rel), failed)));
    }
}
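
/*
 * Caller sketch (modeled on the INSERT/UPDATE paths; illustrative only):
 * constraints are checked just before the tuple is written, and only when
 * the relation has any, since ExecConstraints assumes rd_att->constr is
 * set.
 *
 *		if (resultRelationDesc->rd_att->constr)
 *			ExecConstraints(resultRelInfo, slot, estate);
 */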

/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */

/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 *	estate - outer executor state data
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
             Relation relation, Index rti,
             ItemPointer tid, TransactionId priorXmax)
{
    TupleTableSlot *slot;
    HeapTuple   copyTuple;

    Assert(rti > 0);

    /*
     * Get and lock the updated version of the row; if fail, return NULL.
     */
    copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
                                  tid, priorXmax);

    if (copyTuple == NULL)
        return NULL;

    /*
     * For UPDATE/DELETE we have to return tid of actual row we're executing
     * EPQ for.
     */
    *tid = copyTuple->t_self;

    /*
     * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
     */
    EvalPlanQualBegin(epqstate, estate);

    /*
     * Free old test tuple, if any, and store new tuple where relation's
     * scan node will see it
     */
    EvalPlanQualSetTuple(epqstate, rti, copyTuple);

    /*
     * Fetch any non-locked source rows
     */
    EvalPlanQualFetchRowMarks(epqstate);

    /*
     * Run the EPQ query.  We assume it will return at most one tuple.
     */
    slot = EvalPlanQualNext(epqstate);

    /*
     * Clear out the test tuple.  This is needed in case the EPQ query
     * is re-used to test a tuple for a different relation.  (Not clear
     * that can really happen, but let's be safe.)
     */
    EvalPlanQualSetTuple(epqstate, rti, NULL);

    return slot;
}
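
/*
 * Caller sketch (cf. the UPDATE path in the ModifyTable node; variable
 * names are illustrative): when heap_update reports HeapTupleUpdated under
 * READ COMMITTED, the new version is rechecked and, if it still satisfies
 * the quals, the operation is retried against it.
 *
 *		slot = EvalPlanQual(estate, epqstate, relation, rti,
 *							&update_ctid, update_xmax);
 *		if (!TupIsNull(slot))
 *		{
 *			*tupleid = update_ctid;
 *			... redo the update using values from slot ...
 *		}
 */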

/*
 * Fetch a copy of the newest version of an outdated tuple
 *
 *	estate - executor state data
 *	relation - table containing tuple
 *	lockmode - requested tuple lock mode
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
 * that there is no newest version (ie, the row was deleted, not updated).
 * If successful, we have locked the newest tuple version, so caller does
 * not need to worry about it changing anymore.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
static HeapTuple
EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
                  ItemPointer tid, TransactionId priorXmax)
{
    HeapTuple   copyTuple = NULL;
    HeapTupleData tuple;
    SnapshotData SnapshotDirty;

    /*
     * fetch target tuple
     *
     * Loop here to deal with updated or busy tuples
     */
    InitDirtySnapshot(SnapshotDirty);
    tuple.t_self = *tid;
    for (;;)
    {
        Buffer      buffer;

        if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
        {
            HTSU_Result test;
            ItemPointerData update_ctid;
            TransactionId update_xmax;

            /*
             * If xmin isn't what we're expecting, the slot must have been
             * recycled and reused for an unrelated tuple.  This implies that
             * the latest version of the row was deleted, so we need do
             * nothing.  (Should be safe to examine xmin without getting
             * buffer's content lock, since xmin never changes in an existing
             * tuple.)
             */
            if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                     priorXmax))
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /* otherwise xmin should not be dirty... */
            if (TransactionIdIsValid(SnapshotDirty.xmin))
                elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

            /*
             * If tuple is being updated by other transaction then we have to
             * wait for its commit/abort.
             */
            if (TransactionIdIsValid(SnapshotDirty.xmax))
            {
                ReleaseBuffer(buffer);
                XactLockTableWait(SnapshotDirty.xmax);
                continue;       /* loop back to repeat heap_fetch */
            }

            /*
             * If tuple was inserted by our own transaction, we have to check
             * cmin against es_output_cid: cmin >= current CID means our
             * command cannot see the tuple, so we should ignore it.  Without
             * this we are open to the "Halloween problem" of indefinitely
             * re-updating the same tuple.  (We need not check cmax because
             * HeapTupleSatisfiesDirty will consider a tuple deleted by our
             * transaction dead, regardless of cmax.)  We just checked that
             * priorXmax == xmin, so we can test that variable instead of
             * doing HeapTupleHeaderGetXmin again.
             */
            if (TransactionIdIsCurrentTransactionId(priorXmax) &&
                HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /*
             * This is a live tuple, so now try to lock it.
             */
            test = heap_lock_tuple(relation, &tuple, &buffer,
                                   &update_ctid, &update_xmax,
                                   estate->es_output_cid,
                                   (LockTupleMode) lockmode, false);
            /* We now have two pins on the buffer, get rid of one */
            ReleaseBuffer(buffer);

            switch (test)
            {
                case HeapTupleSelfUpdated:
                    /* treat it as deleted; do not process */
                    ReleaseBuffer(buffer);
                    return NULL;

                case HeapTupleMayBeUpdated:
                    /* successfully locked */
                    break;

                case HeapTupleUpdated:
                    ReleaseBuffer(buffer);
                    if (IsXactIsoLevelSerializable)
                        ereport(ERROR,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("could not serialize access due to concurrent update")));
                    if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
                    {
                        /* it was updated, so look at the updated version */
                        tuple.t_self = update_ctid;
                        /* updated row should have xmin matching this xmax */
                        priorXmax = update_xmax;
                        continue;
                    }
                    /* tuple was deleted, so give up */
                    return NULL;

                default:
                    ReleaseBuffer(buffer);
                    elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                         test);
                    return NULL;    /* keep compiler quiet */
            }

            /*
             * We got tuple - now copy it for use by recheck query.
             */
            copyTuple = heap_copytuple(&tuple);
            ReleaseBuffer(buffer);
            break;
        }

        /*
         * If the referenced slot was actually empty, the latest version of
         * the row must have been deleted, so we need do nothing.
         */
        if (tuple.t_data == NULL)
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * As above, if xmin isn't what we're expecting, do nothing.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                 priorXmax))
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * If we get here, the tuple was found but failed SnapshotDirty.
         * Assuming the xmin is either a committed xact or our own xact (as it
         * certainly should be if we're trying to modify the tuple), this must
         * mean that the row was updated or deleted by either a committed xact
         * or our own xact.  If it was deleted, we can ignore it; if it was
         * updated then chain up to the next version and repeat the whole
         * process.
         *
         * As above, it should be safe to examine xmax and t_ctid without the
         * buffer content lock, because they can't be changing.
         */
        if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
        {
            /* deleted, so forget about it */
            ReleaseBuffer(buffer);
            return NULL;
        }

        /* updated, so look at the updated row */
        tuple.t_self = tuple.t_data->t_ctid;
        /* updated row should have xmin matching this xmax */
        priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
        ReleaseBuffer(buffer);
        /* loop back to fetch next in chain */
    }

    /*
     * Return the copied tuple
     */
    return copyTuple;
}

/*
 * EvalPlanQualInit -- initialize during creation of a plan state node
 * that might need to invoke EPQ processing.
 *
 * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
 */
void
EvalPlanQualInit(EPQState *epqstate, EState *estate,
                 Plan *subplan, int epqParam)
{
    /* Mark the EPQ state inactive */
    epqstate->estate = NULL;
    epqstate->planstate = NULL;
    epqstate->origslot = NULL;
    /* ... and remember data that EvalPlanQualBegin will need */
    epqstate->plan = subplan;
    epqstate->rowMarks = NIL;
    epqstate->epqParam = epqParam;
}

/*
 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
 *
 * We need this so that ModifyTable can deal with multiple subplans.
 */
void
EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
{
    /* If we have a live EPQ query, shut it down */
    EvalPlanQualEnd(epqstate);
    /* And set/change the plan pointer */
    epqstate->plan = subplan;
}

/*
 * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
 *
 * Currently, only non-locking RowMarks are supported.
 */
void
EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
{
    if (RowMarkRequiresRowShareLock(erm->markType))
        elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
    epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
}

/*
 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
 *
 * NB: passed tuple must be palloc'd; it may get freed later
 */
void
EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
{
    EState     *estate = epqstate->estate;

    Assert(rti > 0);

    /*
     * free old test tuple, if any, and store new tuple where relation's
     * scan node will see it
     */
    if (estate->es_epqTuple[rti - 1] != NULL)
        heap_freetuple(estate->es_epqTuple[rti - 1]);
    estate->es_epqTuple[rti - 1] = tuple;
    estate->es_epqTupleSet[rti - 1] = true;
}

/*
 * Fetch back the current test tuple (if any) for the specified RTI
 */
HeapTuple
EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
{
    EState     *estate = epqstate->estate;

    Assert(rti > 0);

    return estate->es_epqTuple[rti - 1];
}

/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
    ListCell   *l;

    Assert(epqstate->origslot != NULL);

    foreach(l, epqstate->rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(l);
        Datum       datum;
        bool        isNull;
        HeapTupleData tuple;

        /* clear any leftover test tuple for this rel */
        EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

        if (erm->relation)
        {
            Buffer      buffer;

            Assert(erm->markType == ROW_MARK_REFERENCE);

            /* if child rel, must check whether it produced this row */
            if (erm->rti != erm->prti)
            {
                Oid         tableoid;

                datum = ExecGetJunkAttribute(epqstate->origslot,
                                             erm->toidAttNo,
                                             &isNull);
                /* non-locked rels could be on the inside of outer joins */
                if (isNull)
                    continue;
                tableoid = DatumGetObjectId(datum);

                if (tableoid != RelationGetRelid(erm->relation))
                {
                    /* this child is inactive right now */
                    continue;
                }
            }

            /* fetch the tuple's ctid */
            datum = ExecGetJunkAttribute(epqstate->origslot,
                                         erm->ctidAttNo,
                                         &isNull);
            /* non-locked rels could be on the inside of outer joins */
            if (isNull)
                continue;
            tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

            /* okay, fetch the tuple */
            if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
                            false, NULL))
                elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

            /* successful, copy and store tuple */
            EvalPlanQualSetTuple(epqstate, erm->rti,
                                 heap_copytuple(&tuple));
            ReleaseBuffer(buffer);
        }
        else
        {
            HeapTupleHeader td;

            Assert(erm->markType == ROW_MARK_COPY);

            /* fetch the whole-row Var for the relation */
            datum = ExecGetJunkAttribute(epqstate->origslot,
                                         erm->wholeAttNo,
                                         &isNull);
            /* non-locked rels could be on the inside of outer joins */
            if (isNull)
                continue;
            td = DatumGetHeapTupleHeader(datum);

            /* build a temporary HeapTuple control structure */
            tuple.t_len = HeapTupleHeaderGetDatumLength(td);
            ItemPointerSetInvalid(&(tuple.t_self));
            tuple.t_tableOid = InvalidOid;
            tuple.t_data = td;

            /* copy and store tuple */
            EvalPlanQualSetTuple(epqstate, erm->rti,
                                 heap_copytuple(&tuple));
        }
    }
}

/*
 * Fetch the next row (if any) from EvalPlanQual testing
 *
 * (In practice, there should never be more than one row...)
 */
TupleTableSlot *
EvalPlanQualNext(EPQState *epqstate)
{
    MemoryContext oldcontext;
    TupleTableSlot *slot;

    oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
    slot = ExecProcNode(epqstate->planstate);
    MemoryContextSwitchTo(oldcontext);

    return slot;
}

/*
 * Initialize or reset an EvalPlanQual state tree
 */
void
EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
    EState     *estate = epqstate->estate;

    if (estate == NULL)
    {
        /* First time through, so create a child EState */
        EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
    }
    else
    {
        /*
         * We already have a suitable child EPQ tree, so just reset it.
         */
        int         rtsize = list_length(parentestate->es_range_table);
        PlanState  *planstate = epqstate->planstate;

        MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));

        /* Recopy current values of parent parameters */
        if (parentestate->es_plannedstmt->nParamExec > 0)
        {
            int         i = parentestate->es_plannedstmt->nParamExec;

            while (--i >= 0)
            {
                /* copy value if any, but not execPlan link */
                estate->es_param_exec_vals[i].value =
                    parentestate->es_param_exec_vals[i].value;
                estate->es_param_exec_vals[i].isnull =
                    parentestate->es_param_exec_vals[i].isnull;
            }
        }

        /*
         * Mark child plan tree as needing rescan at all scan nodes.  The
         * first ExecProcNode will take care of actually doing the rescan.
         */
        planstate->chgParam = bms_add_member(planstate->chgParam,
                                             epqstate->epqParam);
    }
}

/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
    EState     *estate;
    int         rtsize;
    MemoryContext oldcontext;
    ListCell   *l;

    rtsize = list_length(parentestate->es_range_table);

    epqstate->estate = estate = CreateExecutorState();

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Child EPQ EStates share the parent's copy of unchanging state such as
     * the snapshot, rangetable, result-rel info, and external Param info.
     * They need their own copies of local state, including a tuple table,
     * es_param_exec_vals, etc.
     */
    estate->es_direction = ForwardScanDirection;
    estate->es_snapshot = parentestate->es_snapshot;
    estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
    estate->es_range_table = parentestate->es_range_table;
    estate->es_plannedstmt = parentestate->es_plannedstmt;
    estate->es_junkFilter = parentestate->es_junkFilter;
    estate->es_output_cid = parentestate->es_output_cid;
    estate->es_result_relations = parentestate->es_result_relations;
    estate->es_num_result_relations = parentestate->es_num_result_relations;
    estate->es_result_relation_info = parentestate->es_result_relation_info;
    /* es_trig_target_relations must NOT be copied */
    estate->es_rowMarks = parentestate->es_rowMarks;
    estate->es_instrument = parentestate->es_instrument;
    estate->es_select_into = parentestate->es_select_into;
    estate->es_into_oids = parentestate->es_into_oids;

    /*
     * The external param list is simply shared from parent.  The internal
     * param workspace has to be local state, but we copy the initial values
     * from the parent, so as to have access to any param values that were
     * already set from other parts of the parent's plan tree.
     */
    estate->es_param_list_info = parentestate->es_param_list_info;
    if (parentestate->es_plannedstmt->nParamExec > 0)
    {
        int         i = parentestate->es_plannedstmt->nParamExec;

        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(i * sizeof(ParamExecData));
        while (--i >= 0)
        {
            /* copy value if any, but not execPlan link */
            estate->es_param_exec_vals[i].value =
                parentestate->es_param_exec_vals[i].value;
            estate->es_param_exec_vals[i].isnull =
                parentestate->es_param_exec_vals[i].isnull;
        }
    }

    /*
     * Each EState must have its own es_epqScanDone state, but if we have
     * nested EPQ checks they should share es_epqTuple arrays.  This allows
     * sub-rechecks to inherit the values being examined by an outer recheck.
     */
    estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
    if (parentestate->es_epqTuple != NULL)
    {
        estate->es_epqTuple = parentestate->es_epqTuple;
        estate->es_epqTupleSet = parentestate->es_epqTupleSet;
    }
    else
    {
        estate->es_epqTuple = (HeapTuple *)
            palloc0(rtsize * sizeof(HeapTuple));
        estate->es_epqTupleSet = (bool *)
            palloc0(rtsize * sizeof(bool));
    }

    /*
     * Each estate also has its own tuple table.
     */
    estate->es_tupleTable = NIL;

    /*
     * Initialize private state information for each SubPlan.  We must do
     * this before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     * Some of the SubPlans might not be used in the part of the plan tree
     * we intend to run, but since it's not easy to tell which, we just
     * initialize them all.
     */
    Assert(estate->es_subplanstates == NIL);
    foreach(l, parentestate->es_plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;

        subplanstate = ExecInitNode(subplan, estate, 0);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);
    }

    /*
     * Initialize the private state information for all the nodes in the
     * part of the plan tree we need to run.  This opens files, allocates
     * storage and leaves us ready to start processing tuples.
     */
    epqstate->planstate = ExecInitNode(planTree, estate, 0);

    MemoryContextSwitchTo(oldcontext);
}

/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 * (There probably shouldn't be any of the latter, but just in case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
    EState     *estate = epqstate->estate;
    MemoryContext oldcontext;
    ListCell   *l;

    if (estate == NULL)
        return;                 /* idle, so nothing to do */

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndNode(epqstate->planstate);

    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /* throw away the per-estate tuple table */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /* close any trigger target relations attached to this EState */
    foreach(l, estate->es_trig_target_relations)
    {
        ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    MemoryContextSwitchTo(oldcontext);

    FreeExecutorState(estate);

    /* Mark EPQState idle */
    epqstate->estate = NULL;
    epqstate->planstate = NULL;
    epqstate->origslot = NULL;
}
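
/*
 * Lifecycle sketch: a node that may need rechecks (e.g. a ModifyTable-style
 * node) owns one EPQState and drives it roughly like this (illustrative,
 * not a verbatim caller):
 *
 *		EvalPlanQualInit(&epqstate, estate, subplan, epqParam);	-- node init
 *		EvalPlanQualAddRowMark(&epqstate, erm);		-- per non-locked rowmark
 *		slot = EvalPlanQual(...);					-- per concurrent update
 *		EvalPlanQualEnd(&epqstate);					-- node shutdown
 */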

/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 */

typedef struct
{
    DestReceiver pub;           /* publicly-known function pointers */
    EState     *estate;         /* EState we are working with */
    Relation    rel;            /* Relation to write to */
    int         hi_options;     /* heap_insert performance options */
    BulkInsertState bistate;    /* bulk insert state */
} DR_intorel;

/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
    IntoClause *into = queryDesc->plannedstmt->intoClause;
    EState     *estate = queryDesc->estate;
    Relation    intoRelationDesc;
    char       *intoName;
    Oid         namespaceId;
    Oid         tablespaceId;
    Datum       reloptions;
    AclResult   aclresult;
    Oid         intoRelationId;
    TupleDesc   tupdesc;
    DR_intorel *myState;
    static char *validnsps[] = HEAP_RELOPT_NAMESPACES;

    Assert(into);

    /*
     * Check consistency of arguments
     */
    if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
                 errmsg("ON COMMIT can only be used on temporary tables")));

    /*
     * Find namespace to create in, check its permissions
     */
    intoName = into->rel->relname;
    namespaceId = RangeVarGetCreationNamespace(into->rel);

    aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
                                      ACL_CREATE);
    if (aclresult != ACLCHECK_OK)
        aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
                       get_namespace_name(namespaceId));

    /*
     * Select tablespace to use.  If not specified, use default tablespace
     * (which may in turn default to database's default).
     */
    if (into->tableSpaceName)
    {
        tablespaceId = get_tablespace_oid(into->tableSpaceName);
        if (!OidIsValid(tablespaceId))
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("tablespace \"%s\" does not exist",
                            into->tableSpaceName)));
    }
    else
    {
        tablespaceId = GetDefaultTablespace(into->rel->istemp);
        /* note InvalidOid is OK in this case */
    }

    /* Check permissions except when using the database's default space */
    if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
    {
        AclResult   aclresult;

        aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
                                           ACL_CREATE);

        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
                           get_tablespace_name(tablespaceId));
    }

    /* Parse and validate any reloptions */
    reloptions = transformRelOptions((Datum) 0,
                                     into->options,
                                     NULL,
                                     validnsps,
                                     true,
                                     false);
    (void) heap_reloptions(RELKIND_RELATION, reloptions, true);

    /* Copy the tupdesc because heap_create_with_catalog modifies it */
    tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

    /* Now we can actually create the new relation */
    intoRelationId = heap_create_with_catalog(intoName,
                                              /* remaining arguments elided */
                                              allowSystemTableMods);

    FreeTupleDesc(tupdesc);

    /*
     * Advance command counter so that the newly-created relation's catalog
     * tuples will be visible to heap_open.
     */
    CommandCounterIncrement();

    /*
     * If necessary, create a TOAST table for the INTO relation.  Note that
     * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
     * that the TOAST table will be visible for insertion.
     */
    reloptions = transformRelOptions((Datum) 0,
                                     into->options,
                                     "toast",
                                     validnsps,
                                     true,
                                     false);

    (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);

    AlterTableCreateToastTable(intoRelationId, InvalidOid, reloptions, false);

    /*
     * And open the constructed table for writing.
     */
    intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

    /*
     * Now replace the query's DestReceiver with one for SELECT INTO
     */
    queryDesc->dest = CreateDestReceiver(DestIntoRel);
    myState = (DR_intorel *) queryDesc->dest;
    Assert(myState->pub.mydest == DestIntoRel);
    myState->estate = estate;
    myState->rel = intoRelationDesc;

    /*
     * We can skip WAL-logging the insertions, unless PITR is in use.  We can
     * skip the FSM in any case.
     */
    myState->hi_options = HEAP_INSERT_SKIP_FSM |
        (XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
    myState->bistate = GetBulkInsertState();

    /* Not using WAL requires rd_targblock to be initially invalid */
    Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
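
/*
 * For example, with WAL archiving off, each row is then written as
 *
 *		heap_insert(rel, tuple, cid,
 *					HEAP_INSERT_SKIP_FSM | HEAP_INSERT_SKIP_WAL, bistate);
 *
 * which is why CloseIntoRel below must heap_sync the relation before
 * commit.
 */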

/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
    DR_intorel *myState = (DR_intorel *) queryDesc->dest;

    /* OpenIntoRel might never have gotten called */
    if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
    {
        FreeBulkInsertState(myState->bistate);

        /* If we skipped using WAL, must heap_sync before commit */
        if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
            heap_sync(myState->rel);

        /* close rel, but keep lock until commit */
        heap_close(myState->rel, NoLock);

        myState->rel = NULL;
    }
}

/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
    DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

    self->pub.receiveSlot = intorel_receive;
    self->pub.rStartup = intorel_startup;
    self->pub.rShutdown = intorel_shutdown;
    self->pub.rDestroy = intorel_destroy;
    self->pub.mydest = DestIntoRel;

    /* private fields will be set by OpenIntoRel */

    return (DestReceiver *) self;
}
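
/*
 * The receiver is then driven by the ordinary executor protocol, roughly:
 *
 *		dest = CreateDestReceiver(DestIntoRel);
 *		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);
 *		(*dest->receiveSlot) (slot, dest);		-- once per tuple
 *		(*dest->rShutdown) (dest);
 *		(*dest->rDestroy) (dest);
 */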

/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
    /* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
    DR_intorel *myState = (DR_intorel *) self;
    HeapTuple   tuple;

    /*
     * get the heap tuple out of the tuple table slot, making sure we have a
     * writable copy
     */
    tuple = ExecMaterializeSlot(slot);

    /*
     * force assignment of new OID (see comments in ExecInsert)
     */
    if (myState->rel->rd_rel->relhasoids)
        HeapTupleSetOid(tuple, InvalidOid);

    heap_insert(myState->rel,
                tuple,
                myState->estate->es_output_cid,
                myState->hi_options,
                myState->bistate);

    /* We know this is a newly created relation, so there are no indexes */
}

/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
    /* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
    pfree(self);
}