/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorEnd()
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd()
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.322 2009/02/02 19:31:39 alvherre Exp $
 *
 *-------------------------------------------------------------------------
 */
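
/*
 * A minimal sketch of the calling sequence, as seen from a caller such as
 * the portal code (the CreateQueryDesc arguments are elided here; see
 * pquery.c for real callers):
 *
 *		QueryDesc  *queryDesc = CreateQueryDesc(...);
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *		FreeQueryDesc(queryDesc);
 *
 * A count of zero in ExecutorRun means "run to completion".
 */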
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"

/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
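
/*
 * A minimal sketch of how a loadable module might install one of these
 * hooks from its _PG_init(), chaining to any hook installed before it
 * (my_ExecutorRun and prev_ExecutorRun are the module's own names):
 *
 *		static ExecutorRun_hook_type prev_ExecutorRun = NULL;
 *
 *		static void
 *		my_ExecutorRun(QueryDesc *queryDesc,
 *					   ScanDirection direction, long count)
 *		{
 *			(per-query bookkeeping can go here)
 *			if (prev_ExecutorRun)
 *				prev_ExecutorRun(queryDesc, direction, count);
 *			else
 *				standard_ExecutorRun(queryDesc, direction, count);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorRun = ExecutorRun_hook;
 *			ExecutorRun_hook = my_ExecutorRun;
 *		}
 */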

typedef struct evalPlanQual
{
	Index		rti;
	EState	   *estate;
	PlanState  *planstate;
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
				  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */

/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	if (ExecutorStart_hook)
		(*ExecutorStart_hook) (queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in parameters, if any, from queryDesc
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:
			/* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
			if (queryDesc->plannedstmt->intoClause != NULL ||
				queryDesc->plannedstmt->rowMarks != NIL)
				estate->es_output_cid = GetCurrentCommandId(true);
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_instrument = queryDesc->doInstrument;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, long count)
{
	if (ExecutorRun_hook)
		(*ExecutorRun_hook) (queryDesc, direction, count);
	else
		standard_ExecutorRun(queryDesc, direction, count);
}
void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, long count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of ExecutorRun overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->returningLists);

	if (sendTuples)
		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
		ExecutePlan(estate,
					queryDesc->planstate,
					operation,
					count,
					direction,
					dest);

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		(*dest->rShutdown) (dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	if (ExecutorEnd_hook)
		(*ExecutorEnd_hook) (queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/*
	 * Close the SELECT INTO relation if any
	 */
	if (estate->es_select_into)
		CloseIntoRel(queryDesc);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate, NULL);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
	}
}

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;
	Bitmapset  *tmpset;
	int			col;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
						   get_rel_name(relOid));

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
								   get_rel_name(relOid));
			}

			tmpset = bms_copy(rte->selectedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
			{
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
									   get_rel_name(relOid));
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
						!= ACLCHECK_OK)
						aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
									   get_rel_name(relOid));
				}
			}
			bms_free(tmpset);
		}

		/*
		 * Basically the same for the mod columns, with either INSERT or
		 * UPDATE privilege as specified by remainingPerms.
		 */
		remainingPerms &= ~ACL_SELECT;
		if (remainingPerms != 0)
		{
			/*
			 * When the query doesn't explicitly change any columns, allow
			 * the query if we have permission on any column of the rel.  This
			 * is to handle SELECT FOR UPDATE as well as possible corner cases
			 * in INSERT and UPDATE.
			 */
			if (bms_is_empty(rte->modifiedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
											  ACLMASK_ANY) != ACLCHECK_OK)
					aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
								   get_rel_name(relOid));
			}

			tmpset = bms_copy(rte->modifiedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
			{
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
				{
					/* whole-row reference can't happen here */
					elog(ERROR, "whole-row update is not implemented");
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
						!= ACLCHECK_OK)
						aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
									   get_rel_name(relOid));
				}
			}
			bms_free(tmpset);
		}
	}
}

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * CREATE TABLE AS or SELECT INTO?
	 *
	 * XXX should we allow this if the destination is temp?
	 */
	if (plannedstmt->intoClause != NULL)
		goto fail;

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		goto fail;
	}

	return;

fail:
	ereport(ERROR,
			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
			 errmsg("transaction is read-only")));
}
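
/*
 * For example, in a transaction started under "SET TRANSACTION READ ONLY",
 * an INSERT into an ordinary table fails with the error above, while writes
 * to temporary tables are still allowed (and EXPLAIN skips this check
 * entirely via EXEC_FLAG_EXPLAIN_ONLY in standard_ExecutorStart).
 */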

/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;

	/*
	 * initialize result relation stuff
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  operation,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* Initialize to first or only result rel */
		estate->es_result_relation_info = resultRelInfos;
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}

	/*
	 * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
	 * flag appropriately so that the plan tree will be initialized with the
	 * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
	 */
	estate->es_select_into = false;
	if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
	{
		estate->es_select_into = true;
		estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
	}

	/*
	 * Have to lock relations selected FOR UPDATE/FOR SHARE before we
	 * initialize the plan tree, else we'd be doing a lock upgrade. While we
	 * are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		RowMarkClause *rc = (RowMarkClause *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		relid = getrelid(rc->rti, rangeTable);
		relation = heap_open(relid, RowShareLock);
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->forUpdate = rc->forUpdate;
		erm->noWait = rc->noWait;
		/* We'll locate the junk attrs below */
		erm->ctidAttNo = InvalidAttrNumber;
		erm->toidAttNo = InvalidAttrNumber;
		ItemPointerSetInvalid(&(erm->curCtid));
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Initialize the executor "tuple" table.  We need slots for all the plan
	 * nodes, plus possibly output slots for the junkfilter(s). At this point
	 * we aren't sure if we need junkfilters, so just add slots for them
	 * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
	 * trigger output tuples.  Also, one for RETURNING-list evaluation.
	 */
	{
		int			nSlots;

		/* Slots for the main plan tree */
		nSlots = ExecCountSlotsNode(plan);
		/* Add slots for subplans and initplans */
		foreach(l, plannedstmt->subplans)
		{
			Plan	   *subplan = (Plan *) lfirst(l);

			nSlots += ExecCountSlotsNode(subplan);
		}
		/* Add slots for junkfilter(s) */
		if (plannedstmt->resultRelations != NIL)
			nSlots += list_length(plannedstmt->resultRelations);
		else
			nSlots += 1;
		if (operation != CMD_SELECT)
			nSlots++;			/* for es_trig_tuple_slot */
		if (plannedstmt->returningLists)
			nSlots++;			/* for RETURNING projection */

		estate->es_tupleTable = ExecCreateTupleTable(nSlots);

		if (operation != CMD_SELECT)
			estate->es_trig_tuple_slot =
				ExecAllocTableSlot(estate->es_tupleTable);
	}

	/* mark EvalPlanQual not active */
	estate->es_plannedstmt = plannedstmt;
	estate->es_evalPlanQual = NULL;
	estate->es_evTupleNull = NULL;
	estate->es_evTuple = NULL;
	estate->es_useEvalPlan = false;

	/*
	 * Initialize private state information for each SubPlan.  We must do
	 * this before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the
	 * query tree.  This opens files, allocates storage and leaves us ready
	 * to start processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return. (this
	 * is especially important if we are creating a relation with "SELECT
	 * INTO")
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT and INSERT queries need a
	 * filter if there are any junk attrs in the tlist.  UPDATE and
	 * DELETE always need a filter, since there's always a junk 'ctid'
	 * attribute present --- no need to look first.
	 *
	 * This section of code is also a convenient place to verify that the
	 * output of an INSERT or UPDATE matches the target table(s).
	 */
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		switch (operation)
		{
			case CMD_SELECT:
			case CMD_INSERT:
				foreach(tlist, plan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(tlist);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				break;
		}

		if (junk_filter_needed)
		{
			/*
			 * If there are multiple result relations, each one needs its own
			 * junk filter.  Note this is only possible for UPDATE/DELETE, so
			 * we can't be fooled by some needing a filter and some not.
			 */
			if (list_length(plannedstmt->resultRelations) > 1)
			{
				PlanState **appendplans;
				int			as_nplans;
				ResultRelInfo *resultRelInfo;

				/* Top plan had better be an Append here. */
				Assert(IsA(plan, Append));
				Assert(((Append *) plan)->isTarget);
				Assert(IsA(planstate, AppendState));
				appendplans = ((AppendState *) planstate)->appendplans;
				as_nplans = ((AppendState *) planstate)->as_nplans;
				Assert(as_nplans == estate->es_num_result_relations);
				resultRelInfo = estate->es_result_relations;
				for (i = 0; i < as_nplans; i++)
				{
					PlanState  *subplan = appendplans[i];
					JunkFilter *j;

					if (operation == CMD_UPDATE)
						ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
											subplan->plan->targetlist);

					j = ExecInitJunkFilter(subplan->plan->targetlist,
							resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));

					/*
					 * Since it must be UPDATE/DELETE, there had better be a
					 * "ctid" junk attribute in the tlist ... but ctid could
					 * be at a different resno for each result relation. We
					 * look up the ctid resnos now and save them in the
					 * junkfilters.
					 */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
					resultRelInfo->ri_junkFilter = j;
					resultRelInfo++;
				}

				/*
				 * Set active junkfilter too; at this point ExecInitAppend has
				 * already selected an active result relation...
				 */
				estate->es_junkFilter =
					estate->es_result_relation_info->ri_junkFilter;

				/*
				 * We currently can't support rowmarks in this case, because
				 * the associated junk CTIDs might have different resnos in
				 * different subplans.
				 */
				if (estate->es_rowMarks)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
			}
			else
			{
				/* Normal case with just one JunkFilter */
				JunkFilter *j;

				if (operation == CMD_INSERT || operation == CMD_UPDATE)
					ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
										planstate->plan->targetlist);

				j = ExecInitJunkFilter(planstate->plan->targetlist,
									   tupType->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));
				estate->es_junkFilter = j;
				if (estate->es_result_relation_info)
					estate->es_result_relation_info->ri_junkFilter = j;

				if (operation == CMD_SELECT)
				{
					/* For SELECT, want to return the cleaned tuple type */
					tupType = j->jf_cleanTupType;
				}
				else if (operation == CMD_UPDATE || operation == CMD_DELETE)
				{
					/* For UPDATE/DELETE, find the ctid junk attr now */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
				}

				/* For SELECT FOR UPDATE/SHARE, find the junk attrs now */
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = (ExecRowMark *) lfirst(l);
					char		resname[32];

					/* always need the ctid */
					snprintf(resname, sizeof(resname), "ctid%u",
							 erm->prti);
					erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
					if (!AttributeNumberIsValid(erm->ctidAttNo))
						elog(ERROR, "could not find junk \"%s\" column",
							 resname);
					/* if child relation, need tableoid too */
					if (erm->rti != erm->prti)
					{
						snprintf(resname, sizeof(resname), "tableoid%u",
								 erm->prti);
						erm->toidAttNo = ExecFindJunkAttribute(j, resname);
						if (!AttributeNumberIsValid(erm->toidAttNo))
							elog(ERROR, "could not find junk \"%s\" column",
								 resname);
					}
				}
			}
		}
		else
		{
			if (operation == CMD_INSERT)
				ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
									planstate->plan->targetlist);

			estate->es_junkFilter = NULL;
			if (estate->es_rowMarks)
				elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
		}
	}

	/*
	 * Initialize RETURNING projections if needed.
	 */
	if (plannedstmt->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;
		ResultRelInfo *resultRelInfo;

		/*
		 * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
		 * We assume all the sublists will generate the same output tupdesc.
		 */
		tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
								 false);

		/* Set up a slot for the output of the RETURNING projection(s) */
		slot = ExecAllocTableSlot(estate->es_tupleTable);
		ExecSetSlotDescriptor(slot, tupType);
		/* Need an econtext too */
		econtext = CreateExprContext(estate);

		/*
		 * Build a projection for each result rel.  Note that any SubPlans in
		 * the RETURNING lists get attached to the topmost plan node.
		 */
		Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
		resultRelInfo = estate->es_result_relations;
		foreach(l, plannedstmt->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);
			List	   *rliststate;

			rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
									 resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;

	/*
	 * If doing SELECT INTO, initialize the "into" relation.  We must wait
	 * till now so we have the "clean" result tuple type to create the new
	 * table from.
	 *
	 * If EXPLAIN, skip creating the "into" relation.
	 */
	if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  CmdType operation,
				  bool doInstrument)
{
	/*
	 * Check valid relkind ... parser and/or planner should have noticed this
	 * already, but let's make sure.
	 */
	switch (resultRelationDesc->rd_rel->relkind)
	{
		case RELKIND_RELATION:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_VIEW:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change view \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
	}

	/* OK, fill in the node */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		if (doInstrument)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
		else
			resultRelInfo->ri_TrigInstrument = NULL;
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;

	/*
	 * If there are indices on the result relation, open them and save
	 * descriptors in the result relation info, so that we can add new index
	 * entries for the tuples we add/update.  We need not do this for a
	 * DELETE, however, since deletion doesn't affect indexes.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		operation != CMD_DELETE)
		ExecOpenIndices(resultRelInfo);
}

/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
	TupleDesc	resultDesc = RelationGetDescr(resultRel);
	int			attno = 0;
	ListCell   *lc;

	foreach(lc, targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Form_pg_attribute attr;

		if (tle->resjunk)
			continue;			/* ignore junk tlist items */

		if (attno >= resultDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = resultDesc->attrs[attno++];

		if (!attr->attisdropped)
		{
			/* Normal case: demand type match */
			if (exprType((Node *) tle->expr) != attr->atttypid)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
								   format_type_be(attr->atttypid),
								   attno,
								   format_type_be(exprType((Node *) tle->expr)))));
		}
		else
		{
			/*
			 * For a dropped column, we can't check atttypid (it's likely 0).
			 * In any case the planner has most likely inserted an INT4 null.
			 * What we insist on is just *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
								   attno)));
		}
	}
	if (attno != resultDesc->natts)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("table row type and query-specified row type do not match"),
				 errdetail("Query has too few columns.")));
}
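
/*
 * For example, if an INSERT plan was cached and a column of the target
 * table was then dropped or given a new type by ALTER TABLE, these checks
 * would catch the mismatch.  With plan invalidation working correctly the
 * statement is replanned first, so this is a backstop rather than an
 * expected error path.
 */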

/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
	ResultRelInfo *rInfo;
	int			nr;
	ListCell   *l;
	Relation	rel;
	MemoryContext oldcontext;

	/* First, search through the query result relations */
	rInfo = estate->es_result_relations;
	nr = estate->es_num_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}
	/* Nope, but maybe we already made an extra ResultRelInfo for it */
	foreach(l, estate->es_trig_target_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, so we need a new one */

	/*
	 * Open the target relation's relcache entry.  We assume that an
	 * appropriate lock is still held by the backend from whenever the trigger
	 * event got queued, so we need take no new lock here.
	 */
	rel = heap_open(relid, NoLock);

	/*
	 * Make the new entry in the right context.  Currently, we don't need any
	 * index information in ResultRelInfos used only for triggers, so tell
	 * InitResultRelInfo it's a DELETE.
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
	rInfo = makeNode(ResultRelInfo);
	InitResultRelInfo(rInfo,
					  rel,
					  0,		/* dummy rangetable index */
					  CMD_DELETE,
					  estate->es_instrument);
	estate->es_trig_target_relations =
		lappend(estate->es_trig_target_relations, rInfo);
	MemoryContextSwitchTo(oldcontext);

	return rInfo;
}

/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	if (planstate->state->es_select_into)
	{
		*hasoids = planstate->state->es_into_oids;
		return true;
	}
	else
	{
		ResultRelInfo *ri = planstate->state->es_result_relation_info;

		if (ri != NULL)
		{
			Relation	rel = ri->ri_RelationDesc;

			if (rel != NULL)
			{
				*hasoids = rel->rd_rel->relhasoids;
				return true;
			}
		}
	}

	return false;
}
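
/*
 * For example, in "SELECT ... INTO foo" the OID choice comes from the INTO
 * clause options, recorded by InitPlan as es_into_oids (via
 * interpretOidsOption), so every plan node reserves OID space accordingly;
 * in a plain SELECT the choice is left to the planner's physical-tlist
 * optimization.
 */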

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down any PlanQual processing we were doing
	 */
	if (estate->es_evalPlanQual != NULL)
		EndEvalPlanQual(estate);

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * for subplans too
	 */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor "tuple" table.
	 */
	ExecDropTupleTable(estate->es_tupleTable, true);
	estate->es_tupleTable = NULL;

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * likewise close any trigger target relations
	 */
	foreach(l, estate->es_trig_target_relations)
	{
		resultRelInfo = (ResultRelInfo *) lfirst(l);
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = lfirst(l);

		heap_close(erm->relation, NoLock);
	}
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples'
 *		tuples, moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;
	TupleTableSlot *slot;
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			planSlot = EvalPlanQualNext(estate);
			if (TupIsNull(planSlot))
				planSlot = ExecProcNode(planstate);
		}
		else
			planSlot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(planSlot))
			break;
		slot = planSlot;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * But first, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					Datum		datum;
					bool		isNull;
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					/* if child rel, must check whether it produced this row */
					if (erm->rti != erm->prti)
					{
						Oid			tableoid;

						datum = ExecGetJunkAttribute(slot,
													 erm->toidAttNo,
													 &isNull);
						/* shouldn't ever get a null result... */
						if (isNull)
							elog(ERROR, "tableoid is NULL");
						tableoid = DatumGetObjectId(datum);

						if (tableoid != RelationGetRelid(erm->relation))
						{
							/* this child is inactive right now */
							ItemPointerSetInvalid(&(erm->curCtid));
							continue;
						}
					}

					/* okay, fetch the tuple by ctid */
					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");
					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_output_cid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							break;

						case HeapTupleUpdated:
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
										(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
										 errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax);
								if (!TupIsNull(newSlot))
								{
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
					}

					/* Remember tuple TID for WHERE CURRENT OF */
					erm->curCtid = tuple.t_self;
				}
			}

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				Datum		datum;
				bool		isNull;

				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * send it to the output destination, add it to a relation someplace,
		 * delete it from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				break;

			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}
}

/* ----------------------------------------------------------------
 *		ExecSelect
 *
 *		SELECTs are easy.. we just pass the tuple to the appropriate
 *		output function.
 * ----------------------------------------------------------------
 */
static void
ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate)
{
	(*dest->receiveSlot) (slot, dest);
	IncrRetrieved();
	(estate->es_processed)++;
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_output_cid, 0, NULL);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}

/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not. This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later. We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}

/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted.. This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not. This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete). All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
2132 * ExecRelCheck --- check that tuple meets constraints for result relation
2135 ExecRelCheck(ResultRelInfo *resultRelInfo,
2136 TupleTableSlot *slot, EState *estate)
2138 Relation rel = resultRelInfo->ri_RelationDesc;
2139 int ncheck = rel->rd_att->constr->num_check;
2140 ConstrCheck *check = rel->rd_att->constr->check;
2141 ExprContext *econtext;
2142 MemoryContext oldContext;
2147 * If first time through for this result relation, build expression
2148 * nodetrees for rel's constraint expressions. Keep them in the per-query
2149 * memory context so they'll survive throughout the query.
2151 if (resultRelInfo->ri_ConstraintExprs == NULL)
2153 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2154 resultRelInfo->ri_ConstraintExprs =
2155 (List **) palloc(ncheck * sizeof(List *));
2156 for (i = 0; i < ncheck; i++)
2158 /* ExecQual wants implicit-AND form */
2159 qual = make_ands_implicit(stringToNode(check[i].ccbin));
2160 resultRelInfo->ri_ConstraintExprs[i] = (List *)
2161 ExecPrepareExpr((Expr *) qual, estate);
2163 MemoryContextSwitchTo(oldContext);
2167 * We will use the EState's per-tuple context for evaluating constraint
2168 * expressions (creating it if it's not already there).
2170 econtext = GetPerTupleExprContext(estate);
2172 /* Arrange for econtext's scan tuple to be the tuple under test */
2173 econtext->ecxt_scantuple = slot;
2175 /* And evaluate the constraints */
2176 for (i = 0; i < ncheck; i++)
2177 {
2178 qual = resultRelInfo->ri_ConstraintExprs[i];
2180 /*
2181 * NOTE: SQL92 specifies that a NULL result from a constraint
2182 * expression is not to be treated as a failure. Therefore, tell
2183 * ExecQual to return TRUE for NULL.
2184 */
2185 if (!ExecQual(qual, econtext, true))
2186 return check[i].ccname;
2187 }
2189 /* NULL result means no error */
2190 return NULL;
2191 }
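/*
 * Illustrative example (hypothetical table): with
 *
 *		CREATE TABLE item (price numeric CHECK (price > 0));
 *		INSERT INTO item VALUES (NULL);
 *
 * the CHECK expression yields NULL, which is treated as success per
 * SQL92, so the row is accepted; only a definite FALSE makes
 * ExecRelCheck return the constraint's name.
 */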
2193 void
2194 ExecConstraints(ResultRelInfo *resultRelInfo,
2195 TupleTableSlot *slot, EState *estate)
2196 {
2197 Relation rel = resultRelInfo->ri_RelationDesc;
2198 TupleConstr *constr = rel->rd_att->constr;
2200 Assert(constr);
2202 if (constr->has_not_null)
2203 {
2204 int natts = rel->rd_att->natts;
2205 int attrChk;
2207 for (attrChk = 1; attrChk <= natts; attrChk++)
2208 {
2209 if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
2210 slot_attisnull(slot, attrChk))
2211 ereport(ERROR,
2212 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2213 errmsg("null value in column \"%s\" violates not-null constraint",
2214 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
2215 }
2216 }
2218 if (constr->num_check > 0)
2219 {
2220 const char *failed;
2222 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2223 ereport(ERROR,
2224 (errcode(ERRCODE_CHECK_VIOLATION),
2225 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2226 RelationGetRelationName(rel), failed)));
2227 }
2228 }
2230 /*
2231 * ExecProcessReturning --- evaluate a RETURNING list and send to dest
2233 * projectReturning: RETURNING projection info for current result rel
2234 * tupleSlot: slot holding tuple actually inserted/updated/deleted
2235 * planSlot: slot holding tuple returned by top plan node
2236 * dest: where to send the output
2237 */
2238 static void
2239 ExecProcessReturning(ProjectionInfo *projectReturning,
2240 TupleTableSlot *tupleSlot,
2241 TupleTableSlot *planSlot,
2242 DestReceiver *dest)
2243 {
2244 ExprContext *econtext = projectReturning->pi_exprContext;
2245 TupleTableSlot *retSlot;
2248 * Reset per-tuple memory context to free any expression evaluation
2249 * storage allocated in the previous cycle.
2251 ResetExprContext(econtext);
2253 /* Make tuple and any needed join variables available to ExecProject */
2254 econtext->ecxt_scantuple = tupleSlot;
2255 econtext->ecxt_outertuple = planSlot;
2257 /* Compute the RETURNING expressions */
2258 retSlot = ExecProject(projectReturning, NULL);
2260 /* Send to dest */
2261 (*dest->receiveSlot) (retSlot, dest);
2263 ExecClearTuple(retSlot);
2264 }
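/*
 * Illustrative example (hypothetical table): for
 *
 *		UPDATE t SET x = x + 1 WHERE id = 7 RETURNING id, x;
 *
 * tupleSlot carries the new version of the updated row and planSlot
 * carries the top plan node's output row (including any junk columns);
 * the projection computes (id, x) from them and the result goes to the
 * query's DestReceiver, typically the client.
 */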
2267 * Check a modified tuple to see if we want to process its updated version
2268 * under READ COMMITTED rules.
2270 * See backend/executor/README for some info about how this works.
2272 * estate - executor state data
2273 * rti - rangetable index of table containing tuple
2274 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2275 * priorXmax - t_xmax from the outdated tuple
2277 * *tid is also an output parameter: it's modified to hold the TID of the
2278 * latest version of the tuple (note this may be changed even on failure)
2280 * Returns a slot containing the new candidate update/delete tuple, or
2281 * NULL if we determine we shouldn't process the row.
2282 */
2283 TupleTableSlot *
2284 EvalPlanQual(EState *estate, Index rti,
2285 ItemPointer tid, TransactionId priorXmax)
2286 {
2287 evalPlanQual *epq;
2288 EState *epqstate;
2289 Relation relation;
2290 HeapTupleData tuple;
2291 HeapTuple copyTuple = NULL;
2292 SnapshotData SnapshotDirty;
2293 bool endNode;
2295 Assert(rti != 0);
2297 /*
2298 * find relation containing target tuple
2300 if (estate->es_result_relation_info != NULL &&
2301 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2302 relation = estate->es_result_relation_info->ri_RelationDesc;
2303 else
2304 {
2305 ListCell *l;
2307 relation = NULL;
2308 foreach(l, estate->es_rowMarks)
2309 {
2310 ExecRowMark *erm = lfirst(l);
2312 if (erm->rti == rti)
2313 {
2314 relation = erm->relation;
2315 break;
2316 }
2317 }
2318 if (relation == NULL)
2319 elog(ERROR, "could not find RowMark for RT index %u", rti);
2320 }
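/*
 * A sketch of the READ COMMITTED scenario this handles (hypothetical
 * table and values):
 *
 *		-- session 1					-- session 2
 *		BEGIN;
 *		UPDATE acct SET bal = bal - 50
 *			WHERE id = 1;
 *										UPDATE acct SET bal = 0
 *											WHERE id = 1;	-- blocks
 *		COMMIT;
 *
 * Once session 1 commits, session 2's heap_update returns
 * HeapTupleUpdated; the caller then invokes this routine with the old
 * version's t_ctid and xmax, so the quals can be rechecked against the
 * newest version of the row before session 2's update is applied.
 */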
2322 /*
2323 * fetch tid tuple
2324 *
2325 * Loop here to deal with updated or busy tuples
2326 */
2327 InitDirtySnapshot(SnapshotDirty);
2328 tuple.t_self = *tid;
2329 for (;;)
2330 {
2331 Buffer buffer;
2333 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2334 {
2336 * If xmin isn't what we're expecting, the slot must have been
2337 * recycled and reused for an unrelated tuple. This implies that
2338 * the latest version of the row was deleted, so we need do
2339 * nothing. (Should be safe to examine xmin without getting
2340 * buffer's content lock, since xmin never changes in an existing
2341 * tuple.)
2342 */
2343 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2344 priorXmax))
2345 {
2346 ReleaseBuffer(buffer);
2347 return NULL;
2348 }
2350 /* otherwise xmin should not be dirty... */
2351 if (TransactionIdIsValid(SnapshotDirty.xmin))
2352 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2354 /*
2355 * If the tuple is being updated by another transaction, we have to
2356 * wait for its commit/abort.
2357 */
2358 if (TransactionIdIsValid(SnapshotDirty.xmax))
2359 {
2360 ReleaseBuffer(buffer);
2361 XactLockTableWait(SnapshotDirty.xmax);
2362 continue; /* loop back to repeat heap_fetch */
2363 }
2366 * If tuple was inserted by our own transaction, we have to check
2367 * cmin against es_output_cid: cmin >= current CID means our
2368 * command cannot see the tuple, so we should ignore it. Without
2369 * this we are open to the "Halloween problem" of indefinitely
2370 * re-updating the same tuple. (We need not check cmax because
2371 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2372 * transaction dead, regardless of cmax.) We just checked that
2373 * priorXmax == xmin, so we can test that variable instead of
2374 * doing HeapTupleHeaderGetXmin again.
2376 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2377 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2378 {
2379 ReleaseBuffer(buffer);
2380 return NULL;
2381 }
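/*
 * Without this test, a command like (hypothetical)
 *
 *		UPDATE t SET x = x + 1;
 *
 * could reach the very row version it has just written, update it
 * again, reach the still-newer version, and so on forever - the
 * classic "Halloween problem". Rows written by the current command
 * have cmin >= es_output_cid and are therefore ignored here.
 */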
2383 /*
2384 * We got the tuple - now copy it for use by the recheck query.
2385 */
2386 copyTuple = heap_copytuple(&tuple);
2387 ReleaseBuffer(buffer);
2388 break;
2389 }
2392 * If the referenced slot was actually empty, the latest version of
2393 * the row must have been deleted, so we need do nothing.
2395 if (tuple.t_data == NULL)
2396 {
2397 ReleaseBuffer(buffer);
2398 return NULL;
2399 }
2402 * As above, if xmin isn't what we're expecting, do nothing.
2404 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2405 priorXmax))
2406 {
2407 ReleaseBuffer(buffer);
2408 return NULL;
2409 }
2412 * If we get here, the tuple was found but failed SnapshotDirty.
2413 * Assuming the xmin is either a committed xact or our own xact (as it
2414 * certainly should be if we're trying to modify the tuple), this must
2415 * mean that the row was updated or deleted by either a committed xact
2416 * or our own xact. If it was deleted, we can ignore it; if it was
2417 * updated then chain up to the next version and repeat the whole
2418 * process.
2419 *
2420 * As above, it should be safe to examine xmax and t_ctid without the
2421 * buffer content lock, because they can't be changing.
2423 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2424 {
2425 /* deleted, so forget about it */
2426 ReleaseBuffer(buffer);
2427 return NULL;
2428 }
2430 /* updated, so look at the updated row */
2431 tuple.t_self = tuple.t_data->t_ctid;
2432 /* updated row should have xmin matching this xmax */
2433 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2434 ReleaseBuffer(buffer);
2435 /* loop back to fetch next in chain */
2436 }
2438 /*
2439 * For UPDATE/DELETE we have to return the tid of the actual row we're
2440 * executing PQ for.
2441 */
2442 *tid = tuple.t_self;
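/*
 * The loop above walks the update chain one hop per iteration, e.g.
 * (hypothetical TIDs)
 *
 *		(0,1) -t_ctid-> (0,5) -t_ctid-> (1,2)
 *
 * verifying at each hop that the next version's xmin equals the
 * previous version's xmax, until it finds a version that passes
 * SnapshotDirty or a dead end; *tid now names the version we will
 * actually recheck.
 */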
2444 /*
2445 * Need to run a recheck subquery. Find or create a PQ stack entry.
2446 */
2447 epq = estate->es_evalPlanQual;
2448 endNode = true;
2450 if (epq != NULL && epq->rti == 0)
2451 {
2452 /* Top PQ stack entry is idle, so re-use it */
2453 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2454 epq->rti = rti;
2455 endNode = false;
2456 }
2458 /*
2459 * If this is a request for another RTE, Ra, we must check whether PlanQual
2460 * was already requested for Ra. If so, Ra's row was updated again, and we
2461 * must restart the old execution for Ra, discarding everything done after
2462 * Ra was suspended.
2463 */
2464 if (epq != NULL && epq->rti != rti &&
2465 epq->estate->es_evTuple[rti - 1] != NULL)
2466 {
2467 do
2468 {
2469 evalPlanQual *oldepq;
2471 /* stop execution */
2472 EvalPlanQualStop(epq);
2473 /* pop previous PlanQual from the stack */
2474 oldepq = epq->next;
2475 Assert(oldepq && oldepq->rti != 0);
2476 /* push current PQ to freePQ stack */
2477 oldepq->free = epq;
2478 epq = oldepq;
2479 estate->es_evalPlanQual = epq;
2480 } while (epq->rti != rti);
2481 }
2483 /*
2484 * If the request is for another RTE, we must suspend execution of the
2485 * current PlanQual and start execution of a new one.
2486 */
2487 if (epq == NULL || epq->rti != rti)
2488 {
2489 /* try to reuse plan used previously */
2490 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2492 if (newepq == NULL) /* first call or freePQ stack is empty */
2493 {
2494 newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2495 newepq->free = NULL;
2496 newepq->estate = NULL;
2497 newepq->planstate = NULL;
2498 }
2499 else
2500 {
2501 /* recycle previously used PlanQual */
2502 Assert(newepq->estate == NULL);
2503 epq->free = NULL;
2504 }
2505 /* push current PQ to the stack */
2506 newepq->next = epq;
2507 epq = newepq;
2508 estate->es_evalPlanQual = epq;
2509 epq->rti = rti;
2510 endNode = false;
2511 }
2513 Assert(epq->rti == rti);
2515 /*
2516 * OK - this is a request for the same RTE. Unfortunately we still have to
2517 * end and restart execution of the plan, because ExecReScan wouldn't
2518 * ensure that upper plan nodes would reset themselves. We could make
2519 * that work if insertion of the target tuple were integrated with the
2520 * Param mechanism somehow, so that the upper plan nodes know that their
2521 * children's outputs have changed.
2523 * Note that the stack of free evalPlanQual nodes is quite useless at the
2524 * moment, since it only saves us from pallocing/releasing the
2525 * evalPlanQual nodes themselves. But it will be useful once we implement
2526 * ReScan instead of end/restart for re-using PlanQual nodes.
2527 */
2528 if (endNode)
2529 {
2530 /* stop execution */
2531 EvalPlanQualStop(epq);
2532 }
2534 /*
2535 * Initialize new recheck query.
2537 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2538 * instead copy down changeable state from the top plan (including
2539 * es_result_relation_info, es_junkFilter) and reset locally changeable
2540 * state in the epq (including es_param_exec_vals, es_evTupleNull).
2542 EvalPlanQualStart(epq, estate, epq->next);
2544 /*
2545 * Free the old RTE's tuple, if any, and store the target tuple where the
2546 * relation's scan node will see it
2547 */
2548 epqstate = epq->estate;
2549 if (epqstate->es_evTuple[rti - 1] != NULL)
2550 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2551 epqstate->es_evTuple[rti - 1] = copyTuple;
2553 return EvalPlanQualNext(estate);
2554 }
2556 static TupleTableSlot *
2557 EvalPlanQualNext(EState *estate)
2558 {
2559 evalPlanQual *epq = estate->es_evalPlanQual;
2560 MemoryContext oldcontext;
2561 TupleTableSlot *slot;
2563 Assert(epq->rti != 0);
2565 lpqnext:;
2566 oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2567 slot = ExecProcNode(epq->planstate);
2568 MemoryContextSwitchTo(oldcontext);
2571 * No more tuples for this PQ. Continue previous one.
2572 */
2573 if (TupIsNull(slot))
2574 {
2575 evalPlanQual *oldepq;
2577 /* stop execution */
2578 EvalPlanQualStop(epq);
2579 /* pop old PQ from the stack */
2580 oldepq = epq->next;
2581 if (oldepq == NULL)
2582 {
2583 /* this is the first (oldest) PQ - mark as free */
2584 epq->rti = 0;
2585 estate->es_useEvalPlan = false;
2586 /* and continue Query execution */
2587 return NULL;
2588 }
2589 Assert(oldepq->rti != 0);
2590 /* push current PQ to freePQ stack */
2591 oldepq->free = epq;
2592 epq = oldepq;
2593 estate->es_evalPlanQual = epq;
2594 goto lpqnext;
2595 }
2597 return slot;
2598 }
2600 static void
2601 EndEvalPlanQual(EState *estate)
2602 {
2603 evalPlanQual *epq = estate->es_evalPlanQual;
2605 if (epq->rti == 0) /* plans already shut down */
2606 {
2607 Assert(epq->next == NULL);
2608 return;
2609 }
2611 for (;;)
2612 {
2613 evalPlanQual *oldepq;
2615 /* stop execution */
2616 EvalPlanQualStop(epq);
2617 /* pop old PQ from the stack */
2618 oldepq = epq->next;
2619 if (oldepq == NULL)
2620 {
2621 /* this is the first (oldest) PQ - mark as free */
2622 epq->rti = 0;
2623 estate->es_useEvalPlan = false;
2624 break;
2625 }
2626 Assert(oldepq->rti != 0);
2627 /* push current PQ to freePQ stack */
2628 oldepq->free = epq;
2629 epq = oldepq;
2630 estate->es_evalPlanQual = epq;
2631 }
2632 }
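/*
 * To illustrate the stack discipline (hypothetical rti values): a
 * recheck for rti 2 pushes one entry; if that recheck itself hits a
 * concurrently-updated row of rti 3, a second entry is pushed on top
 * of it. As each recheck is exhausted its entry is popped and parked
 * on the free list for reuse, and when the oldest entry finishes its
 * rti is set to 0 to mark the machinery idle.
 */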
2635 * Start execution of one level of PlanQual.
2637 * This is a cut-down version of ExecutorStart(): we copy some state from
2638 * the top-level estate rather than initializing it fresh.
2639 */
2640 static void
2641 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2642 {
2643 EState *epqstate;
2644 int rtsize;
2645 MemoryContext oldcontext;
2646 ListCell *l;
2648 rtsize = list_length(estate->es_range_table);
2650 epq->estate = epqstate = CreateExecutorState();
2652 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2655 * The epqstates share the top query's copy of unchanging state such as
2656 * the snapshot, rangetable, result-rel info, and external Param info.
2657 * They need their own copies of local state, including a tuple table,
2658 * es_param_exec_vals, etc.
2660 epqstate->es_direction = ForwardScanDirection;
2661 epqstate->es_snapshot = estate->es_snapshot;
2662 epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2663 epqstate->es_range_table = estate->es_range_table;
2664 epqstate->es_output_cid = estate->es_output_cid;
2665 epqstate->es_result_relations = estate->es_result_relations;
2666 epqstate->es_num_result_relations = estate->es_num_result_relations;
2667 epqstate->es_result_relation_info = estate->es_result_relation_info;
2668 epqstate->es_junkFilter = estate->es_junkFilter;
2669 /* es_trig_target_relations must NOT be copied */
2670 epqstate->es_param_list_info = estate->es_param_list_info;
2671 if (estate->es_plannedstmt->nParamExec > 0)
2672 epqstate->es_param_exec_vals = (ParamExecData *)
2673 palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2674 epqstate->es_rowMarks = estate->es_rowMarks;
2675 epqstate->es_instrument = estate->es_instrument;
2676 epqstate->es_select_into = estate->es_select_into;
2677 epqstate->es_into_oids = estate->es_into_oids;
2678 epqstate->es_plannedstmt = estate->es_plannedstmt;
2681 * Each epqstate must have its own es_evTupleNull state, but all the stack
2682 * entries share es_evTuple state. This allows sub-rechecks to inherit
2683 * the value being examined by an outer recheck.
2685 epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2686 if (priorepq == NULL)
2687 /* first PQ stack entry */
2688 epqstate->es_evTuple = (HeapTuple *)
2689 palloc0(rtsize * sizeof(HeapTuple));
2690 else
2691 /* later stack entries share the same storage */
2692 epqstate->es_evTuple = priorepq->estate->es_evTuple;
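/*
 * For instance (hypothetical rangetable): if an outer recheck is
 * examining a tuple of RTE 2 and triggers a nested recheck for RTE 3,
 * the nested epqstate sees the same es_evTuple[1] entry, so a scan of
 * RTE 2 within the nested plan returns exactly the tuple the outer
 * recheck is examining instead of rescanning the relation.
 */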
2694 /*
2695 * Create sub-tuple-table; we needn't redo the CountSlots work though.
2696 */
2697 epqstate->es_tupleTable =
2698 ExecCreateTupleTable(estate->es_tupleTable->size);
2701 * Initialize private state information for each SubPlan. We must do this
2702 * before running ExecInitNode on the main query tree, since
2703 * ExecInitSubPlan expects to be able to find these entries.
2705 Assert(epqstate->es_subplanstates == NIL);
2706 foreach(l, estate->es_plannedstmt->subplans)
2707 {
2708 Plan *subplan = (Plan *) lfirst(l);
2709 PlanState *subplanstate;
2711 subplanstate = ExecInitNode(subplan, epqstate, 0);
2713 epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
2714 subplanstate);
2715 }
2718 * Initialize the private state information for all the nodes in the query
2719 * tree. This opens files, allocates storage and leaves us ready to start
2720 * processing tuples.
2722 epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2724 MemoryContextSwitchTo(oldcontext);
2725 }
2727 /*
2728 * End execution of one level of PlanQual.
2730 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2731 * of the normal cleanup, but *not* close result relations (which we are
2732 * just sharing from the outer query). We do, however, have to close any
2733 * trigger target relations that got opened, since those are not shared.
2734 */
2735 static void
2736 EvalPlanQualStop(evalPlanQual *epq)
2737 {
2738 EState *epqstate = epq->estate;
2739 MemoryContext oldcontext;
2740 ListCell *l;
2742 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2744 ExecEndNode(epq->planstate);
2746 foreach(l, epqstate->es_subplanstates)
2747 {
2748 PlanState *subplanstate = (PlanState *) lfirst(l);
2750 ExecEndNode(subplanstate);
2751 }
2753 ExecDropTupleTable(epqstate->es_tupleTable, true);
2754 epqstate->es_tupleTable = NULL;
2756 if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2757 {
2758 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2759 epqstate->es_evTuple[epq->rti - 1] = NULL;
2760 }
2762 foreach(l, epqstate->es_trig_target_relations)
2763 {
2764 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2766 /* Close indices and then the relation itself */
2767 ExecCloseIndices(resultRelInfo);
2768 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2769 }
2771 MemoryContextSwitchTo(oldcontext);
2773 FreeExecutorState(epqstate);
2775 epq->estate = NULL;
2776 epq->planstate = NULL;
2777 }
2779 /*
2780 * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2782 * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2783 * are looking at a row returned by the EvalPlanQual machinery, we need
2784 * to look at the subsidiary state instead.
2785 */
2786 PlanState *
2787 ExecGetActivePlanTree(QueryDesc *queryDesc)
2788 {
2789 EState *estate = queryDesc->estate;
2791 if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2792 return estate->es_evalPlanQual->planstate;
2793 else
2794 return queryDesc->planstate;
2795 }
2799 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2801 * We implement SELECT INTO by diverting SELECT's normal output with
2802 * a specialized DestReceiver type.
2803 */
2805 typedef struct
2806 {
2807 DestReceiver pub; /* publicly-known function pointers */
2808 EState *estate; /* EState we are working with */
2809 Relation rel; /* Relation to write to */
2810 int hi_options; /* heap_insert performance options */
2811 BulkInsertState bistate; /* bulk insert state */
2812 } DR_intorel;
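/*
 * E.g. (hypothetical):
 *
 *		SELECT region, sum(amount) INTO sales_summary
 *		FROM sales GROUP BY region;
 *
 * executes like an ordinary SELECT except that OpenIntoRel creates
 * sales_summary and substitutes the DR_intorel receiver, so each
 * result row is heap_insert'ed into the new table rather than being
 * sent to the client.
 */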
2815 * OpenIntoRel --- actually create the SELECT INTO target relation
2817 * This also replaces QueryDesc->dest with the special DestReceiver for
2818 * SELECT INTO. We assume that the correct result tuple type has already
2819 * been placed in queryDesc->tupDesc.
2820 */
2821 static void
2822 OpenIntoRel(QueryDesc *queryDesc)
2823 {
2824 IntoClause *into = queryDesc->plannedstmt->intoClause;
2825 EState *estate = queryDesc->estate;
2826 Relation intoRelationDesc;
2827 char *intoName;
2828 Oid namespaceId;
2829 Oid tablespaceId;
2830 Datum reloptions;
2831 AclResult aclresult;
2832 Oid intoRelationId;
2833 TupleDesc tupdesc;
2834 DR_intorel *myState;
2835 static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2837 Assert(into);
2840 * Check consistency of arguments
2842 if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2843 ereport(ERROR,
2844 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2845 errmsg("ON COMMIT can only be used on temporary tables")));
2848 * Find namespace to create in, check its permissions
2850 intoName = into->rel->relname;
2851 namespaceId = RangeVarGetCreationNamespace(into->rel);
2853 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2854 ACL_CREATE);
2855 if (aclresult != ACLCHECK_OK)
2856 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2857 get_namespace_name(namespaceId));
2860 * Select tablespace to use. If not specified, use default tablespace
2861 * (which may in turn default to database's default).
2863 if (into->tableSpaceName)
2864 {
2865 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2866 if (!OidIsValid(tablespaceId))
2867 ereport(ERROR,
2868 (errcode(ERRCODE_UNDEFINED_OBJECT),
2869 errmsg("tablespace \"%s\" does not exist",
2870 into->tableSpaceName)));
2871 }
2872 else
2873 {
2874 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2875 /* note InvalidOid is OK in this case */
2876 }
2878 /* Check permissions except when using the database's default space */
2879 if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2880 {
2881 AclResult aclresult;
2883 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2884 ACL_CREATE);
2886 if (aclresult != ACLCHECK_OK)
2887 aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2888 get_tablespace_name(tablespaceId));
2889 }
2891 /* Parse and validate any reloptions */
2892 reloptions = transformRelOptions((Datum) 0,
2893 into->options,
2894 NULL,
2895 validnsps,
2896 true,
2897 false);
2898 (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2900 /* Copy the tupdesc because heap_create_with_catalog modifies it */
2901 tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2903 /* Now we can actually create the new relation */
2904 intoRelationId = heap_create_with_catalog(intoName,
2917 allowSystemTableMods);
2919 FreeTupleDesc(tupdesc);
2922 * Advance command counter so that the newly-created relation's catalog
2923 * tuples will be visible to heap_open.
2925 CommandCounterIncrement();
2928 * If necessary, create a TOAST table for the INTO relation. Note that
2929 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2930 * the TOAST table will be visible for insertion.
2932 reloptions = transformRelOptions((Datum) 0,
2933 into->options,
2934 "toast",
2935 validnsps,
2936 true,
2937 false);
2939 (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2941 AlterTableCreateToastTable(intoRelationId, reloptions);
2944 * And open the constructed table for writing.
2946 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2949 * Now replace the query's DestReceiver with one for SELECT INTO
2951 queryDesc->dest = CreateDestReceiver(DestIntoRel);
2952 myState = (DR_intorel *) queryDesc->dest;
2953 Assert(myState->pub.mydest == DestIntoRel);
2954 myState->estate = estate;
2955 myState->rel = intoRelationDesc;
2958 * We can skip WAL-logging the insertions, unless PITR is in use. We
2959 * can skip the FSM in any case.
2961 myState->hi_options = HEAP_INSERT_SKIP_FSM |
2962 (XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
2963 myState->bistate = GetBulkInsertState();
2965 /* Not using WAL requires rd_targblock be initially invalid */
2966 Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2967 }
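/*
 * The trade-off, in sketch: when WAL logging is skipped, crash
 * recovery cannot recreate these inserts, so CloseIntoRel must
 * heap_sync the relation to disk before commit. That is safe because
 * the table itself was created in this same transaction - if we crash
 * before committing, no visible table remains at all.
 */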
2970 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2971 */
2972 static void
2973 CloseIntoRel(QueryDesc *queryDesc)
2974 {
2975 DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2977 /* OpenIntoRel might never have gotten called */
2978 if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2979 {
2980 FreeBulkInsertState(myState->bistate);
2982 /* If we skipped using WAL, must heap_sync before commit */
2983 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2984 heap_sync(myState->rel);
2986 /* close rel, but keep lock until commit */
2987 heap_close(myState->rel, NoLock);
2989 myState->rel = NULL;
2990 }
2991 }
2993 /*
2994 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2995 */
2996 DestReceiver *
2997 CreateIntoRelDestReceiver(void)
2998 {
2999 DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
3001 self->pub.receiveSlot = intorel_receive;
3002 self->pub.rStartup = intorel_startup;
3003 self->pub.rShutdown = intorel_shutdown;
3004 self->pub.rDestroy = intorel_destroy;
3005 self->pub.mydest = DestIntoRel;
3007 /* private fields will be set by OpenIntoRel */
3009 return (DestReceiver *) self;
3010 }
3012 /*
3013 * intorel_startup --- executor startup
3014 */
3015 static void
3016 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
3017 {
3018 /* no-op */
3019 }
3021 /*
3022 * intorel_receive --- receive one tuple
3023 */
3024 static void
3025 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
3026 {
3027 DR_intorel *myState = (DR_intorel *) self;
3028 HeapTuple tuple;
3030 /*
3031 * get the heap tuple out of the tuple table slot, making sure we have a
3032 * writable copy
3033 */
3034 tuple = ExecMaterializeSlot(slot);
3036 heap_insert(myState->rel,
3037 tuple,
3038 myState->estate->es_output_cid,
3039 myState->hi_options,
3040 myState->bistate);
3042 /* We know this is a newly created relation, so there are no indexes */
3043 }
3048 * intorel_shutdown --- executor end
3049 */
3050 static void
3051 intorel_shutdown(DestReceiver *self)
3052 {
3053 /* no-op */
3054 }
3056 /*
3057 * intorel_destroy --- release DestReceiver object
3058 */
3059 static void
3060 intorel_destroy(DestReceiver *self)
3061 {
3062 pfree(self);
3063 }