1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
11 * The old ExecutorMain() has been replaced by ExecutorStart(),
12 * ExecutorRun() and ExecutorEnd()
14 * These three procedures are the external interfaces to the executor.
15 * In each case, the query descriptor is required as an argument.
17 * ExecutorStart() must be called at the beginning of execution of any
18 * query plan and ExecutorEnd() should always be called at the end of
19 * execution of a plan.
21 * ExecutorRun accepts direction and count arguments that specify whether
22 * the plan is to be executed forwards, backwards, and for how many tuples.
24 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
25 * Portions Copyright (c) 1994, Regents of the University of California
29 * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.296 2007/08/15 21:39:50 tgl Exp $
31 *-------------------------------------------------------------------------
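/*
 * Illustrative caller sequence (a sketch only, not part of the original
 * file): callers such as the portal code are expected to drive the
 * executor roughly like this, where "qd" is a QueryDesc obtained from
 * CreateQueryDesc:
 *
 *		ExecutorStart(qd, 0);
 *		ExecutorRun(qd, ForwardScanDirection, 0L);	(count 0 = run to completion)
 *		ExecutorEnd(qd);
 */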
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "optimizer/clauses.h"
49 #include "parser/parse_clause.h"
50 #include "parser/parsetree.h"
51 #include "storage/smgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
57 typedef struct evalPlanQual
62 struct evalPlanQual *next; /* stack of active PlanQual plans */
63 struct evalPlanQual *free; /* list of free PlanQual plans */
66 /* decls for local routines only used within this module */
67 static void InitPlan(QueryDesc *queryDesc, int eflags);
68 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
69 Relation resultRelationDesc,
70 Index resultRelationIndex,
73 static void ExecEndPlan(PlanState *planstate, EState *estate);
74 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
77 ScanDirection direction,
79 static void ExecSelect(TupleTableSlot *slot,
80 DestReceiver *dest, EState *estate);
81 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
82 TupleTableSlot *planSlot,
83 DestReceiver *dest, EState *estate);
84 static void ExecDelete(ItemPointer tupleid,
85 TupleTableSlot *planSlot,
86 DestReceiver *dest, EState *estate);
87 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
88 TupleTableSlot *planSlot,
89 DestReceiver *dest, EState *estate);
90 static void ExecProcessReturning(ProjectionInfo *projectReturning,
91 TupleTableSlot *tupleSlot,
92 TupleTableSlot *planSlot,
94 static TupleTableSlot *EvalPlanQualNext(EState *estate);
95 static void EndEvalPlanQual(EState *estate);
96 static void ExecCheckRTPerms(List *rangeTable);
97 static void ExecCheckRTEPerms(RangeTblEntry *rte);
98 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
99 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
100 evalPlanQual *priorepq);
101 static void EvalPlanQualStop(evalPlanQual *epq);
102 static void OpenIntoRel(QueryDesc *queryDesc);
103 static void CloseIntoRel(QueryDesc *queryDesc);
104 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
105 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
106 static void intorel_shutdown(DestReceiver *self);
107 static void intorel_destroy(DestReceiver *self);
109 /* end of local decls */
112 /* ----------------------------------------------------------------
115 * This routine must be called at the beginning of execution of any query plan.
118 * Takes a QueryDesc previously created by CreateQueryDesc (it's not entirely
119 * clear why we bother to separate the two functions, but...). The tupDesc
120 * field of the QueryDesc is filled in to describe the tuples that will be
121 * returned, and the internal fields (estate and planstate) are set up.
123 * eflags contains flag bits as described in executor.h.
125 * NB: the CurrentMemoryContext when this is called will become the parent
126 * of the per-query context used for this Executor invocation.
127 * ----------------------------------------------------------------
130 ExecutorStart(QueryDesc *queryDesc, int eflags)
133 MemoryContext oldcontext;
135 /* sanity checks: queryDesc must not be started already */
136 Assert(queryDesc != NULL);
137 Assert(queryDesc->estate == NULL);
140 * If the transaction is read-only, we need to check if any writes are
141 * planned to non-temporary tables. EXPLAIN is considered read-only.
143 if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
144 ExecCheckXactReadOnly(queryDesc->plannedstmt);
147 * Build EState, switch into per-query memory context for startup.
149 estate = CreateExecutorState();
150 queryDesc->estate = estate;
152 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
155 * Fill in parameters, if any, from queryDesc
157 estate->es_param_list_info = queryDesc->params;
159 if (queryDesc->plannedstmt->nParamExec > 0)
160 estate->es_param_exec_vals = (ParamExecData *)
161 palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
164 * Copy other important information into the EState
166 estate->es_snapshot = queryDesc->snapshot;
167 estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
168 estate->es_instrument = queryDesc->doInstrument;
171 * Initialize the plan state tree
173 InitPlan(queryDesc, eflags);
175 MemoryContextSwitchTo(oldcontext);
178 /* ----------------------------------------------------------------
181 * This is the main routine of the executor module. It accepts
182 * the query descriptor from the traffic cop and executes the query plan.
185 * ExecutorStart must have been called already.
187 * If direction is NoMovementScanDirection then nothing is done
188 * except to start up/shut down the destination. Otherwise,
189 * we retrieve up to 'count' tuples in the specified direction.
191 * Note: count = 0 is interpreted as no portal limit, i.e., run to completion.
194 * ----------------------------------------------------------------
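/*
 * Example (sketch): a portal fetching in batches might call
 *		ExecutorRun(qd, ForwardScanDirection, 10L);
 * repeatedly; each call resumes where the previous one stopped, while a
 * count of zero runs the plan to completion in one call.
 */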
197 ExecutorRun(QueryDesc *queryDesc,
198 ScanDirection direction, long count)
204 TupleTableSlot *result;
205 MemoryContext oldcontext;
208 Assert(queryDesc != NULL);
210 estate = queryDesc->estate;
212 Assert(estate != NULL);
215 * Switch into per-query memory context
217 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
220 * extract information from the query descriptor.
222 operation = queryDesc->operation;
223 dest = queryDesc->dest;
226 * startup tuple receiver, if we will be emitting tuples
228 estate->es_processed = 0;
229 estate->es_lastoid = InvalidOid;
231 sendTuples = (operation == CMD_SELECT ||
232 queryDesc->plannedstmt->returningLists);
235 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
240 if (ScanDirectionIsNoMovement(direction))
243 result = ExecutePlan(estate,
244 queryDesc->planstate,
251 * shutdown tuple receiver, if we started it
254 (*dest->rShutdown) (dest);
256 MemoryContextSwitchTo(oldcontext);
261 /* ----------------------------------------------------------------
264 * This routine must be called at the end of execution of any query plan.
266 * ----------------------------------------------------------------
269 ExecutorEnd(QueryDesc *queryDesc)
272 MemoryContext oldcontext;
275 Assert(queryDesc != NULL);
277 estate = queryDesc->estate;
279 Assert(estate != NULL);
282 * Switch into per-query memory context to run ExecEndPlan
284 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
286 ExecEndPlan(queryDesc->planstate, estate);
289 * Close the SELECT INTO relation if any
291 if (estate->es_select_into)
292 CloseIntoRel(queryDesc);
295 * Must switch out of context before destroying it
297 MemoryContextSwitchTo(oldcontext);
300 * Release EState and per-query memory context. This should release
301 * everything the executor has allocated.
303 FreeExecutorState(estate);
305 /* Reset queryDesc fields that no longer point to anything */
306 queryDesc->tupDesc = NULL;
307 queryDesc->estate = NULL;
308 queryDesc->planstate = NULL;
311 /* ----------------------------------------------------------------
314 * This routine may be called on an open queryDesc to rewind it to the start.
316 * ----------------------------------------------------------------
319 ExecutorRewind(QueryDesc *queryDesc)
322 MemoryContext oldcontext;
325 Assert(queryDesc != NULL);
327 estate = queryDesc->estate;
329 Assert(estate != NULL);
331 /* It's probably not sensible to rescan updating queries */
332 Assert(queryDesc->operation == CMD_SELECT);
335 * Switch into per-query memory context
337 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
342 ExecReScan(queryDesc->planstate, NULL);
344 MemoryContextSwitchTo(oldcontext);
350 * Check access permissions for all relations listed in a range table.
353 ExecCheckRTPerms(List *rangeTable)
357 foreach(l, rangeTable)
359 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
365 * Check access permissions for a single RTE.
368 ExecCheckRTEPerms(RangeTblEntry *rte)
370 AclMode requiredPerms;
375 * Only plain-relation RTEs need to be checked here. Function RTEs are
376 * checked by init_fcache when the function is prepared for execution.
377 * Join, subquery, and special RTEs need no checks.
379 if (rte->rtekind != RTE_RELATION)
383 * No work if requiredPerms is empty.
385 requiredPerms = rte->requiredPerms;
386 if (requiredPerms == 0)
392 * userid to check as: current user unless we have a setuid indication.
394 * Note: GetUserId() is presently fast enough that there's no harm in
395 * calling it separately for each RTE. If that stops being true, we could
396 * call it once in ExecCheckRTPerms and pass the userid down from there.
397 * But for now, no need for the extra clutter.
399 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
402 * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
404 if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
406 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
407 get_rel_name(relOid));
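/*
 * For illustration (not part of the original comments): an UPDATE whose
 * WHERE clause also reads the target table typically has both ACL_UPDATE
 * and ACL_SELECT set in requiredPerms, so the ACLMASK_ALL test above
 * demands that the user hold both privileges on that relation.
 */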
411 * Check that the query does not imply any writes to non-temp tables.
414 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
419 * CREATE TABLE AS or SELECT INTO?
421 * XXX should we allow this if the destination is temp?
423 if (plannedstmt->intoClause != NULL)
426 /* Fail if write permissions are requested on any non-temp table */
427 foreach(l, plannedstmt->rtable)
429 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
431 if (rte->rtekind != RTE_RELATION)
434 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
437 if (isTempNamespace(get_rel_namespace(rte->relid)))
447 (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
448 errmsg("transaction is read-only")));
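/*
 * Illustrative effect (a sketch): after SET TRANSACTION READ ONLY, an
 * INSERT/UPDATE/DELETE targeting an ordinary table reaches the ereport
 * above, while the same statement against a temporary table is allowed
 * to proceed thanks to the isTempNamespace() escape hatch.
 */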
452 /* ----------------------------------------------------------------
455 * Initializes the query plan: open files, allocate storage
456 * and start up the rule manager
457 * ----------------------------------------------------------------
460 InitPlan(QueryDesc *queryDesc, int eflags)
462 CmdType operation = queryDesc->operation;
463 PlannedStmt *plannedstmt = queryDesc->plannedstmt;
464 Plan *plan = plannedstmt->planTree;
465 List *rangeTable = plannedstmt->rtable;
466 EState *estate = queryDesc->estate;
467 PlanState *planstate;
473 * Do permissions checks
475 ExecCheckRTPerms(rangeTable);
478 * initialize the node's execution state
480 estate->es_range_table = rangeTable;
483 * initialize result relation stuff
485 if (plannedstmt->resultRelations)
487 List *resultRelations = plannedstmt->resultRelations;
488 int numResultRelations = list_length(resultRelations);
489 ResultRelInfo *resultRelInfos;
490 ResultRelInfo *resultRelInfo;
492 resultRelInfos = (ResultRelInfo *)
493 palloc(numResultRelations * sizeof(ResultRelInfo));
494 resultRelInfo = resultRelInfos;
495 foreach(l, resultRelations)
497 Index resultRelationIndex = lfirst_int(l);
498 Oid resultRelationOid;
499 Relation resultRelation;
501 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
502 resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
503 initResultRelInfo(resultRelInfo,
507 estate->es_instrument);
510 estate->es_result_relations = resultRelInfos;
511 estate->es_num_result_relations = numResultRelations;
512 /* Initialize to first or only result rel */
513 estate->es_result_relation_info = resultRelInfos;
518 * if no result relation, then set state appropriately
520 estate->es_result_relations = NULL;
521 estate->es_num_result_relations = 0;
522 estate->es_result_relation_info = NULL;
526 * Detect whether we're doing SELECT INTO. If so, set the es_into_oids
527 * flag appropriately so that the plan tree will be initialized with the
528 * correct tuple descriptors. (Other SELECT INTO stuff comes later.)
530 estate->es_select_into = false;
531 if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
533 estate->es_select_into = true;
534 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
538 * Have to lock relations selected FOR UPDATE/FOR SHARE before we
539 * initialize the plan tree, else we'd be doing a lock upgrade.
540 * While we are at it, build the ExecRowMark list.
542 estate->es_rowMarks = NIL;
543 foreach(l, plannedstmt->rowMarks)
545 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
546 Oid relid = getrelid(rc->rti, rangeTable);
550 relation = heap_open(relid, RowShareLock);
551 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
552 erm->relation = relation;
554 erm->forUpdate = rc->forUpdate;
555 erm->noWait = rc->noWait;
556 /* We'll set up ctidAttno below */
557 erm->ctidAttNo = InvalidAttrNumber;
558 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
562 * Initialize the executor "tuple" table. We need slots for all the plan
563 * nodes, plus possibly output slots for the junkfilter(s). At this point
564 * we aren't sure if we need junkfilters, so just add slots for them
565 * unconditionally. Also, if it's not a SELECT, set up a slot for use for
566 * trigger output tuples. Also, one for RETURNING-list evaluation.
571 /* Slots for the main plan tree */
572 nSlots = ExecCountSlotsNode(plan);
573 /* Add slots for subplans and initplans */
574 foreach(l, plannedstmt->subplans)
576 Plan *subplan = (Plan *) lfirst(l);
578 nSlots += ExecCountSlotsNode(subplan);
580 /* Add slots for junkfilter(s) */
581 if (plannedstmt->resultRelations != NIL)
582 nSlots += list_length(plannedstmt->resultRelations);
585 if (operation != CMD_SELECT)
586 nSlots++; /* for es_trig_tuple_slot */
587 if (plannedstmt->returningLists)
588 nSlots++; /* for RETURNING projection */
590 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
592 if (operation != CMD_SELECT)
593 estate->es_trig_tuple_slot =
594 ExecAllocTableSlot(estate->es_tupleTable);
597 /* mark EvalPlanQual not active */
598 estate->es_plannedstmt = plannedstmt;
599 estate->es_evalPlanQual = NULL;
600 estate->es_evTupleNull = NULL;
601 estate->es_evTuple = NULL;
602 estate->es_useEvalPlan = false;
605 * Initialize private state information for each SubPlan. We must do
606 * this before running ExecInitNode on the main query tree, since
607 * ExecInitSubPlan expects to be able to find these entries.
609 Assert(estate->es_subplanstates == NIL);
610 i = 1; /* subplan indices count from 1 */
611 foreach(l, plannedstmt->subplans)
613 Plan *subplan = (Plan *) lfirst(l);
614 PlanState *subplanstate;
618 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.
619 * If it is a parameterless subplan (not initplan), we suggest that it
620 * be prepared to handle REWIND efficiently; otherwise there is no need.
623 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
624 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
625 sp_eflags |= EXEC_FLAG_REWIND;
627 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
629 estate->es_subplanstates = lappend(estate->es_subplanstates,
636 * Initialize the private state information for all the nodes in the query
637 * tree. This opens files, allocates storage and leaves us ready to start processing tuples.
640 planstate = ExecInitNode(plan, estate, eflags);
643 * Get the tuple descriptor describing the type of tuples to return. (this
644 * is especially important if we are creating a relation with "SELECT INTO").
647 tupType = ExecGetResultType(planstate);
650 * Initialize the junk filter if needed. SELECT and INSERT queries need a
651 * filter if there are any junk attrs in the tlist. INSERT and SELECT
652 * INTO also need a filter if the plan may return raw disk tuples (else
653 * heap_insert will be scribbling on the source relation!). UPDATE and
654 * DELETE always need a filter, since there's always a junk 'ctid'
655 * attribute present --- no need to look first.
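/*
 * Concrete illustration (not from the original comments): in a query such
 * as SELECT * FROM t ORDER BY x FOR UPDATE, the planner adds hidden junk
 * columns (for example a per-relation "ctid") to the targetlist for row
 * locking; the junk filter built below strips those columns so the client
 * sees only the declared SELECT columns.
 */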
658 bool junk_filter_needed = false;
665 foreach(tlist, plan->targetlist)
667 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
671 junk_filter_needed = true;
675 if (!junk_filter_needed &&
676 (operation == CMD_INSERT || estate->es_select_into) &&
677 ExecMayReturnRawTuples(planstate))
678 junk_filter_needed = true;
682 junk_filter_needed = true;
688 if (junk_filter_needed)
691 * If there are multiple result relations, each one needs its own
692 * junk filter. Note this is only possible for UPDATE/DELETE, so
693 * we can't be fooled by some needing a filter and some not.
695 if (list_length(plannedstmt->resultRelations) > 1)
697 PlanState **appendplans;
699 ResultRelInfo *resultRelInfo;
701 /* Top plan had better be an Append here. */
702 Assert(IsA(plan, Append));
703 Assert(((Append *) plan)->isTarget);
704 Assert(IsA(planstate, AppendState));
705 appendplans = ((AppendState *) planstate)->appendplans;
706 as_nplans = ((AppendState *) planstate)->as_nplans;
707 Assert(as_nplans == estate->es_num_result_relations);
708 resultRelInfo = estate->es_result_relations;
709 for (i = 0; i < as_nplans; i++)
711 PlanState *subplan = appendplans[i];
714 j = ExecInitJunkFilter(subplan->plan->targetlist,
715 resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
716 ExecAllocTableSlot(estate->es_tupleTable));
718 * Since it must be UPDATE/DELETE, there had better be
719 * a "ctid" junk attribute in the tlist ... but ctid could
720 * be at a different resno for each result relation.
721 * We look up the ctid resnos now and save them in the junkfilters.
724 j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
725 if (!AttributeNumberIsValid(j->jf_junkAttNo))
726 elog(ERROR, "could not find junk ctid column");
727 resultRelInfo->ri_junkFilter = j;
732 * Set active junkfilter too; at this point ExecInitAppend has
733 * already selected an active result relation...
735 estate->es_junkFilter =
736 estate->es_result_relation_info->ri_junkFilter;
740 /* Normal case with just one JunkFilter */
743 j = ExecInitJunkFilter(planstate->plan->targetlist,
745 ExecAllocTableSlot(estate->es_tupleTable));
746 estate->es_junkFilter = j;
747 if (estate->es_result_relation_info)
748 estate->es_result_relation_info->ri_junkFilter = j;
750 if (operation == CMD_SELECT)
752 /* For SELECT, want to return the cleaned tuple type */
753 tupType = j->jf_cleanTupType;
754 /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
755 foreach(l, estate->es_rowMarks)
757 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
760 snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
761 erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
762 if (!AttributeNumberIsValid(erm->ctidAttNo))
763 elog(ERROR, "could not find junk \"%s\" column",
767 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
769 /* For UPDATE/DELETE, find the ctid junk attr now */
770 j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
771 if (!AttributeNumberIsValid(j->jf_junkAttNo))
772 elog(ERROR, "could not find junk ctid column");
777 estate->es_junkFilter = NULL;
781 * Initialize RETURNING projections if needed.
783 if (plannedstmt->returningLists)
785 TupleTableSlot *slot;
786 ExprContext *econtext;
787 ResultRelInfo *resultRelInfo;
790 * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
791 * We assume all the sublists will generate the same output tupdesc.
793 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
796 /* Set up a slot for the output of the RETURNING projection(s) */
797 slot = ExecAllocTableSlot(estate->es_tupleTable);
798 ExecSetSlotDescriptor(slot, tupType);
799 /* Need an econtext too */
800 econtext = CreateExprContext(estate);
803 * Build a projection for each result rel. Note that any SubPlans in
804 * the RETURNING lists get attached to the topmost plan node.
806 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
807 resultRelInfo = estate->es_result_relations;
808 foreach(l, plannedstmt->returningLists)
810 List *rlist = (List *) lfirst(l);
813 rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
814 resultRelInfo->ri_projectReturning =
815 ExecBuildProjectionInfo(rliststate, econtext, slot,
816 resultRelInfo->ri_RelationDesc->rd_att);
821 queryDesc->tupDesc = tupType;
822 queryDesc->planstate = planstate;
825 * If doing SELECT INTO, initialize the "into" relation. We must wait
826 * till now so we have the "clean" result tuple type to create the new table from.
829 * If EXPLAIN, skip creating the "into" relation.
831 if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
832 OpenIntoRel(queryDesc);
836 * Initialize ResultRelInfo data for one result relation
839 initResultRelInfo(ResultRelInfo *resultRelInfo,
840 Relation resultRelationDesc,
841 Index resultRelationIndex,
846 * Check valid relkind ... parser and/or planner should have noticed
847 * this already, but let's make sure.
849 switch (resultRelationDesc->rd_rel->relkind)
851 case RELKIND_RELATION:
854 case RELKIND_SEQUENCE:
856 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
857 errmsg("cannot change sequence \"%s\"",
858 RelationGetRelationName(resultRelationDesc))));
860 case RELKIND_TOASTVALUE:
862 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
863 errmsg("cannot change TOAST relation \"%s\"",
864 RelationGetRelationName(resultRelationDesc))));
868 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
869 errmsg("cannot change view \"%s\"",
870 RelationGetRelationName(resultRelationDesc))));
874 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
875 errmsg("cannot change relation \"%s\"",
876 RelationGetRelationName(resultRelationDesc))));
880 /* OK, fill in the node */
881 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
882 resultRelInfo->type = T_ResultRelInfo;
883 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
884 resultRelInfo->ri_RelationDesc = resultRelationDesc;
885 resultRelInfo->ri_NumIndices = 0;
886 resultRelInfo->ri_IndexRelationDescs = NULL;
887 resultRelInfo->ri_IndexRelationInfo = NULL;
888 /* make a copy so as not to depend on relcache info not changing... */
889 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
890 if (resultRelInfo->ri_TrigDesc)
892 int n = resultRelInfo->ri_TrigDesc->numtriggers;
894 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
895 palloc0(n * sizeof(FmgrInfo));
897 resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
899 resultRelInfo->ri_TrigInstrument = NULL;
903 resultRelInfo->ri_TrigFunctions = NULL;
904 resultRelInfo->ri_TrigInstrument = NULL;
906 resultRelInfo->ri_ConstraintExprs = NULL;
907 resultRelInfo->ri_junkFilter = NULL;
908 resultRelInfo->ri_projectReturning = NULL;
911 * If there are indices on the result relation, open them and save
912 * descriptors in the result relation info, so that we can add new index
913 * entries for the tuples we add/update. We need not do this for a
914 * DELETE, however, since deletion doesn't affect indexes.
916 if (resultRelationDesc->rd_rel->relhasindex &&
917 operation != CMD_DELETE)
918 ExecOpenIndices(resultRelInfo);
922 * ExecGetTriggerResultRel
924 * Get a ResultRelInfo for a trigger target relation. Most of the time,
925 * triggers are fired on one of the result relations of the query, and so
926 * we can just return a member of the es_result_relations array. (Note: in
927 * self-join situations there might be multiple members with the same OID;
928 * if so it doesn't matter which one we pick.) However, it is sometimes
929 * necessary to fire triggers on other relations; this happens mainly when an
930 * RI update trigger queues additional triggers on other relations, which will
931 * be processed in the context of the outer query. For efficiency's sake,
932 * we want to have a ResultRelInfo for those triggers too; that can avoid
933 * repeated re-opening of the relation. (It also provides a way for EXPLAIN
934 * ANALYZE to report the runtimes of such triggers.) So we make additional
935 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
938 ExecGetTriggerResultRel(EState *estate, Oid relid)
940 ResultRelInfo *rInfo;
944 MemoryContext oldcontext;
946 /* First, search through the query result relations */
947 rInfo = estate->es_result_relations;
948 nr = estate->es_num_result_relations;
951 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
956 /* Nope, but maybe we already made an extra ResultRelInfo for it */
957 foreach(l, estate->es_trig_target_relations)
959 rInfo = (ResultRelInfo *) lfirst(l);
960 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
963 /* Nope, so we need a new one */
966 * Open the target relation's relcache entry. We assume that an
967 * appropriate lock is still held by the backend from whenever the
968 * trigger event got queued, so we need take no new lock here.
970 rel = heap_open(relid, NoLock);
973 * Make the new entry in the right context. Currently, we don't need
974 * any index information in ResultRelInfos used only for triggers,
975 * so tell initResultRelInfo it's a DELETE.
977 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
978 rInfo = makeNode(ResultRelInfo);
979 initResultRelInfo(rInfo,
981 0, /* dummy rangetable index */
983 estate->es_instrument);
984 estate->es_trig_target_relations =
985 lappend(estate->es_trig_target_relations, rInfo);
986 MemoryContextSwitchTo(oldcontext);
992 * ExecContextForcesOids
994 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
995 * we need to ensure that result tuples have space for an OID iff they are
996 * going to be stored into a relation that has OIDs. In other contexts
997 * we are free to choose whether to leave space for OIDs in result tuples
998 * (we generally don't want to, but we do if a physical-tlist optimization
999 * is possible). This routine checks the plan context and returns TRUE if the
1000 * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1001 * *hasoids is set to the required value.
1003 * One reason this is ugly is that all plan nodes in the plan tree will emit
1004 * tuples with space for an OID, though we really only need the topmost node
1005 * to do so. However, node types like Sort don't project new tuples but just
1006 * return their inputs, and in those cases the requirement propagates down
1007 * to the input node. Eventually we might make this code smart enough to
1008 * recognize how far down the requirement really goes, but for now we just
1009 * make all plan nodes do the same thing if the top level forces the choice.
1011 * We assume that estate->es_result_relation_info is already set up to
1012 * describe the target relation. Note that in an UPDATE that spans an
1013 * inheritance tree, some of the target relations may have OIDs and some not.
1014 * We have to make the decisions on a per-relation basis as we initialize
1015 * each of the child plans of the topmost Append plan.
1017 * SELECT INTO is even uglier, because we don't have the INTO relation's
1018 * descriptor available when this code runs; we have to look aside at a
1019 * flag set by InitPlan().
1022 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1024 if (planstate->state->es_select_into)
1026 *hasoids = planstate->state->es_into_oids;
1031 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1035 Relation rel = ri->ri_RelationDesc;
1039 *hasoids = rel->rd_rel->relhasoids;
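/*
 * Typical use by a plan-node initializer (a sketch, assuming a local bool
 * "hasoids"; the real callers live elsewhere in the executor):
 *
 *		if (!ExecContextForcesOids(planstate, &hasoids))
 *			hasoids = false;	(free choice: omit OID space)
 *
 * i.e. when the context does not force the decision, result tuples are
 * normally built without room for an OID.
 */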
1048 /* ----------------------------------------------------------------
1051 * Cleans up the query plan -- closes files and frees up storage
1053 * NOTE: we are no longer very worried about freeing storage per se
1054 * in this code; FreeExecutorState should be guaranteed to release all
1055 * memory that needs to be released. What we are worried about doing
1056 * is closing relations and dropping buffer pins. Thus, for example,
1057 * tuple tables must be cleared or dropped to ensure pins are released.
1058 * ----------------------------------------------------------------
1061 ExecEndPlan(PlanState *planstate, EState *estate)
1063 ResultRelInfo *resultRelInfo;
1068 * shut down any PlanQual processing we were doing
1070 if (estate->es_evalPlanQual != NULL)
1071 EndEvalPlanQual(estate);
1074 * shut down the node-type-specific query processing
1076 ExecEndNode(planstate);
1081 foreach(l, estate->es_subplanstates)
1083 PlanState *subplanstate = (PlanState *) lfirst(l);
1085 ExecEndNode(subplanstate);
1089 * destroy the executor "tuple" table.
1091 ExecDropTupleTable(estate->es_tupleTable, true);
1092 estate->es_tupleTable = NULL;
1095 * close the result relation(s) if any, but hold locks until xact commit.
1097 resultRelInfo = estate->es_result_relations;
1098 for (i = estate->es_num_result_relations; i > 0; i--)
1100 /* Close indices and then the relation itself */
1101 ExecCloseIndices(resultRelInfo);
1102 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1107 * likewise close any trigger target relations
1109 foreach(l, estate->es_trig_target_relations)
1111 resultRelInfo = (ResultRelInfo *) lfirst(l);
1112 /* Close indices and then the relation itself */
1113 ExecCloseIndices(resultRelInfo);
1114 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1118 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1120 foreach(l, estate->es_rowMarks)
1122 ExecRowMark *erm = lfirst(l);
1124 heap_close(erm->relation, NoLock);
1128 /* ----------------------------------------------------------------
1131 * processes the query plan to retrieve 'numberTuples' tuples in the
1132 * direction specified.
1134 * Retrieves all tuples if numberTuples is 0
1136 * result is either a slot containing the last tuple in the case
1137 * of a SELECT or NULL otherwise.
1139 * Note: the ctid attribute is a 'junk' attribute that is removed before the user can see it.
1141 * ----------------------------------------------------------------
1143 static TupleTableSlot *
1144 ExecutePlan(EState *estate,
1145 PlanState *planstate,
1148 ScanDirection direction,
1151 JunkFilter *junkfilter;
1152 TupleTableSlot *planSlot;
1153 TupleTableSlot *slot;
1154 ItemPointer tupleid = NULL;
1155 ItemPointerData tuple_ctid;
1156 long current_tuple_count;
1157 TupleTableSlot *result;
1160 * initialize local variables
1162 current_tuple_count = 0;
1166 * Set the direction.
1168 estate->es_direction = direction;
1171 * Process BEFORE EACH STATEMENT triggers
1176 ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1179 ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1182 ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1190 * Loop until we've processed the proper number of tuples from the plan.
1195 /* Reset the per-output-tuple exprcontext */
1196 ResetPerTupleExprContext(estate);
1199 * Execute the plan and obtain a tuple
1202 if (estate->es_useEvalPlan)
1204 planSlot = EvalPlanQualNext(estate);
1205 if (TupIsNull(planSlot))
1206 planSlot = ExecProcNode(planstate);
1209 planSlot = ExecProcNode(planstate);
1212 * if the tuple is null, then we assume there is nothing more to
1213 * process so we just return null...
1215 if (TupIsNull(planSlot))
1223 * if we have a junk filter, then project a new tuple with the junk attributes removed.
1226 * Store this new "clean" tuple in the junkfilter's resultSlot.
1227 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1228 * because that tuple slot has the wrong descriptor.)
1230 * Also, extract all the junk information we need.
1232 if ((junkfilter = estate->es_junkFilter) != NULL)
1238 * extract the 'ctid' junk attribute.
1240 if (operation == CMD_UPDATE || operation == CMD_DELETE)
1242 datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
1244 /* shouldn't ever get a null result... */
1246 elog(ERROR, "ctid is NULL");
1248 tupleid = (ItemPointer) DatumGetPointer(datum);
1249 tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */
1250 tupleid = &tuple_ctid;
1254 * Process any FOR UPDATE or FOR SHARE locking requested.
1256 else if (estate->es_rowMarks != NIL)
1261 foreach(l, estate->es_rowMarks)
1263 ExecRowMark *erm = lfirst(l);
1264 HeapTupleData tuple;
1266 ItemPointerData update_ctid;
1267 TransactionId update_xmax;
1268 TupleTableSlot *newSlot;
1269 LockTupleMode lockmode;
1272 datum = ExecGetJunkAttribute(slot,
1275 /* shouldn't ever get a null result... */
1277 elog(ERROR, "ctid is NULL");
1279 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1282 lockmode = LockTupleExclusive;
1284 lockmode = LockTupleShared;
1286 test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1287 &update_ctid, &update_xmax,
1288 estate->es_snapshot->curcid,
1289 lockmode, erm->noWait);
1290 ReleaseBuffer(buffer);
1293 case HeapTupleSelfUpdated:
1294 /* treat it as deleted; do not process */
1297 case HeapTupleMayBeUpdated:
1300 case HeapTupleUpdated:
1301 if (IsXactIsoLevelSerializable)
1303 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1304 errmsg("could not serialize access due to concurrent update")));
1305 if (!ItemPointerEquals(&update_ctid,
1308 /* updated, so look at updated version */
1309 newSlot = EvalPlanQual(estate,
1313 estate->es_snapshot->curcid);
1314 if (!TupIsNull(newSlot))
1316 slot = planSlot = newSlot;
1317 estate->es_useEvalPlan = true;
1323 * if the tuple was deleted or PlanQual failed for the
1324 * updated tuple, we must not return this tuple!
1329 elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1337 * Create a new "clean" tuple with all junk attributes removed. We
1338 * don't need to do this for DELETE, however (there will in fact
1339 * be no non-junk attributes in a DELETE!)
1341 if (operation != CMD_DELETE)
1342 slot = ExecFilterJunk(junkfilter, slot);
1346 * now that we have a tuple, do the appropriate thing with it.. either
1347 * return it to the user, add it to a relation someplace, delete it
1348 * from a relation, or modify some of its attributes.
1353 ExecSelect(slot, dest, estate);
1358 ExecInsert(slot, tupleid, planSlot, dest, estate);
1363 ExecDelete(tupleid, planSlot, dest, estate);
1368 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1373 elog(ERROR, "unrecognized operation code: %d",
1380 * check our tuple count.. if we've processed the proper number then
1381 * quit, else loop again and process more tuples. Zero numberTuples means no limit.
1384 current_tuple_count++;
1385 if (numberTuples && numberTuples == current_tuple_count)
1390 * Process AFTER EACH STATEMENT triggers
1395 ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1398 ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1401 ExecASInsertTriggers(estate, estate->es_result_relation_info);
1409 * here, result is either a slot containing a tuple in the case of a
1410 * SELECT or NULL otherwise.
1415 /* ----------------------------------------------------------------
1418 * SELECTs are easy.. we just pass the tuple to the appropriate output function.
1420 * ----------------------------------------------------------------
1423 ExecSelect(TupleTableSlot *slot,
1427 (*dest->receiveSlot) (slot, dest);
1429 (estate->es_processed)++;
1432 /* ----------------------------------------------------------------
1435 * INSERTs are trickier.. we have to insert the tuple into
1436 * the base relation and insert appropriate tuples into the index relations.
1438 * ----------------------------------------------------------------
1441 ExecInsert(TupleTableSlot *slot,
1442 ItemPointer tupleid,
1443 TupleTableSlot *planSlot,
1448 ResultRelInfo *resultRelInfo;
1449 Relation resultRelationDesc;
1453 * get the heap tuple out of the tuple table slot, making sure we have a writable copy.
1456 tuple = ExecMaterializeSlot(slot);
1459 * get information on the (current) result relation
1461 resultRelInfo = estate->es_result_relation_info;
1462 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1464 /* BEFORE ROW INSERT Triggers */
1465 if (resultRelInfo->ri_TrigDesc &&
1466 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1470 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1472 if (newtuple == NULL) /* "do nothing" */
1475 if (newtuple != tuple) /* modified by Trigger(s) */
1478 * Put the modified tuple into a slot for convenience of routines
1479 * below. We assume the tuple was allocated in per-tuple memory
1480 * context, and therefore will go away by itself. The tuple table
1481 * slot should not try to clear it.
1483 TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1485 if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1486 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1487 ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1494 * Check the constraints of the tuple
1496 if (resultRelationDesc->rd_att->constr)
1497 ExecConstraints(resultRelInfo, slot, estate);
1502 * Note: heap_insert returns the tid (location) of the new tuple in the t_self field.
1505 newId = heap_insert(resultRelationDesc, tuple,
1506 estate->es_snapshot->curcid,
1510 (estate->es_processed)++;
1511 estate->es_lastoid = newId;
1512 setLastTid(&(tuple->t_self));
1515 * insert index entries for tuple
1517 if (resultRelInfo->ri_NumIndices > 0)
1518 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1520 /* AFTER ROW INSERT Triggers */
1521 ExecARInsertTriggers(estate, resultRelInfo, tuple);
1523 /* Process RETURNING if present */
1524 if (resultRelInfo->ri_projectReturning)
1525 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1526 slot, planSlot, dest);
1529 /* ----------------------------------------------------------------
1532 * DELETE is like UPDATE, except that we delete the tuple and no
1533 * index modifications are needed
1534 * ----------------------------------------------------------------
1537 ExecDelete(ItemPointer tupleid,
1538 TupleTableSlot *planSlot,
1542 ResultRelInfo *resultRelInfo;
1543 Relation resultRelationDesc;
1545 ItemPointerData update_ctid;
1546 TransactionId update_xmax;
1549 * get information on the (current) result relation
1551 resultRelInfo = estate->es_result_relation_info;
1552 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1554 /* BEFORE ROW DELETE Triggers */
1555 if (resultRelInfo->ri_TrigDesc &&
1556 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1560 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1561 estate->es_snapshot->curcid);
1563 if (!dodelete) /* "do nothing" */
1570 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1571 * the row to be deleted is visible to that snapshot, and throw a can't-
1572 * serialize error if not. This is a special-case behavior needed for
1573 * referential integrity updates in serializable transactions.
1576 result = heap_delete(resultRelationDesc, tupleid,
1577 &update_ctid, &update_xmax,
1578 estate->es_snapshot->curcid,
1579 estate->es_crosscheck_snapshot,
1580 true /* wait for commit */ );
1583 case HeapTupleSelfUpdated:
1584 /* already deleted by self; nothing to do */
1587 case HeapTupleMayBeUpdated:
1590 case HeapTupleUpdated:
1591 if (IsXactIsoLevelSerializable)
1593 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1594 errmsg("could not serialize access due to concurrent update")));
1595 else if (!ItemPointerEquals(tupleid, &update_ctid))
1597 TupleTableSlot *epqslot;
1599 epqslot = EvalPlanQual(estate,
1600 resultRelInfo->ri_RangeTableIndex,
1603 estate->es_snapshot->curcid);
1604 if (!TupIsNull(epqslot))
1606 *tupleid = update_ctid;
1610 /* tuple already deleted; nothing to do */
1614 elog(ERROR, "unrecognized heap_delete status: %u", result);
1619 (estate->es_processed)++;
1622 * Note: Normally one would think that we have to delete index tuples
1623 * associated with the heap tuple now...
1625 * ... but in POSTGRES, we have no need to do this because VACUUM will
1626 * take care of it later. We can't delete index tuples immediately
1627 * anyway, since the tuple is still visible to other transactions.
1630 /* AFTER ROW DELETE Triggers */
1631 ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1633 /* Process RETURNING if present */
1634 if (resultRelInfo->ri_projectReturning)
1637 * We have to put the target tuple into a slot, which means we must
1638 * first fetch it. We can use the trigger tuple slot.
1640 TupleTableSlot *slot = estate->es_trig_tuple_slot;
1641 HeapTupleData deltuple;
1644 deltuple.t_self = *tupleid;
1645 if (!heap_fetch(resultRelationDesc, SnapshotAny,
1646 &deltuple, &delbuffer, false, NULL))
1647 elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1649 if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
1650 ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
1651 ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
1653 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1654 slot, planSlot, dest);
1656 ExecClearTuple(slot);
1657 ReleaseBuffer(delbuffer);
1661 /* ----------------------------------------------------------------
1664 * note: we can't run UPDATE queries with transactions
1665 * off because UPDATEs are actually INSERTs and our
1666 * scan will mistakenly loop forever, updating the tuple
1667 * it just inserted.. This should be fixed but until it
1668 * is, we don't want to get stuck in an infinite loop
1669 * which corrupts your database..
1670 * ----------------------------------------------------------------
1673 ExecUpdate(TupleTableSlot *slot,
1674 ItemPointer tupleid,
1675 TupleTableSlot *planSlot,
1680 ResultRelInfo *resultRelInfo;
1681 Relation resultRelationDesc;
1683 ItemPointerData update_ctid;
1684 TransactionId update_xmax;
1687 * abort the operation if not running transactions
1689 if (IsBootstrapProcessingMode())
1690 elog(ERROR, "cannot UPDATE during bootstrap");
1693 * get the heap tuple out of the tuple table slot, making sure we have a writable copy.
1696 tuple = ExecMaterializeSlot(slot);
1699 * get information on the (current) result relation
1701 resultRelInfo = estate->es_result_relation_info;
1702 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1704 /* BEFORE ROW UPDATE Triggers */
1705 if (resultRelInfo->ri_TrigDesc &&
1706 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1710 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1712 estate->es_snapshot->curcid);
1714 if (newtuple == NULL) /* "do nothing" */
1717 if (newtuple != tuple) /* modified by Trigger(s) */
1720 * Put the modified tuple into a slot for convenience of routines
1721 * below. We assume the tuple was allocated in per-tuple memory
1722 * context, and therefore will go away by itself. The tuple table
1723 * slot should not try to clear it.
1725 TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1727 if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1728 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1729 ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1736 * Check the constraints of the tuple
1738 * If we generate a new candidate tuple after EvalPlanQual testing, we
1739 * must loop back here and recheck constraints. (We don't need to redo
1740 * triggers, however. If there are any BEFORE triggers then trigger.c
1741 * will have done heap_lock_tuple to lock the correct tuple, so there's no
1742 * need to do them again.)
1745 if (resultRelationDesc->rd_att->constr)
1746 ExecConstraints(resultRelInfo, slot, estate);
1749 * replace the heap tuple
1751 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1752 * the row to be updated is visible to that snapshot, and throw a can't-
1753 * serialize error if not. This is a special-case behavior needed for
1754 * referential integrity updates in serializable transactions.
1756 result = heap_update(resultRelationDesc, tupleid, tuple,
1757 &update_ctid, &update_xmax,
1758 estate->es_snapshot->curcid,
1759 estate->es_crosscheck_snapshot,
1760 true /* wait for commit */ );
1763 case HeapTupleSelfUpdated:
1764 /* already deleted by self; nothing to do */
1767 case HeapTupleMayBeUpdated:
1770 case HeapTupleUpdated:
1771 if (IsXactIsoLevelSerializable)
1773 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1774 errmsg("could not serialize access due to concurrent update")));
1775 else if (!ItemPointerEquals(tupleid, &update_ctid))
1777 TupleTableSlot *epqslot;
1779 epqslot = EvalPlanQual(estate,
1780 resultRelInfo->ri_RangeTableIndex,
1783 estate->es_snapshot->curcid);
1784 if (!TupIsNull(epqslot))
1786 *tupleid = update_ctid;
1787 slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1788 tuple = ExecMaterializeSlot(slot);
1792 /* tuple already deleted; nothing to do */
1796 elog(ERROR, "unrecognized heap_update status: %u", result);
1801 (estate->es_processed)++;
1804 * Note: instead of having to update the old index tuples associated with
1805 * the heap tuple, all we do is form and insert new index tuples. This is
1806 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1807 * deletion is done later by VACUUM (see notes in ExecDelete). All we do
1808 * here is insert new index tuples. -cim 9/27/89
1812 * insert index entries for tuple
1814 * Note: heap_update returns the tid (location) of the new tuple in the t_self field.
1817 if (resultRelInfo->ri_NumIndices > 0)
1818 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1820 /* AFTER ROW UPDATE Triggers */
1821 ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1823 /* Process RETURNING if present */
1824 if (resultRelInfo->ri_projectReturning)
1825 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1826 slot, planSlot, dest);
1830 * ExecRelCheck --- check that tuple meets constraints for result relation
1833 ExecRelCheck(ResultRelInfo *resultRelInfo,
1834 TupleTableSlot *slot, EState *estate)
1836 Relation rel = resultRelInfo->ri_RelationDesc;
1837 int ncheck = rel->rd_att->constr->num_check;
1838 ConstrCheck *check = rel->rd_att->constr->check;
1839 ExprContext *econtext;
1840 MemoryContext oldContext;
1845 * If first time through for this result relation, build expression
1846 * nodetrees for rel's constraint expressions. Keep them in the per-query
1847 * memory context so they'll survive throughout the query.
1849 if (resultRelInfo->ri_ConstraintExprs == NULL)
1851 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1852 resultRelInfo->ri_ConstraintExprs =
1853 (List **) palloc(ncheck * sizeof(List *));
1854 for (i = 0; i < ncheck; i++)
1856 /* ExecQual wants implicit-AND form */
1857 qual = make_ands_implicit(stringToNode(check[i].ccbin));
1858 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1859 ExecPrepareExpr((Expr *) qual, estate);
1861 MemoryContextSwitchTo(oldContext);
1865 * We will use the EState's per-tuple context for evaluating constraint
1866 * expressions (creating it if it's not already there).
1868 econtext = GetPerTupleExprContext(estate);
1870 /* Arrange for econtext's scan tuple to be the tuple under test */
1871 econtext->ecxt_scantuple = slot;
1873 /* And evaluate the constraints */
1874 for (i = 0; i < ncheck; i++)
1876 qual = resultRelInfo->ri_ConstraintExprs[i];
1879 * NOTE: SQL92 specifies that a NULL result from a constraint
1880 * expression is not to be treated as a failure. Therefore, tell
1881 * ExecQual to return TRUE for NULL.
1883 if (!ExecQual(qual, econtext, true))
1884 return check[i].ccname;
1887 /* NULL result means no error */
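/*
 * Worked example of the NULL rule above (illustrative only): given
 * CHECK (price > 0), a row with a NULL price makes the expression yield
 * NULL, which ExecQual treats as TRUE here, so the row is accepted; only
 * a definite FALSE causes the constraint name to be returned as a
 * violation.
 */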
1892 ExecConstraints(ResultRelInfo *resultRelInfo,
1893 TupleTableSlot *slot, EState *estate)
1895 Relation rel = resultRelInfo->ri_RelationDesc;
1896 TupleConstr *constr = rel->rd_att->constr;
1900 if (constr->has_not_null)
1902 int natts = rel->rd_att->natts;
1905 for (attrChk = 1; attrChk <= natts; attrChk++)
1907 if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1908 slot_attisnull(slot, attrChk))
1910 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1911 errmsg("null value in column \"%s\" violates not-null constraint",
1912 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1916 if (constr->num_check > 0)
1920 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1922 (errcode(ERRCODE_CHECK_VIOLATION),
1923 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1924 RelationGetRelationName(rel), failed)));
1929 * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1931 * projectReturning: RETURNING projection info for current result rel
1932 * tupleSlot: slot holding tuple actually inserted/updated/deleted
1933 * planSlot: slot holding tuple returned by top plan node
1934 * dest: where to send the output
1937 ExecProcessReturning(ProjectionInfo *projectReturning,
1938 TupleTableSlot *tupleSlot,
1939 TupleTableSlot *planSlot,
1942 ExprContext *econtext = projectReturning->pi_exprContext;
1943 TupleTableSlot *retSlot;
1946 * Reset per-tuple memory context to free any expression evaluation
1947 * storage allocated in the previous cycle.
1949 ResetExprContext(econtext);
1951 /* Make tuple and any needed join variables available to ExecProject */
1952 econtext->ecxt_scantuple = tupleSlot;
1953 econtext->ecxt_outertuple = planSlot;
1955 /* Compute the RETURNING expressions */
1956 retSlot = ExecProject(projectReturning, NULL);
1959 (*dest->receiveSlot) (retSlot, dest);
1961 ExecClearTuple(retSlot);
1965 * Check a modified tuple to see if we want to process its updated version
1966 * under READ COMMITTED rules.
1968 * See backend/executor/README for some info about how this works.
1970 * estate - executor state data
1971 * rti - rangetable index of table containing tuple
1972 * *tid - t_ctid from the outdated tuple (ie, next updated version)
1973 * priorXmax - t_xmax from the outdated tuple
1974 * curCid - command ID of current command of my transaction
1976 * *tid is also an output parameter: it's modified to hold the TID of the
1977 * latest version of the tuple (note this may be changed even on failure)
1979 * Returns a slot containing the new candidate update/delete tuple, or
1980 * NULL if we determine we shouldn't process the row.
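/*
 * Illustrative scenario (not from the original comments): under READ
 * COMMITTED, suppose our UPDATE hits a row that another, already-committed
 * transaction has just updated.  heap_update reports HeapTupleUpdated, the
 * caller passes the successor TID here, and EvalPlanQual re-evaluates the
 * plan quals against that newest version; if they still pass, the returned
 * slot becomes the new candidate tuple, otherwise the row is skipped.
 */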
1983 EvalPlanQual(EState *estate, Index rti,
1984 ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1989 HeapTupleData tuple;
1990 HeapTuple copyTuple = NULL;
1991 SnapshotData SnapshotDirty;
1997 * find relation containing target tuple
1999 if (estate->es_result_relation_info != NULL &&
2000 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2001 relation = estate->es_result_relation_info->ri_RelationDesc;
2007 foreach(l, estate->es_rowMarks)
2009 if (((ExecRowMark *) lfirst(l))->rti == rti)
2011 relation = ((ExecRowMark *) lfirst(l))->relation;
2015 if (relation == NULL)
2016 elog(ERROR, "could not find RowMark for RT index %u", rti);
2022 * Loop here to deal with updated or busy tuples
2024 InitDirtySnapshot(SnapshotDirty);
2025 tuple.t_self = *tid;
2030 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2033 * If xmin isn't what we're expecting, the slot must have been
2034 * recycled and reused for an unrelated tuple. This implies that
2035 * the latest version of the row was deleted, so we need do
2036 * nothing. (Should be safe to examine xmin without getting
2037 * buffer's content lock, since xmin never changes in an existing tuple.)
2040 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2043 ReleaseBuffer(buffer);
2047 /* otherwise xmin should not be dirty... */
2048 if (TransactionIdIsValid(SnapshotDirty.xmin))
2049 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2052 * If the tuple is being updated by another transaction, we have to
2053 * wait for its commit/abort.
2055 if (TransactionIdIsValid(SnapshotDirty.xmax))
2057 ReleaseBuffer(buffer);
2058 XactLockTableWait(SnapshotDirty.xmax);
2059 continue; /* loop back to repeat heap_fetch */
2063 * If tuple was inserted by our own transaction, we have to check
2064 * cmin against curCid: cmin >= curCid means our command cannot
2065 * see the tuple, so we should ignore it. Without this we are
2066 * open to the "Halloween problem" of indefinitely re-updating the
2067 * same tuple. (We need not check cmax because
2068 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2069 * transaction dead, regardless of cmax.) We just checked that
2070 * priorXmax == xmin, so we can test that variable instead of
2071 * doing HeapTupleHeaderGetXmin again.
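/*
 * Example of the hazard (illustrative): in UPDATE t SET x = x + 1, the new
 * row versions inserted by our own command have cmin equal to curCid, so
 * without this test we could follow ctid chains onto them and re-update
 * the same logical row indefinitely.
 */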
2073 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2074 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
2076 ReleaseBuffer(buffer);
2081 * We got the tuple - now copy it for use by the recheck query.
2083 copyTuple = heap_copytuple(&tuple);
2084 ReleaseBuffer(buffer);
2089 * If the referenced slot was actually empty, the latest version of
2090 * the row must have been deleted, so we need do nothing.
2092 if (tuple.t_data == NULL)
2094 ReleaseBuffer(buffer);
2099 * As above, if xmin isn't what we're expecting, do nothing.
2101 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2104 ReleaseBuffer(buffer);
2109 * If we get here, the tuple was found but failed SnapshotDirty.
2110 * Assuming the xmin is either a committed xact or our own xact (as it
2111 * certainly should be if we're trying to modify the tuple), this must
2112 * mean that the row was updated or deleted by either a committed xact
2113 * or our own xact. If it was deleted, we can ignore it; if it was
2114 * updated then chain up to the next version and repeat the whole process.
2117 * As above, it should be safe to examine xmax and t_ctid without the
2118 * buffer content lock, because they can't be changing.
2120 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2122 /* deleted, so forget about it */
2123 ReleaseBuffer(buffer);
2127 /* updated, so look at the updated row */
2128 tuple.t_self = tuple.t_data->t_ctid;
2129 /* updated row should have xmin matching this xmax */
2130 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2131 ReleaseBuffer(buffer);
2132 /* loop back to fetch next in chain */
2136 * For UPDATE/DELETE we have to return the tid of the actual row we're executing PQ for.
2139 *tid = tuple.t_self;
2142 * Need to run a recheck subquery. Find or create a PQ stack entry.
2144 epq = estate->es_evalPlanQual;
2147 if (epq != NULL && epq->rti == 0)
2149 /* Top PQ stack entry is idle, so re-use it */
2150 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2156 * If this is a request for another RTE, Ra, then we have to check whether
2157 * PlanQual was already requested for Ra. If so, Ra's row was updated again,
2158 * and we have to restart the old execution for Ra and forget everything we
2159 * did after Ra was suspended.
2161 if (epq != NULL && epq->rti != rti &&
2162 epq->estate->es_evTuple[rti - 1] != NULL)
2166 evalPlanQual *oldepq;
2168 /* stop execution */
2169 EvalPlanQualStop(epq);
2170 /* pop previous PlanQual from the stack */
2172 Assert(oldepq && oldepq->rti != 0);
2173 /* push current PQ to freePQ stack */
2176 estate->es_evalPlanQual = epq;
2177 } while (epq->rti != rti);
2181 * If we are requested for another RTE, we have to suspend execution
2182 * of the current PlanQual and start execution for the new one.
2184 if (epq == NULL || epq->rti != rti)
2186 /* try to reuse plan used previously */
2187 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2189 if (newepq == NULL) /* first call or freePQ stack is empty */
2191 newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2192 newepq->free = NULL;
2193 newepq->estate = NULL;
2194 newepq->planstate = NULL;
2198 /* recycle previously used PlanQual */
2199 Assert(newepq->estate == NULL);
2202 /* push current PQ to the stack */
2205 estate->es_evalPlanQual = epq;
2210 Assert(epq->rti == rti);
2213 * Ok - we're requested for the same RTE. Unfortunately we still have to
2214 * end and restart execution of the plan, because ExecReScan wouldn't
2215 * ensure that upper plan nodes would reset themselves. We could make
2216 * that work if insertion of the target tuple were integrated with the
2217 * Param mechanism somehow, so that the upper plan nodes know that their
2218 * children's outputs have changed.
2220 * Note that the stack of free evalPlanQual nodes is quite useless at the
2221 * moment, since it only saves us from pallocing/releasing the
2222 * evalPlanQual nodes themselves. But it will be useful once we implement
2223 * ReScan instead of end/restart for re-using PlanQual nodes.
2227 /* stop execution */
2228 EvalPlanQualStop(epq);
2232 * Initialize new recheck query.
2234 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2235 * instead copy down changeable state from the top plan (including
2236 * es_result_relation_info, es_junkFilter) and reset locally changeable
2237 * state in the epq (including es_param_exec_vals, es_evTupleNull).
2239 EvalPlanQualStart(epq, estate, epq->next);
2242 * free the old RTE's tuple, if any, and store the target tuple where the
2243 * relation's scan node will see it
2245 epqstate = epq->estate;
2246 if (epqstate->es_evTuple[rti - 1] != NULL)
2247 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2248 epqstate->es_evTuple[rti - 1] = copyTuple;
2250 return EvalPlanQualNext(estate);
2253 static TupleTableSlot *
2254 EvalPlanQualNext(EState *estate)
2256 evalPlanQual *epq = estate->es_evalPlanQual;
2257 MemoryContext oldcontext;
2258 TupleTableSlot *slot;
2260 Assert(epq->rti != 0);
2263 oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2264 slot = ExecProcNode(epq->planstate);
2265 MemoryContextSwitchTo(oldcontext);
2268 * No more tuples for this PQ. Continue with the previous one.
2270 if (TupIsNull(slot))
2272 evalPlanQual *oldepq;
2274 /* stop execution */
2275 EvalPlanQualStop(epq);
2276 /* pop old PQ from the stack */
2280 /* this is the first (oldest) PQ - mark as free */
2282 estate->es_useEvalPlan = false;
2283 /* and continue Query execution */
2286 Assert(oldepq->rti != 0);
2287 /* push current PQ to freePQ stack */
2290 estate->es_evalPlanQual = epq;
2298 EndEvalPlanQual(EState *estate)
2300 evalPlanQual *epq = estate->es_evalPlanQual;
2302 if (epq->rti == 0) /* plans already shut down */
2304 Assert(epq->next == NULL);
2310 evalPlanQual *oldepq;
2312 /* stop execution */
2313 EvalPlanQualStop(epq);
2314 /* pop old PQ from the stack */
2318 /* this is the first (oldest) PQ - mark as free */
2320 estate->es_useEvalPlan = false;
2323 Assert(oldepq->rti != 0);
2324 /* push current PQ to freePQ stack */
2327 estate->es_evalPlanQual = epq;
2332 * Start execution of one level of PlanQual.
2334 * This is a cut-down version of ExecutorStart(): we copy some state from
2335 * the top-level estate rather than initializing it fresh.
2338 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2342 MemoryContext oldcontext;
2345 rtsize = list_length(estate->es_range_table);
2347 epq->estate = epqstate = CreateExecutorState();
2349 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2352 * The epqstates share the top query's copy of unchanging state such as
2353 * the snapshot, rangetable, result-rel info, and external Param info.
2354 * They need their own copies of local state, including a tuple table,
2355 * es_param_exec_vals, etc.
2357 epqstate->es_direction = ForwardScanDirection;
2358 epqstate->es_snapshot = estate->es_snapshot;
2359 epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2360 epqstate->es_range_table = estate->es_range_table;
2361 epqstate->es_result_relations = estate->es_result_relations;
2362 epqstate->es_num_result_relations = estate->es_num_result_relations;
2363 epqstate->es_result_relation_info = estate->es_result_relation_info;
2364 epqstate->es_junkFilter = estate->es_junkFilter;
2365 /* es_trig_target_relations must NOT be copied */
2366 epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2367 epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2368 epqstate->es_param_list_info = estate->es_param_list_info;
2369 if (estate->es_plannedstmt->nParamExec > 0)
2370 epqstate->es_param_exec_vals = (ParamExecData *)
2371 palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2372 epqstate->es_rowMarks = estate->es_rowMarks;
2373 epqstate->es_instrument = estate->es_instrument;
2374 epqstate->es_select_into = estate->es_select_into;
2375 epqstate->es_into_oids = estate->es_into_oids;
2376 epqstate->es_plannedstmt = estate->es_plannedstmt;
2379 * Each epqstate must have its own es_evTupleNull state, but all the stack
2380 * entries share es_evTuple state. This allows sub-rechecks to inherit
2381 * the value being examined by an outer recheck.
2383 epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2384 if (priorepq == NULL)
2385 /* first PQ stack entry */
2386 epqstate->es_evTuple = (HeapTuple *)
2387 palloc0(rtsize * sizeof(HeapTuple));
2389 /* later stack entries share the same storage */
2390 epqstate->es_evTuple = priorepq->estate->es_evTuple;
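/*
 * For instance, if an outer recheck for RTE 2 has stored its test tuple in
 * es_evTuple[1], a nested recheck started for RTE 3 sees the same array,
 * and hence that same tuple for RTE 2, while keeping its own
 * es_evTupleNull flags.
 */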
2393 * Create sub-tuple-table; we needn't redo the CountSlots work though.
2395 epqstate->es_tupleTable =
2396 ExecCreateTupleTable(estate->es_tupleTable->size);
2399 * Initialize private state information for each SubPlan. We must do
2400 * this before running ExecInitNode on the main query tree, since
2401 * ExecInitSubPlan expects to be able to find these entries.
2403 Assert(epqstate->es_subplanstates == NIL);
2404 foreach(l, estate->es_plannedstmt->subplans)
2406 Plan *subplan = (Plan *) lfirst(l);
2407 PlanState *subplanstate;
2409 subplanstate = ExecInitNode(subplan, epqstate, 0);
2411 epqstate->es_subplanstates = lappend(epqstate->es_subplanstates, subplanstate);
2416 * Initialize the private state information for all the nodes in the query
2417 * tree. This opens files, allocates storage and leaves us ready to start
2418 * processing tuples.
2420 epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2422 MemoryContextSwitchTo(oldcontext);
2426 * End execution of one level of PlanQual.
2428 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2429 * of the normal cleanup, but *not* close result relations (which we are
2430 * just sharing from the outer query). We do, however, have to close any
2431 * trigger target relations that got opened, since those are not shared.
2434 EvalPlanQualStop(evalPlanQual *epq)
2436 EState *epqstate = epq->estate;
2437 MemoryContext oldcontext;
2440 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2442 ExecEndNode(epq->planstate);
2444 foreach(l, epqstate->es_subplanstates)
2446 PlanState *subplanstate = (PlanState *) lfirst(l);
2448 ExecEndNode(subplanstate);
2451 ExecDropTupleTable(epqstate->es_tupleTable, true);
2452 epqstate->es_tupleTable = NULL;
2454 if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2456 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2457 epqstate->es_evTuple[epq->rti - 1] = NULL;
2460 foreach(l, epqstate->es_trig_target_relations)
2462 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2464 /* Close indices and then the relation itself */
2465 ExecCloseIndices(resultRelInfo);
2466 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2469 MemoryContextSwitchTo(oldcontext);
2471 FreeExecutorState(epqstate);
2474 epq->planstate = NULL;
2478 * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2480 * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2481 * are looking at a row returned by the EvalPlanQual machinery, we need
2482 * to look at the subsidiary state instead.
2485 ExecGetActivePlanTree(QueryDesc *queryDesc)
2487 EState *estate = queryDesc->estate;
2489 if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2490 return estate->es_evalPlanQual->planstate;
2492 return queryDesc->planstate;
2497 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2499 * We implement SELECT INTO by diverting SELECT's normal output with
2500 * a specialized DestReceiver type.
2502 * TODO: remove some of the INTO-specific cruft from EState, and keep
2503 * it in the DestReceiver instead.
2508 DestReceiver pub; /* publicly-known function pointers */
2509 EState *estate; /* EState we are working with */
2513 * OpenIntoRel --- actually create the SELECT INTO target relation
2515 * This also replaces QueryDesc->dest with the special DestReceiver for
2516 * SELECT INTO. We assume that the correct result tuple type has already
2517 * been placed in queryDesc->tupDesc.
2520 OpenIntoRel(QueryDesc *queryDesc)
2522 IntoClause *into = queryDesc->plannedstmt->intoClause;
2523 EState *estate = queryDesc->estate;
2524 Relation intoRelationDesc;
2529 AclResult aclresult;
2532 DR_intorel *myState;
2537 * Check consistency of arguments
2539 if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2541 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2542 errmsg("ON COMMIT can only be used on temporary tables")));
2545 * Find namespace to create in, check its permissions
2547 intoName = into->rel->relname;
2548 namespaceId = RangeVarGetCreationNamespace(into->rel);
2550 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE);
2552 if (aclresult != ACLCHECK_OK)
2553 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2554 get_namespace_name(namespaceId));
2557 * Select tablespace to use. If not specified, use default tablespace
2558 * (which may in turn default to database's default).
2560 if (into->tableSpaceName)
2562 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2563 if (!OidIsValid(tablespaceId))
2565 (errcode(ERRCODE_UNDEFINED_OBJECT),
2566 errmsg("tablespace \"%s\" does not exist",
2567 into->tableSpaceName)));
2571 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2572 /* note InvalidOid is OK in this case */
2575 /* Check permissions except when using the database's default space */
2576 if (OidIsValid(tablespaceId))
2578 AclResult aclresult;
2580 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
2583 if (aclresult != ACLCHECK_OK)
2584 aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2585 get_tablespace_name(tablespaceId));
2588 /* Parse and validate any reloptions */
2589 reloptions = transformRelOptions((Datum) 0,
2593 (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
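/* (the result is discarded; heap_reloptions is called here only to validate the options) */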
2595 /* have to copy the actual tupdesc to get rid of any constraints */
2596 tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2598 /* Now we can actually create the new relation */
2599 intoRelationId = heap_create_with_catalog(intoName,
2611 allowSystemTableMods);
2613 FreeTupleDesc(tupdesc);
2616 * Advance command counter so that the newly-created relation's catalog
2617 * tuples will be visible to heap_open.
2619 CommandCounterIncrement();
2622 * If necessary, create a TOAST table for the INTO relation. Note that
2623 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2624 * the TOAST table will be visible for insertion.
2626 AlterTableCreateToastTable(intoRelationId);
2629 * And open the constructed table for writing.
2631 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2633 /* use_wal off requires rd_targblock be initially invalid */
2634 Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2637 * We can skip WAL-logging the insertions, unless PITR is in use.
2639 * Note that for a non-temp INTO table, this is safe only because we know
2640 * that the catalog changes above will have been WAL-logged, and so
2641 * RecordTransactionCommit will think it needs to WAL-log the eventual
2642 * transaction commit. Else the commit might be lost, even though all the
2643 * data is safely fsync'd ...
2645 estate->es_into_relation_use_wal = XLogArchivingActive();
2646 estate->es_into_relation_descriptor = intoRelationDesc;
2649 * Now replace the query's DestReceiver with one for SELECT INTO
2651 queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
2652 myState = (DR_intorel *) queryDesc->dest;
2653 Assert(myState->pub.mydest == DestIntoRel);
2654 myState->estate = estate;
2658 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2661 CloseIntoRel(QueryDesc *queryDesc)
2663 EState *estate = queryDesc->estate;
2665 /* OpenIntoRel might never have gotten called */
2666 if (estate->es_into_relation_descriptor)
2668 /* If we skipped using WAL, must heap_sync before commit */
2669 if (!estate->es_into_relation_use_wal)
2670 heap_sync(estate->es_into_relation_descriptor);
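/*
 * Without the heap_sync, the inserted data could still exist only in
 * shared buffers and the kernel cache at commit time; since the inserts
 * were never WAL-logged, a crash soon after commit could then lose them.
 */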
2672 /* close rel, but keep lock until commit */
2673 heap_close(estate->es_into_relation_descriptor, NoLock);
2675 estate->es_into_relation_descriptor = NULL;
2680 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2682 * Since CreateDestReceiver doesn't accept the parameters we'd need,
2683 * we just leave the private fields empty here. OpenIntoRel will fill them in.
2687 CreateIntoRelDestReceiver(void)
2689 DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2691 self->pub.receiveSlot = intorel_receive;
2692 self->pub.rStartup = intorel_startup;
2693 self->pub.rShutdown = intorel_shutdown;
2694 self->pub.rDestroy = intorel_destroy;
2695 self->pub.mydest = DestIntoRel;
2697 self->estate = NULL;
2699 return (DestReceiver *) self;
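/*
 * In outline, the executor exercises this receiver roughly as follows (a
 * sketch only; error paths and the exact call sites are omitted):
 *
 *		dest = CreateDestReceiver(DestIntoRel, NULL);
 *		(*dest->rStartup) (dest, CMD_SELECT, tupDesc);		intorel_startup
 *		for each tuple produced by the plan:
 *			(*dest->receiveSlot) (slot, dest);				intorel_receive
 *		(*dest->rShutdown) (dest);							intorel_shutdown
 *		(*dest->rDestroy) (dest);							intorel_destroy
 */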
2703 * intorel_startup --- executor startup
2706 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2712 * intorel_receive --- receive one tuple
2715 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2717 DR_intorel *myState = (DR_intorel *) self;
2718 EState *estate = myState->estate;
2721 tuple = ExecCopySlotTuple(slot);
2723 heap_insert(estate->es_into_relation_descriptor, tuple,
2725 estate->es_snapshot->curcid,
2726 estate->es_into_relation_use_wal,
2727 false); /* never any point in using FSM */
2729 /* We know this is a newly created relation, so there are no indexes */
2731 heap_freetuple(tuple);
2737 * intorel_shutdown --- executor end
2740 intorel_shutdown(DestReceiver *self)
2746 * intorel_destroy --- release DestReceiver object
2749 intorel_destroy(DestReceiver *self)