/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd().
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.273 2006/07/03 22:45:38 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
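/*
 * Illustrative sketch (not code from this file): a caller such as the
 * portal layer is expected to drive these entry points roughly as follows,
 * assuming "qd" is a QueryDesc previously built by CreateQueryDesc:
 *
 *		ExecutorStart(qd, 0);
 *		ExecutorRun(qd, ForwardScanDirection, 0L);	// count 0 = run to end
 *		ExecutorEnd(qd);
 *
 * The real callers live elsewhere (portal/SPI code); this only shows the
 * intended call sequence.
 */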
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/xlog.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "commands/tablecmds.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/execdefs.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/var.h"
#include "parser/parsetree.h"
#include "parser/parse_clause.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"

typedef struct evalPlanQual
{
	Index		rti;
	EState	   *estate;
	PlanState  *planstate;
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
				  Index resultRelationIndex,
				  List *rangeTable,
				  CmdType operation,
				  bool doInstrument);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest,
		   EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
		   EState *estate);
static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
		   EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
		   EState *estate);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(Query *parsetree);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
				  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);

/* end of local decls */
/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->parsetree);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in parameters, if any, from queryDesc
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plantree->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = queryDesc->snapshot;
	estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
	estate->es_instrument = queryDesc->doInstrument;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, long count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	TupleTableSlot *result;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/*
	 * run plan, unless direction says not to
	 */
	if (ScanDirectionIsNoMovement(direction))
		result = NULL;
	else
		result = ExecutePlan(estate,
							 queryDesc->planstate,
							 operation,
							 count,
							 direction,
							 dest);

	/*
	 * shutdown tuple receiver, if we started it
	 */
	(*dest->rShutdown) (dest);

	MemoryContextSwitchTo(oldcontext);

	return result;
}
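/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file): a portal fetching in pages of ten tuples might repeatedly
 * call
 *
 *		(void) ExecutorRun(qd, ForwardScanDirection, 10L);
 *
 * while passing count = 0 runs the plan to completion in a single call.
 */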
/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
}
/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate, NULL);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
void
ExecCheckRTPerms(List *rangeTable)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = lfirst(l);

		ExecCheckRTEPerms(rte);
	}
}
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
	 * checked by ExecInitSubqueryScan if the subquery is still a separate
	 * subquery --- if it's been pulled up into our query level then the RTEs
	 * are in our rangetable and will be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we
	 * could call it once in ExecCheckRTPerms and pass the userid down from
	 * there.  But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
	 */
	if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
		!= requiredPerms)
		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
					   get_rel_name(relOid));
}
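/*
 * Illustrative note (an assumption, not code from this file): because
 * requiredPerms is a bitmask, the RTE for "UPDATE t SET a = a + 1" would
 * carry something like (ACL_SELECT | ACL_UPDATE), and ACLMASK_ALL makes
 * pg_class_aclmask report only the bits actually held:
 *
 *		mask = pg_class_aclmask(relOid, userid,
 *								ACL_SELECT | ACL_UPDATE, ACLMASK_ALL);
 *		// mask != (ACL_SELECT | ACL_UPDATE) => some privilege is missing
 *
 * which is why a yes/no aclcheck on a single privilege would not do here.
 */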
/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(Query *parsetree)
{
	ListCell   *l;

	/*
	 * CREATE TABLE AS or SELECT INTO?
	 *
	 * XXX should we allow this if the destination is temp?
	 */
	if (parsetree->into != NULL)
		goto fail;

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, parsetree->rtable)
	{
		RangeTblEntry *rte = lfirst(l);

		if (rte->rtekind == RTE_SUBQUERY)
		{
			ExecCheckXactReadOnly(rte->subquery);
			continue;
		}

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		goto fail;
	}

	return;

fail:
	ereport(ERROR,
			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
			 errmsg("transaction is read-only")));
}
/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	Query	   *parseTree = queryDesc->parsetree;
	Plan	   *plan = queryDesc->plantree;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	List	   *rangeTable;
	Relation	intoRelationDesc;
	bool		do_select_into;
	TupleDesc	tupType;
	ListCell   *l;

	/*
	 * Do permissions checks.  It's sufficient to examine the query's top
	 * rangetable here --- subplan RTEs will be checked during
	 * ExecInitSubPlan().
	 */
	ExecCheckRTPerms(parseTree->rtable);

	/*
	 * get information from query descriptor
	 */
	rangeTable = parseTree->rtable;

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	/*
	 * if there is a result relation, initialize result relation stuff
	 */
	if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
	{
		List	   *resultRelations = parseTree->resultRelations;
		int			numResultRelations;
		ResultRelInfo *resultRelInfos;

		if (resultRelations != NIL)
		{
			/*
			 * Multiple result relations (due to inheritance)
			 * parseTree->resultRelations identifies them all
			 */
			ResultRelInfo *resultRelInfo;

			numResultRelations = list_length(resultRelations);
			resultRelInfos = (ResultRelInfo *)
				palloc(numResultRelations * sizeof(ResultRelInfo));
			resultRelInfo = resultRelInfos;
			foreach(l, resultRelations)
			{
				initResultRelInfo(resultRelInfo,
								  lfirst_int(l),
								  rangeTable,
								  operation,
								  estate->es_instrument);
				resultRelInfo++;
			}
		}
		else
		{
			/*
			 * Single result relation identified by parseTree->resultRelation
			 */
			numResultRelations = 1;
			resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
			initResultRelInfo(resultRelInfos,
							  parseTree->resultRelation,
							  rangeTable,
							  operation,
							  estate->es_instrument);
		}

		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* Initialize to first or only result rel */
		estate->es_result_relation_info = resultRelInfos;
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}
	/*
	 * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
	 * flag appropriately so that the plan tree will be initialized with the
	 * correct tuple descriptors.
	 */
	do_select_into = false;

	if (operation == CMD_SELECT && parseTree->into != NULL)
	{
		do_select_into = true;
		estate->es_select_into = true;
		estate->es_into_oids = interpretOidsOption(parseTree->intoOptions);
	}

	/*
	 * Have to lock relations selected FOR UPDATE/FOR SHARE
	 */
	estate->es_rowMarks = NIL;
	foreach(l, parseTree->rowMarks)
	{
		RowMarkClause *rc = (RowMarkClause *) lfirst(l);
		Oid			relid = getrelid(rc->rti, rangeTable);
		Relation	relation;
		ExecRowMark *erm;

		relation = heap_open(relid, RowShareLock);
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->rti = rc->rti;
		erm->forUpdate = rc->forUpdate;
		erm->noWait = rc->noWait;
		snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rc->rti);
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}
	/*
	 * initialize the executor "tuple" table.  We need slots for all the plan
	 * nodes, plus possibly output slots for the junkfilter(s). At this point
	 * we aren't sure if we need junkfilters, so just add slots for them
	 * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
	 * trigger output tuples.
	 */
	{
		int			nSlots = ExecCountSlotsNode(plan);

		if (parseTree->resultRelations != NIL)
			nSlots += list_length(parseTree->resultRelations);
		else
			nSlots += 1;
		if (operation != CMD_SELECT)
			nSlots++;			/* for es_trig_tuple_slot */

		estate->es_tupleTable = ExecCreateTupleTable(nSlots);

		if (operation != CMD_SELECT)
			estate->es_trig_tuple_slot =
				ExecAllocTableSlot(estate->es_tupleTable);
	}

	/* mark EvalPlanQual not active */
	estate->es_topPlan = plan;
	estate->es_evalPlanQual = NULL;
	estate->es_evTupleNull = NULL;
	estate->es_evTuple = NULL;
	estate->es_useEvalPlan = false;

	/*
	 * initialize the private state information for all the nodes in the
	 * query tree.  This opens files, allocates storage and leaves us ready
	 * to start processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 * (this is especially important if we are creating a relation with
	 * "SELECT INTO")
	 */
	tupType = ExecGetResultType(planstate);
	/*
	 * Initialize the junk filter if needed.  SELECT and INSERT queries need a
	 * filter if there are any junk attrs in the tlist.  INSERT and SELECT
	 * INTO also need a filter if the plan may return raw disk tuples (else
	 * heap_insert will be scribbling on the source relation!). UPDATE and
	 * DELETE always need a filter, since there's always a junk 'ctid'
	 * attribute present --- no need to look first.
	 */
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		switch (operation)
		{
			case CMD_SELECT:
			case CMD_INSERT:
				foreach(tlist, plan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(tlist);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				if (!junk_filter_needed &&
					(operation == CMD_INSERT || do_select_into) &&
					ExecMayReturnRawTuples(planstate))
					junk_filter_needed = true;
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				break;
		}

		if (junk_filter_needed)
		{
			/*
			 * If there are multiple result relations, each one needs its own
			 * junk filter.  Note this is only possible for UPDATE/DELETE, so
			 * we can't be fooled by some needing a filter and some not.
			 */
			if (parseTree->resultRelations != NIL)
			{
				PlanState **appendplans;
				int			as_nplans;
				ResultRelInfo *resultRelInfo;
				int			i;

				/* Top plan had better be an Append here. */
				Assert(IsA(plan, Append));
				Assert(((Append *) plan)->isTarget);
				Assert(IsA(planstate, AppendState));
				appendplans = ((AppendState *) planstate)->appendplans;
				as_nplans = ((AppendState *) planstate)->as_nplans;
				Assert(as_nplans == estate->es_num_result_relations);
				resultRelInfo = estate->es_result_relations;
				for (i = 0; i < as_nplans; i++)
				{
					PlanState  *subplan = appendplans[i];
					JunkFilter *j;

					j = ExecInitJunkFilter(subplan->plan->targetlist,
							resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));
					resultRelInfo->ri_junkFilter = j;
					resultRelInfo++;
				}

				/*
				 * Set active junkfilter too; at this point ExecInitAppend
				 * has already selected an active result relation...
				 */
				estate->es_junkFilter =
					estate->es_result_relation_info->ri_junkFilter;
			}
			else
			{
				/* Normal case with just one JunkFilter */
				JunkFilter *j;

				j = ExecInitJunkFilter(planstate->plan->targetlist,
									   tupType->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));
				estate->es_junkFilter = j;
				if (estate->es_result_relation_info)
					estate->es_result_relation_info->ri_junkFilter = j;

				/* For SELECT, want to return the cleaned tuple type */
				if (operation == CMD_SELECT)
					tupType = j->jf_cleanTupType;
			}
		}
		else
			estate->es_junkFilter = NULL;
	}
	/*
	 * If doing SELECT INTO, initialize the "into" relation.  We must wait
	 * till now so we have the "clean" result tuple type to create the new
	 * table from.
	 *
	 * If EXPLAIN, skip creating the "into" relation.
	 */
	intoRelationDesc = NULL;

	if (do_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
	{
		char	   *intoName;
		Oid			namespaceId;
		Oid			tablespaceId;
		Datum		reloptions;
		AclResult	aclresult;
		Oid			intoRelationId;
		TupleDesc	tupdesc;

		/*
		 * Check consistency of arguments
		 */
		if (parseTree->intoOnCommit != ONCOMMIT_NOOP && !parseTree->into->istemp)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
					 errmsg("ON COMMIT can only be used on temporary tables")));

		/*
		 * find namespace to create in, check permissions
		 */
		intoName = parseTree->into->relname;
		namespaceId = RangeVarGetCreationNamespace(parseTree->into);

		aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
										  ACL_CREATE);
		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
						   get_namespace_name(namespaceId));

		/*
		 * Select tablespace to use.  If not specified, use default_tablespace
		 * (which may in turn default to database's default).
		 */
		if (parseTree->intoTableSpaceName)
		{
			tablespaceId = get_tablespace_oid(parseTree->intoTableSpaceName);
			if (!OidIsValid(tablespaceId))
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_OBJECT),
						 errmsg("tablespace \"%s\" does not exist",
								parseTree->intoTableSpaceName)));
		}
		else
		{
			tablespaceId = GetDefaultTablespace();
			/* note InvalidOid is OK in this case */
		}

		/* Parse and validate any reloptions */
		reloptions = transformRelOptions((Datum) 0,
										 parseTree->intoOptions,
										 true,
										 false);
		(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

		/* Check permissions except when using the database's default */
		if (OidIsValid(tablespaceId))
		{
			AclResult	aclresult;

			aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
											   ACL_CREATE);

			if (aclresult != ACLCHECK_OK)
				aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
							   get_tablespace_name(tablespaceId));
		}

		/*
		 * have to copy tupType to get rid of constraints
		 */
		tupdesc = CreateTupleDescCopy(tupType);

		intoRelationId = heap_create_with_catalog(intoName,
												  namespaceId,
												  tablespaceId,
												  InvalidOid,
												  GetUserId(),
												  tupdesc,
												  RELKIND_RELATION,
												  false,
												  true,
												  0,
												  parseTree->intoOnCommit,
												  reloptions,
												  allowSystemTableMods);

		FreeTupleDesc(tupdesc);

		/*
		 * Advance command counter so that the newly-created relation's
		 * catalog tuples will be visible to heap_open.
		 */
		CommandCounterIncrement();

		/*
		 * If necessary, create a TOAST table for the into relation. Note
		 * that AlterTableCreateToastTable ends with CommandCounterIncrement(),
		 * so that the TOAST table will be visible for insertion.
		 */
		AlterTableCreateToastTable(intoRelationId, true);

		/*
		 * And open the constructed table for writing.
		 */
		intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

		/* use_wal off requires rd_targblock be initially invalid */
		Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);

		/*
		 * We can skip WAL-logging the insertions, unless PITR is in use.
		 *
		 * Note that for a non-temp INTO table, this is safe only because we
		 * know that the catalog changes above will have been WAL-logged, and
		 * so RecordTransactionCommit will think it needs to WAL-log the
		 * eventual transaction commit.  Else the commit might be lost, even
		 * though all the data is safely fsync'd ...
		 */
		estate->es_into_relation_use_wal = XLogArchivingActive();
	}

	estate->es_into_relation_descriptor = intoRelationDesc;

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
/*
 * Initialize ResultRelInfo data for one result relation
 */
static void
initResultRelInfo(ResultRelInfo *resultRelInfo,
				  Index resultRelationIndex,
				  List *rangeTable,
				  CmdType operation,
				  bool doInstrument)
{
	Oid			resultRelationOid;
	Relation	resultRelationDesc;

	resultRelationOid = getrelid(resultRelationIndex, rangeTable);
	resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);

	switch (resultRelationDesc->rd_rel->relkind)
	{
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_VIEW:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change view \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
	}

	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		if (doInstrument)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
		else
			resultRelInfo->ri_TrigInstrument = NULL;
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;

	/*
	 * If there are indices on the result relation, open them and save
	 * descriptors in the result relation info, so that we can add new index
	 * entries for the tuples we add/update.  We need not do this for a
	 * DELETE, however, since deletion doesn't affect indexes.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		operation != CMD_DELETE)
		ExecOpenIndices(resultRelInfo);
}
/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if
 * the choice is forced, FALSE if the choice is not forced.  In the TRUE
 * case, *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	if (planstate->state->es_select_into)
	{
		*hasoids = planstate->state->es_into_oids;
		return true;
	}
	else
	{
		ResultRelInfo *ri = planstate->state->es_result_relation_info;

		if (ri != NULL)
		{
			Relation	rel = ri->ri_RelationDesc;

			if (rel != NULL)
			{
				*hasoids = rel->rd_rel->relhasoids;
				return true;
			}
		}
	}

	return false;
}
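/*
 * Illustrative note (an assumption about the call site, not code from this
 * file): a plan node deciding on its result tuple descriptor would consult
 * this routine along the lines of
 *
 *		bool	hasoids;
 *
 *		if (ExecContextForcesOids(planstate, &hasoids))
 *			... build the descriptor with/without OID space per hasoids ...
 *		else
 *			... free to choose; normally omit OID space ...
 */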
/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down any PlanQual processing we were doing
	 */
	if (estate->es_evalPlanQual != NULL)
		EndEvalPlanQual(estate);

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * destroy the executor "tuple" table.
	 */
	ExecDropTupleTable(estate->es_tupleTable, true);
	estate->es_tupleTable = NULL;

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * close the "into" relation if necessary, again keeping lock
	 */
	if (estate->es_into_relation_descriptor != NULL)
	{
		/*
		 * If we skipped using WAL, and it's not a temp relation, we must
		 * force the relation down to disk before it's safe to commit the
		 * transaction.  This requires forcing out any dirty buffers and then
		 * doing a forced fsync.
		 */
		if (!estate->es_into_relation_use_wal &&
			!estate->es_into_relation_descriptor->rd_istemp)
		{
			FlushRelationBuffers(estate->es_into_relation_descriptor);
			/* FlushRelationBuffers will have opened rd_smgr */
			smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
		}

		heap_close(estate->es_into_relation_descriptor, NoLock);
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = lfirst(l);

		heap_close(erm->relation, NoLock);
	}
}
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		processes the query plan to retrieve 'numberTuples' tuples in the
 *		direction specified.
 *
 *		Retrieves all tuples if numberTuples is 0
 *
 *		result is either a slot containing the last tuple in the case
 *		of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *slot;
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;
	TupleTableSlot *result;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
	result = NULL;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			slot = EvalPlanQualNext(estate);
			if (TupIsNull(slot))
				slot = ExecProcNode(planstate);
		}
		else
			slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(slot))
		{
			result = NULL;
			break;
		}

		/*
		 * if we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is
		 * WRONG because that tuple slot has the wrong descriptor.)
		 *
		 * Also, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			Datum		datum;
			bool		isNull;

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				if (!ExecGetJunkAttribute(junkfilter,
										  slot,
										  "ctid",
										  &datum,
										  &isNull))
					elog(ERROR, "could not find junk ctid column");

				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			else if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					if (!ExecGetJunkAttribute(junkfilter,
											  slot,
											  erm->resname,
											  &datum,
											  &isNull))
						elog(ERROR, "could not find junk \"%s\" column",
							 erm->resname);

					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "\"%s\" is NULL", erm->resname);

					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_snapshot->curcid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							break;

						case HeapTupleUpdated:
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
								 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								  errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax,
												estate->es_snapshot->curcid);
								if (!TupIsNull(newSlot))
								{
									slot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
							return NULL;
					}
				}
			}

			/*
			 * Finally create a new "clean" tuple with all junk attributes
			 * removed
			 */
			slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it:
		 * either return it to the user, add it to a relation someplace,
		 * delete it from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot,	/* slot containing tuple */
						   dest,	/* destination's tuple-receiver obj */
						   estate);
				result = slot;
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, estate);
				result = NULL;
				break;

			case CMD_DELETE:
				ExecDelete(slot, tupleid, estate);
				result = NULL;
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, estate);
				result = NULL;
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * SELECT or NULL otherwise.
	 */
	return result;
}
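/*
 * Illustrative note (an assumption, not code from this file): for an UPDATE
 * such as "UPDATE t SET a = 0", the plan's targetlist carries a resjunk
 * "ctid" column, so each cycle of the loop above effectively does
 *
 *		slot = ExecProcNode(planstate);				// a, ..., ctid
 *		ExecGetJunkAttribute(junkfilter, slot, "ctid", &datum, &isNull);
 *		slot = ExecFilterJunk(junkfilter, slot);	// just the user columns
 *		ExecUpdate(slot, tupleid, estate);
 *
 * before the row is stored back into the relation.
 */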
/* ----------------------------------------------------------------
 *		ExecSelect
 *
 *		SELECTs are easy: we just pass the tuple to the appropriate
 *		print function.  The only complexity is when we do a
 *		"SELECT INTO", in which case we insert the tuple into
 *		the appropriate relation (note: this is a newly created relation
 *		so we don't need to worry about indices or locks.)
 * ----------------------------------------------------------------
 */
static void
ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest,
		   EState *estate)
{
	/*
	 * insert the tuple into the "into relation"
	 *
	 * XXX this probably ought to be replaced by a separate destination
	 */
	if (estate->es_into_relation_descriptor != NULL)
	{
		HeapTuple	tuple;

		tuple = ExecCopySlotTuple(slot);
		heap_insert(estate->es_into_relation_descriptor, tuple,
					estate->es_snapshot->curcid,
					estate->es_into_relation_use_wal,
					false);		/* never any point in using FSM */
		/* we know there are no indexes to update */
		heap_freetuple(tuple);
	}

	/*
	 * send the tuple to the destination
	 */
	(*dest->receiveSlot) (slot, dest);

	(estate->es_processed)++;
}
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier: we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_snapshot->curcid,
						true, true);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);
}
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
										estate->es_snapshot->curcid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
}
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted.  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database.
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's
	 * no need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
}
/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the
	 * per-query memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleConstr *constr = rel->rd_att->constr;

	Assert(constr);

	if (constr->has_not_null)
	{
		int			natts = rel->rd_att->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
				slot_attisnull(slot, attrChk))
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
						NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
		}
	}

	if (constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(rel), failed)));
	}
}
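/*
 * Illustrative note (an assumption, not code from this file): for a table
 * created with
 *
 *		CREATE TABLE t (a int NOT NULL, b int CHECK (b > 0));
 *
 * an INSERT funnels each candidate row through ExecConstraints above, so a
 * NULL "a" raises the not-null error, while a NULL "b" passes the CHECK
 * because ExecQual is told to treat a NULL constraint result as TRUE, per
 * SQL92.
 */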
/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *	curCid - command ID of current command of my transaction
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax, CommandId curCid)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			if (((ExecRowMark *) lfirst(l))->rti == rti)
			{
				relation = ((ExecRowMark *) lfirst(l))->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty->xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by other transaction then we have to
			 * wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty->xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty->xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against curCid: cmin >= curCid means our command cannot
			 * see the tuple, so we should ignore it.  Without this we are
			 * open to the "Halloween problem" of indefinitely re-updating
			 * the same tuple. (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by
			 * our transaction dead, regardless of cmax.)  We just checked
			 * that priorXmax == xmin, so we can test that variable instead
			 * of doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as
		 * it certainly should be if we're trying to modify the tuple), this
		 * must mean that the row was updated or deleted by either a
		 * committed xact or our own xact.  If it was deleted, we can ignore
		 * it; if it was updated then chain up to the next version and repeat
		 * the whole test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.  Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}

	/*
	 * If this is a request for another RTE, Ra, then we have to check
	 * whether PlanQual was already requested for Ra; if so, Ra's row was
	 * updated again, and we have to restart the old execution for Ra,
	 * forgetting everything we did after Ra was suspended.
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}

	/*
	 * If we are requested for another RTE then we have to suspend execution
	 * of the current PlanQual and start execution for the new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);

	/*
	 * OK, we're requested for the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we
	 * implement ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}

	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE's tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	return EvalPlanQualNext(estate);
}
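/*
 * Illustrative note (an assumption, not code from this file): under READ
 * COMMITTED, if our UPDATE finds that a concurrent, already-committed
 * transaction changed a target row, heap_update reports HeapTupleUpdated
 * and the caller re-runs the quals against the row's newest version:
 *
 *		epqslot = EvalPlanQual(estate, rti, &update_ctid,
 *							   update_xmax, curCid);
 *		if (!TupIsNull(epqslot))
 *			... retry the update on the new version ...
 *		else
 *			... row no longer satisfies the quals; skip it ...
 *
 * which is exactly the dance ExecUpdate and ExecDelete perform above.
 */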
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;
	}

	return slot;
}
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shut down */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;

	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
	epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
	epqstate->es_param_list_info = estate->es_param_list_info;
	if (estate->es_topPlan->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_topPlan = estate->es_topPlan;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the
	 * stack entries share es_evTuple state.  This allows sub-rechecks to
	 * inherit the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	epq->planstate = ExecInitNode(estate->es_topPlan, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	ExecEndNode(epq->planstate);

	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}