1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.261 2005/11/22 18:17:10 momjian Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
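
/*
 * Illustrative call sequence (an example only, not taken from this file):
 * a caller such as the portal code might drive these entry points roughly
 * like so, with "queryDesc" built beforehand by CreateQueryDesc() and freed
 * afterwards with FreeQueryDesc().
 *
 *      ExecutorStart(queryDesc, false);
 *      (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *      ExecutorEnd(queryDesc);
 *      FreeQueryDesc(queryDesc);
 */
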
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/xlog.h"
37 #include "catalog/heap.h"
38 #include "catalog/namespace.h"
39 #include "commands/tablecmds.h"
40 #include "commands/trigger.h"
41 #include "executor/execdebug.h"
42 #include "executor/execdefs.h"
43 #include "executor/instrument.h"
44 #include "miscadmin.h"
45 #include "optimizer/clauses.h"
46 #include "optimizer/var.h"
47 #include "parser/parsetree.h"
48 #include "storage/smgr.h"
49 #include "utils/acl.h"
50 #include "utils/guc.h"
51 #include "utils/lsyscache.h"
52 #include "utils/memutils.h"
53
54
55 typedef struct execRowMark
56 {
57         Relation        relation;
58         Index           rti;
59         char            resname[32];
60 } execRowMark;
61
62 typedef struct evalPlanQual
63 {
64         Index           rti;
65         EState     *estate;
66         PlanState  *planstate;
67         struct evalPlanQual *next;      /* stack of active PlanQual plans */
68         struct evalPlanQual *free;      /* list of free PlanQual plans */
69 } evalPlanQual;
70
71 /* decls for local routines only used within this module */
72 static void InitPlan(QueryDesc *queryDesc, bool explainOnly);
73 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
74                                   Index resultRelationIndex,
75                                   List *rangeTable,
76                                   CmdType operation,
77                                   bool doInstrument);
78 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
79                         CmdType operation,
80                         long numberTuples,
81                         ScanDirection direction,
82                         DestReceiver *dest);
83 static void ExecSelect(TupleTableSlot *slot,
84                    DestReceiver *dest,
85                    EState *estate);
86 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
87                    EState *estate);
88 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
89                    EState *estate);
90 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
91                    EState *estate);
92 static TupleTableSlot *EvalPlanQualNext(EState *estate);
93 static void EndEvalPlanQual(EState *estate);
94 static void ExecCheckRTEPerms(RangeTblEntry *rte);
95 static void ExecCheckXactReadOnly(Query *parsetree);
96 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
97                                   evalPlanQual *priorepq);
98 static void EvalPlanQualStop(evalPlanQual *epq);
99
100 /* end of local decls */
101
102
103 /* ----------------------------------------------------------------
104  *              ExecutorStart
105  *
106  *              This routine must be called at the beginning of any execution of any
107  *              query plan
108  *
109  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
110  * clear why we bother to separate the two functions, but...).  The tupDesc
111  * field of the QueryDesc is filled in to describe the tuples that will be
112  * returned, and the internal fields (estate and planstate) are set up.
113  *
114  * If explainOnly is true, we are not actually intending to run the plan,
115  * only to set up for EXPLAIN; so skip unwanted side-effects.
116  *
117  * NB: the CurrentMemoryContext when this is called will become the parent
118  * of the per-query context used for this Executor invocation.
119  * ----------------------------------------------------------------
120  */
121 void
122 ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
123 {
124         EState     *estate;
125         MemoryContext oldcontext;
126
127         /* sanity checks: queryDesc must not be started already */
128         Assert(queryDesc != NULL);
129         Assert(queryDesc->estate == NULL);
130
131         /*
132          * If the transaction is read-only, we need to check if any writes are
133          * planned to non-temporary tables.
134          */
135         if (XactReadOnly && !explainOnly)
136                 ExecCheckXactReadOnly(queryDesc->parsetree);
137
138         /*
139          * Build EState, switch into per-query memory context for startup.
140          */
141         estate = CreateExecutorState();
142         queryDesc->estate = estate;
143
144         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
145
146         /*
147          * Fill in parameters, if any, from queryDesc
148          */
149         estate->es_param_list_info = queryDesc->params;
150
151         if (queryDesc->plantree->nParamExec > 0)
152                 estate->es_param_exec_vals = (ParamExecData *)
153                         palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
154
155         /*
156          * Copy other important information into the EState
157          */
158         estate->es_snapshot = queryDesc->snapshot;
159         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
160         estate->es_instrument = queryDesc->doInstrument;
161
162         /*
163          * Initialize the plan state tree
164          */
165         InitPlan(queryDesc, explainOnly);
166
167         MemoryContextSwitchTo(oldcontext);
168 }
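
/*
 * Illustrative sketch (example only): per the NB above, a caller that wants
 * the per-query context parented to a specific context switches there before
 * calling ExecutorStart.  "portalContext" is a hypothetical caller-managed
 * MemoryContext; it is not defined in this file.
 *
 *      MemoryContext oldcontext = MemoryContextSwitchTo(portalContext);
 *
 *      ExecutorStart(queryDesc, false);
 *      MemoryContextSwitchTo(oldcontext);
 */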
169
170 /* ----------------------------------------------------------------
171  *              ExecutorRun
172  *
173  *              This is the main routine of the executor module. It accepts
174  *              the query descriptor from the traffic cop and executes the
175  *              query plan.
176  *
177  *              ExecutorStart must have been called already.
178  *
179  *              If direction is NoMovementScanDirection then nothing is done
180  *              except to start up/shut down the destination.  Otherwise,
181  *              we retrieve up to 'count' tuples in the specified direction.
182  *
183  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
184  *              completion.
185  *
186  * ----------------------------------------------------------------
187  */
188 TupleTableSlot *
189 ExecutorRun(QueryDesc *queryDesc,
190                         ScanDirection direction, long count)
191 {
192         EState     *estate;
193         CmdType         operation;
194         DestReceiver *dest;
195         TupleTableSlot *result;
196         MemoryContext oldcontext;
197
198         /* sanity checks */
199         Assert(queryDesc != NULL);
200
201         estate = queryDesc->estate;
202
203         Assert(estate != NULL);
204
205         /*
206          * Switch into per-query memory context
207          */
208         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
209
210         /*
211          * extract information from the query descriptor.
212          */
213         operation = queryDesc->operation;
214         dest = queryDesc->dest;
215
216         /*
217          * startup tuple receiver
218          */
219         estate->es_processed = 0;
220         estate->es_lastoid = InvalidOid;
221
222         (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
223
224         /*
225          * run plan
226          */
227         if (direction == NoMovementScanDirection)
228                 result = NULL;
229         else
230                 result = ExecutePlan(estate,
231                                                          queryDesc->planstate,
232                                                          operation,
233                                                          count,
234                                                          direction,
235                                                          dest);
236
237         /*
238          * shutdown receiver
239          */
240         (*dest->rShutdown) (dest);
241
242         MemoryContextSwitchTo(oldcontext);
243
244         return result;
245 }
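
/*
 * Illustrative sketch (example only): a cursor-style caller can fetch in
 * batches by passing a nonzero count, then finish with count = 0, which runs
 * the plan to completion.
 *
 *      (void) ExecutorRun(queryDesc, ForwardScanDirection, 10L);
 *      (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 */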
246
247 /* ----------------------------------------------------------------
248  *              ExecutorEnd
249  *
250  *              This routine must be called at the end of execution of any
251  *              query plan
252  * ----------------------------------------------------------------
253  */
254 void
255 ExecutorEnd(QueryDesc *queryDesc)
256 {
257         EState     *estate;
258         MemoryContext oldcontext;
259
260         /* sanity checks */
261         Assert(queryDesc != NULL);
262
263         estate = queryDesc->estate;
264
265         Assert(estate != NULL);
266
267         /*
268          * Switch into per-query memory context to run ExecEndPlan
269          */
270         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
271
272         ExecEndPlan(queryDesc->planstate, estate);
273
274         /*
275          * Must switch out of context before destroying it
276          */
277         MemoryContextSwitchTo(oldcontext);
278
279         /*
280          * Release EState and per-query memory context.  This should release
281          * everything the executor has allocated.
282          */
283         FreeExecutorState(estate);
284
285         /* Reset queryDesc fields that no longer point to anything */
286         queryDesc->tupDesc = NULL;
287         queryDesc->estate = NULL;
288         queryDesc->planstate = NULL;
289 }
290
291 /* ----------------------------------------------------------------
292  *              ExecutorRewind
293  *
294  *              This routine may be called on an open queryDesc to rewind it
295  *              to the start.
296  * ----------------------------------------------------------------
297  */
298 void
299 ExecutorRewind(QueryDesc *queryDesc)
300 {
301         EState     *estate;
302         MemoryContext oldcontext;
303
304         /* sanity checks */
305         Assert(queryDesc != NULL);
306
307         estate = queryDesc->estate;
308
309         Assert(estate != NULL);
310
311         /* It's probably not sensible to rescan updating queries */
312         Assert(queryDesc->operation == CMD_SELECT);
313
314         /*
315          * Switch into per-query memory context
316          */
317         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
318
319         /*
320          * rescan plan
321          */
322         ExecReScan(queryDesc->planstate, NULL);
323
324         MemoryContextSwitchTo(oldcontext);
325 }
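
/*
 * Illustrative sketch (example only): rewinding lets an already-run SELECT
 * be read again from the top, e.g. for a scrollable cursor moved back to the
 * start before being fetched forward again.
 *
 *      ExecutorRewind(queryDesc);
 *      (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 */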
326
327
328 /*
329  * ExecCheckRTPerms
330  *              Check access permissions for all relations listed in a range table.
331  */
332 void
333 ExecCheckRTPerms(List *rangeTable)
334 {
335         ListCell   *l;
336
337         foreach(l, rangeTable)
338         {
339                 RangeTblEntry *rte = lfirst(l);
340
341                 ExecCheckRTEPerms(rte);
342         }
343 }
344
345 /*
346  * ExecCheckRTEPerms
347  *              Check access permissions for a single RTE.
348  */
349 static void
350 ExecCheckRTEPerms(RangeTblEntry *rte)
351 {
352         AclMode         requiredPerms;
353         Oid                     relOid;
354         Oid                     userid;
355
356         /*
357          * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
358          * checked by ExecInitSubqueryScan if the subquery is still a separate
359          * subquery --- if it's been pulled up into our query level then the RTEs
360          * are in our rangetable and will be checked here. Function RTEs are
361          * checked by init_fcache when the function is prepared for execution.
362          * Join and special RTEs need no checks.
363          */
364         if (rte->rtekind != RTE_RELATION)
365                 return;
366
367         /*
368          * No work if requiredPerms is empty.
369          */
370         requiredPerms = rte->requiredPerms;
371         if (requiredPerms == 0)
372                 return;
373
374         relOid = rte->relid;
375
376         /*
377          * userid to check as: current user unless we have a setuid indication.
378          *
379          * Note: GetUserId() is presently fast enough that there's no harm in
380          * calling it separately for each RTE.  If that stops being true, we could
381          * call it once in ExecCheckRTPerms and pass the userid down from there.
382          * But for now, no need for the extra clutter.
383          */
384         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
385
386         /*
387          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
388          */
389         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
390                 != requiredPerms)
391                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
392                                            get_rel_name(relOid));
393 }
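
/*
 * Example (hypothetical query, for illustration): for
 * "UPDATE t SET a = a + 1 WHERE b > 0", t's RTE typically carries
 * requiredPerms = ACL_UPDATE | ACL_SELECT (old column values are read), so
 * the ACLMASK_ALL test above succeeds only if the user holds both privileges
 * on t.
 */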
394
395 /*
396  * Check that the query does not imply any writes to non-temp tables.
397  */
398 static void
399 ExecCheckXactReadOnly(Query *parsetree)
400 {
401         ListCell   *l;
402
403         /*
404          * CREATE TABLE AS or SELECT INTO?
405          *
406          * XXX should we allow this if the destination is temp?
407          */
408         if (parsetree->into != NULL)
409                 goto fail;
410
411         /* Fail if write permissions are requested on any non-temp table */
412         foreach(l, parsetree->rtable)
413         {
414                 RangeTblEntry *rte = lfirst(l);
415
416                 if (rte->rtekind == RTE_SUBQUERY)
417                 {
418                         ExecCheckXactReadOnly(rte->subquery);
419                         continue;
420                 }
421
422                 if (rte->rtekind != RTE_RELATION)
423                         continue;
424
425                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
426                         continue;
427
428                 if (isTempNamespace(get_rel_namespace(rte->relid)))
429                         continue;
430
431                 goto fail;
432         }
433
434         return;
435
436 fail:
437         ereport(ERROR,
438                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
439                          errmsg("transaction is read-only")));
440 }
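
/*
 * Example (hypothetical table names, for illustration): inside a transaction
 * that has done SET TRANSACTION READ ONLY, "UPDATE ordinary_tab SET ..."
 * reaches the fail label above, while an UPDATE of a temp table and plain
 * SELECTs pass the checks and are allowed through.
 */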
441
442
443 /* ----------------------------------------------------------------
444  *              InitPlan
445  *
446  *              Initializes the query plan: open files, allocate storage
447  *              and start up the rule manager
448  * ----------------------------------------------------------------
449  */
450 static void
451 InitPlan(QueryDesc *queryDesc, bool explainOnly)
452 {
453         CmdType         operation = queryDesc->operation;
454         Query      *parseTree = queryDesc->parsetree;
455         Plan       *plan = queryDesc->plantree;
456         EState     *estate = queryDesc->estate;
457         PlanState  *planstate;
458         List       *rangeTable;
459         Relation        intoRelationDesc;
460         bool            do_select_into;
461         TupleDesc       tupType;
462
463         /*
464          * Do permissions checks.  It's sufficient to examine the query's top
465          * rangetable here --- subplan RTEs will be checked during
466          * ExecInitSubPlan().
467          */
468         ExecCheckRTPerms(parseTree->rtable);
469
470         /*
471          * get information from query descriptor
472          */
473         rangeTable = parseTree->rtable;
474
475         /*
476          * initialize the node's execution state
477          */
478         estate->es_range_table = rangeTable;
479
480         /*
481          * if there is a result relation, initialize result relation stuff
482          */
483         if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
484         {
485                 List       *resultRelations = parseTree->resultRelations;
486                 int                     numResultRelations;
487                 ResultRelInfo *resultRelInfos;
488
489                 if (resultRelations != NIL)
490                 {
491                         /*
492                          * Multiple result relations (due to inheritance);
493                          * parseTree->resultRelations identifies them all.
494                          */
495                         ResultRelInfo *resultRelInfo;
496                         ListCell   *l;
497
498                         numResultRelations = list_length(resultRelations);
499                         resultRelInfos = (ResultRelInfo *)
500                                 palloc(numResultRelations * sizeof(ResultRelInfo));
501                         resultRelInfo = resultRelInfos;
502                         foreach(l, resultRelations)
503                         {
504                                 initResultRelInfo(resultRelInfo,
505                                                                   lfirst_int(l),
506                                                                   rangeTable,
507                                                                   operation,
508                                                                   estate->es_instrument);
509                                 resultRelInfo++;
510                         }
511                 }
512                 else
513                 {
514                         /*
515                          * Single result relation identified by parseTree->resultRelation
516                          */
517                         numResultRelations = 1;
518                         resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
519                         initResultRelInfo(resultRelInfos,
520                                                           parseTree->resultRelation,
521                                                           rangeTable,
522                                                           operation,
523                                                           estate->es_instrument);
524                 }
525
526                 estate->es_result_relations = resultRelInfos;
527                 estate->es_num_result_relations = numResultRelations;
528                 /* Initialize to first or only result rel */
529                 estate->es_result_relation_info = resultRelInfos;
530         }
531         else
532         {
533                 /*
534                  * if no result relation, then set state appropriately
535                  */
536                 estate->es_result_relations = NULL;
537                 estate->es_num_result_relations = 0;
538                 estate->es_result_relation_info = NULL;
539         }
540
541         /*
542          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
543          * flag appropriately so that the plan tree will be initialized with the
544          * correct tuple descriptors.
545          */
546         do_select_into = false;
547
548         if (operation == CMD_SELECT && parseTree->into != NULL)
549         {
550                 do_select_into = true;
551                 estate->es_select_into = true;
552                 estate->es_into_oids = parseTree->intoHasOids;
553         }
554
555         /*
556          * Have to lock relations selected FOR UPDATE/FOR SHARE
557          */
558         estate->es_rowMarks = NIL;
559         estate->es_forUpdate = parseTree->forUpdate;
560         estate->es_rowNoWait = parseTree->rowNoWait;
561         if (parseTree->rowMarks != NIL)
562         {
563                 ListCell   *l;
564
565                 foreach(l, parseTree->rowMarks)
566                 {
567                         Index           rti = lfirst_int(l);
568                         Oid                     relid = getrelid(rti, rangeTable);
569                         Relation        relation;
570                         execRowMark *erm;
571
572                         relation = heap_open(relid, RowShareLock);
573                         erm = (execRowMark *) palloc(sizeof(execRowMark));
574                         erm->relation = relation;
575                         erm->rti = rti;
576                         snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
577                         estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
578                 }
579         }
580
581         /*
582          * initialize the executor "tuple" table.  We need slots for all the plan
583          * nodes, plus possibly output slots for the junkfilter(s). At this point
584          * we aren't sure if we need junkfilters, so just add slots for them
585          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
586          * trigger output tuples.
587          */
588         {
589                 int                     nSlots = ExecCountSlotsNode(plan);
590
591                 if (parseTree->resultRelations != NIL)
592                         nSlots += list_length(parseTree->resultRelations);
593                 else
594                         nSlots += 1;
595                 if (operation != CMD_SELECT)
596                         nSlots++;
597
598                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
599
600                 if (operation != CMD_SELECT)
601                         estate->es_trig_tuple_slot =
602                                 ExecAllocTableSlot(estate->es_tupleTable);
603         }
604
605         /* mark EvalPlanQual not active */
606         estate->es_topPlan = plan;
607         estate->es_evalPlanQual = NULL;
608         estate->es_evTupleNull = NULL;
609         estate->es_evTuple = NULL;
610         estate->es_useEvalPlan = false;
611
612         /*
613          * initialize the private state information for all the nodes in the query
614          * tree.  This opens files, allocates storage and leaves us ready to start
615          * processing tuples.
616          */
617         planstate = ExecInitNode(plan, estate);
618
619         /*
620          * Get the tuple descriptor describing the type of tuples to return. (this
621          * is especially important if we are creating a relation with "SELECT
622          * INTO")
623          */
624         tupType = ExecGetResultType(planstate);
625
626         /*
627          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
628          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
629          * INTO also need a filter if the plan may return raw disk tuples (else
630          * heap_insert will be scribbling on the source relation!). UPDATE and
631          * DELETE always need a filter, since there's always a junk 'ctid'
632          * attribute present --- no need to look first.
633          */
634         {
635                 bool            junk_filter_needed = false;
636                 ListCell   *tlist;
637
638                 switch (operation)
639                 {
640                         case CMD_SELECT:
641                         case CMD_INSERT:
642                                 foreach(tlist, plan->targetlist)
643                                 {
644                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
645
646                                         if (tle->resjunk)
647                                         {
648                                                 junk_filter_needed = true;
649                                                 break;
650                                         }
651                                 }
652                                 if (!junk_filter_needed &&
653                                         (operation == CMD_INSERT || do_select_into) &&
654                                         ExecMayReturnRawTuples(planstate))
655                                         junk_filter_needed = true;
656                                 break;
657                         case CMD_UPDATE:
658                         case CMD_DELETE:
659                                 junk_filter_needed = true;
660                                 break;
661                         default:
662                                 break;
663                 }
664
665                 if (junk_filter_needed)
666                 {
667                         /*
668                          * If there are multiple result relations, each one needs its own
669                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
670                          * we can't be fooled by some needing a filter and some not.
671                          */
672                         if (parseTree->resultRelations != NIL)
673                         {
674                                 PlanState **appendplans;
675                                 int                     as_nplans;
676                                 ResultRelInfo *resultRelInfo;
677                                 int                     i;
678
679                                 /* Top plan had better be an Append here. */
680                                 Assert(IsA(plan, Append));
681                                 Assert(((Append *) plan)->isTarget);
682                                 Assert(IsA(planstate, AppendState));
683                                 appendplans = ((AppendState *) planstate)->appendplans;
684                                 as_nplans = ((AppendState *) planstate)->as_nplans;
685                                 Assert(as_nplans == estate->es_num_result_relations);
686                                 resultRelInfo = estate->es_result_relations;
687                                 for (i = 0; i < as_nplans; i++)
688                                 {
689                                         PlanState  *subplan = appendplans[i];
690                                         JunkFilter *j;
691
692                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
693                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
694                                                                   ExecAllocTableSlot(estate->es_tupleTable));
695                                         resultRelInfo->ri_junkFilter = j;
696                                         resultRelInfo++;
697                                 }
698
699                                 /*
700                                  * Set active junkfilter too; at this point ExecInitAppend has
701                                  * already selected an active result relation...
702                                  */
703                                 estate->es_junkFilter =
704                                         estate->es_result_relation_info->ri_junkFilter;
705                         }
706                         else
707                         {
708                                 /* Normal case with just one JunkFilter */
709                                 JunkFilter *j;
710
711                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
712                                                                            tupType->tdhasoid,
713                                                                   ExecAllocTableSlot(estate->es_tupleTable));
714                                 estate->es_junkFilter = j;
715                                 if (estate->es_result_relation_info)
716                                         estate->es_result_relation_info->ri_junkFilter = j;
717
718                                 /* For SELECT, want to return the cleaned tuple type */
719                                 if (operation == CMD_SELECT)
720                                         tupType = j->jf_cleanTupType;
721                         }
722                 }
723                 else
724                         estate->es_junkFilter = NULL;
725         }
726
727         /*
728          * If doing SELECT INTO, initialize the "into" relation.  We must wait
729          * till now so we have the "clean" result tuple type to create the new
730          * table from.
731          *
732          * If EXPLAIN, skip creating the "into" relation.
733          */
734         intoRelationDesc = NULL;
735
736         if (do_select_into && !explainOnly)
737         {
738                 char       *intoName;
739                 Oid                     namespaceId;
740                 AclResult       aclresult;
741                 Oid                     intoRelationId;
742                 TupleDesc       tupdesc;
743
744                 /*
745                  * find namespace to create in, check permissions
746                  */
747                 intoName = parseTree->into->relname;
748                 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
749
750                 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
751                                                                                   ACL_CREATE);
752                 if (aclresult != ACLCHECK_OK)
753                         aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
754                                                    get_namespace_name(namespaceId));
755
756                 /*
757                  * have to copy tupType to get rid of constraints
758                  */
759                 tupdesc = CreateTupleDescCopy(tupType);
760
761                 intoRelationId = heap_create_with_catalog(intoName,
762                                                                                                   namespaceId,
763                                                                                                   InvalidOid,
764                                                                                                   InvalidOid,
765                                                                                                   GetUserId(),
766                                                                                                   tupdesc,
767                                                                                                   RELKIND_RELATION,
768                                                                                                   false,
769                                                                                                   true,
770                                                                                                   0,
771                                                                                                   ONCOMMIT_NOOP,
772                                                                                                   allowSystemTableMods);
773
774                 FreeTupleDesc(tupdesc);
775
776                 /*
777                  * Advance command counter so that the newly-created relation's
778                  * catalog tuples will be visible to heap_open.
779                  */
780                 CommandCounterIncrement();
781
782                 /*
783                  * If necessary, create a TOAST table for the into relation. Note that
784                  * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
785                  * that the TOAST table will be visible for insertion.
786                  */
787                 AlterTableCreateToastTable(intoRelationId, true);
788
789                 /*
790                  * And open the constructed table for writing.
791                  */
792                 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
793
794                 /* use_wal off requires rd_targblock be initially invalid */
795                 Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
796
797                 /*
798                  * We can skip WAL-logging the insertions, unless PITR is in use.
799                  *
800                  * Note that for a non-temp INTO table, this is safe only because we
801                  * know that the catalog changes above will have been WAL-logged, and
802                  * so RecordTransactionCommit will think it needs to WAL-log the
803                  * eventual transaction commit.  Else the commit might be lost, even
804                  * though all the data is safely fsync'd ...
805                  */
806                 estate->es_into_relation_use_wal = XLogArchivingActive();
807         }
808
809         estate->es_into_relation_descriptor = intoRelationDesc;
810
811         queryDesc->tupDesc = tupType;
812         queryDesc->planstate = planstate;
813 }
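
/*
 * Illustrative notes (hypothetical object names): which query shapes reach
 * which branches of InitPlan above.
 *
 *      "UPDATE parent SET ..." where parent has inheritance children:
 *              parseTree->resultRelations lists every child, the top plan is
 *              a targeted Append, and each child gets its own ResultRelInfo.
 *      "SELECT ... INTO newtab ...":
 *              the do_select_into path; "newtab" is created there from the
 *              cleaned result tuple descriptor.
 *      "SELECT ... FOR UPDATE/FOR SHARE":
 *              the rowMarks path; each listed relation is opened with
 *              RowShareLock and tracked via a junk "ctid<rti>" column.
 */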
814
815 /*
816  * Initialize ResultRelInfo data for one result relation
817  */
818 static void
819 initResultRelInfo(ResultRelInfo *resultRelInfo,
820                                   Index resultRelationIndex,
821                                   List *rangeTable,
822                                   CmdType operation,
823                                   bool doInstrument)
824 {
825         Oid                     resultRelationOid;
826         Relation        resultRelationDesc;
827
828         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
829         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
830
831         switch (resultRelationDesc->rd_rel->relkind)
832         {
833                 case RELKIND_SEQUENCE:
834                         ereport(ERROR,
835                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
836                                          errmsg("cannot change sequence \"%s\"",
837                                                         RelationGetRelationName(resultRelationDesc))));
838                         break;
839                 case RELKIND_TOASTVALUE:
840                         ereport(ERROR,
841                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
842                                          errmsg("cannot change TOAST relation \"%s\"",
843                                                         RelationGetRelationName(resultRelationDesc))));
844                         break;
845                 case RELKIND_VIEW:
846                         ereport(ERROR,
847                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
848                                          errmsg("cannot change view \"%s\"",
849                                                         RelationGetRelationName(resultRelationDesc))));
850                         break;
851         }
852
853         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
854         resultRelInfo->type = T_ResultRelInfo;
855         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
856         resultRelInfo->ri_RelationDesc = resultRelationDesc;
857         resultRelInfo->ri_NumIndices = 0;
858         resultRelInfo->ri_IndexRelationDescs = NULL;
859         resultRelInfo->ri_IndexRelationInfo = NULL;
860         /* make a copy so as not to depend on relcache info not changing... */
861         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
862         if (resultRelInfo->ri_TrigDesc)
863         {
864                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
865
866                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
867                         palloc0(n * sizeof(FmgrInfo));
868                 if (doInstrument)
869                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
870                 else
871                         resultRelInfo->ri_TrigInstrument = NULL;
872         }
873         else
874         {
875                 resultRelInfo->ri_TrigFunctions = NULL;
876                 resultRelInfo->ri_TrigInstrument = NULL;
877         }
878         resultRelInfo->ri_ConstraintExprs = NULL;
879         resultRelInfo->ri_junkFilter = NULL;
880
881         /*
882          * If there are indices on the result relation, open them and save
883          * descriptors in the result relation info, so that we can add new index
884          * entries for the tuples we add/update.  We need not do this for a
885          * DELETE, however, since deletion doesn't affect indexes.
886          */
887         if (resultRelationDesc->rd_rel->relhasindex &&
888                 operation != CMD_DELETE)
889                 ExecOpenIndices(resultRelInfo);
890 }
891
892 /*
893  *              ExecContextForcesOids
894  *
895  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
896  * we need to ensure that result tuples have space for an OID iff they are
897  * going to be stored into a relation that has OIDs.  In other contexts
898  * we are free to choose whether to leave space for OIDs in result tuples
899  * (we generally don't want to, but we do if a physical-tlist optimization
900  * is possible).  This routine checks the plan context and returns TRUE if the
901  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
902  * *hasoids is set to the required value.
903  *
904  * One reason this is ugly is that all plan nodes in the plan tree will emit
905  * tuples with space for an OID, though we really only need the topmost node
906  * to do so.  However, node types like Sort don't project new tuples but just
907  * return their inputs, and in those cases the requirement propagates down
908  * to the input node.  Eventually we might make this code smart enough to
909  * recognize how far down the requirement really goes, but for now we just
910  * make all plan nodes do the same thing if the top level forces the choice.
911  *
912  * We assume that estate->es_result_relation_info is already set up to
913  * describe the target relation.  Note that in an UPDATE that spans an
914  * inheritance tree, some of the target relations may have OIDs and some not.
915  * We have to make the decisions on a per-relation basis as we initialize
916  * each of the child plans of the topmost Append plan.
917  *
918  * SELECT INTO is even uglier, because we don't have the INTO relation's
919  * descriptor available when this code runs; we have to look aside at a
920  * flag set by InitPlan().
921  */
922 bool
923 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
924 {
925         if (planstate->state->es_select_into)
926         {
927                 *hasoids = planstate->state->es_into_oids;
928                 return true;
929         }
930         else
931         {
932                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
933
934                 if (ri != NULL)
935                 {
936                         Relation        rel = ri->ri_RelationDesc;
937
938                         if (rel != NULL)
939                         {
940                                 *hasoids = rel->rd_rel->relhasoids;
941                                 return true;
942                         }
943                 }
944         }
945
946         return false;
947 }
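
/*
 * Example (hypothetical table, for illustration): for
 * "INSERT INTO foo SELECT ..." where foo was created WITH OIDS,
 * es_result_relation_info points at foo, so this returns true with *hasoids
 * set to true and every plan node leaves room for an OID in its result
 * tuples; for a plain SELECT with no INTO clause it returns false, leaving
 * the choice free.
 */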
948
949 /* ----------------------------------------------------------------
950  *              ExecEndPlan
951  *
952  *              Cleans up the query plan -- closes files and frees up storage
953  *
954  * NOTE: we are no longer very worried about freeing storage per se
955  * in this code; FreeExecutorState should be guaranteed to release all
956  * memory that needs to be released.  What we are worried about doing
957  * is closing relations and dropping buffer pins.  Thus, for example,
958  * tuple tables must be cleared or dropped to ensure pins are released.
959  * ----------------------------------------------------------------
960  */
961 void
962 ExecEndPlan(PlanState *planstate, EState *estate)
963 {
964         ResultRelInfo *resultRelInfo;
965         int                     i;
966         ListCell   *l;
967
968         /*
969          * shut down any PlanQual processing we were doing
970          */
971         if (estate->es_evalPlanQual != NULL)
972                 EndEvalPlanQual(estate);
973
974         /*
975          * shut down the node-type-specific query processing
976          */
977         ExecEndNode(planstate);
978
979         /*
980          * destroy the executor "tuple" table.
981          */
982         ExecDropTupleTable(estate->es_tupleTable, true);
983         estate->es_tupleTable = NULL;
984
985         /*
986          * close the result relation(s) if any, but hold locks until xact commit.
987          */
988         resultRelInfo = estate->es_result_relations;
989         for (i = estate->es_num_result_relations; i > 0; i--)
990         {
991                 /* Close indices and then the relation itself */
992                 ExecCloseIndices(resultRelInfo);
993                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
994                 resultRelInfo++;
995         }
996
997         /*
998          * close the "into" relation if necessary, again keeping lock
999          */
1000         if (estate->es_into_relation_descriptor != NULL)
1001         {
1002                 /*
1003                  * If we skipped using WAL, and it's not a temp relation, we must
1004                  * force the relation down to disk before it's safe to commit the
1005                  * transaction.  This requires forcing out any dirty buffers and then
1006                  * doing a forced fsync.
1007                  */
1008                 if (!estate->es_into_relation_use_wal &&
1009                         !estate->es_into_relation_descriptor->rd_istemp)
1010                 {
1011                         FlushRelationBuffers(estate->es_into_relation_descriptor);
1012                         smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
1013                 }
1014
1015                 heap_close(estate->es_into_relation_descriptor, NoLock);
1016         }
1017
1018         /*
1019          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1020          */
1021         foreach(l, estate->es_rowMarks)
1022         {
1023                 execRowMark *erm = lfirst(l);
1024
1025                 heap_close(erm->relation, NoLock);
1026         }
1027 }
1028
1029 /* ----------------------------------------------------------------
1030  *              ExecutePlan
1031  *
1032  *              processes the query plan to retrieve 'numberTuples' tuples in the
1033  *              direction specified.
1034  *
1035  *              Retrieves all tuples if numberTuples is 0
1036  *
1037  *              result is either a slot containing the last tuple in the case
1038  *              of a SELECT or NULL otherwise.
1039  *
1040  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1041  * user can see it
1042  * ----------------------------------------------------------------
1043  */
1044 static TupleTableSlot *
1045 ExecutePlan(EState *estate,
1046                         PlanState *planstate,
1047                         CmdType operation,
1048                         long numberTuples,
1049                         ScanDirection direction,
1050                         DestReceiver *dest)
1051 {
1052         JunkFilter *junkfilter;
1053         TupleTableSlot *slot;
1054         ItemPointer tupleid = NULL;
1055         ItemPointerData tuple_ctid;
1056         long            current_tuple_count;
1057         TupleTableSlot *result;
1058
1059         /*
1060          * initialize local variables
1061          */
1062         slot = NULL;
1063         current_tuple_count = 0;
1064         result = NULL;
1065
1066         /*
1067          * Set the direction.
1068          */
1069         estate->es_direction = direction;
1070
1071         /*
1072          * Process BEFORE EACH STATEMENT triggers
1073          */
1074         switch (operation)
1075         {
1076                 case CMD_UPDATE:
1077                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1078                         break;
1079                 case CMD_DELETE:
1080                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1081                         break;
1082                 case CMD_INSERT:
1083                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1084                         break;
1085                 default:
1086                         /* do nothing */
1087                         break;
1088         }
1089
1090         /*
1091          * Loop until we've processed the proper number of tuples from the plan.
1092          */
1093
1094         for (;;)
1095         {
1096                 /* Reset the per-output-tuple exprcontext */
1097                 ResetPerTupleExprContext(estate);
1098
1099                 /*
1100                  * Execute the plan and obtain a tuple
1101                  */
1102 lnext:  ;
1103                 if (estate->es_useEvalPlan)
1104                 {
1105                         slot = EvalPlanQualNext(estate);
1106                         if (TupIsNull(slot))
1107                                 slot = ExecProcNode(planstate);
1108                 }
1109                 else
1110                         slot = ExecProcNode(planstate);
1111
1112                 /*
1113                  * if the tuple is null, then we assume there is nothing more to
1114                  * process so we just return null...
1115                  */
1116                 if (TupIsNull(slot))
1117                 {
1118                         result = NULL;
1119                         break;
1120                 }
1121
1122                 /*
1123                  * if we have a junk filter, then project a new tuple with the junk
1124                  * removed.
1125                  *
1126                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1127                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1128                  * because that tuple slot has the wrong descriptor.)
1129                  *
1130                  * Also, extract all the junk information we need.
1131                  */
1132                 if ((junkfilter = estate->es_junkFilter) != NULL)
1133                 {
1134                         Datum           datum;
1135                         bool            isNull;
1136
1137                         /*
1138                          * extract the 'ctid' junk attribute.
1139                          */
1140                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1141                         {
1142                                 if (!ExecGetJunkAttribute(junkfilter,
1143                                                                                   slot,
1144                                                                                   "ctid",
1145                                                                                   &datum,
1146                                                                                   &isNull))
1147                                         elog(ERROR, "could not find junk ctid column");
1148
1149                                 /* shouldn't ever get a null result... */
1150                                 if (isNull)
1151                                         elog(ERROR, "ctid is NULL");
1152
1153                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1154                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1155                                 tupleid = &tuple_ctid;
1156                         }
1157
1158                         /*
1159                          * Process any FOR UPDATE or FOR SHARE locking requested.
1160                          */
1161                         else if (estate->es_rowMarks != NIL)
1162                         {
1163                                 ListCell   *l;
1164
1165                 lmark:  ;
1166                                 foreach(l, estate->es_rowMarks)
1167                                 {
1168                                         execRowMark *erm = lfirst(l);
1169                                         HeapTupleData tuple;
1170                                         Buffer          buffer;
1171                                         ItemPointerData update_ctid;
1172                                         TransactionId update_xmax;
1173                                         TupleTableSlot *newSlot;
1174                                         LockTupleMode lockmode;
1175                                         HTSU_Result test;
1176
1177                                         if (!ExecGetJunkAttribute(junkfilter,
1178                                                                                           slot,
1179                                                                                           erm->resname,
1180                                                                                           &datum,
1181                                                                                           &isNull))
1182                                                 elog(ERROR, "could not find junk \"%s\" column",
1183                                                          erm->resname);
1184
1185                                         /* shouldn't ever get a null result... */
1186                                         if (isNull)
1187                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1188
1189                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1190
1191                                         if (estate->es_forUpdate)
1192                                                 lockmode = LockTupleExclusive;
1193                                         else
1194                                                 lockmode = LockTupleShared;
1195
1196                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1197                                                                                    &update_ctid, &update_xmax,
1198                                                                                    estate->es_snapshot->curcid,
1199                                                                                    lockmode, estate->es_rowNoWait);
1200                                         ReleaseBuffer(buffer);
1201                                         switch (test)
1202                                         {
1203                                                 case HeapTupleSelfUpdated:
1204                                                         /* treat it as deleted; do not process */
1205                                                         goto lnext;
1206
1207                                                 case HeapTupleMayBeUpdated:
1208                                                         break;
1209
1210                                                 case HeapTupleUpdated:
1211                                                         if (IsXactIsoLevelSerializable)
1212                                                                 ereport(ERROR,
1213                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1214                                                                   errmsg("could not serialize access due to concurrent update")));
1215                                                         if (!ItemPointerEquals(&update_ctid,
1216                                                                                                    &tuple.t_self))
1217                                                         {
1218                                                                 /* updated, so look at updated version */
1219                                                                 newSlot = EvalPlanQual(estate,
1220                                                                                                            erm->rti,
1221                                                                                                            &update_ctid,
1222                                                                                                            update_xmax);
1223                                                                 if (!TupIsNull(newSlot))
1224                                                                 {
1225                                                                         slot = newSlot;
1226                                                                         estate->es_useEvalPlan = true;
1227                                                                         goto lmark;
1228                                                                 }
1229                                                         }
1230
1231                                                         /*
1232                                                          * if tuple was deleted or PlanQual failed for
1233                                                          * updated tuple - we must not return this tuple!
1234                                                          */
1235                                                         goto lnext;
1236
1237                                                 default:
1238                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1239                                                                  test);
1240                                                         return (NULL);
1241                                         }
1242                                 }
1243                         }
1244
1245                         /*
1246                          * Finally create a new "clean" tuple with all junk attributes
1247                          * removed
1248                          */
1249                         slot = ExecFilterJunk(junkfilter, slot);
1250                 }
1251
1252                 /*
1253                  * now that we have a tuple, do the appropriate thing with it: either
1254                  * return it to the user, add it to a relation someplace, delete it
1255                  * from a relation, or modify some of its attributes.
1256                  */
1257                 switch (operation)
1258                 {
1259                         case CMD_SELECT:
1260                                 ExecSelect(slot,        /* slot containing tuple */
1261                                                    dest,        /* destination's tuple-receiver obj */
1262                                                    estate);
1263                                 result = slot;
1264                                 break;
1265
1266                         case CMD_INSERT:
1267                                 ExecInsert(slot, tupleid, estate);
1268                                 result = NULL;
1269                                 break;
1270
1271                         case CMD_DELETE:
1272                                 ExecDelete(slot, tupleid, estate);
1273                                 result = NULL;
1274                                 break;
1275
1276                         case CMD_UPDATE:
1277                                 ExecUpdate(slot, tupleid, estate);
1278                                 result = NULL;
1279                                 break;
1280
1281                         default:
1282                                 elog(ERROR, "unrecognized operation code: %d",
1283                                          (int) operation);
1284                                 result = NULL;
1285                                 break;
1286                 }
1287
1288                 /*
1289                  * Check our tuple count.  If we've processed the requested number,
1290                  * quit; else loop again and process more tuples.  Zero numberTuples
1291                  * means no limit.
1292                  */
1293                 current_tuple_count++;
1294                 if (numberTuples && numberTuples == current_tuple_count)
1295                         break;
1296         }
1297
1298         /*
1299          * Process AFTER EACH STATEMENT triggers
1300          */
1301         switch (operation)
1302         {
1303                 case CMD_UPDATE:
1304                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1305                         break;
1306                 case CMD_DELETE:
1307                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1308                         break;
1309                 case CMD_INSERT:
1310                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1311                         break;
1312                 default:
1313                         /* do nothing */
1314                         break;
1315         }
1316
1317         /*
1318          * here, result is either a slot containing a tuple in the case of a
1319          * SELECT or NULL otherwise.
1320          */
1321         return result;
1322 }
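
/*
 * Illustrative usage sketch: a caller that wants only the first N rows
 * passes numberTuples = N, e.g.
 *
 *              ExecutePlan(estate, planstate, CMD_SELECT, 10,
 *                                      ForwardScanDirection, dest);
 *
 * stops after ten tuples, whereas numberTuples = 0 runs the plan to
 * completion.  For INSERT/UPDATE/DELETE, the AFTER EACH STATEMENT triggers
 * fire once, after the retrieval loop, however many rows were processed.
 */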
1323
1324 /* ----------------------------------------------------------------
1325  *              ExecSelect
1326  *
1327  *              SELECTs are easy: we just pass the tuple to the destination
1328  *              receiver.  The only complexity is "SELECT INTO", in which
1329  *              case we also insert the tuple into the target relation
1330  *              (note: this is a newly created relation, so we don't need
1331  *              to worry about indexes or locks.)
1332  * ----------------------------------------------------------------
1333  */
1334 static void
1335 ExecSelect(TupleTableSlot *slot,
1336                    DestReceiver *dest,
1337                    EState *estate)
1338 {
1339         /*
1340          * insert the tuple into the "into relation"
1341          *
1342          * XXX this probably ought to be replaced by a separate destination
1343          */
1344         if (estate->es_into_relation_descriptor != NULL)
1345         {
1346                 HeapTuple       tuple;
1347
1348                 tuple = ExecCopySlotTuple(slot);
1349                 heap_insert(estate->es_into_relation_descriptor, tuple,
1350                                         estate->es_snapshot->curcid,
1351                                         estate->es_into_relation_use_wal,
1352                                         false);         /* never any point in using FSM */
1353                 /* we know there are no indexes to update */
1354                 heap_freetuple(tuple);
1355                 IncrAppended();
1356         }
1357
1358         /*
1359          * send the tuple to the destination
1360          */
1361         (*dest->receiveSlot) (slot, dest);
1362         IncrRetrieved();
1363         (estate->es_processed)++;
1364 }
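
/*
 * Illustrative sketch (table names are hypothetical): the "into relation"
 * path above is what services a statement such as
 *
 *              SELECT * INTO new_table FROM old_table;
 *
 * For each tuple produced by the plan, ExecSelect both copies it into the
 * target relation via heap_insert() and forwards it to the DestReceiver as
 * usual.  Because the target relation was created earlier in this same
 * command, it has no indexes yet and is not visible to any other
 * transaction, so no index maintenance or extra locking is needed here.
 */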
1365
1366 /* ----------------------------------------------------------------
1367  *              ExecInsert
1368  *
1369  *              INSERTs are trickier: we have to insert the tuple into
1370  *              the base relation and also insert appropriate entries into
1371  *              the index relations.
1372  * ----------------------------------------------------------------
1373  */
1374 static void
1375 ExecInsert(TupleTableSlot *slot,
1376                    ItemPointer tupleid,
1377                    EState *estate)
1378 {
1379         HeapTuple       tuple;
1380         ResultRelInfo *resultRelInfo;
1381         Relation        resultRelationDesc;
1382         Oid                     newId;
1383
1384         /*
1385          * get the heap tuple out of the tuple table slot, making sure we have a
1386          * writable copy
1387          */
1388         tuple = ExecMaterializeSlot(slot);
1389
1390         /*
1391          * get information on the (current) result relation
1392          */
1393         resultRelInfo = estate->es_result_relation_info;
1394         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1395
1396         /* BEFORE ROW INSERT Triggers */
1397         if (resultRelInfo->ri_TrigDesc &&
1398                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1399         {
1400                 HeapTuple       newtuple;
1401
1402                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1403
1404                 if (newtuple == NULL)   /* "do nothing" */
1405                         return;
1406
1407                 if (newtuple != tuple)  /* modified by Trigger(s) */
1408                 {
1409                         /*
1410                          * Put the modified tuple into a slot for convenience of routines
1411                          * below.  We assume the tuple was allocated in per-tuple memory
1412                          * context, and therefore will go away by itself. The tuple table
1413                          * slot should not try to clear it.
1414                          */
1415                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1416
1417                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1418                                 ExecSetSlotDescriptor(newslot,
1419                                                                           slot->tts_tupleDescriptor,
1420                                                                           false);
1421                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1422                         slot = newslot;
1423                         tuple = newtuple;
1424                 }
1425         }
1426
1427         /*
1428          * Check the constraints of the tuple
1429          */
1430         if (resultRelationDesc->rd_att->constr)
1431                 ExecConstraints(resultRelInfo, slot, estate);
1432
1433         /*
1434          * insert the tuple
1435          *
1436          * Note: heap_insert returns the tid (location) of the new tuple in the
1437          * t_self field.
1438          */
1439         newId = heap_insert(resultRelationDesc, tuple,
1440                                                 estate->es_snapshot->curcid,
1441                                                 true, true);
1442
1443         IncrAppended();
1444         (estate->es_processed)++;
1445         estate->es_lastoid = newId;
1446         setLastTid(&(tuple->t_self));
1447
1448         /*
1449          * insert index entries for tuple
1450          */
1451         if (resultRelInfo->ri_NumIndices > 0)
1452                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1453
1454         /* AFTER ROW INSERT Triggers */
1455         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1456 }
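
/*
 * Illustrative sketch (trigger and table names are hypothetical): a BEFORE
 * ROW INSERT trigger interacts with ExecInsert like this.  Given
 *
 *              CREATE TRIGGER ins_check BEFORE INSERT ON t
 *                      FOR EACH ROW EXECUTE PROCEDURE ins_check_fn();
 *
 * if ExecBRInsertTriggers() returns NULL (the trigger elected to "do
 * nothing"), ExecInsert returns without touching the heap; if it returns a
 * different tuple, that tuple is stored in es_trig_tuple_slot and used for
 * the constraint checks, heap_insert, index insertion, and the AFTER ROW
 * triggers.
 */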
1457
1458 /* ----------------------------------------------------------------
1459  *              ExecDelete
1460  *
1461  *              DELETE is like UPDATE, except that we delete the tuple and no
1462  *              index modifications are needed
1463  * ----------------------------------------------------------------
1464  */
1465 static void
1466 ExecDelete(TupleTableSlot *slot,
1467                    ItemPointer tupleid,
1468                    EState *estate)
1469 {
1470         ResultRelInfo *resultRelInfo;
1471         Relation        resultRelationDesc;
1472         HTSU_Result result;
1473         ItemPointerData update_ctid;
1474         TransactionId update_xmax;
1475
1476         /*
1477          * get information on the (current) result relation
1478          */
1479         resultRelInfo = estate->es_result_relation_info;
1480         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1481
1482         /* BEFORE ROW DELETE Triggers */
1483         if (resultRelInfo->ri_TrigDesc &&
1484                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1485         {
1486                 bool            dodelete;
1487
1488                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1489                                                                                 estate->es_snapshot->curcid);
1490
1491                 if (!dodelete)                  /* "do nothing" */
1492                         return;
1493         }
1494
1495         /*
1496          * delete the tuple
1497          *
1498          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1499          * the row to be deleted is visible to that snapshot, and throw a
1500          * can't-serialize error if not.  This is a special-case behavior needed
1501          * for referential integrity updates in serializable transactions.
1502          */
1503 ldelete:;
1504         result = heap_delete(resultRelationDesc, tupleid,
1505                                                  &update_ctid, &update_xmax,
1506                                                  estate->es_snapshot->curcid,
1507                                                  estate->es_crosscheck_snapshot,
1508                                                  true /* wait for commit */ );
1509         switch (result)
1510         {
1511                 case HeapTupleSelfUpdated:
1512                         /* already deleted by self; nothing to do */
1513                         return;
1514
1515                 case HeapTupleMayBeUpdated:
1516                         break;
1517
1518                 case HeapTupleUpdated:
1519                         if (IsXactIsoLevelSerializable)
1520                                 ereport(ERROR,
1521                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1522                                                  errmsg("could not serialize access due to concurrent update")));
1523                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1524                         {
1525                                 TupleTableSlot *epqslot;
1526
1527                                 epqslot = EvalPlanQual(estate,
1528                                                                            resultRelInfo->ri_RangeTableIndex,
1529                                                                            &update_ctid,
1530                                                                            update_xmax);
1531                                 if (!TupIsNull(epqslot))
1532                                 {
1533                                         *tupleid = update_ctid;
1534                                         goto ldelete;
1535                                 }
1536                         }
1537                         /* tuple already deleted; nothing to do */
1538                         return;
1539
1540                 default:
1541                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1542                         return;
1543         }
1544
1545         IncrDeleted();
1546         (estate->es_processed)++;
1547
1548         /*
1549          * Note: Normally one would think that we have to delete index tuples
1550          * associated with the heap tuple now...
1551          *
1552          * ... but in POSTGRES, we have no need to do this because VACUUM will
1553          * take care of it later.  We can't delete index tuples immediately
1554          * anyway, since the tuple is still visible to other transactions.
1555          */
1556
1557         /* AFTER ROW DELETE Triggers */
1558         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1559 }
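
/*
 * Illustrative scenario: under READ COMMITTED, if another backend updates
 * the target row and commits after our snapshot was taken, heap_delete
 * reports HeapTupleUpdated.  ExecDelete then calls EvalPlanQual() on the
 * updated version; if that version still satisfies the query's quals,
 * *tupleid is replaced with update_ctid and control jumps back to ldelete to
 * try deleting the new version.  Under SERIALIZABLE the same situation
 * instead raises a serialization-failure error.
 */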
1560
1561 /* ----------------------------------------------------------------
1562  *              ExecUpdate
1563  *
1564  *              note: we can't run UPDATE queries with transactions
1565  *              off, because UPDATEs are actually INSERTs and our
1566  *              scan would mistakenly loop forever, updating the tuple
1567  *              it just inserted.  This should be fixed, but until it
1568  *              is, we don't want to get stuck in an infinite loop
1569  *              that corrupts your database.
1570  * ----------------------------------------------------------------
1571  */
1572 static void
1573 ExecUpdate(TupleTableSlot *slot,
1574                    ItemPointer tupleid,
1575                    EState *estate)
1576 {
1577         HeapTuple       tuple;
1578         ResultRelInfo *resultRelInfo;
1579         Relation        resultRelationDesc;
1580         HTSU_Result result;
1581         ItemPointerData update_ctid;
1582         TransactionId update_xmax;
1583
1584         /*
1585          * abort the operation if not running transactions
1586          */
1587         if (IsBootstrapProcessingMode())
1588                 elog(ERROR, "cannot UPDATE during bootstrap");
1589
1590         /*
1591          * get the heap tuple out of the tuple table slot, making sure we have a
1592          * writable copy
1593          */
1594         tuple = ExecMaterializeSlot(slot);
1595
1596         /*
1597          * get information on the (current) result relation
1598          */
1599         resultRelInfo = estate->es_result_relation_info;
1600         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1601
1602         /* BEFORE ROW UPDATE Triggers */
1603         if (resultRelInfo->ri_TrigDesc &&
1604                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1605         {
1606                 HeapTuple       newtuple;
1607
1608                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1609                                                                                 tupleid, tuple,
1610                                                                                 estate->es_snapshot->curcid);
1611
1612                 if (newtuple == NULL)   /* "do nothing" */
1613                         return;
1614
1615                 if (newtuple != tuple)  /* modified by Trigger(s) */
1616                 {
1617                         /*
1618                          * Put the modified tuple into a slot for convenience of routines
1619                          * below.  We assume the tuple was allocated in per-tuple memory
1620                          * context, and therefore will go away by itself. The tuple table
1621                          * slot should not try to clear it.
1622                          */
1623                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1624
1625                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1626                                 ExecSetSlotDescriptor(newslot,
1627                                                                           slot->tts_tupleDescriptor,
1628                                                                           false);
1629                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1630                         slot = newslot;
1631                         tuple = newtuple;
1632                 }
1633         }
1634
1635         /*
1636          * Check the constraints of the tuple
1637          *
1638          * If we generate a new candidate tuple after EvalPlanQual testing, we
1639          * must loop back here and recheck constraints.  (We don't need to redo
1640          * triggers, however.  If there are any BEFORE triggers then trigger.c
1641          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1642          * need to do them again.)
1643          */
1644 lreplace:;
1645         if (resultRelationDesc->rd_att->constr)
1646                 ExecConstraints(resultRelInfo, slot, estate);
1647
1648         /*
1649          * replace the heap tuple
1650          *
1651          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1652          * the row to be updated is visible to that snapshot, and throw a
1653          * can't-serialize error if not.  This is a special-case behavior needed
1654          * for referential integrity updates in serializable transactions.
1655          */
1656         result = heap_update(resultRelationDesc, tupleid, tuple,
1657                                                  &update_ctid, &update_xmax,
1658                                                  estate->es_snapshot->curcid,
1659                                                  estate->es_crosscheck_snapshot,
1660                                                  true /* wait for commit */ );
1661         switch (result)
1662         {
1663                 case HeapTupleSelfUpdated:
1664                         /* already deleted by self; nothing to do */
1665                         return;
1666
1667                 case HeapTupleMayBeUpdated:
1668                         break;
1669
1670                 case HeapTupleUpdated:
1671                         if (IsXactIsoLevelSerializable)
1672                                 ereport(ERROR,
1673                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1674                                                  errmsg("could not serialize access due to concurrent update")));
1675                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1676                         {
1677                                 TupleTableSlot *epqslot;
1678
1679                                 epqslot = EvalPlanQual(estate,
1680                                                                            resultRelInfo->ri_RangeTableIndex,
1681                                                                            &update_ctid,
1682                                                                            update_xmax);
1683                                 if (!TupIsNull(epqslot))
1684                                 {
1685                                         *tupleid = update_ctid;
1686                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1687                                         tuple = ExecMaterializeSlot(slot);
1688                                         goto lreplace;
1689                                 }
1690                         }
1691                         /* tuple already deleted; nothing to do */
1692                         return;
1693
1694                 default:
1695                         elog(ERROR, "unrecognized heap_update status: %u", result);
1696                         return;
1697         }
1698
1699         IncrReplaced();
1700         (estate->es_processed)++;
1701
1702         /*
1703          * Note: instead of having to update the old index tuples associated with
1704          * the heap tuple, all we do is form and insert new index tuples. This is
1705          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1706          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1707          * here is insert new index tuples.  -cim 9/27/89
1708          */
1709
1710         /*
1711          * insert index entries for tuple
1712          *
1713          * Note: heap_update returns the tid (location) of the new tuple in the
1714          * t_self field.
1715          */
1716         if (resultRelInfo->ri_NumIndices > 0)
1717                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1718
1719         /* AFTER ROW UPDATE Triggers */
1720         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1721 }
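
/*
 * Illustrative scenario: the lreplace loop exists because a concurrent
 * committed UPDATE can force us to re-derive our new tuple.  When
 * heap_update reports HeapTupleUpdated under READ COMMITTED, EvalPlanQual()
 * re-evaluates the query against the latest row version; the resulting slot
 * is passed through the junk filter, re-materialized, and control jumps back
 * to lreplace so the NOT NULL and CHECK constraints are verified against the
 * new candidate tuple before heap_update is retried.  BEFORE triggers are
 * not re-fired, for the reason noted in the comment above lreplace.
 */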
1722
1723 static const char *
1724 ExecRelCheck(ResultRelInfo *resultRelInfo,
1725                          TupleTableSlot *slot, EState *estate)
1726 {
1727         Relation        rel = resultRelInfo->ri_RelationDesc;
1728         int                     ncheck = rel->rd_att->constr->num_check;
1729         ConstrCheck *check = rel->rd_att->constr->check;
1730         ExprContext *econtext;
1731         MemoryContext oldContext;
1732         List       *qual;
1733         int                     i;
1734
1735         /*
1736          * If first time through for this result relation, build expression
1737          * node trees for rel's constraint expressions.  Keep them in the per-query
1738          * memory context so they'll survive throughout the query.
1739          */
1740         if (resultRelInfo->ri_ConstraintExprs == NULL)
1741         {
1742                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1743                 resultRelInfo->ri_ConstraintExprs =
1744                         (List **) palloc(ncheck * sizeof(List *));
1745                 for (i = 0; i < ncheck; i++)
1746                 {
1747                         /* ExecQual wants implicit-AND form */
1748                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1749                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1750                                 ExecPrepareExpr((Expr *) qual, estate);
1751                 }
1752                 MemoryContextSwitchTo(oldContext);
1753         }
1754
1755         /*
1756          * We will use the EState's per-tuple context for evaluating constraint
1757          * expressions (creating it if it's not already there).
1758          */
1759         econtext = GetPerTupleExprContext(estate);
1760
1761         /* Arrange for econtext's scan tuple to be the tuple under test */
1762         econtext->ecxt_scantuple = slot;
1763
1764         /* And evaluate the constraints */
1765         for (i = 0; i < ncheck; i++)
1766         {
1767                 qual = resultRelInfo->ri_ConstraintExprs[i];
1768
1769                 /*
1770                  * NOTE: SQL92 specifies that a NULL result from a constraint
1771                  * expression is not to be treated as a failure.  Therefore, tell
1772                  * ExecQual to return TRUE for NULL.
1773                  */
1774                 if (!ExecQual(qual, econtext, true))
1775                         return check[i].ccname;
1776         }
1777
1778         /* NULL result means no error */
1779         return NULL;
1780 }
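
/*
 * Illustrative example (table and column names are hypothetical): per SQL92,
 * a CHECK constraint fails only when its expression evaluates to FALSE, so a
 * NULL result is accepted.  Given
 *
 *              CREATE TABLE prices (amount numeric CHECK (amount > 0));
 *              INSERT INTO prices VALUES (NULL);       -- passes: CHECK yields NULL
 *              INSERT INTO prices VALUES (-1);         -- fails: CHECK yields FALSE
 *
 * the first INSERT is accepted and the second rejected; this is why ExecQual
 * is called with resultForNull = true above.
 */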
1781
1782 void
1783 ExecConstraints(ResultRelInfo *resultRelInfo,
1784                                 TupleTableSlot *slot, EState *estate)
1785 {
1786         Relation        rel = resultRelInfo->ri_RelationDesc;
1787         TupleConstr *constr = rel->rd_att->constr;
1788
1789         Assert(constr);
1790
1791         if (constr->has_not_null)
1792         {
1793                 int                     natts = rel->rd_att->natts;
1794                 int                     attrChk;
1795
1796                 for (attrChk = 1; attrChk <= natts; attrChk++)
1797                 {
1798                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1799                                 slot_attisnull(slot, attrChk))
1800                                 ereport(ERROR,
1801                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1802                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1803                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1804                 }
1805         }
1806
1807         if (constr->num_check > 0)
1808         {
1809                 const char *failed;
1810
1811                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1812                         ereport(ERROR,
1813                                         (errcode(ERRCODE_CHECK_VIOLATION),
1814                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1815                                                         RelationGetRelationName(rel), failed)));
1816         }
1817 }
1818
1819 /*
1820  * Check a modified tuple to see if we want to process its updated version
1821  * under READ COMMITTED rules.
1822  *
1823  * See backend/executor/README for some info about how this works.
1824  *
1825  *      estate - executor state data
1826  *      rti - rangetable index of table containing tuple
1827  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1828  *      priorXmax - t_xmax from the outdated tuple
1829  *
1830  * *tid is also an output parameter: it's modified to hold the TID of the
1831  * latest version of the tuple (note this may be changed even on failure)
1832  *
1833  * Returns a slot containing the new candidate update/delete tuple, or
1834  * NULL if we determine we shouldn't process the row.
1835  */
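/*
 * Worked example (table, column, and values are hypothetical): session A
 * runs "UPDATE accounts SET balance = balance - 100 WHERE id = 1" and
 * commits while session B, in READ COMMITTED mode, is updating the same row.
 * B's heap_update returns HeapTupleUpdated, and ExecUpdate calls
 * EvalPlanQual() with the result relation's range-table index, &update_ctid,
 * and update_xmax.  This routine follows the t_ctid chain from the outdated
 * tuple to A's committed version, copies it, and re-runs the plan with that
 * version substituted for the scan of that RTE; if the row still passes the
 * quals, the returned slot becomes B's new candidate tuple.
 */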
1836 TupleTableSlot *
1837 EvalPlanQual(EState *estate, Index rti,
1838                          ItemPointer tid, TransactionId priorXmax)
1839 {
1840         evalPlanQual *epq;
1841         EState     *epqstate;
1842         Relation        relation;
1843         HeapTupleData tuple;
1844         HeapTuple       copyTuple = NULL;
1845         bool            endNode;
1846
1847         Assert(rti != 0);
1848
1849         /*
1850          * find relation containing target tuple
1851          */
1852         if (estate->es_result_relation_info != NULL &&
1853                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1854                 relation = estate->es_result_relation_info->ri_RelationDesc;
1855         else
1856         {
1857                 ListCell   *l;
1858
1859                 relation = NULL;
1860                 foreach(l, estate->es_rowMarks)
1861                 {
1862                         if (((execRowMark *) lfirst(l))->rti == rti)
1863                         {
1864                                 relation = ((execRowMark *) lfirst(l))->relation;
1865                                 break;
1866                         }
1867                 }
1868                 if (relation == NULL)
1869                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1870         }
1871
1872         /*
1873          * fetch the tuple identified by tid
1874          *
1875          * Loop here to deal with updated or busy tuples
1876          */
1877         tuple.t_self = *tid;
1878         for (;;)
1879         {
1880                 Buffer          buffer;
1881
1882                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
1883                 {
1884                         /*
1885                          * If xmin isn't what we're expecting, the slot must have been
1886                          * recycled and reused for an unrelated tuple.  This implies that
1887                          * the latest version of the row was deleted, so we need do
1888                          * nothing.  (Should be safe to examine xmin without getting
1889                          * buffer's content lock, since xmin never changes in an existing
1890                          * tuple.)
1891                          */
1892                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1893                                                                          priorXmax))
1894                         {
1895                                 ReleaseBuffer(buffer);
1896                                 return NULL;
1897                         }
1898
1899                         /* otherwise xmin should not be dirty... */
1900                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1901                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1902
1903                         /*
1904                          * If tuple is being updated by other transaction then we have to
1905                          * wait for its commit/abort.
1906                          */
1907                         if (TransactionIdIsValid(SnapshotDirty->xmax))
1908                         {
1909                                 ReleaseBuffer(buffer);
1910                                 XactLockTableWait(SnapshotDirty->xmax);
1911                                 continue;               /* loop back to repeat heap_fetch */
1912                         }
1913
1914                         /*
1915                          * We got tuple - now copy it for use by recheck query.
1916                          */
1917                         copyTuple = heap_copytuple(&tuple);
1918                         ReleaseBuffer(buffer);
1919                         break;
1920                 }
1921
1922                 /*
1923                  * If the referenced slot was actually empty, the latest version of
1924                  * the row must have been deleted, so we need do nothing.
1925                  */
1926                 if (tuple.t_data == NULL)
1927                 {
1928                         ReleaseBuffer(buffer);
1929                         return NULL;
1930                 }
1931
1932                 /*
1933                  * As above, if xmin isn't what we're expecting, do nothing.
1934                  */
1935                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1936                                                                  priorXmax))
1937                 {
1938                         ReleaseBuffer(buffer);
1939                         return NULL;
1940                 }
1941
1942                 /*
1943                  * If we get here, the tuple was found but failed SnapshotDirty.
1944                  * Assuming the xmin is either a committed xact or our own xact (as it
1945                  * certainly should be if we're trying to modify the tuple), this must
1946                  * mean that the row was updated or deleted by either a committed xact
1947                  * or our own xact.  If it was deleted, we can ignore it; if it was
1948                  * updated then chain up to the next version and repeat the whole
1949                  * test.
1950                  *
1951                  * As above, it should be safe to examine xmax and t_ctid without the
1952                  * buffer content lock, because they can't be changing.
1953                  */
1954                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1955                 {
1956                         /* deleted, so forget about it */
1957                         ReleaseBuffer(buffer);
1958                         return NULL;
1959                 }
1960
1961                 /* updated, so look at the updated row */
1962                 tuple.t_self = tuple.t_data->t_ctid;
1963                 /* updated row should have xmin matching this xmax */
1964                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1965                 ReleaseBuffer(buffer);
1966                 /* loop back to fetch next in chain */
1967         }
1968
1969         /*
1970          * For UPDATE/DELETE we have to return the tid of the actual row we're
1971          * executing PQ for.
1972          */
1973         *tid = tuple.t_self;
1974
1975         /*
1976          * Need to run a recheck subquery.  Find or create a PQ stack entry.
1977          */
1978         epq = estate->es_evalPlanQual;
1979         endNode = true;
1980
1981         if (epq != NULL && epq->rti == 0)
1982         {
1983                 /* Top PQ stack entry is idle, so re-use it */
1984                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1985                 epq->rti = rti;
1986                 endNode = false;
1987         }
1988
1989         /*
1990          * If this is a request for another RTE (call it Ra), check whether
1991          * PlanQual was already requested for Ra.  If so, Ra's row was updated
1992          * again, so we must restart the old execution for Ra and discard
1993          * everything done after Ra was suspended.
1994          */
1995         if (epq != NULL && epq->rti != rti &&
1996                 epq->estate->es_evTuple[rti - 1] != NULL)
1997         {
1998                 do
1999                 {
2000                         evalPlanQual *oldepq;
2001
2002                         /* stop execution */
2003                         EvalPlanQualStop(epq);
2004                         /* pop previous PlanQual from the stack */
2005                         oldepq = epq->next;
2006                         Assert(oldepq && oldepq->rti != 0);
2007                         /* push current PQ to freePQ stack */
2008                         oldepq->free = epq;
2009                         epq = oldepq;
2010                         estate->es_evalPlanQual = epq;
2011                 } while (epq->rti != rti);
2012         }
2013
2014         /*
2015          * If the request is for a different RTE, we have to suspend execution
2016          * of the current PlanQual and start execution for the new one.
2017          */
2018         if (epq == NULL || epq->rti != rti)
2019         {
2020                 /* try to reuse plan used previously */
2021                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2022
2023                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2024                 {
2025                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2026                         newepq->free = NULL;
2027                         newepq->estate = NULL;
2028                         newepq->planstate = NULL;
2029                 }
2030                 else
2031                 {
2032                         /* recycle previously used PlanQual */
2033                         Assert(newepq->estate == NULL);
2034                         epq->free = NULL;
2035                 }
2036                 /* push current PQ to the stack */
2037                 newepq->next = epq;
2038                 epq = newepq;
2039                 estate->es_evalPlanQual = epq;
2040                 epq->rti = rti;
2041                 endNode = false;
2042         }
2043
2044         Assert(epq->rti == rti);
2045
2046         /*
2047          * OK, the request is for the same RTE.  Unfortunately we still have to
2048          * end and restart execution of the plan, because ExecReScan wouldn't
2049          * ensure that upper plan nodes reset themselves.  We could make
2050          * that work if insertion of the target tuple were integrated with the
2051          * Param mechanism somehow, so that the upper plan nodes know that their
2052          * children's outputs have changed.
2053          *
2054          * Note that the stack of free evalPlanQual nodes is quite useless at the
2055          * moment, since it only saves us from pallocing/releasing the
2056          * evalPlanQual nodes themselves.  But it will be useful once we implement
2057          * ReScan instead of end/restart for re-using PlanQual nodes.
2058          */
2059         if (endNode)
2060         {
2061                 /* stop execution */
2062                 EvalPlanQualStop(epq);
2063         }
2064
2065         /*
2066          * Initialize new recheck query.
2067          *
2068          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2069          * instead copy down changeable state from the top plan (including
2070          * es_result_relation_info, es_junkFilter) and reset locally changeable
2071          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2072          */
2073         EvalPlanQualStart(epq, estate, epq->next);
2074
2075         /*
2076          * free the old RTE's tuple, if any, and store the target tuple where the
2077          * relation's scan node will see it
2078          */
2079         epqstate = epq->estate;
2080         if (epqstate->es_evTuple[rti - 1] != NULL)
2081                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2082         epqstate->es_evTuple[rti - 1] = copyTuple;
2083
2084         return EvalPlanQualNext(estate);
2085 }
2086
2087 static TupleTableSlot *
2088 EvalPlanQualNext(EState *estate)
2089 {
2090         evalPlanQual *epq = estate->es_evalPlanQual;
2091         MemoryContext oldcontext;
2092         TupleTableSlot *slot;
2093
2094         Assert(epq->rti != 0);
2095
2096 lpqnext:;
2097         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2098         slot = ExecProcNode(epq->planstate);
2099         MemoryContextSwitchTo(oldcontext);
2100
2101         /*
2102          * No more tuples for this PQ; continue with the previous one.
2103          */
2104         if (TupIsNull(slot))
2105         {
2106                 evalPlanQual *oldepq;
2107
2108                 /* stop execution */
2109                 EvalPlanQualStop(epq);
2110                 /* pop old PQ from the stack */
2111                 oldepq = epq->next;
2112                 if (oldepq == NULL)
2113                 {
2114                         /* this is the first (oldest) PQ - mark as free */
2115                         epq->rti = 0;
2116                         estate->es_useEvalPlan = false;
2117                         /* and continue Query execution */
2118                         return (NULL);
2119                 }
2120                 Assert(oldepq->rti != 0);
2121                 /* push current PQ to freePQ stack */
2122                 oldepq->free = epq;
2123                 epq = oldepq;
2124                 estate->es_evalPlanQual = epq;
2125                 goto lpqnext;
2126         }
2127
2128         return (slot);
2129 }
2130
2131 static void
2132 EndEvalPlanQual(EState *estate)
2133 {
2134         evalPlanQual *epq = estate->es_evalPlanQual;
2135
2136         if (epq->rti == 0)                      /* plans already shut down */
2137         {
2138                 Assert(epq->next == NULL);
2139                 return;
2140         }
2141
2142         for (;;)
2143         {
2144                 evalPlanQual *oldepq;
2145
2146                 /* stop execution */
2147                 EvalPlanQualStop(epq);
2148                 /* pop old PQ from the stack */
2149                 oldepq = epq->next;
2150                 if (oldepq == NULL)
2151                 {
2152                         /* this is the first (oldest) PQ - mark as free */
2153                         epq->rti = 0;
2154                         estate->es_useEvalPlan = false;
2155                         break;
2156                 }
2157                 Assert(oldepq->rti != 0);
2158                 /* push current PQ to freePQ stack */
2159                 oldepq->free = epq;
2160                 epq = oldepq;
2161                 estate->es_evalPlanQual = epq;
2162         }
2163 }
2164
2165 /*
2166  * Start execution of one level of PlanQual.
2167  *
2168  * This is a cut-down version of ExecutorStart(): we copy some state from
2169  * the top-level estate rather than initializing it fresh.
2170  */
2171 static void
2172 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2173 {
2174         EState     *epqstate;
2175         int                     rtsize;
2176         MemoryContext oldcontext;
2177
2178         rtsize = list_length(estate->es_range_table);
2179
2180         epq->estate = epqstate = CreateExecutorState();
2181
2182         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2183
2184         /*
2185          * The epqstates share the top query's copy of unchanging state such as
2186          * the snapshot, rangetable, result-rel info, and external Param info.
2187          * They need their own copies of local state, including a tuple table,
2188          * es_param_exec_vals, etc.
2189          */
2190         epqstate->es_direction = ForwardScanDirection;
2191         epqstate->es_snapshot = estate->es_snapshot;
2192         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2193         epqstate->es_range_table = estate->es_range_table;
2194         epqstate->es_result_relations = estate->es_result_relations;
2195         epqstate->es_num_result_relations = estate->es_num_result_relations;
2196         epqstate->es_result_relation_info = estate->es_result_relation_info;
2197         epqstate->es_junkFilter = estate->es_junkFilter;
2198         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2199         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2200         epqstate->es_param_list_info = estate->es_param_list_info;
2201         if (estate->es_topPlan->nParamExec > 0)
2202                 epqstate->es_param_exec_vals = (ParamExecData *)
2203                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2204         epqstate->es_rowMarks = estate->es_rowMarks;
2205         epqstate->es_forUpdate = estate->es_forUpdate;
2206         epqstate->es_rowNoWait = estate->es_rowNoWait;
2207         epqstate->es_instrument = estate->es_instrument;
2208         epqstate->es_select_into = estate->es_select_into;
2209         epqstate->es_into_oids = estate->es_into_oids;
2210         epqstate->es_topPlan = estate->es_topPlan;
2211
2212         /*
2213          * Each epqstate must have its own es_evTupleNull state, but all the stack
2214          * entries share es_evTuple state.  This allows sub-rechecks to inherit
2215          * the value being examined by an outer recheck.
2216          */
2217         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2218         if (priorepq == NULL)
2219                 /* first PQ stack entry */
2220                 epqstate->es_evTuple = (HeapTuple *)
2221                         palloc0(rtsize * sizeof(HeapTuple));
2222         else
2223                 /* later stack entries share the same storage */
2224                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2225
2226         epqstate->es_tupleTable =
2227                 ExecCreateTupleTable(estate->es_tupleTable->size);
2228
2229         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2230
2231         MemoryContextSwitchTo(oldcontext);
2232 }
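
/*
 * Design note: the recheck EState deliberately aliases read-only state from
 * the parent estate (snapshot, range table, result-relation info, row marks)
 * while allocating fresh copies of anything the recheck can modify
 * (es_param_exec_vals, es_evTupleNull, its tuple table).  es_evTuple is
 * shared across all stacked epqstates so that a nested recheck sees the
 * tuple an outer recheck is already substituting for its scan node.
 */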
2233
2234 /*
2235  * End execution of one level of PlanQual.
2236  *
2237  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2238  * of the normal cleanup, but *not* close result relations (which we are
2239  * just sharing from the outer query).
2240  */
2241 static void
2242 EvalPlanQualStop(evalPlanQual *epq)
2243 {
2244         EState     *epqstate = epq->estate;
2245         MemoryContext oldcontext;
2246
2247         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2248
2249         ExecEndNode(epq->planstate);
2250
2251         ExecDropTupleTable(epqstate->es_tupleTable, true);
2252         epqstate->es_tupleTable = NULL;
2253
2254         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2255         {
2256                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2257                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2258         }
2259
2260         MemoryContextSwitchTo(oldcontext);
2261
2262         FreeExecutorState(epqstate);
2263
2264         epq->estate = NULL;
2265         epq->planstate = NULL;
2266 }