1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
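 *      As an illustrative sketch (not the authoritative calling code, which
 *      lives in the traffic cop), a caller holding a QueryDesc "qdesc" built
 *      by CreateQueryDesc() would typically drive a query to completion so:
 *
 *              ExecutorStart(qdesc, false, false);
 *              (void) ExecutorRun(qdesc, ForwardScanDirection, 0L);
 *              ExecutorEnd(qdesc);
 *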
24  * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.228 2004/01/22 02:23:21 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "catalog/heap.h"
37 #include "catalog/namespace.h"
38 #include "commands/tablecmds.h"
39 #include "commands/trigger.h"
40 #include "executor/execdebug.h"
41 #include "executor/execdefs.h"
42 #include "miscadmin.h"
43 #include "optimizer/clauses.h"
44 #include "optimizer/var.h"
45 #include "parser/parsetree.h"
46 #include "utils/acl.h"
47 #include "utils/guc.h"
48 #include "utils/lsyscache.h"
49
50
51 typedef struct execRowMark
52 {
53         Relation        relation;
54         Index           rti;
55         char            resname[32];
56 } execRowMark;
57
58 typedef struct evalPlanQual
59 {
60         Index           rti;
61         EState     *estate;
62         PlanState  *planstate;
63         struct evalPlanQual *next;      /* stack of active PlanQual plans */
64         struct evalPlanQual *free;      /* list of free PlanQual plans */
65 } evalPlanQual;
66
67 /* decls for local routines only used within this module */
68 static void InitPlan(QueryDesc *queryDesc, bool explainOnly);
69 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
70                                   Index resultRelationIndex,
71                                   List *rangeTable,
72                                   CmdType operation);
73 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
74                         CmdType operation,
75                         long numberTuples,
76                         ScanDirection direction,
77                         DestReceiver *dest);
78 static void ExecSelect(TupleTableSlot *slot,
79                    DestReceiver *dest,
80                    EState *estate);
81 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
82                    EState *estate);
83 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
84                    EState *estate);
85 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
86                    EState *estate);
87 static TupleTableSlot *EvalPlanQualNext(EState *estate);
88 static void EndEvalPlanQual(EState *estate);
89 static void ExecCheckRTEPerms(RangeTblEntry *rte);
90 static void ExecCheckXactReadOnly(Query *parsetree);
91 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
92                                   evalPlanQual *priorepq);
93 static void EvalPlanQualStop(evalPlanQual *epq);
94
95 /* end of local decls */
96
97
98 /* ----------------------------------------------------------------
99  *              ExecutorStart
100  *
101  *              This routine must be called at the beginning of any execution of any
102  *              query plan
103  *
104  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
105  * clear why we bother to separate the two functions, but...).  The tupDesc
106  * field of the QueryDesc is filled in to describe the tuples that will be
107  * returned, and the internal fields (estate and planstate) are set up.
108  *
109  * If useCurrentSnapshot is true, run the query with the latest available
110  * snapshot, instead of the normal QuerySnapshot.  Also, if it's an update
111  * or delete query, check that the rows to be updated or deleted would be
112  * visible to the normal QuerySnapshot.  (This is a special-case behavior
113  * needed for referential integrity updates in serializable transactions.
114  * We must check all currently-committed rows, but we want to throw a
115  * can't-serialize error if any rows that would need updates would not be
116  * visible under the normal serializable snapshot.)
117  *
118  * If explainOnly is true, we are not actually intending to run the plan,
119  * only to set up for EXPLAIN; so skip unwanted side-effects.
120  *
121  * NB: the CurrentMemoryContext when this is called will become the parent
122  * of the per-query context used for this Executor invocation.
123  * ----------------------------------------------------------------
124  */
125 void
126 ExecutorStart(QueryDesc *queryDesc, bool useCurrentSnapshot, bool explainOnly)
127 {
128         EState     *estate;
129         MemoryContext oldcontext;
130
131         /* sanity checks: queryDesc must not be started already */
132         Assert(queryDesc != NULL);
133         Assert(queryDesc->estate == NULL);
134
135         /*
136          * If the transaction is read-only, we need to check if any writes are
137          * planned to non-temporary tables.
138          */
139         if (XactReadOnly && !explainOnly)
140                 ExecCheckXactReadOnly(queryDesc->parsetree);
141
142         /*
143          * Build EState, switch into per-query memory context for startup.
144          */
145         estate = CreateExecutorState();
146         queryDesc->estate = estate;
147
148         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
149
150         /*
151          * Fill in parameters, if any, from queryDesc
152          */
153         estate->es_param_list_info = queryDesc->params;
154
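        /* Allocate zeroed entries for executor-internal (PARAM_EXEC) parameter values */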
155         if (queryDesc->plantree->nParamExec > 0)
156                 estate->es_param_exec_vals = (ParamExecData *)
157                         palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
158
159         estate->es_instrument = queryDesc->doInstrument;
160
161         /*
162          * Make our own private copy of the current query snapshot data.
163          *
164          * This "freezes" our idea of which tuples are good and which are not for
165          * the life of this query, even if it outlives the current command and
166          * current snapshot.
167          */
168         if (useCurrentSnapshot)
169         {
170                 /* RI update/delete query --- must use an up-to-date snapshot */
171                 estate->es_snapshot = CopyCurrentSnapshot();
172                 /* crosscheck updates/deletes against transaction snapshot */
173                 estate->es_crosscheck_snapshot = CopyQuerySnapshot();
174         }
175         else
176         {
177                 /* normal query --- use query snapshot, no crosscheck */
178                 estate->es_snapshot = CopyQuerySnapshot();
179                 estate->es_crosscheck_snapshot = SnapshotAny;
180         }
181
182         /*
183          * Initialize the plan state tree
184          */
185         InitPlan(queryDesc, explainOnly);
186
187         MemoryContextSwitchTo(oldcontext);
188 }
189
190 /* ----------------------------------------------------------------
191  *              ExecutorRun
192  *
193  *              This is the main routine of the executor module. It accepts
194  *              the query descriptor from the traffic cop and executes the
195  *              query plan.
196  *
197  *              ExecutorStart must have been called already.
198  *
199  *              If direction is NoMovementScanDirection then nothing is done
200  *              except to start up/shut down the destination.  Otherwise,
201  *              we retrieve up to 'count' tuples in the specified direction.
202  *
203  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
204  *              completion.
205  *
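 *              As a hedged illustration: a caller fetching in batches might invoke
 *              ExecutorRun(queryDesc, ForwardScanDirection, 10L) repeatedly, and
 *              stop once the result comes back NULL (the plan is exhausted).
 *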
206  * ----------------------------------------------------------------
207  */
208 TupleTableSlot *
209 ExecutorRun(QueryDesc *queryDesc,
210                         ScanDirection direction, long count)
211 {
212         EState     *estate;
213         CmdType         operation;
214         DestReceiver *dest;
215         TupleTableSlot *result;
216         MemoryContext oldcontext;
217
218         /* sanity checks */
219         Assert(queryDesc != NULL);
220
221         estate = queryDesc->estate;
222
223         Assert(estate != NULL);
224
225         /*
226          * Switch into per-query memory context
227          */
228         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
229
230         /*
231          * extract information from the query descriptor (the operation and
232          * the destination receiver).
233          */
234         operation = queryDesc->operation;
235         dest = queryDesc->dest;
236
237         /*
238          * startup tuple receiver
239          */
240         estate->es_processed = 0;
241         estate->es_lastoid = InvalidOid;
242
243         (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
244
245         /*
246          * run plan
247          */
248         if (direction == NoMovementScanDirection)
249                 result = NULL;
250         else
251                 result = ExecutePlan(estate,
252                                                          queryDesc->planstate,
253                                                          operation,
254                                                          count,
255                                                          direction,
256                                                          dest);
257
258         /*
259          * shutdown receiver
260          */
261         (*dest->rShutdown) (dest);
262
263         MemoryContextSwitchTo(oldcontext);
264
265         return result;
266 }
267
268 /* ----------------------------------------------------------------
269  *              ExecutorEnd
270  *
271  *              This routine must be called at the end of execution of any
272  *              query plan
273  * ----------------------------------------------------------------
274  */
275 void
276 ExecutorEnd(QueryDesc *queryDesc)
277 {
278         EState     *estate;
279         MemoryContext oldcontext;
280
281         /* sanity checks */
282         Assert(queryDesc != NULL);
283
284         estate = queryDesc->estate;
285
286         Assert(estate != NULL);
287
288         /*
289          * Switch into per-query memory context to run ExecEndPlan
290          */
291         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
292
293         ExecEndPlan(queryDesc->planstate, estate);
294
295         /*
296          * Must switch out of context before destroying it
297          */
298         MemoryContextSwitchTo(oldcontext);
299
300         /*
301          * Release EState and per-query memory context.  This should release
302          * everything the executor has allocated.
303          */
304         FreeExecutorState(estate);
305
306         /* Reset queryDesc fields that no longer point to anything */
307         queryDesc->tupDesc = NULL;
308         queryDesc->estate = NULL;
309         queryDesc->planstate = NULL;
310 }
311
312 /* ----------------------------------------------------------------
313  *              ExecutorRewind
314  *
315  *              This routine may be called on an open queryDesc to rewind it
316  *              to the start.
317  * ----------------------------------------------------------------
318  */
319 void
320 ExecutorRewind(QueryDesc *queryDesc)
321 {
322         EState     *estate;
323         MemoryContext oldcontext;
324
325         /* sanity checks */
326         Assert(queryDesc != NULL);
327
328         estate = queryDesc->estate;
329
330         Assert(estate != NULL);
331
332         /* It's probably not sensible to rescan updating queries */
333         Assert(queryDesc->operation == CMD_SELECT);
334
335         /*
336          * Switch into per-query memory context
337          */
338         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
339
340         /*
341          * rescan plan
342          */
343         ExecReScan(queryDesc->planstate, NULL);
344
345         MemoryContextSwitchTo(oldcontext);
346 }
347
348
349 /*
350  * ExecCheckRTPerms
351  *              Check access permissions for all relations listed in a range table.
352  */
353 void
354 ExecCheckRTPerms(List *rangeTable)
355 {
356         List       *lp;
357
358         foreach(lp, rangeTable)
359         {
360                 RangeTblEntry *rte = lfirst(lp);
361
362                 ExecCheckRTEPerms(rte);
363         }
364 }
365
366 /*
367  * ExecCheckRTEPerms
368  *              Check access permissions for a single RTE.
369  */
370 static void
371 ExecCheckRTEPerms(RangeTblEntry *rte)
372 {
373         AclMode         requiredPerms;
374         Oid                     relOid;
375         AclId           userid;
376
377         /*
378          * If it's a subquery, recursively examine its rangetable.
379          */
380         if (rte->rtekind == RTE_SUBQUERY)
381         {
382                 ExecCheckRTPerms(rte->subquery->rtable);
383                 return;
384         }
385
386         /*
387          * Otherwise, only plain-relation RTEs need to be checked here.
388          * Function RTEs are checked by init_fcache when the function is
389          * prepared for execution. Join and special RTEs need no checks.
390          */
391         if (rte->rtekind != RTE_RELATION)
392                 return;
393
394         /*
395          * No work if requiredPerms is empty.
396          */
397         requiredPerms = rte->requiredPerms;
398         if (requiredPerms == 0)
399                 return;
400
401         relOid = rte->relid;
402
403         /*
404          * userid to check as: current user unless we have a setuid
405          * indication.
406          *
407          * Note: GetUserId() is presently fast enough that there's no harm in
408          * calling it separately for each RTE.  If that stops being true, we
409          * could call it once in ExecCheckRTPerms and pass the userid down
410          * from there.  But for now, no need for the extra clutter.
411          */
412         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
413
414         /*
415          * For each bit in requiredPerms, apply the required check.  (We can't
416          * do this in one aclcheck call because aclcheck treats multiple bits
417          * as OR semantics, when we want AND.)
418          *
419          * We use a well-known cute trick for isolating the rightmost one-bit
420          * in a nonzero word.  See nodes/bitmapset.c for commentary.
421          */
422 #define RIGHTMOST_ONE(x) ((int32) (x) & -((int32) (x)))
423
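        /*
         * Worked example (purely illustrative): if requiredPerms is binary 0110,
         * then -(0110) is ...11111010 in two's complement, so
         * 0110 & ...1010 = 0010 --- exactly the lowest set bit.  Each pass of
         * the loop below peels off and checks one such permission bit.
         */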
424         while (requiredPerms != 0)
425         {
426                 AclMode         thisPerm;
427                 AclResult       aclcheck_result;
428
429                 thisPerm = RIGHTMOST_ONE(requiredPerms);
430                 requiredPerms &= ~thisPerm;
431
432                 aclcheck_result = pg_class_aclcheck(relOid, userid, thisPerm);
433                 if (aclcheck_result != ACLCHECK_OK)
434                         aclcheck_error(aclcheck_result, ACL_KIND_CLASS,
435                                                    get_rel_name(relOid));
436         }
437 }
438
439 /*
440  * Check that the query does not imply any writes to non-temp tables.
441  */
442 static void
443 ExecCheckXactReadOnly(Query *parsetree)
444 {
445         List       *lp;
446
447         /*
448          * CREATE TABLE AS or SELECT INTO?
449          *
450          * XXX should we allow this if the destination is temp?
451          */
452         if (parsetree->into != NULL)
453                 goto fail;
454
455         /* Fail if write permissions are requested on any non-temp table */
456         foreach(lp, parsetree->rtable)
457         {
458                 RangeTblEntry *rte = lfirst(lp);
459
460                 if (rte->rtekind == RTE_SUBQUERY)
461                 {
462                         ExecCheckXactReadOnly(rte->subquery);
463                         continue;
464                 }
465
466                 if (rte->rtekind != RTE_RELATION)
467                         continue;
468
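                /* only SELECT rights requested on this rel, so it is not a write target */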
469                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
470                         continue;
471
472                 if (isTempNamespace(get_rel_namespace(rte->relid)))
473                         continue;
474
475                 goto fail;
476         }
477
478         return;
479
480 fail:
481         ereport(ERROR,
482                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
483                          errmsg("transaction is read-only")));
484 }
485
486
487 /* ----------------------------------------------------------------
488  *              InitPlan
489  *
490  *              Initializes the query plan: open files, allocate storage
491  *              and start up the rule manager
492  * ----------------------------------------------------------------
493  */
494 static void
495 InitPlan(QueryDesc *queryDesc, bool explainOnly)
496 {
497         CmdType         operation = queryDesc->operation;
498         Query      *parseTree = queryDesc->parsetree;
499         Plan       *plan = queryDesc->plantree;
500         EState     *estate = queryDesc->estate;
501         PlanState  *planstate;
502         List       *rangeTable;
503         Relation        intoRelationDesc;
504         bool            do_select_into;
505         TupleDesc       tupType;
506
507         /*
508          * Do permissions checks.  It's sufficient to examine the query's top
509          * rangetable here --- subplan RTEs will be checked during
510          * ExecInitSubPlan().
511          */
512         ExecCheckRTPerms(parseTree->rtable);
513
514         /*
515          * get information from query descriptor
516          */
517         rangeTable = parseTree->rtable;
518
519         /*
520          * initialize the node's execution state
521          */
522         estate->es_range_table = rangeTable;
523
524         /*
525          * if there is a result relation, initialize result relation stuff
526          */
527         if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
528         {
529                 List       *resultRelations = parseTree->resultRelations;
530                 int                     numResultRelations;
531                 ResultRelInfo *resultRelInfos;
532
533                 if (resultRelations != NIL)
534                 {
535                         /*
536                          * Multiple result relations (due to inheritance)
537                          * parseTree->resultRelations identifies them all
538                          */
539                         ResultRelInfo *resultRelInfo;
540
541                         numResultRelations = length(resultRelations);
542                         resultRelInfos = (ResultRelInfo *)
543                                 palloc(numResultRelations * sizeof(ResultRelInfo));
544                         resultRelInfo = resultRelInfos;
545                         while (resultRelations != NIL)
546                         {
547                                 initResultRelInfo(resultRelInfo,
548                                                                   lfirsti(resultRelations),
549                                                                   rangeTable,
550                                                                   operation);
551                                 resultRelInfo++;
552                                 resultRelations = lnext(resultRelations);
553                         }
554                 }
555                 else
556                 {
557                         /*
558                          * Single result relation identified by
559                          * parseTree->resultRelation
560                          */
561                         numResultRelations = 1;
562                         resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
563                         initResultRelInfo(resultRelInfos,
564                                                           parseTree->resultRelation,
565                                                           rangeTable,
566                                                           operation);
567                 }
568
569                 estate->es_result_relations = resultRelInfos;
570                 estate->es_num_result_relations = numResultRelations;
571                 /* Initialize to first or only result rel */
572                 estate->es_result_relation_info = resultRelInfos;
573         }
574         else
575         {
576                 /*
577                  * if no result relation, then set state appropriately
578                  */
579                 estate->es_result_relations = NULL;
580                 estate->es_num_result_relations = 0;
581                 estate->es_result_relation_info = NULL;
582         }
583
584         /*
585          * Detect whether we're doing SELECT INTO.  If so, set the force_oids
586          * flag appropriately so that the plan tree will be initialized with
587          * the correct tuple descriptors.
588          */
589         do_select_into = false;
590
591         if (operation == CMD_SELECT && parseTree->into != NULL)
592         {
593                 do_select_into = true;
594                 estate->es_select_into = true;
595                 estate->es_into_oids = parseTree->intoHasOids;
596         }
597
598         /*
599          * Have to lock relations selected for update
600          */
601         estate->es_rowMark = NIL;
602         if (parseTree->rowMarks != NIL)
603         {
604                 List       *l;
605
606                 foreach(l, parseTree->rowMarks)
607                 {
608                         Index           rti = lfirsti(l);
609                         Oid                     relid = getrelid(rti, rangeTable);
610                         Relation        relation;
611                         execRowMark *erm;
612
613                         relation = heap_open(relid, RowShareLock);
614                         erm = (execRowMark *) palloc(sizeof(execRowMark));
615                         erm->relation = relation;
616                         erm->rti = rti;
617                         snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
618                         estate->es_rowMark = lappend(estate->es_rowMark, erm);
619                 }
620         }
621
622         /*
623          * initialize the executor "tuple" table.  We need slots for all the
624          * plan nodes, plus possibly output slots for the junkfilter(s). At
625          * this point we aren't sure if we need junkfilters, so just add slots
626          * for them unconditionally.
627          */
628         {
629                 int                     nSlots = ExecCountSlotsNode(plan);
630
631                 if (parseTree->resultRelations != NIL)
632                         nSlots += length(parseTree->resultRelations);
633                 else
634                         nSlots += 1;
635                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
636         }
637
638         /* mark EvalPlanQual not active */
639         estate->es_topPlan = plan;
640         estate->es_evalPlanQual = NULL;
641         estate->es_evTupleNull = NULL;
642         estate->es_evTuple = NULL;
643         estate->es_useEvalPlan = false;
644
645         /*
646          * initialize the private state information for all the nodes in the
647          * query tree.  This opens files, allocates storage and leaves us
648          * ready to start processing tuples.
649          */
650         planstate = ExecInitNode(plan, estate);
651
652         /*
653          * Get the tuple descriptor describing the type of tuples to return.
654          * (this is especially important if we are creating a relation with
655          * "SELECT INTO")
656          */
657         tupType = ExecGetResultType(planstate);
658
659         /*
660          * Initialize the junk filter if needed.  SELECT and INSERT queries
661          * need a filter if there are any junk attrs in the tlist.      INSERT and
662          * SELECT INTO also need a filter if the top plan node is a scan node
663          * that's not doing projection (else we'll be scribbling on the scan
664          * tuple!)      UPDATE and DELETE always need a filter, since there's
665          * always a junk 'ctid' attribute present --- no need to look first.
666          */
667         {
668                 bool            junk_filter_needed = false;
669                 List       *tlist;
670
671                 switch (operation)
672                 {
673                         case CMD_SELECT:
674                         case CMD_INSERT:
675                                 foreach(tlist, plan->targetlist)
676                                 {
677                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
678
679                                         if (tle->resdom->resjunk)
680                                         {
681                                                 junk_filter_needed = true;
682                                                 break;
683                                         }
684                                 }
685                                 if (!junk_filter_needed &&
686                                         (operation == CMD_INSERT || do_select_into))
687                                 {
688                                         if (IsA(planstate, SeqScanState) ||
689                                                 IsA(planstate, IndexScanState) ||
690                                                 IsA(planstate, TidScanState) ||
691                                                 IsA(planstate, SubqueryScanState) ||
692                                                 IsA(planstate, FunctionScanState))
693                                         {
694                                                 if (planstate->ps_ProjInfo == NULL)
695                                                         junk_filter_needed = true;
696                                         }
697                                 }
698                                 break;
699                         case CMD_UPDATE:
700                         case CMD_DELETE:
701                                 junk_filter_needed = true;
702                                 break;
703                         default:
704                                 break;
705                 }
706
707                 if (junk_filter_needed)
708                 {
709                         /*
710                          * If there are multiple result relations, each one needs its
711                          * own junk filter.  Note this is only possible for
712                          * UPDATE/DELETE, so we can't be fooled by some needing a
713                          * filter and some not.
714                          */
715                         if (parseTree->resultRelations != NIL)
716                         {
717                                 PlanState **appendplans;
718                                 int                     as_nplans;
719                                 ResultRelInfo *resultRelInfo;
720                                 int                     i;
721
722                                 /* Top plan had better be an Append here. */
723                                 Assert(IsA(plan, Append));
724                                 Assert(((Append *) plan)->isTarget);
725                                 Assert(IsA(planstate, AppendState));
726                                 appendplans = ((AppendState *) planstate)->appendplans;
727                                 as_nplans = ((AppendState *) planstate)->as_nplans;
728                                 Assert(as_nplans == estate->es_num_result_relations);
729                                 resultRelInfo = estate->es_result_relations;
730                                 for (i = 0; i < as_nplans; i++)
731                                 {
732                                         PlanState  *subplan = appendplans[i];
733                                         JunkFilter *j;
734
735                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
736                                                                                    ExecGetResultType(subplan),
737                                                           ExecAllocTableSlot(estate->es_tupleTable));
738                                         resultRelInfo->ri_junkFilter = j;
739                                         resultRelInfo++;
740                                 }
741
742                                 /*
743                                  * Set active junkfilter too; at this point ExecInitAppend
744                                  * has already selected an active result relation...
745                                  */
746                                 estate->es_junkFilter =
747                                         estate->es_result_relation_info->ri_junkFilter;
748                         }
749                         else
750                         {
751                                 /* Normal case with just one JunkFilter */
752                                 JunkFilter *j;
753
754                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
755                                                                            tupType,
756                                                           ExecAllocTableSlot(estate->es_tupleTable));
757                                 estate->es_junkFilter = j;
758                                 if (estate->es_result_relation_info)
759                                         estate->es_result_relation_info->ri_junkFilter = j;
760
761                                 /* For SELECT, want to return the cleaned tuple type */
762                                 if (operation == CMD_SELECT)
763                                         tupType = j->jf_cleanTupType;
764                         }
765                 }
766                 else
767                         estate->es_junkFilter = NULL;
768         }
769
770         /*
771          * If doing SELECT INTO, initialize the "into" relation.  We must wait
772          * till now so we have the "clean" result tuple type to create the new
773          * table from.
774          *
775          * If EXPLAIN, skip creating the "into" relation.
776          */
777         intoRelationDesc = NULL;
778
779         if (do_select_into && !explainOnly)
780         {
781                 char       *intoName;
782                 Oid                     namespaceId;
783                 AclResult       aclresult;
784                 Oid                     intoRelationId;
785                 TupleDesc       tupdesc;
786
787                 /*
788                  * find namespace to create in, check permissions
789                  */
790                 intoName = parseTree->into->relname;
791                 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
792
793                 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
794                                                                                   ACL_CREATE);
795                 if (aclresult != ACLCHECK_OK)
796                         aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
797                                                    get_namespace_name(namespaceId));
798
799                 /*
800                  * have to copy tupType to get rid of constraints
801                  */
802                 tupdesc = CreateTupleDescCopy(tupType);
803
804                 intoRelationId = heap_create_with_catalog(intoName,
805                                                                                                   namespaceId,
806                                                                                                   tupdesc,
807                                                                                                   RELKIND_RELATION,
808                                                                                                   false,
809                                                                                                   ONCOMMIT_NOOP,
810                                                                                                   allowSystemTableMods);
811
812                 FreeTupleDesc(tupdesc);
813
814                 /*
815                  * Advance command counter so that the newly-created relation's
816                  * catalog tuples will be visible to heap_open.
817                  */
818                 CommandCounterIncrement();
819
820                 /*
821                  * If necessary, create a TOAST table for the into relation. Note
822                  * that AlterTableCreateToastTable ends with
823                  * CommandCounterIncrement(), so that the TOAST table will be
824                  * visible for insertion.
825                  */
826                 AlterTableCreateToastTable(intoRelationId, true);
827
828                 /*
829                  * And open the constructed table for writing.
830                  */
831                 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
832         }
833
834         estate->es_into_relation_descriptor = intoRelationDesc;
835
836         queryDesc->tupDesc = tupType;
837         queryDesc->planstate = planstate;
838 }
839
840 /*
841  * Initialize ResultRelInfo data for one result relation
842  */
843 static void
844 initResultRelInfo(ResultRelInfo *resultRelInfo,
845                                   Index resultRelationIndex,
846                                   List *rangeTable,
847                                   CmdType operation)
848 {
849         Oid                     resultRelationOid;
850         Relation        resultRelationDesc;
851
852         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
853         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
854
855         switch (resultRelationDesc->rd_rel->relkind)
856         {
857                 case RELKIND_SEQUENCE:
858                         ereport(ERROR,
859                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
860                                          errmsg("cannot change sequence \"%s\"",
861                                                   RelationGetRelationName(resultRelationDesc))));
862                         break;
863                 case RELKIND_TOASTVALUE:
864                         ereport(ERROR,
865                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
866                                          errmsg("cannot change TOAST relation \"%s\"",
867                                                   RelationGetRelationName(resultRelationDesc))));
868                         break;
869                 case RELKIND_VIEW:
870                         ereport(ERROR,
871                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
872                                          errmsg("cannot change view \"%s\"",
873                                                   RelationGetRelationName(resultRelationDesc))));
874                         break;
875         }
876
877         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
878         resultRelInfo->type = T_ResultRelInfo;
879         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
880         resultRelInfo->ri_RelationDesc = resultRelationDesc;
881         resultRelInfo->ri_NumIndices = 0;
882         resultRelInfo->ri_IndexRelationDescs = NULL;
883         resultRelInfo->ri_IndexRelationInfo = NULL;
884         /* make a copy so as not to depend on relcache info not changing... */
885         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
886         resultRelInfo->ri_TrigFunctions = NULL;
887         resultRelInfo->ri_ConstraintExprs = NULL;
888         resultRelInfo->ri_junkFilter = NULL;
889
890         /*
891          * If there are indices on the result relation, open them and save
892          * descriptors in the result relation info, so that we can add new
893          * index entries for the tuples we add/update.  We need not do this
894          * for a DELETE, however, since deletion doesn't affect indexes.
895          */
896         if (resultRelationDesc->rd_rel->relhasindex &&
897                 operation != CMD_DELETE)
898                 ExecOpenIndices(resultRelInfo);
899 }
900
901 /*
902  *              ExecContextForcesOids
903  *
904  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
905  * we need to ensure that result tuples have space for an OID iff they are
906  * going to be stored into a relation that has OIDs.  In other contexts
907  * we are free to choose whether to leave space for OIDs in result tuples
908  * (we generally don't want to, but we do if a physical-tlist optimization
909  * is possible).  This routine checks the plan context and returns TRUE if the
910  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
911  * *hasoids is set to the required value.
912  *
913  * One reason this is ugly is that all plan nodes in the plan tree will emit
914  * tuples with space for an OID, though we really only need the topmost node
915  * to do so.  However, node types like Sort don't project new tuples but just
916  * return their inputs, and in those cases the requirement propagates down
917  * to the input node.  Eventually we might make this code smart enough to
918  * recognize how far down the requirement really goes, but for now we just
919  * make all plan nodes do the same thing if the top level forces the choice.
920  *
921  * We assume that estate->es_result_relation_info is already set up to
922  * describe the target relation.  Note that in an UPDATE that spans an
923  * inheritance tree, some of the target relations may have OIDs and some not.
924  * We have to make the decisions on a per-relation basis as we initialize
925  * each of the child plans of the topmost Append plan.
926  *
927  * SELECT INTO is even uglier, because we don't have the INTO relation's
928  * descriptor available when this code runs; we have to look aside at a
929  * flag set by InitPlan().
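 *
 * A hypothetical caller (sketch only; names are illustrative) deciding on a
 * result tuple descriptor might do:
 *
 *              bool    hasoids;
 *
 *              if (!ExecContextForcesOids(planstate, &hasoids))
 *                      hasoids = false;
 *
 * i.e. when the context does not force the choice, the caller may pick freely
 * (and typically picks "no OIDs").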
930  */
931 bool
932 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
933 {
934         if (planstate->state->es_select_into)
935         {
936                 *hasoids = planstate->state->es_into_oids;
937                 return true;
938         }
939         else
940         {
941                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
942
943                 if (ri != NULL)
944                 {
945                         Relation        rel = ri->ri_RelationDesc;
946
947                         if (rel != NULL)
948                         {
949                                 *hasoids = rel->rd_rel->relhasoids;
950                                 return true;
951                         }
952                 }
953         }
954
955         return false;
956 }
957
958 /* ----------------------------------------------------------------
959  *              ExecEndPlan
960  *
961  *              Cleans up the query plan -- closes files and frees up storage
962  *
963  * NOTE: we are no longer very worried about freeing storage per se
964  * in this code; FreeExecutorState should be guaranteed to release all
965  * memory that needs to be released.  What we are worried about doing
966  * is closing relations and dropping buffer pins.  Thus, for example,
967  * tuple tables must be cleared or dropped to ensure pins are released.
968  * ----------------------------------------------------------------
969  */
970 void
971 ExecEndPlan(PlanState *planstate, EState *estate)
972 {
973         ResultRelInfo *resultRelInfo;
974         int                     i;
975         List       *l;
976
977         /*
978          * shut down any PlanQual processing we were doing
979          */
980         if (estate->es_evalPlanQual != NULL)
981                 EndEvalPlanQual(estate);
982
983         /*
984          * shut down the node-type-specific query processing
985          */
986         ExecEndNode(planstate);
987
988         /*
989          * destroy the executor "tuple" table.
990          */
991         ExecDropTupleTable(estate->es_tupleTable, true);
992         estate->es_tupleTable = NULL;
993
994         /*
995          * close the result relation(s) if any, but hold locks until xact
996          * commit.
997          */
998         resultRelInfo = estate->es_result_relations;
999         for (i = estate->es_num_result_relations; i > 0; i--)
1000         {
1001                 /* Close indices and then the relation itself */
1002                 ExecCloseIndices(resultRelInfo);
1003                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1004                 resultRelInfo++;
1005         }
1006
1007         /*
1008          * close the "into" relation if necessary, again keeping lock
1009          */
1010         if (estate->es_into_relation_descriptor != NULL)
1011                 heap_close(estate->es_into_relation_descriptor, NoLock);
1012
1013         /*
1014          * close any relations selected FOR UPDATE, again keeping locks
1015          */
1016         foreach(l, estate->es_rowMark)
1017         {
1018                 execRowMark *erm = lfirst(l);
1019
1020                 heap_close(erm->relation, NoLock);
1021         }
1022 }
1023
1024 /* ----------------------------------------------------------------
1025  *              ExecutePlan
1026  *
1027  *              processes the query plan to retrieve 'numberTuples' tuples in the
1028  *              direction specified.
1029  *
1030  *              Retrieves all tuples if numberTuples is 0
1031  *
1032  *              result is either a slot containing the last tuple in the case
1033  *              of a SELECT or NULL otherwise.
1034  *
1035  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1036  * user can see it
1037  * ----------------------------------------------------------------
1038  */
1039 static TupleTableSlot *
1040 ExecutePlan(EState *estate,
1041                         PlanState *planstate,
1042                         CmdType operation,
1043                         long numberTuples,
1044                         ScanDirection direction,
1045                         DestReceiver *dest)
1046 {
1047         JunkFilter *junkfilter;
1048         TupleTableSlot *slot;
1049         ItemPointer tupleid = NULL;
1050         ItemPointerData tuple_ctid;
1051         long            current_tuple_count;
1052         TupleTableSlot *result;
1053
1054         /*
1055          * initialize local variables
1056          */
1057         slot = NULL;
1058         current_tuple_count = 0;
1059         result = NULL;
1060
1061         /*
1062          * Set the direction.
1063          */
1064         estate->es_direction = direction;
1065
1066         /*
1067          * Process BEFORE EACH STATEMENT triggers
1068          */
1069         switch (operation)
1070         {
1071                 case CMD_UPDATE:
1072                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1073                         break;
1074                 case CMD_DELETE:
1075                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1076                         break;
1077                 case CMD_INSERT:
1078                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1079                         break;
1080                 default:
1081                         /* do nothing */
1082                         break;
1083         }
1084
1085         /*
1086          * Loop until we've processed the proper number of tuples from the
1087          * plan.
1088          */
1089
1090         for (;;)
1091         {
1092                 /* Reset the per-output-tuple exprcontext */
1093                 ResetPerTupleExprContext(estate);
1094
1095                 /*
1096                  * Execute the plan and obtain a tuple
1097                  */
1098 lnext:  ;
1099                 if (estate->es_useEvalPlan)
1100                 {
1101                         slot = EvalPlanQualNext(estate);
1102                         if (TupIsNull(slot))
1103                                 slot = ExecProcNode(planstate);
1104                 }
1105                 else
1106                         slot = ExecProcNode(planstate);
1107
1108                 /*
1109                  * if the tuple is null, then we assume there is nothing more to
1110                  * process so we just return null...
1111                  */
1112                 if (TupIsNull(slot))
1113                 {
1114                         result = NULL;
1115                         break;
1116                 }
1117
1118                 /*
1119                  * if we have a junk filter, then project a new tuple with the
1120                  * junk removed.
1121                  *
1122                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1123                  * (Formerly, we stored it back over the "dirty" tuple, which is
1124                  * WRONG because that tuple slot has the wrong descriptor.)
1125                  *
1126                  * Also, extract all the junk information we need.
1127                  */
1128                 if ((junkfilter = estate->es_junkFilter) != NULL)
1129                 {
1130                         Datum           datum;
1131                         HeapTuple       newTuple;
1132                         bool            isNull;
1133
1134                         /*
1135                          * extract the 'ctid' junk attribute.
1136                          */
1137                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1138                         {
1139                                 if (!ExecGetJunkAttribute(junkfilter,
1140                                                                                   slot,
1141                                                                                   "ctid",
1142                                                                                   &datum,
1143                                                                                   &isNull))
1144                                         elog(ERROR, "could not find junk ctid column");
1145
1146                                 /* shouldn't ever get a null result... */
1147                                 if (isNull)
1148                                         elog(ERROR, "ctid is NULL");
1149
1150                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1151                                 tuple_ctid = *tupleid;  /* make sure we don't free the
1152                                                                                  * ctid!! */
1153                                 tupleid = &tuple_ctid;
1154                         }
1155                         else if (estate->es_rowMark != NIL)
1156                         {
1157                                 List       *l;
1158
1159                 lmark:  ;
1160                                 foreach(l, estate->es_rowMark)
1161                                 {
1162                                         execRowMark *erm = lfirst(l);
1163                                         Buffer          buffer;
1164                                         HeapTupleData tuple;
1165                                         TupleTableSlot *newSlot;
1166                                         int                     test;
1167
1168                                         if (!ExecGetJunkAttribute(junkfilter,
1169                                                                                           slot,
1170                                                                                           erm->resname,
1171                                                                                           &datum,
1172                                                                                           &isNull))
1173                                                 elog(ERROR, "could not find junk \"%s\" column",
1174                                                          erm->resname);
1175
1176                                         /* shouldn't ever get a null result... */
1177                                         if (isNull)
1178                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1179
1180                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1181                                         test = heap_mark4update(erm->relation, &tuple, &buffer,
1182                                                                                         estate->es_snapshot->curcid);
1183                                         ReleaseBuffer(buffer);
1184                                         switch (test)
1185                                         {
1186                                                 case HeapTupleSelfUpdated:
1187                                                         /* treat it as deleted; do not process */
1188                                                         goto lnext;
1189
1190                                                 case HeapTupleMayBeUpdated:
1191                                                         break;
1192
1193                                                 case HeapTupleUpdated:
1194                                                         if (IsXactIsoLevelSerializable)
1195                                                                 ereport(ERROR,
1196                                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1197                                                                                  errmsg("could not serialize access due to concurrent update")));
1198                                                         if (!(ItemPointerEquals(&(tuple.t_self),
1199                                                                   (ItemPointer) DatumGetPointer(datum))))
1200                                                         {
1201                                                                 newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
1202                                                                 if (!(TupIsNull(newSlot)))
1203                                                                 {
1204                                                                         slot = newSlot;
1205                                                                         estate->es_useEvalPlan = true;
1206                                                                         goto lmark;
1207                                                                 }
1208                                                         }
1209
1210                                                         /*
1211                                                          * if tuple was deleted or PlanQual failed for
1212                                                          * updated tuple - we must not return this
1213                                                          * tuple!
1214                                                          */
1215                                                         goto lnext;
1216
1217                                                 default:
1218                                                         elog(ERROR, "unrecognized heap_mark4update status: %u",
1219                                                                  test);
1220                                                         return (NULL);
1221                                         }
1222                                 }
1223                         }
1224
1225                         /*
1226                          * Finally create a new "clean" tuple with all junk attributes
1227                          * removed
1228                          */
1229                         newTuple = ExecRemoveJunk(junkfilter, slot);
1230
1231                         slot = ExecStoreTuple(newTuple,         /* tuple to store */
1232                                                                   junkfilter->jf_resultSlot,    /* dest slot */
1233                                                                   InvalidBuffer,                /* this tuple has no
1234                                                                                                                  * buffer */
1235                                                                   true);                /* tuple should be pfreed */
1236                 }
1237
1238                 /*
1239                  * now that we have a tuple, do the appropriate thing with it..
1240                  * either return it to the user, add it to a relation someplace,
1241                  * delete it from a relation, or modify some of its attributes.
1242                  */
1243                 switch (operation)
1244                 {
1245                         case CMD_SELECT:
1246                                 ExecSelect(slot,        /* slot containing tuple */
1247                                                    dest,        /* destination's tuple-receiver obj */
1248                                                    estate);
1249                                 result = slot;
1250                                 break;
1251
1252                         case CMD_INSERT:
1253                                 ExecInsert(slot, tupleid, estate);
1254                                 result = NULL;
1255                                 break;
1256
1257                         case CMD_DELETE:
1258                                 ExecDelete(slot, tupleid, estate);
1259                                 result = NULL;
1260                                 break;
1261
1262                         case CMD_UPDATE:
1263                                 ExecUpdate(slot, tupleid, estate);
1264                                 result = NULL;
1265                                 break;
1266
1267                         default:
1268                                 elog(ERROR, "unrecognized operation code: %d",
1269                                          (int) operation);
1270                                 result = NULL;
1271                                 break;
1272                 }
1273
1274                 /*
1275                  * check our tuple count.. if we've processed the proper number
1276                  * then quit, else loop again and process more tuples.  Zero
1277                  * numberTuples means no limit.
1278                  */
1279                 current_tuple_count++;
1280                 if (numberTuples && numberTuples == current_tuple_count)
1281                         break;
1282         }
1283
1284         /*
1285          * Process AFTER EACH STATEMENT triggers
1286          */
1287         switch (operation)
1288         {
1289                 case CMD_UPDATE:
1290                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1291                         break;
1292                 case CMD_DELETE:
1293                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1294                         break;
1295                 case CMD_INSERT:
1296                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1297                         break;
1298                 default:
1299                         /* do nothing */
1300                         break;
1301         }
1302
1303         /*
1304          * here, result is either a slot containing a tuple in the case of a
1305          * SELECT or NULL otherwise.
1306          */
1307         return result;
1308 }
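
/*
 * For orientation: ExecutorRun() is the expected caller of ExecutePlan().
 * A rough sketch of that call (argument plumbing simplified, and assuming
 * the usual QueryDesc fields):
 *
 *		result = ExecutePlan(estate,
 *							 queryDesc->planstate,
 *							 operation,
 *							 count,				(0 means "run to completion")
 *							 direction,
 *							 dest);
 */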
1309
1310 /* ----------------------------------------------------------------
1311  *              ExecSelect
1312  *
1313  *              SELECTs are easy.. we just pass the tuple to the appropriate
1314  *              destination receiver.  The only complexity is when we do a
1315  *              "SELECT INTO", in which case we also insert the tuple into
1316  *              the "into" relation.  (Note: that is a newly created relation,
1317  *              so we don't need to worry about indices or locks.)
1318  * ----------------------------------------------------------------
1319  */
1320 static void
1321 ExecSelect(TupleTableSlot *slot,
1322                    DestReceiver *dest,
1323                    EState *estate)
1324 {
1325         HeapTuple       tuple;
1326         TupleDesc       attrtype;
1327
1328         /*
1329          * get the heap tuple out of the tuple table slot
1330          */
1331         tuple = slot->val;
1332         attrtype = slot->ttc_tupleDescriptor;
1333
1334         /*
1335          * insert the tuple into the "into relation"
1336          *
1337          * XXX this probably ought to be replaced by a separate destination
1338          */
1339         if (estate->es_into_relation_descriptor != NULL)
1340         {
1341                 heap_insert(estate->es_into_relation_descriptor, tuple,
1342                                         estate->es_snapshot->curcid);
1343                 IncrAppended();
1344         }
1345
1346         /*
1347          * send the tuple to the destination
1348          */
1349         (*dest->receiveTuple) (tuple, attrtype, dest);
1350         IncrRetrieved();
1351         (estate->es_processed)++;
1352 }
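
/*
 * The destination used above is a DestReceiver whose receiveTuple callback
 * is invoked once per result row.  A minimal custom receiver might look
 * roughly like this (a sketch only; the startup/shutdown callbacks and the
 * receiver struct itself are omitted, and MyCountState is hypothetical):
 *
 *		static void
 *		count_receiveTuple(HeapTuple tuple, TupleDesc typeinfo,
 *						   DestReceiver *self)
 *		{
 *			((MyCountState *) self)->ntuples++;		(just count the rows)
 *		}
 */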
1353
1354 /* ----------------------------------------------------------------
1355  *              ExecInsert
1356  *
1357  *              INSERTs are trickier.. we have to insert the tuple into
1358  *              the base relation and insert appropriate tuples into the
1359  *              index relations.
1360  * ----------------------------------------------------------------
1361  */
1362 static void
1363 ExecInsert(TupleTableSlot *slot,
1364                    ItemPointer tupleid,
1365                    EState *estate)
1366 {
1367         HeapTuple       tuple;
1368         ResultRelInfo *resultRelInfo;
1369         Relation        resultRelationDesc;
1370         int                     numIndices;
1371         Oid                     newId;
1372
1373         /*
1374          * get the heap tuple out of the tuple table slot
1375          */
1376         tuple = slot->val;
1377
1378         /*
1379          * get information on the (current) result relation
1380          */
1381         resultRelInfo = estate->es_result_relation_info;
1382         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1383
1384         /* BEFORE ROW INSERT Triggers */
1385         if (resultRelInfo->ri_TrigDesc &&
1386           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1387         {
1388                 HeapTuple       newtuple;
1389
1390                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1391
1392                 if (newtuple == NULL)   /* "do nothing" */
1393                         return;
1394
1395                 if (newtuple != tuple)  /* modified by Trigger(s) */
1396                 {
1397                         /*
1398                          * Insert modified tuple into tuple table slot, replacing the
1399                          * original.  We assume that it was allocated in per-tuple
1400                          * memory context, and therefore will go away by itself. The
1401                          * tuple table slot should not try to clear it.
1402                          */
1403                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1404                         tuple = newtuple;
1405                 }
1406         }
1407
1408         /*
1409          * Check the constraints of the tuple
1410          */
1411         if (resultRelationDesc->rd_att->constr)
1412                 ExecConstraints(resultRelInfo, slot, estate);
1413
1414         /*
1415          * insert the tuple
1416          */
1417         newId = heap_insert(resultRelationDesc, tuple,
1418                                                 estate->es_snapshot->curcid);
1419
1420         IncrAppended();
1421         (estate->es_processed)++;
1422         estate->es_lastoid = newId;
1423         setLastTid(&(tuple->t_self));
1424
1425         /*
1426          * process indices
1427          *
1428          * Note: heap_insert adds a new tuple to a relation.  As a side effect,
1429          * the tupleid of the new tuple is placed in the new tuple's t_ctid
1430          * field.
1431          */
1432         numIndices = resultRelInfo->ri_NumIndices;
1433         if (numIndices > 0)
1434                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1435
1436         /* AFTER ROW INSERT Triggers */
1437         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1438 }
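
/*
 * For reference, the per-row call order that ExecInsert implements above
 * (a simplified sketch; error paths omitted):
 *
 *		ExecBRInsertTriggers()		BEFORE ROW triggers: may skip or replace
 *		ExecConstraints()			NOT NULL and CHECK constraints
 *		heap_insert()				physical insertion; sets tuple->t_self
 *		ExecInsertIndexTuples()		index maintenance
 *		ExecARInsertTriggers()		AFTER ROW triggers
 */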
1439
1440 /* ----------------------------------------------------------------
1441  *              ExecDelete
1442  *
1443  *              DELETE is like UPDATE, except that we only delete the tuple;
1444  *              index entries are left for VACUUM to clean up.
1445  * ----------------------------------------------------------------
1446  */
1447 static void
1448 ExecDelete(TupleTableSlot *slot,
1449                    ItemPointer tupleid,
1450                    EState *estate)
1451 {
1452         ResultRelInfo *resultRelInfo;
1453         Relation        resultRelationDesc;
1454         ItemPointerData ctid;
1455         int                     result;
1456
1457         /*
1458          * get information on the (current) result relation
1459          */
1460         resultRelInfo = estate->es_result_relation_info;
1461         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1462
1463         /* BEFORE ROW DELETE Triggers */
1464         if (resultRelInfo->ri_TrigDesc &&
1465           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1466         {
1467                 bool            dodelete;
1468
1469                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1470                                                                                 estate->es_snapshot->curcid);
1471
1472                 if (!dodelete)                  /* "do nothing" */
1473                         return;
1474         }
1475
1476         /*
1477          * delete the tuple
1478          */
1479 ldelete:;
1480         result = heap_delete(resultRelationDesc, tupleid,
1481                                                  &ctid,
1482                                                  estate->es_snapshot->curcid,
1483                                                  estate->es_crosscheck_snapshot,
1484                                                  true /* wait for commit */);
1485         switch (result)
1486         {
1487                 case HeapTupleSelfUpdated:
1488                         /* already deleted by self; nothing to do */
1489                         return;
1490
1491                 case HeapTupleMayBeUpdated:
1492                         break;
1493
1494                 case HeapTupleUpdated:
1495                         if (IsXactIsoLevelSerializable)
1496                                 ereport(ERROR,
1497                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1498                                                  errmsg("could not serialize access due to concurrent update")));
1499                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1500                         {
1501                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1502                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1503
1504                                 if (!TupIsNull(epqslot))
1505                                 {
1506                                         *tupleid = ctid;
1507                                         goto ldelete;
1508                                 }
1509                         }
1510                         /* tuple already deleted; nothing to do */
1511                         return;
1512
1513                 default:
1514                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1515                         return;
1516         }
1517
1518         IncrDeleted();
1519         (estate->es_processed)++;
1520
1521         /*
1522          * Note: Normally one would think that we have to delete index tuples
1523          * associated with the heap tuple now..
1524          *
1525          * ... but in POSTGRES, we have no need to do this because the vacuum
1526          * daemon automatically opens an index scan and deletes index tuples
1527          * when it finds deleted heap tuples. -cim 9/27/89
1528          */
1529
1530         /* AFTER ROW DELETE Triggers */
1531         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1532 }
1533
1534 /* ----------------------------------------------------------------
1535  *              ExecUpdate
1536  *
1537  *              note: we can't run UPDATE queries with transactions
1538  *              off, because UPDATEs are actually INSERTs and our
1539  *              scan would mistakenly loop forever, updating the tuple
1540  *              it just inserted.  This should be fixed, but until it
1541  *              is, we don't want to get stuck in an infinite loop
1542  *              that corrupts your database.
1543  * ----------------------------------------------------------------
1544  */
1545 static void
1546 ExecUpdate(TupleTableSlot *slot,
1547                    ItemPointer tupleid,
1548                    EState *estate)
1549 {
1550         HeapTuple       tuple;
1551         ResultRelInfo *resultRelInfo;
1552         Relation        resultRelationDesc;
1553         ItemPointerData ctid;
1554         int                     result;
1555         int                     numIndices;
1556
1557         /*
1558          * abort the operation if not running transactions
1559          */
1560         if (IsBootstrapProcessingMode())
1561                 elog(ERROR, "cannot UPDATE during bootstrap");
1562
1563         /*
1564          * get the heap tuple out of the tuple table slot
1565          */
1566         tuple = slot->val;
1567
1568         /*
1569          * get information on the (current) result relation
1570          */
1571         resultRelInfo = estate->es_result_relation_info;
1572         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1573
1574         /* BEFORE ROW UPDATE Triggers */
1575         if (resultRelInfo->ri_TrigDesc &&
1576           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1577         {
1578                 HeapTuple       newtuple;
1579
1580                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1581                                                                                 tupleid, tuple,
1582                                                                                 estate->es_snapshot->curcid);
1583
1584                 if (newtuple == NULL)   /* "do nothing" */
1585                         return;
1586
1587                 if (newtuple != tuple)  /* modified by Trigger(s) */
1588                 {
1589                         /*
1590                          * Insert modified tuple into tuple table slot, replacing the
1591                          * original.  We assume that it was allocated in per-tuple
1592                          * memory context, and therefore will go away by itself. The
1593                          * tuple table slot should not try to clear it.
1594                          */
1595                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1596                         tuple = newtuple;
1597                 }
1598         }
1599
1600         /*
1601          * Check the constraints of the tuple
1602          *
1603          * If we generate a new candidate tuple after EvalPlanQual testing, we
1604          * must loop back here and recheck constraints.  (We don't need to
1605          * redo triggers, however.      If there are any BEFORE triggers then
1606          * trigger.c will have done mark4update to lock the correct tuple, so
1607          * there's no need to do them again.)
1608          */
1609 lreplace:;
1610         if (resultRelationDesc->rd_att->constr)
1611                 ExecConstraints(resultRelInfo, slot, estate);
1612
1613         /*
1614          * replace the heap tuple
1615          */
1616         result = heap_update(resultRelationDesc, tupleid, tuple,
1617                                                  &ctid,
1618                                                  estate->es_snapshot->curcid,
1619                                                  estate->es_crosscheck_snapshot,
1620                                                  true /* wait for commit */);
1621         switch (result)
1622         {
1623                 case HeapTupleSelfUpdated:
1624                         /* already deleted by self; nothing to do */
1625                         return;
1626
1627                 case HeapTupleMayBeUpdated:
1628                         break;
1629
1630                 case HeapTupleUpdated:
1631                         if (IsXactIsoLevelSerializable)
1632                                 ereport(ERROR,
1633                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1634                                                  errmsg("could not serialize access due to concurrent update")));
1635                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1636                         {
1637                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1638                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1639
1640                                 if (!TupIsNull(epqslot))
1641                                 {
1642                                         *tupleid = ctid;
1643                                         tuple = ExecRemoveJunk(estate->es_junkFilter, epqslot);
1644                                         slot = ExecStoreTuple(tuple,
1645                                                                         estate->es_junkFilter->jf_resultSlot,
1646                                                                                   InvalidBuffer, true);
1647                                         goto lreplace;
1648                                 }
1649                         }
1650                         /* tuple already deleted; nothing to do */
1651                         return;
1652
1653                 default:
1654                         elog(ERROR, "unrecognized heap_update status: %u", result);
1655                         return;
1656         }
1657
1658         IncrReplaced();
1659         (estate->es_processed)++;
1660
1661         /*
1662          * Note: instead of having to update the old index tuples associated
1663          * with the heap tuple, all we do is form and insert new index tuples.
1664          * This is because UPDATEs are actually DELETEs and INSERTs and index
1665          * tuple deletion is done automagically by the vacuum daemon. All we
1666          * do is insert new index tuples.  -cim 9/27/89
1667          */
1668
1669         /*
1670          * process indices
1671          *
1672          * heap_update updates a tuple in the base relation by invalidating it
1673          * and then inserting a new tuple to the relation.      As a side effect,
1674          * the tupleid of the new tuple is placed in the new tuple's t_ctid
1675          * field.  So we now insert index tuples using the new tupleid stored
1676          * there.
1677          */
1678
1679         numIndices = resultRelInfo->ri_NumIndices;
1680         if (numIndices > 0)
1681                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1682
1683         /* AFTER ROW UPDATE Triggers */
1684         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1685 }
1686
1687 static const char *
1688 ExecRelCheck(ResultRelInfo *resultRelInfo,
1689                          TupleTableSlot *slot, EState *estate)
1690 {
1691         Relation        rel = resultRelInfo->ri_RelationDesc;
1692         int                     ncheck = rel->rd_att->constr->num_check;
1693         ConstrCheck *check = rel->rd_att->constr->check;
1694         ExprContext *econtext;
1695         MemoryContext oldContext;
1696         List       *qual;
1697         int                     i;
1698
1699         /*
1700          * If first time through for this result relation, build expression
1701          * nodetrees for rel's constraint expressions.  Keep them in the
1702          * per-query memory context so they'll survive throughout the query.
1703          */
1704         if (resultRelInfo->ri_ConstraintExprs == NULL)
1705         {
1706                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1707                 resultRelInfo->ri_ConstraintExprs =
1708                         (List **) palloc(ncheck * sizeof(List *));
1709                 for (i = 0; i < ncheck; i++)
1710                 {
1711                         /* ExecQual wants implicit-AND form */
1712                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1713                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1714                                 ExecPrepareExpr((Expr *) qual, estate);
1715                 }
1716                 MemoryContextSwitchTo(oldContext);
1717         }
1718
1719         /*
1720          * We will use the EState's per-tuple context for evaluating
1721          * constraint expressions (creating it if it's not already there).
1722          */
1723         econtext = GetPerTupleExprContext(estate);
1724
1725         /* Arrange for econtext's scan tuple to be the tuple under test */
1726         econtext->ecxt_scantuple = slot;
1727
1728         /* And evaluate the constraints */
1729         for (i = 0; i < ncheck; i++)
1730         {
1731                 qual = resultRelInfo->ri_ConstraintExprs[i];
1732
1733                 /*
1734                  * NOTE: SQL92 specifies that a NULL result from a constraint
1735                  * expression is not to be treated as a failure (e.g., a NULL price
1736                  * passes CHECK (price > 0)), so tell ExecQual to return TRUE for NULL.
1737                  */
1738                 if (!ExecQual(qual, econtext, true))
1739                         return check[i].ccname;
1740         }
1741
1742         /* NULL result means no error */
1743         return NULL;
1744 }
1745
1746 void
1747 ExecConstraints(ResultRelInfo *resultRelInfo,
1748                                 TupleTableSlot *slot, EState *estate)
1749 {
1750         Relation        rel = resultRelInfo->ri_RelationDesc;
1751         HeapTuple       tuple = slot->val;
1752         TupleConstr *constr = rel->rd_att->constr;
1753
1754         Assert(constr);
1755
1756         if (constr->has_not_null)
1757         {
1758                 int                     natts = rel->rd_att->natts;
1759                 int                     attrChk;
1760
1761                 for (attrChk = 1; attrChk <= natts; attrChk++)
1762                 {
1763                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1764                                 heap_attisnull(tuple, attrChk))
1765                                 ereport(ERROR,
1766                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1767                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1768                                         NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1769                 }
1770         }
1771
1772         if (constr->num_check > 0)
1773         {
1774                 const char *failed;
1775
1776                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1777                         ereport(ERROR,
1778                                         (errcode(ERRCODE_CHECK_VIOLATION),
1779                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1780                                                         RelationGetRelationName(rel), failed)));
1781         }
1782 }
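
/*
 * As an illustration of the two error paths above (the column, constraint,
 * and relation names are hypothetical):
 *
 *		inserting a NULL into a NOT NULL column "price" yields
 *			ERROR:  null value in column "price" violates not-null constraint
 *		inserting a row that fails CHECK constraint "price_positive" yields
 *			ERROR:  new row for relation "items" violates check constraint "price_positive"
 */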
1783
1784 /*
1785  * Check a modified tuple to see if we want to process its updated version
1786  * under READ COMMITTED rules.
1787  *
1788  * See backend/executor/README for some info about how this works.
1789  */
1790 TupleTableSlot *
1791 EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
1792 {
1793         evalPlanQual *epq;
1794         EState     *epqstate;
1795         Relation        relation;
1796         HeapTupleData tuple;
1797         HeapTuple       copyTuple = NULL;
1798         bool            endNode;
1799
1800         Assert(rti != 0);
1801
1802         /*
1803          * find relation containing target tuple
1804          */
1805         if (estate->es_result_relation_info != NULL &&
1806                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1807                 relation = estate->es_result_relation_info->ri_RelationDesc;
1808         else
1809         {
1810                 List       *l;
1811
1812                 relation = NULL;
1813                 foreach(l, estate->es_rowMark)
1814                 {
1815                         if (((execRowMark *) lfirst(l))->rti == rti)
1816                         {
1817                                 relation = ((execRowMark *) lfirst(l))->relation;
1818                                 break;
1819                         }
1820                 }
1821                 if (relation == NULL)
1822                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1823         }
1824
1825         /*
1826          * fetch tid tuple
1827          *
1828          * Loop here to deal with updated or busy tuples
1829          */
1830         tuple.t_self = *tid;
1831         for (;;)
1832         {
1833                 Buffer          buffer;
1834
1835                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
1836                 {
1837                         TransactionId xwait = SnapshotDirty->xmax;
1838
1839                         /* xmin should not be dirty... */
1840                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1841                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1842
1843                         /*
1844                          * If the tuple is being updated by another transaction, we have
1845                          * to wait for it to commit or abort.
1846                          */
1847                         if (TransactionIdIsValid(xwait))
1848                         {
1849                                 ReleaseBuffer(buffer);
1850                                 XactLockTableWait(xwait);
1851                                 continue;
1852                         }
1853
1854                         /*
1855                          * We got the tuple - now copy it for use by the recheck query.
1856                          */
1857                         copyTuple = heap_copytuple(&tuple);
1858                         ReleaseBuffer(buffer);
1859                         break;
1860                 }
1861
1862                 /*
1863                  * Oops! Invalid tuple.  We have to check whether it was updated
1864                  * or deleted.  Note that SnapshotDirty->tid can be invalid if the
1865                  * tuple was updated by this transaction itself; should we check that?
1866                  */
1867                 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
1868                         !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
1869                 {
1870                         /* updated, so look at the updated copy */
1871                         tuple.t_self = SnapshotDirty->tid;
1872                         continue;
1873                 }
1874
1875                 /*
1876                  * Deleted or updated by this transaction; forget it.
1877                  */
1878                 return NULL;
1879         }
1880
1881         /*
1882          * For UPDATE/DELETE we have to return the tid of the actual row
1883          * we're executing PQ for.
1884          */
1885         *tid = tuple.t_self;
1886
1887         /*
1888          * Need to run a recheck subquery.      Find or create a PQ stack entry.
1889          */
1890         epq = estate->es_evalPlanQual;
1891         endNode = true;
1892
1893         if (epq != NULL && epq->rti == 0)
1894         {
1895                 /* Top PQ stack entry is idle, so re-use it */
1896                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1897                 epq->rti = rti;
1898                 endNode = false;
1899         }
1900
1901         /*
1902          * If this is a request for another RTE, Ra, then we have to check
1903          * whether PlanQual was already requested for Ra.  If so, Ra's row was
1904          * updated again, and we have to restart the old execution for Ra,
1905          * forgetting everything we did after Ra was suspended.
1906          */
1907         if (epq != NULL && epq->rti != rti &&
1908                 epq->estate->es_evTuple[rti - 1] != NULL)
1909         {
1910                 do
1911                 {
1912                         evalPlanQual *oldepq;
1913
1914                         /* stop execution */
1915                         EvalPlanQualStop(epq);
1916                         /* pop previous PlanQual from the stack */
1917                         oldepq = epq->next;
1918                         Assert(oldepq && oldepq->rti != 0);
1919                         /* push current PQ to freePQ stack */
1920                         oldepq->free = epq;
1921                         epq = oldepq;
1922                         estate->es_evalPlanQual = epq;
1923                 } while (epq->rti != rti);
1924         }
1925
1926         /*
1927          * If we are requested for another RTE, we have to suspend execution
1928          * of the current PlanQual and start execution for the new one.
1929          */
1930         if (epq == NULL || epq->rti != rti)
1931         {
1932                 /* try to reuse plan used previously */
1933                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
1934
1935                 if (newepq == NULL)             /* first call or freePQ stack is empty */
1936                 {
1937                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
1938                         newepq->free = NULL;
1939                         newepq->estate = NULL;
1940                         newepq->planstate = NULL;
1941                 }
1942                 else
1943                 {
1944                         /* recycle previously used PlanQual */
1945                         Assert(newepq->estate == NULL);
1946                         epq->free = NULL;
1947                 }
1948                 /* push current PQ to the stack */
1949                 newepq->next = epq;
1950                 epq = newepq;
1951                 estate->es_evalPlanQual = epq;
1952                 epq->rti = rti;
1953                 endNode = false;
1954         }
1955
1956         Assert(epq->rti == rti);
1957
1958         /*
1959          * Ok - we're requested for the same RTE.  Unfortunately we still have
1960          * to end and restart execution of the plan, because ExecReScan
1961          * wouldn't ensure that upper plan nodes would reset themselves.  We
1962          * could make that work if insertion of the target tuple were
1963          * integrated with the Param mechanism somehow, so that the upper plan
1964          * nodes know that their children's outputs have changed.
1965          *
1966          * Note that the stack of free evalPlanQual nodes is quite useless at the
1967          * moment, since it only saves us from pallocing/releasing the
1968          * evalPlanQual nodes themselves.  But it will be useful once we
1969          * implement ReScan instead of end/restart for re-using PlanQual
1970          * nodes.
1971          */
1972         if (endNode)
1973         {
1974                 /* stop execution */
1975                 EvalPlanQualStop(epq);
1976         }
1977
1978         /*
1979          * Initialize new recheck query.
1980          *
1981          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
1982          * instead copy down changeable state from the top plan (including
1983          * es_result_relation_info, es_junkFilter) and reset locally
1984          * changeable state in the epq (including es_param_exec_vals,
1985          * es_evTupleNull).
1986          */
1987         EvalPlanQualStart(epq, estate, epq->next);
1988
1989         /*
1990          * free the old RTE's tuple, if any, and store the target tuple where
1991          * the relation's scan node will see it
1992          */
1993         epqstate = epq->estate;
1994         if (epqstate->es_evTuple[rti - 1] != NULL)
1995                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
1996         epqstate->es_evTuple[rti - 1] = copyTuple;
1997
1998         return EvalPlanQualNext(estate);
1999 }
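
/*
 * Illustrative READ COMMITTED scenario for the function above (a sketch,
 * not a normative description):
 *
 * Another backend updates and commits the row our UPDATE is about to
 * modify, after our snapshot was taken.  heap_update() then returns
 * HeapTupleUpdated along with the ctid of the newer version, and
 * ExecUpdate() calls
 *
 *		epqslot = EvalPlanQual(estate, resultRelInfo->ri_RangeTableIndex, &ctid);
 *
 * EvalPlanQual() chases the t_ctid chain to the committed version, re-runs
 * the plan with that tuple substituted for the target relation's scan, and
 * returns a slot if the new version still satisfies the query's quals
 * (otherwise NULL, and the row is skipped).
 */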
2000
2001 static TupleTableSlot *
2002 EvalPlanQualNext(EState *estate)
2003 {
2004         evalPlanQual *epq = estate->es_evalPlanQual;
2005         MemoryContext oldcontext;
2006         TupleTableSlot *slot;
2007
2008         Assert(epq->rti != 0);
2009
2010 lpqnext:;
2011         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2012         slot = ExecProcNode(epq->planstate);
2013         MemoryContextSwitchTo(oldcontext);
2014
2015         /*
2016          * No more tuples for this PQ. Continue previous one.
2017          */
2018         if (TupIsNull(slot))
2019         {
2020                 evalPlanQual *oldepq;
2021
2022                 /* stop execution */
2023                 EvalPlanQualStop(epq);
2024                 /* pop old PQ from the stack */
2025                 oldepq = epq->next;
2026                 if (oldepq == NULL)
2027                 {
2028                         /* this is the first (oldest) PQ - mark as free */
2029                         epq->rti = 0;
2030                         estate->es_useEvalPlan = false;
2031                         /* and continue Query execution */
2032                         return (NULL);
2033                 }
2034                 Assert(oldepq->rti != 0);
2035                 /* push current PQ to freePQ stack */
2036                 oldepq->free = epq;
2037                 epq = oldepq;
2038                 estate->es_evalPlanQual = epq;
2039                 goto lpqnext;
2040         }
2041
2042         return (slot);
2043 }
2044
2045 static void
2046 EndEvalPlanQual(EState *estate)
2047 {
2048         evalPlanQual *epq = estate->es_evalPlanQual;
2049
2050         if (epq->rti == 0)                      /* plans already shut down */
2051         {
2052                 Assert(epq->next == NULL);
2053                 return;
2054         }
2055
2056         for (;;)
2057         {
2058                 evalPlanQual *oldepq;
2059
2060                 /* stop execution */
2061                 EvalPlanQualStop(epq);
2062                 /* pop old PQ from the stack */
2063                 oldepq = epq->next;
2064                 if (oldepq == NULL)
2065                 {
2066                         /* this is the first (oldest) PQ - mark as free */
2067                         epq->rti = 0;
2068                         estate->es_useEvalPlan = false;
2069                         break;
2070                 }
2071                 Assert(oldepq->rti != 0);
2072                 /* push current PQ to freePQ stack */
2073                 oldepq->free = epq;
2074                 epq = oldepq;
2075                 estate->es_evalPlanQual = epq;
2076         }
2077 }
2078
2079 /*
2080  * Start execution of one level of PlanQual.
2081  *
2082  * This is a cut-down version of ExecutorStart(): we copy some state from
2083  * the top-level estate rather than initializing it fresh.
2084  */
2085 static void
2086 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2087 {
2088         EState     *epqstate;
2089         int                     rtsize;
2090         MemoryContext oldcontext;
2091
2092         rtsize = length(estate->es_range_table);
2093
2094         epq->estate = epqstate = CreateExecutorState();
2095
2096         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2097
2098         /*
2099          * The epqstates share the top query's copy of unchanging state such
2100          * as the snapshot, rangetable, result-rel info, and external Param
2101          * info. They need their own copies of local state, including a tuple
2102          * table, es_param_exec_vals, etc.
2103          */
2104         epqstate->es_direction = ForwardScanDirection;
2105         epqstate->es_snapshot = estate->es_snapshot;
2106         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2107         epqstate->es_range_table = estate->es_range_table;
2108         epqstate->es_result_relations = estate->es_result_relations;
2109         epqstate->es_num_result_relations = estate->es_num_result_relations;
2110         epqstate->es_result_relation_info = estate->es_result_relation_info;
2111         epqstate->es_junkFilter = estate->es_junkFilter;
2112         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2113         epqstate->es_param_list_info = estate->es_param_list_info;
2114         if (estate->es_topPlan->nParamExec > 0)
2115                 epqstate->es_param_exec_vals = (ParamExecData *)
2116                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2117         epqstate->es_rowMark = estate->es_rowMark;
2118         epqstate->es_instrument = estate->es_instrument;
2119         epqstate->es_select_into = estate->es_select_into;
2120         epqstate->es_into_oids = estate->es_into_oids;
2121         epqstate->es_topPlan = estate->es_topPlan;
2122
2123         /*
2124          * Each epqstate must have its own es_evTupleNull state, but all the
2125          * stack entries share es_evTuple state.  This allows sub-rechecks to
2126          * inherit the value being examined by an outer recheck.
2127          */
2128         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2129         if (priorepq == NULL)
2130                 /* first PQ stack entry */
2131                 epqstate->es_evTuple = (HeapTuple *)
2132                         palloc0(rtsize * sizeof(HeapTuple));
2133         else
2134                 /* later stack entries share the same storage */
2135                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2136
2137         epqstate->es_tupleTable =
2138                 ExecCreateTupleTable(estate->es_tupleTable->size);
2139
2140         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2141
2142         MemoryContextSwitchTo(oldcontext);
2143 }
2144
2145 /*
2146  * End execution of one level of PlanQual.
2147  *
2148  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2149  * of the normal cleanup, but *not* close result relations (which we are
2150  * just sharing from the outer query).
2151  */
2152 static void
2153 EvalPlanQualStop(evalPlanQual *epq)
2154 {
2155         EState     *epqstate = epq->estate;
2156         MemoryContext oldcontext;
2157
2158         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2159
2160         ExecEndNode(epq->planstate);
2161
2162         ExecDropTupleTable(epqstate->es_tupleTable, true);
2163         epqstate->es_tupleTable = NULL;
2164
2165         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2166         {
2167                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2168                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2169         }
2170
2171         MemoryContextSwitchTo(oldcontext);
2172
2173         FreeExecutorState(epqstate);
2174
2175         epq->estate = NULL;
2176         epq->planstate = NULL;
2177 }
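
/*
 * Summary sketch of how the PlanQual routines above fit together (a
 * simplification; the stack and free-list handling live in EvalPlanQual):
 *
 *		EvalPlanQual()			fetch the updated tuple, push or reuse a
 *								stack entry, then:
 *		  EvalPlanQualStart()	build a private EState and init the plan tree
 *		  EvalPlanQualNext()	pull rechecked tuples via ExecProcNode()
 *		  EvalPlanQualStop()	shut down the plan and free the private EState
 *		EndEvalPlanQual()		unwind any still-active entries at query end
 */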