1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.263 2006/01/07 22:30:43 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
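/*
 * A minimal sketch of how a caller might drive the three interface routines
 * described above, assuming a QueryDesc already built elsewhere (e.g. by
 * CreateQueryDesc).  "run_to_completion" is a hypothetical helper, not part
 * of this file; real callers add their own bookkeeping around these calls.
 */
#ifdef NOT_USED
static void
run_to_completion(QueryDesc *queryDesc)
{
	/* set up per-query state; false => we really intend to run the plan */
	ExecutorStart(queryDesc, false);

	/* count = 0 means "no limit", so this runs the plan to completion */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* release executor resources; locks are held until transaction end */
	ExecutorEnd(queryDesc);
}
#endif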
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/xlog.h"
37 #include "catalog/heap.h"
38 #include "catalog/namespace.h"
39 #include "commands/tablecmds.h"
40 #include "commands/trigger.h"
41 #include "executor/execdebug.h"
42 #include "executor/execdefs.h"
43 #include "executor/instrument.h"
44 #include "miscadmin.h"
45 #include "optimizer/clauses.h"
46 #include "optimizer/var.h"
47 #include "parser/parsetree.h"
48 #include "storage/smgr.h"
49 #include "utils/acl.h"
50 #include "utils/guc.h"
51 #include "utils/lsyscache.h"
52 #include "utils/memutils.h"
53
54
55 typedef struct evalPlanQual
56 {
57         Index           rti;
58         EState     *estate;
59         PlanState  *planstate;
60         struct evalPlanQual *next;      /* stack of active PlanQual plans */
61         struct evalPlanQual *free;      /* list of free PlanQual plans */
62 } evalPlanQual;
63
64 /* decls for local routines only used within this module */
65 static void InitPlan(QueryDesc *queryDesc, bool explainOnly);
66 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
67                                   Index resultRelationIndex,
68                                   List *rangeTable,
69                                   CmdType operation,
70                                   bool doInstrument);
71 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
72                         CmdType operation,
73                         long numberTuples,
74                         ScanDirection direction,
75                         DestReceiver *dest);
76 static void ExecSelect(TupleTableSlot *slot,
77                    DestReceiver *dest,
78                    EState *estate);
79 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
80                    EState *estate);
81 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
82                    EState *estate);
83 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
84                    EState *estate);
85 static TupleTableSlot *EvalPlanQualNext(EState *estate);
86 static void EndEvalPlanQual(EState *estate);
87 static void ExecCheckRTEPerms(RangeTblEntry *rte);
88 static void ExecCheckXactReadOnly(Query *parsetree);
89 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
90                                   evalPlanQual *priorepq);
91 static void EvalPlanQualStop(evalPlanQual *epq);
92
93 /* end of local decls */
94
95
96 /* ----------------------------------------------------------------
97  *              ExecutorStart
98  *
99  *              This routine must be called at the beginning of any execution of any
100  *              query plan
101  *
102  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
103  * clear why we bother to separate the two functions, but...).  The tupDesc
104  * field of the QueryDesc is filled in to describe the tuples that will be
105  * returned, and the internal fields (estate and planstate) are set up.
106  *
107  * If explainOnly is true, we are not actually intending to run the plan,
108  * only to set up for EXPLAIN; so skip unwanted side-effects.
109  *
110  * NB: the CurrentMemoryContext when this is called will become the parent
111  * of the per-query context used for this Executor invocation.
112  * ----------------------------------------------------------------
113  */
114 void
115 ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
116 {
117         EState     *estate;
118         MemoryContext oldcontext;
119
120         /* sanity checks: queryDesc must not be started already */
121         Assert(queryDesc != NULL);
122         Assert(queryDesc->estate == NULL);
123
124         /*
125          * If the transaction is read-only, we need to check if any writes are
126          * planned to non-temporary tables.
127          */
128         if (XactReadOnly && !explainOnly)
129                 ExecCheckXactReadOnly(queryDesc->parsetree);
130
131         /*
132          * Build EState, switch into per-query memory context for startup.
133          */
134         estate = CreateExecutorState();
135         queryDesc->estate = estate;
136
137         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
138
139         /*
140          * Fill in parameters, if any, from queryDesc
141          */
142         estate->es_param_list_info = queryDesc->params;
143
144         if (queryDesc->plantree->nParamExec > 0)
145                 estate->es_param_exec_vals = (ParamExecData *)
146                         palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
147
148         /*
149          * Copy other important information into the EState
150          */
151         estate->es_snapshot = queryDesc->snapshot;
152         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
153         estate->es_instrument = queryDesc->doInstrument;
154
155         /*
156          * Initialize the plan state tree
157          */
158         InitPlan(queryDesc, explainOnly);
159
160         MemoryContextSwitchTo(oldcontext);
161 }
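/*
 * Sketch of the "NB" above: whatever memory context is current when
 * ExecutorStart() is called becomes the parent of the per-query context,
 * so a caller that wants the executor state to live as long as some
 * longer-lived context switches into that context first.  "portalContext"
 * is a hypothetical caller-owned context, shown for illustration only.
 */
#ifdef NOT_USED
static void
start_in_caller_context(QueryDesc *queryDesc, MemoryContext portalContext)
{
	MemoryContext oldcontext;

	oldcontext = MemoryContextSwitchTo(portalContext);
	ExecutorStart(queryDesc, false);
	MemoryContextSwitchTo(oldcontext);
}
#endif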
162
163 /* ----------------------------------------------------------------
164  *              ExecutorRun
165  *
166  *              This is the main routine of the executor module. It accepts
167  *              the query descriptor from the traffic cop and executes the
168  *              query plan.
169  *
170  *              ExecutorStart must have been called already.
171  *
172  *              If direction is NoMovementScanDirection then nothing is done
173  *              except to start up/shut down the destination.  Otherwise,
174  *              we retrieve up to 'count' tuples in the specified direction.
175  *
176  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
177  *              completion.
178  *
179  * ----------------------------------------------------------------
180  */
181 TupleTableSlot *
182 ExecutorRun(QueryDesc *queryDesc,
183                         ScanDirection direction, long count)
184 {
185         EState     *estate;
186         CmdType         operation;
187         DestReceiver *dest;
188         TupleTableSlot *result;
189         MemoryContext oldcontext;
190
191         /* sanity checks */
192         Assert(queryDesc != NULL);
193
194         estate = queryDesc->estate;
195
196         Assert(estate != NULL);
197
198         /*
199          * Switch into per-query memory context
200          */
201         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
202
203         /*
204          * extract information from the query descriptor.
205          */
206         operation = queryDesc->operation;
207         dest = queryDesc->dest;
208
209         /*
210          * startup tuple receiver
211          */
212         estate->es_processed = 0;
213         estate->es_lastoid = InvalidOid;
214
215         (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
216
217         /*
218          * run plan
219          */
220         if (direction == NoMovementScanDirection)
221                 result = NULL;
222         else
223                 result = ExecutePlan(estate,
224                                                          queryDesc->planstate,
225                                                          operation,
226                                                          count,
227                                                          direction,
228                                                          dest);
229
230         /*
231          * shutdown receiver
232          */
233         (*dest->rShutdown) (dest);
234
235         MemoryContextSwitchTo(oldcontext);
236
237         return result;
238 }
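/*
 * Sketch of ExecutorRun's count semantics: fetch at most 100 tuples per
 * call until the plan is exhausted.  The DestReceiver attached to the
 * QueryDesc receives the tuples; this loop only decides when to stop.
 * "fetch_in_batches" is a hypothetical helper used purely for illustration.
 */
#ifdef NOT_USED
static void
fetch_in_batches(QueryDesc *queryDesc)
{
	for (;;)
	{
		TupleTableSlot *slot;

		/* returns the last tuple's slot, or NULL once the plan is done */
		slot = ExecutorRun(queryDesc, ForwardScanDirection, 100L);
		if (TupIsNull(slot))
			break;
	}
}
#endif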
239
240 /* ----------------------------------------------------------------
241  *              ExecutorEnd
242  *
243  *              This routine must be called at the end of execution of any
244  *              query plan
245  * ----------------------------------------------------------------
246  */
247 void
248 ExecutorEnd(QueryDesc *queryDesc)
249 {
250         EState     *estate;
251         MemoryContext oldcontext;
252
253         /* sanity checks */
254         Assert(queryDesc != NULL);
255
256         estate = queryDesc->estate;
257
258         Assert(estate != NULL);
259
260         /*
261          * Switch into per-query memory context to run ExecEndPlan
262          */
263         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
264
265         ExecEndPlan(queryDesc->planstate, estate);
266
267         /*
268          * Must switch out of context before destroying it
269          */
270         MemoryContextSwitchTo(oldcontext);
271
272         /*
273          * Release EState and per-query memory context.  This should release
274          * everything the executor has allocated.
275          */
276         FreeExecutorState(estate);
277
278         /* Reset queryDesc fields that no longer point to anything */
279         queryDesc->tupDesc = NULL;
280         queryDesc->estate = NULL;
281         queryDesc->planstate = NULL;
282 }
283
284 /* ----------------------------------------------------------------
285  *              ExecutorRewind
286  *
287  *              This routine may be called on an open queryDesc to rewind it
288  *              to the start.
289  * ----------------------------------------------------------------
290  */
291 void
292 ExecutorRewind(QueryDesc *queryDesc)
293 {
294         EState     *estate;
295         MemoryContext oldcontext;
296
297         /* sanity checks */
298         Assert(queryDesc != NULL);
299
300         estate = queryDesc->estate;
301
302         Assert(estate != NULL);
303
304         /* It's probably not sensible to rescan updating queries */
305         Assert(queryDesc->operation == CMD_SELECT);
306
307         /*
308          * Switch into per-query memory context
309          */
310         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
311
312         /*
313          * rescan plan
314          */
315         ExecReScan(queryDesc->planstate, NULL);
316
317         MemoryContextSwitchTo(oldcontext);
318 }
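/*
 * Sketch of rewinding: read a SELECT plan to completion twice by rewinding
 * between two ExecutorRun() passes.  Assumes the queryDesc was started with
 * ExecutorStart() and describes a SELECT; "scan_twice" is a hypothetical
 * helper for illustration only.
 */
#ifdef NOT_USED
static void
scan_twice(QueryDesc *queryDesc)
{
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* first pass */
	ExecutorRewind(queryDesc);									/* back to the start */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* second pass */
}
#endif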
319
320
321 /*
322  * ExecCheckRTPerms
323  *              Check access permissions for all relations listed in a range table.
324  */
325 void
326 ExecCheckRTPerms(List *rangeTable)
327 {
328         ListCell   *l;
329
330         foreach(l, rangeTable)
331         {
332                 RangeTblEntry *rte = lfirst(l);
333
334                 ExecCheckRTEPerms(rte);
335         }
336 }
337
338 /*
339  * ExecCheckRTEPerms
340  *              Check access permissions for a single RTE.
341  */
342 static void
343 ExecCheckRTEPerms(RangeTblEntry *rte)
344 {
345         AclMode         requiredPerms;
346         Oid                     relOid;
347         Oid                     userid;
348
349         /*
350          * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
351          * checked by ExecInitSubqueryScan if the subquery is still a separate
352          * subquery --- if it's been pulled up into our query level then the RTEs
353          * are in our rangetable and will be checked here. Function RTEs are
354          * checked by init_fcache when the function is prepared for execution.
355          * Join and special RTEs need no checks.
356          */
357         if (rte->rtekind != RTE_RELATION)
358                 return;
359
360         /*
361          * No work if requiredPerms is empty.
362          */
363         requiredPerms = rte->requiredPerms;
364         if (requiredPerms == 0)
365                 return;
366
367         relOid = rte->relid;
368
369         /*
370          * userid to check as: current user unless we have a setuid indication.
371          *
372          * Note: GetUserId() is presently fast enough that there's no harm in
373          * calling it separately for each RTE.  If that stops being true, we could
374          * call it once in ExecCheckRTPerms and pass the userid down from there.
375          * But for now, no need for the extra clutter.
376          */
377         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
378
379         /*
380          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
381          */
382         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
383                 != requiredPerms)
384                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
385                                            get_rel_name(relOid));
386 }
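/*
 * Sketch of the "all bits" test used just above: pg_class_aclmask() returns
 * the subset of the requested privilege bits that the user actually holds,
 * so comparing the result against requiredPerms (rather than checking one
 * privilege at a time) verifies that every needed bit is present.  The
 * helper and the ACL_SELECT | ACL_UPDATE combination are hypothetical.
 */
#ifdef NOT_USED
static bool
user_has_all_perms(Oid relOid, Oid userid)
{
	AclMode		requiredPerms = ACL_SELECT | ACL_UPDATE;

	return pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
		== requiredPerms;
}
#endif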
387
388 /*
389  * Check that the query does not imply any writes to non-temp tables.
390  */
391 static void
392 ExecCheckXactReadOnly(Query *parsetree)
393 {
394         ListCell   *l;
395
396         /*
397          * CREATE TABLE AS or SELECT INTO?
398          *
399          * XXX should we allow this if the destination is temp?
400          */
401         if (parsetree->into != NULL)
402                 goto fail;
403
404         /* Fail if write permissions are requested on any non-temp table */
405         foreach(l, parsetree->rtable)
406         {
407                 RangeTblEntry *rte = lfirst(l);
408
409                 if (rte->rtekind == RTE_SUBQUERY)
410                 {
411                         ExecCheckXactReadOnly(rte->subquery);
412                         continue;
413                 }
414
415                 if (rte->rtekind != RTE_RELATION)
416                         continue;
417
418                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
419                         continue;
420
421                 if (isTempNamespace(get_rel_namespace(rte->relid)))
422                         continue;
423
424                 goto fail;
425         }
426
427         return;
428
429 fail:
430         ereport(ERROR,
431                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
432                          errmsg("transaction is read-only")));
433 }
434
435
436 /* ----------------------------------------------------------------
437  *              InitPlan
438  *
439  *              Initializes the query plan: open files, allocate storage
440  *              and start up the rule manager
441  * ----------------------------------------------------------------
442  */
443 static void
444 InitPlan(QueryDesc *queryDesc, bool explainOnly)
445 {
446         CmdType         operation = queryDesc->operation;
447         Query      *parseTree = queryDesc->parsetree;
448         Plan       *plan = queryDesc->plantree;
449         EState     *estate = queryDesc->estate;
450         PlanState  *planstate;
451         List       *rangeTable;
452         Relation        intoRelationDesc;
453         bool            do_select_into;
454         TupleDesc       tupType;
455
456         /*
457          * Do permissions checks.  It's sufficient to examine the query's top
458          * rangetable here --- subplan RTEs will be checked during
459          * ExecInitSubPlan().
460          */
461         ExecCheckRTPerms(parseTree->rtable);
462
463         /*
464          * get information from query descriptor
465          */
466         rangeTable = parseTree->rtable;
467
468         /*
469          * initialize the node's execution state
470          */
471         estate->es_range_table = rangeTable;
472
473         /*
474          * if there is a result relation, initialize result relation stuff
475          */
476         if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
477         {
478                 List       *resultRelations = parseTree->resultRelations;
479                 int                     numResultRelations;
480                 ResultRelInfo *resultRelInfos;
481
482                 if (resultRelations != NIL)
483                 {
484                         /*
485                          * Multiple result relations (due to inheritance);
486                          * parseTree->resultRelations identifies them all.
487                          */
488                         ResultRelInfo *resultRelInfo;
489                         ListCell   *l;
490
491                         numResultRelations = list_length(resultRelations);
492                         resultRelInfos = (ResultRelInfo *)
493                                 palloc(numResultRelations * sizeof(ResultRelInfo));
494                         resultRelInfo = resultRelInfos;
495                         foreach(l, resultRelations)
496                         {
497                                 initResultRelInfo(resultRelInfo,
498                                                                   lfirst_int(l),
499                                                                   rangeTable,
500                                                                   operation,
501                                                                   estate->es_instrument);
502                                 resultRelInfo++;
503                         }
504                 }
505                 else
506                 {
507                         /*
508                          * Single result relation identified by parseTree->resultRelation
509                          */
510                         numResultRelations = 1;
511                         resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
512                         initResultRelInfo(resultRelInfos,
513                                                           parseTree->resultRelation,
514                                                           rangeTable,
515                                                           operation,
516                                                           estate->es_instrument);
517                 }
518
519                 estate->es_result_relations = resultRelInfos;
520                 estate->es_num_result_relations = numResultRelations;
521                 /* Initialize to first or only result rel */
522                 estate->es_result_relation_info = resultRelInfos;
523         }
524         else
525         {
526                 /*
527                  * if no result relation, then set state appropriately
528                  */
529                 estate->es_result_relations = NULL;
530                 estate->es_num_result_relations = 0;
531                 estate->es_result_relation_info = NULL;
532         }
533
534         /*
535          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
536          * flag appropriately so that the plan tree will be initialized with the
537          * correct tuple descriptors.
538          */
539         do_select_into = false;
540
541         if (operation == CMD_SELECT && parseTree->into != NULL)
542         {
543                 do_select_into = true;
544                 estate->es_select_into = true;
545                 estate->es_into_oids = parseTree->intoHasOids;
546         }
547
548         /*
549          * Have to lock relations selected FOR UPDATE/FOR SHARE
550          */
551         estate->es_rowMarks = NIL;
552         estate->es_forUpdate = parseTree->forUpdate;
553         estate->es_rowNoWait = parseTree->rowNoWait;
554         if (parseTree->rowMarks != NIL)
555         {
556                 ListCell   *l;
557
558                 foreach(l, parseTree->rowMarks)
559                 {
560                         Index           rti = lfirst_int(l);
561                         Oid                     relid = getrelid(rti, rangeTable);
562                         Relation        relation;
563                         ExecRowMark *erm;
564
565                         relation = heap_open(relid, RowShareLock);
566                         erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
567                         erm->relation = relation;
568                         erm->rti = rti;
569                         snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
570                         estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
571                 }
572         }
573
574         /*
575          * initialize the executor "tuple" table.  We need slots for all the plan
576          * nodes, plus possibly output slots for the junkfilter(s). At this point
577          * we aren't sure if we need junkfilters, so just add slots for them
578          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
579          * trigger output tuples.
580          */
581         {
582                 int                     nSlots = ExecCountSlotsNode(plan);
583
584                 if (parseTree->resultRelations != NIL)
585                         nSlots += list_length(parseTree->resultRelations);
586                 else
587                         nSlots += 1;
588                 if (operation != CMD_SELECT)
589                         nSlots++;
590
591                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
592
593                 if (operation != CMD_SELECT)
594                         estate->es_trig_tuple_slot =
595                                 ExecAllocTableSlot(estate->es_tupleTable);
596         }
597
598         /* mark EvalPlanQual not active */
599         estate->es_topPlan = plan;
600         estate->es_evalPlanQual = NULL;
601         estate->es_evTupleNull = NULL;
602         estate->es_evTuple = NULL;
603         estate->es_useEvalPlan = false;
604
605         /*
606          * initialize the private state information for all the nodes in the query
607          * tree.  This opens files, allocates storage and leaves us ready to start
608          * processing tuples.
609          */
610         planstate = ExecInitNode(plan, estate);
611
612         /*
613          * Get the tuple descriptor describing the type of tuples to return. (this
614          * is especially important if we are creating a relation with "SELECT
615          * INTO")
616          */
617         tupType = ExecGetResultType(planstate);
618
619         /*
620          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
621          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
622          * INTO also need a filter if the plan may return raw disk tuples (else
623          * heap_insert will be scribbling on the source relation!). UPDATE and
624          * DELETE always need a filter, since there's always a junk 'ctid'
625          * attribute present --- no need to look first.
626          */
627         {
628                 bool            junk_filter_needed = false;
629                 ListCell   *tlist;
630
631                 switch (operation)
632                 {
633                         case CMD_SELECT:
634                         case CMD_INSERT:
635                                 foreach(tlist, plan->targetlist)
636                                 {
637                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
638
639                                         if (tle->resjunk)
640                                         {
641                                                 junk_filter_needed = true;
642                                                 break;
643                                         }
644                                 }
645                                 if (!junk_filter_needed &&
646                                         (operation == CMD_INSERT || do_select_into) &&
647                                         ExecMayReturnRawTuples(planstate))
648                                         junk_filter_needed = true;
649                                 break;
650                         case CMD_UPDATE:
651                         case CMD_DELETE:
652                                 junk_filter_needed = true;
653                                 break;
654                         default:
655                                 break;
656                 }
657
658                 if (junk_filter_needed)
659                 {
660                         /*
661                          * If there are multiple result relations, each one needs its own
662                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
663                          * we can't be fooled by some needing a filter and some not.
664                          */
665                         if (parseTree->resultRelations != NIL)
666                         {
667                                 PlanState **appendplans;
668                                 int                     as_nplans;
669                                 ResultRelInfo *resultRelInfo;
670                                 int                     i;
671
672                                 /* Top plan had better be an Append here. */
673                                 Assert(IsA(plan, Append));
674                                 Assert(((Append *) plan)->isTarget);
675                                 Assert(IsA(planstate, AppendState));
676                                 appendplans = ((AppendState *) planstate)->appendplans;
677                                 as_nplans = ((AppendState *) planstate)->as_nplans;
678                                 Assert(as_nplans == estate->es_num_result_relations);
679                                 resultRelInfo = estate->es_result_relations;
680                                 for (i = 0; i < as_nplans; i++)
681                                 {
682                                         PlanState  *subplan = appendplans[i];
683                                         JunkFilter *j;
684
685                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
686                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
687                                                                   ExecAllocTableSlot(estate->es_tupleTable));
688                                         resultRelInfo->ri_junkFilter = j;
689                                         resultRelInfo++;
690                                 }
691
692                                 /*
693                                  * Set active junkfilter too; at this point ExecInitAppend has
694                                  * already selected an active result relation...
695                                  */
696                                 estate->es_junkFilter =
697                                         estate->es_result_relation_info->ri_junkFilter;
698                         }
699                         else
700                         {
701                                 /* Normal case with just one JunkFilter */
702                                 JunkFilter *j;
703
704                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
705                                                                            tupType->tdhasoid,
706                                                                   ExecAllocTableSlot(estate->es_tupleTable));
707                                 estate->es_junkFilter = j;
708                                 if (estate->es_result_relation_info)
709                                         estate->es_result_relation_info->ri_junkFilter = j;
710
711                                 /* For SELECT, want to return the cleaned tuple type */
712                                 if (operation == CMD_SELECT)
713                                         tupType = j->jf_cleanTupType;
714                         }
715                 }
716                 else
717                         estate->es_junkFilter = NULL;
718         }
719
720         /*
721          * If doing SELECT INTO, initialize the "into" relation.  We must wait
722          * till now so we have the "clean" result tuple type to create the new
723          * table from.
724          *
725          * If EXPLAIN, skip creating the "into" relation.
726          */
727         intoRelationDesc = NULL;
728
729         if (do_select_into && !explainOnly)
730         {
731                 char       *intoName;
732                 Oid                     namespaceId;
733                 AclResult       aclresult;
734                 Oid                     intoRelationId;
735                 TupleDesc       tupdesc;
736
737                 /*
738                  * find namespace to create in, check permissions
739                  */
740                 intoName = parseTree->into->relname;
741                 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
742
743                 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
744                                                                                   ACL_CREATE);
745                 if (aclresult != ACLCHECK_OK)
746                         aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
747                                                    get_namespace_name(namespaceId));
748
749                 /*
750                  * have to copy tupType to get rid of constraints
751                  */
752                 tupdesc = CreateTupleDescCopy(tupType);
753
754                 intoRelationId = heap_create_with_catalog(intoName,
755                                                                                                   namespaceId,
756                                                                                                   InvalidOid,
757                                                                                                   InvalidOid,
758                                                                                                   GetUserId(),
759                                                                                                   tupdesc,
760                                                                                                   RELKIND_RELATION,
761                                                                                                   false,
762                                                                                                   true,
763                                                                                                   0,
764                                                                                                   ONCOMMIT_NOOP,
765                                                                                                   allowSystemTableMods);
766
767                 FreeTupleDesc(tupdesc);
768
769                 /*
770                  * Advance command counter so that the newly-created relation's
771                  * catalog tuples will be visible to heap_open.
772                  */
773                 CommandCounterIncrement();
774
775                 /*
776                  * If necessary, create a TOAST table for the into relation. Note that
777                  * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
778                  * that the TOAST table will be visible for insertion.
779                  */
780                 AlterTableCreateToastTable(intoRelationId, true);
781
782                 /*
783                  * And open the constructed table for writing.
784                  */
785                 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
786
787                 /* use_wal off requires rd_targblock be initially invalid */
788                 Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
789
790                 /*
791                  * We can skip WAL-logging the insertions, unless PITR is in use.
792                  *
793                  * Note that for a non-temp INTO table, this is safe only because we
794                  * know that the catalog changes above will have been WAL-logged, and
795                  * so RecordTransactionCommit will think it needs to WAL-log the
796                  * eventual transaction commit.  Else the commit might be lost, even
797                  * though all the data is safely fsync'd ...
798                  */
799                 estate->es_into_relation_use_wal = XLogArchivingActive();
800         }
801
802         estate->es_into_relation_descriptor = intoRelationDesc;
803
804         queryDesc->tupDesc = tupType;
805         queryDesc->planstate = planstate;
806 }
807
808 /*
809  * Initialize ResultRelInfo data for one result relation
810  */
811 static void
812 initResultRelInfo(ResultRelInfo *resultRelInfo,
813                                   Index resultRelationIndex,
814                                   List *rangeTable,
815                                   CmdType operation,
816                                   bool doInstrument)
817 {
818         Oid                     resultRelationOid;
819         Relation        resultRelationDesc;
820
821         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
822         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
823
824         switch (resultRelationDesc->rd_rel->relkind)
825         {
826                 case RELKIND_SEQUENCE:
827                         ereport(ERROR,
828                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
829                                          errmsg("cannot change sequence \"%s\"",
830                                                         RelationGetRelationName(resultRelationDesc))));
831                         break;
832                 case RELKIND_TOASTVALUE:
833                         ereport(ERROR,
834                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
835                                          errmsg("cannot change TOAST relation \"%s\"",
836                                                         RelationGetRelationName(resultRelationDesc))));
837                         break;
838                 case RELKIND_VIEW:
839                         ereport(ERROR,
840                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
841                                          errmsg("cannot change view \"%s\"",
842                                                         RelationGetRelationName(resultRelationDesc))));
843                         break;
844         }
845
846         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
847         resultRelInfo->type = T_ResultRelInfo;
848         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
849         resultRelInfo->ri_RelationDesc = resultRelationDesc;
850         resultRelInfo->ri_NumIndices = 0;
851         resultRelInfo->ri_IndexRelationDescs = NULL;
852         resultRelInfo->ri_IndexRelationInfo = NULL;
853         /* make a copy so as not to depend on relcache info not changing... */
854         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
855         if (resultRelInfo->ri_TrigDesc)
856         {
857                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
858
859                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
860                         palloc0(n * sizeof(FmgrInfo));
861                 if (doInstrument)
862                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
863                 else
864                         resultRelInfo->ri_TrigInstrument = NULL;
865         }
866         else
867         {
868                 resultRelInfo->ri_TrigFunctions = NULL;
869                 resultRelInfo->ri_TrigInstrument = NULL;
870         }
871         resultRelInfo->ri_ConstraintExprs = NULL;
872         resultRelInfo->ri_junkFilter = NULL;
873
874         /*
875          * If there are indices on the result relation, open them and save
876          * descriptors in the result relation info, so that we can add new index
877          * entries for the tuples we add/update.  We need not do this for a
878          * DELETE, however, since deletion doesn't affect indexes.
879          */
880         if (resultRelationDesc->rd_rel->relhasindex &&
881                 operation != CMD_DELETE)
882                 ExecOpenIndices(resultRelInfo);
883 }
884
885 /*
886  *              ExecContextForcesOids
887  *
888  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
889  * we need to ensure that result tuples have space for an OID iff they are
890  * going to be stored into a relation that has OIDs.  In other contexts
891  * we are free to choose whether to leave space for OIDs in result tuples
892  * (we generally don't want to, but we do if a physical-tlist optimization
893  * is possible).  This routine checks the plan context and returns TRUE if the
894  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
895  * *hasoids is set to the required value.
896  *
897  * One reason this is ugly is that all plan nodes in the plan tree will emit
898  * tuples with space for an OID, though we really only need the topmost node
899  * to do so.  However, node types like Sort don't project new tuples but just
900  * return their inputs, and in those cases the requirement propagates down
901  * to the input node.  Eventually we might make this code smart enough to
902  * recognize how far down the requirement really goes, but for now we just
903  * make all plan nodes do the same thing if the top level forces the choice.
904  *
905  * We assume that estate->es_result_relation_info is already set up to
906  * describe the target relation.  Note that in an UPDATE that spans an
907  * inheritance tree, some of the target relations may have OIDs and some not.
908  * We have to make the decisions on a per-relation basis as we initialize
909  * each of the child plans of the topmost Append plan.
910  *
911  * SELECT INTO is even uglier, because we don't have the INTO relation's
912  * descriptor available when this code runs; we have to look aside at a
913  * flag set by InitPlan().
914  */
915 bool
916 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
917 {
918         if (planstate->state->es_select_into)
919         {
920                 *hasoids = planstate->state->es_into_oids;
921                 return true;
922         }
923         else
924         {
925                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
926
927                 if (ri != NULL)
928                 {
929                         Relation        rel = ri->ri_RelationDesc;
930
931                         if (rel != NULL)
932                         {
933                                 *hasoids = rel->rd_rel->relhasoids;
934                                 return true;
935                         }
936                 }
937         }
938
939         return false;
940 }
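/*
 * Sketch of how a plan node's initialization code might consume
 * ExecContextForcesOids() when choosing its result tuple format: honor a
 * forced choice if there is one, otherwise default to "no OIDs" since the
 * extra space would be wasted.  Hypothetical helper; the real decision is
 * made in the executor utility code that builds result tuple descriptors.
 */
#ifdef NOT_USED
static bool
example_choose_hasoids(PlanState *planstate)
{
	bool		hasoids;

	if (ExecContextForcesOids(planstate, &hasoids))
		return hasoids;

	return false;
}
#endif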
941
942 /* ----------------------------------------------------------------
943  *              ExecEndPlan
944  *
945  *              Cleans up the query plan -- closes files and frees up storage
946  *
947  * NOTE: we are no longer very worried about freeing storage per se
948  * in this code; FreeExecutorState should be guaranteed to release all
949  * memory that needs to be released.  What we are worried about doing
950  * is closing relations and dropping buffer pins.  Thus, for example,
951  * tuple tables must be cleared or dropped to ensure pins are released.
952  * ----------------------------------------------------------------
953  */
954 void
955 ExecEndPlan(PlanState *planstate, EState *estate)
956 {
957         ResultRelInfo *resultRelInfo;
958         int                     i;
959         ListCell   *l;
960
961         /*
962          * shut down any PlanQual processing we were doing
963          */
964         if (estate->es_evalPlanQual != NULL)
965                 EndEvalPlanQual(estate);
966
967         /*
968          * shut down the node-type-specific query processing
969          */
970         ExecEndNode(planstate);
971
972         /*
973          * destroy the executor "tuple" table.
974          */
975         ExecDropTupleTable(estate->es_tupleTable, true);
976         estate->es_tupleTable = NULL;
977
978         /*
979          * close the result relation(s) if any, but hold locks until xact commit.
980          */
981         resultRelInfo = estate->es_result_relations;
982         for (i = estate->es_num_result_relations; i > 0; i--)
983         {
984                 /* Close indices and then the relation itself */
985                 ExecCloseIndices(resultRelInfo);
986                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
987                 resultRelInfo++;
988         }
989
990         /*
991          * close the "into" relation if necessary, again keeping lock
992          */
993         if (estate->es_into_relation_descriptor != NULL)
994         {
995                 /*
996                  * If we skipped using WAL, and it's not a temp relation, we must
997                  * force the relation down to disk before it's safe to commit the
998                  * transaction.  This requires forcing out any dirty buffers and then
999                  * doing a forced fsync.
1000                  */
1001                 if (!estate->es_into_relation_use_wal &&
1002                         !estate->es_into_relation_descriptor->rd_istemp)
1003                 {
1004                         FlushRelationBuffers(estate->es_into_relation_descriptor);
1005                         /* FlushRelationBuffers will have opened rd_smgr */
1006                         smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
1007                 }
1008
1009                 heap_close(estate->es_into_relation_descriptor, NoLock);
1010         }
1011
1012         /*
1013          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1014          */
1015         foreach(l, estate->es_rowMarks)
1016         {
1017                 ExecRowMark *erm = lfirst(l);
1018
1019                 heap_close(erm->relation, NoLock);
1020         }
1021 }
1022
1023 /* ----------------------------------------------------------------
1024  *              ExecutePlan
1025  *
1026  *              processes the query plan to retrieve 'numberTuples' tuples in the
1027  *              direction specified.
1028  *
1029  *              Retrieves all tuples if numberTuples is 0
1030  *
1031  *              result is either a slot containing the last tuple in the case
1032  *              of a SELECT or NULL otherwise.
1033  *
1034  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1035  * user can see it
1036  * ----------------------------------------------------------------
1037  */
1038 static TupleTableSlot *
1039 ExecutePlan(EState *estate,
1040                         PlanState *planstate,
1041                         CmdType operation,
1042                         long numberTuples,
1043                         ScanDirection direction,
1044                         DestReceiver *dest)
1045 {
1046         JunkFilter *junkfilter;
1047         TupleTableSlot *slot;
1048         ItemPointer tupleid = NULL;
1049         ItemPointerData tuple_ctid;
1050         long            current_tuple_count;
1051         TupleTableSlot *result;
1052
1053         /*
1054          * initialize local variables
1055          */
1056         slot = NULL;
1057         current_tuple_count = 0;
1058         result = NULL;
1059
1060         /*
1061          * Set the direction.
1062          */
1063         estate->es_direction = direction;
1064
1065         /*
1066          * Process BEFORE EACH STATEMENT triggers
1067          */
1068         switch (operation)
1069         {
1070                 case CMD_UPDATE:
1071                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1072                         break;
1073                 case CMD_DELETE:
1074                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1075                         break;
1076                 case CMD_INSERT:
1077                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1078                         break;
1079                 default:
1080                         /* do nothing */
1081                         break;
1082         }
1083
1084         /*
1085          * Loop until we've processed the proper number of tuples from the plan.
1086          */
1087
1088         for (;;)
1089         {
1090                 /* Reset the per-output-tuple exprcontext */
1091                 ResetPerTupleExprContext(estate);
1092
1093                 /*
1094                  * Execute the plan and obtain a tuple
1095                  */
1096 lnext:  ;
1097                 if (estate->es_useEvalPlan)
1098                 {
1099                         slot = EvalPlanQualNext(estate);
1100                         if (TupIsNull(slot))
1101                                 slot = ExecProcNode(planstate);
1102                 }
1103                 else
1104                         slot = ExecProcNode(planstate);
1105
1106                 /*
1107                  * if the tuple is null, then we assume there is nothing more to
1108                  * process so we just return null...
1109                  */
1110                 if (TupIsNull(slot))
1111                 {
1112                         result = NULL;
1113                         break;
1114                 }
1115
1116                 /*
1117                  * if we have a junk filter, then project a new tuple with the junk
1118                  * removed.
1119                  *
1120                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1121                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1122                  * because that tuple slot has the wrong descriptor.)
1123                  *
1124                  * Also, extract all the junk information we need.
1125                  */
1126                 if ((junkfilter = estate->es_junkFilter) != NULL)
1127                 {
1128                         Datum           datum;
1129                         bool            isNull;
1130
1131                         /*
1132                          * extract the 'ctid' junk attribute.
1133                          */
1134                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1135                         {
1136                                 if (!ExecGetJunkAttribute(junkfilter,
1137                                                                                   slot,
1138                                                                                   "ctid",
1139                                                                                   &datum,
1140                                                                                   &isNull))
1141                                         elog(ERROR, "could not find junk ctid column");
1142
1143                                 /* shouldn't ever get a null result... */
1144                                 if (isNull)
1145                                         elog(ERROR, "ctid is NULL");
1146
1147                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1148                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1149                                 tupleid = &tuple_ctid;
1150                         }
1151
1152                         /*
1153                          * Process any FOR UPDATE or FOR SHARE locking requested.
1154                          */
1155                         else if (estate->es_rowMarks != NIL)
1156                         {
1157                                 ListCell   *l;
1158
1159                 lmark:  ;
1160                                 foreach(l, estate->es_rowMarks)
1161                                 {
1162                                         ExecRowMark *erm = lfirst(l);
1163                                         HeapTupleData tuple;
1164                                         Buffer          buffer;
1165                                         ItemPointerData update_ctid;
1166                                         TransactionId update_xmax;
1167                                         TupleTableSlot *newSlot;
1168                                         LockTupleMode lockmode;
1169                                         HTSU_Result test;
1170
1171                                         if (!ExecGetJunkAttribute(junkfilter,
1172                                                                                           slot,
1173                                                                                           erm->resname,
1174                                                                                           &datum,
1175                                                                                           &isNull))
1176                                                 elog(ERROR, "could not find junk \"%s\" column",
1177                                                          erm->resname);
1178
1179                                         /* shouldn't ever get a null result... */
1180                                         if (isNull)
1181                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1182
1183                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1184
1185                                         if (estate->es_forUpdate)
1186                                                 lockmode = LockTupleExclusive;
1187                                         else
1188                                                 lockmode = LockTupleShared;
1189
1190                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1191                                                                                    &update_ctid, &update_xmax,
1192                                                                                    estate->es_snapshot->curcid,
1193                                                                                    lockmode, estate->es_rowNoWait);
1194                                         ReleaseBuffer(buffer);
1195                                         switch (test)
1196                                         {
1197                                                 case HeapTupleSelfUpdated:
1198                                                         /* treat it as deleted; do not process */
1199                                                         goto lnext;
1200
1201                                                 case HeapTupleMayBeUpdated:
1202                                                         break;
1203
1204                                                 case HeapTupleUpdated:
1205                                                         if (IsXactIsoLevelSerializable)
1206                                                                 ereport(ERROR,
1207                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1208                                                                   errmsg("could not serialize access due to concurrent update")));
1209                                                         if (!ItemPointerEquals(&update_ctid,
1210                                                                                                    &tuple.t_self))
1211                                                         {
1212                                                                 /* updated, so look at updated version */
1213                                                                 newSlot = EvalPlanQual(estate,
1214                                                                                                            erm->rti,
1215                                                                                                            &update_ctid,
1216                                                                                                            update_xmax);
1217                                                                 if (!TupIsNull(newSlot))
1218                                                                 {
1219                                                                         slot = newSlot;
1220                                                                         estate->es_useEvalPlan = true;
1221                                                                         goto lmark;
1222                                                                 }
1223                                                         }
1224
1225                                                         /*
1226                                                          * if tuple was deleted or PlanQual failed for
1227                                                          * updated tuple - we must not return this tuple!
1228                                                          */
1229                                                         goto lnext;
1230
1231                                                 default:
1232                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1233                                                                  test);
1234                                                         return (NULL);
1235                                         }
1236                                 }
1237                         }
1238
1239                         /*
1240                          * Finally create a new "clean" tuple with all junk attributes
1241                          * removed
1242                          */
1243                         slot = ExecFilterJunk(junkfilter, slot);
1244                 }
1245
1246                 /*
1247                  * now that we have a tuple, do the appropriate thing with it.. either
1248                  * return it to the user, add it to a relation someplace, delete it
1249                  * from a relation, or modify some of its attributes.
1250                  */
1251                 switch (operation)
1252                 {
1253                         case CMD_SELECT:
1254                                 ExecSelect(slot,        /* slot containing tuple */
1255                                                    dest,        /* destination's tuple-receiver obj */
1256                                                    estate);
1257                                 result = slot;
1258                                 break;
1259
1260                         case CMD_INSERT:
1261                                 ExecInsert(slot, tupleid, estate);
1262                                 result = NULL;
1263                                 break;
1264
1265                         case CMD_DELETE:
1266                                 ExecDelete(slot, tupleid, estate);
1267                                 result = NULL;
1268                                 break;
1269
1270                         case CMD_UPDATE:
1271                                 ExecUpdate(slot, tupleid, estate);
1272                                 result = NULL;
1273                                 break;
1274
1275                         default:
1276                                 elog(ERROR, "unrecognized operation code: %d",
1277                                          (int) operation);
1278                                 result = NULL;
1279                                 break;
1280                 }
1281
1282                 /*
1283                  * Check our tuple count: if we've processed the proper number then
1284                  * quit, else loop again and process more tuples.  Zero numberTuples
1285                  * means no limit.
1286                  */
1287                 current_tuple_count++;
1288                 if (numberTuples && numberTuples == current_tuple_count)
1289                         break;
1290         }
1291
1292         /*
1293          * Process AFTER EACH STATEMENT triggers
1294          */
1295         switch (operation)
1296         {
1297                 case CMD_UPDATE:
1298                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1299                         break;
1300                 case CMD_DELETE:
1301                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1302                         break;
1303                 case CMD_INSERT:
1304                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1305                         break;
1306                 default:
1307                         /* do nothing */
1308                         break;
1309         }
1310
1311         /*
1312          * here, result is either a slot containing a tuple in the case of a
1313          * SELECT or NULL otherwise.
1314          */
1315         return result;
1316 }
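/*
 * An illustrative example of the numberTuples check above (the cursor name is
 * hypothetical): a cursor fetch such as
 *
 *              FETCH 10 FROM my_cursor;
 *
 * arrives here with numberTuples = 10, so the loop stops after ten tuples and
 * leaves the plan positioned for the next fetch, whereas an ordinary SELECT
 * runs with numberTuples = 0 and loops until the plan returns no more tuples.
 */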
1317
1318 /* ----------------------------------------------------------------
1319  *              ExecSelect
1320  *
1321  *              SELECTs are easy: we just pass the tuple to the appropriate
1322  *              print function.  The only complexity is when we do a
1323  *              "SELECT INTO", in which case we insert the tuple into
1324  *              the appropriate relation (note: this is a newly created relation,
1325  *              so we don't need to worry about indexes or locks).
1326  * ----------------------------------------------------------------
1327  */
1328 static void
1329 ExecSelect(TupleTableSlot *slot,
1330                    DestReceiver *dest,
1331                    EState *estate)
1332 {
1333         /*
1334          * insert the tuple into the "into relation"
1335          *
1336          * XXX this probably ought to be replaced by a separate destination
1337          */
1338         if (estate->es_into_relation_descriptor != NULL)
1339         {
1340                 HeapTuple       tuple;
1341
1342                 tuple = ExecCopySlotTuple(slot);
1343                 heap_insert(estate->es_into_relation_descriptor, tuple,
1344                                         estate->es_snapshot->curcid,
1345                                         estate->es_into_relation_use_wal,
1346                                         false);         /* never any point in using FSM */
1347                 /* we know there are no indexes to update */
1348                 heap_freetuple(tuple);
1349                 IncrAppended();
1350         }
1351
1352         /*
1353          * send the tuple to the destination
1354          */
1355         (*dest->receiveSlot) (slot, dest);
1356         IncrRetrieved();
1357         (estate->es_processed)++;
1358 }
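/*
 * An illustrative example of the "into relation" path above (table names are
 * hypothetical):
 *
 *              SELECT * INTO new_tab FROM old_tab;
 *
 * new_tab is created earlier in the same query, so it cannot have any indexes
 * yet and is not visible to any other transaction; that is why a bare
 * heap_insert with no index maintenance is sufficient here.
 */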
1359
1360 /* ----------------------------------------------------------------
1361  *              ExecInsert
1362  *
1363  *              INSERTs are trickier: we have to insert the tuple into
1364  *              the base relation and insert appropriate tuples into the
1365  *              index relations.
1366  * ----------------------------------------------------------------
1367  */
1368 static void
1369 ExecInsert(TupleTableSlot *slot,
1370                    ItemPointer tupleid,
1371                    EState *estate)
1372 {
1373         HeapTuple       tuple;
1374         ResultRelInfo *resultRelInfo;
1375         Relation        resultRelationDesc;
1376         Oid                     newId;
1377
1378         /*
1379          * get the heap tuple out of the tuple table slot, making sure we have a
1380          * writable copy
1381          */
1382         tuple = ExecMaterializeSlot(slot);
1383
1384         /*
1385          * get information on the (current) result relation
1386          */
1387         resultRelInfo = estate->es_result_relation_info;
1388         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1389
1390         /* BEFORE ROW INSERT Triggers */
1391         if (resultRelInfo->ri_TrigDesc &&
1392                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1393         {
1394                 HeapTuple       newtuple;
1395
1396                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1397
1398                 if (newtuple == NULL)   /* "do nothing" */
1399                         return;
1400
1401                 if (newtuple != tuple)  /* modified by Trigger(s) */
1402                 {
1403                         /*
1404                          * Put the modified tuple into a slot for convenience of routines
1405                          * below.  We assume the tuple was allocated in the per-tuple memory
1406                          * context, and therefore will go away by itself. The tuple table
1407                          * slot should not try to clear it.
1408                          */
1409                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1410
1411                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1412                                 ExecSetSlotDescriptor(newslot,
1413                                                                           slot->tts_tupleDescriptor,
1414                                                                           false);
1415                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1416                         slot = newslot;
1417                         tuple = newtuple;
1418                 }
1419         }
1420
1421         /*
1422          * Check the constraints of the tuple
1423          */
1424         if (resultRelationDesc->rd_att->constr)
1425                 ExecConstraints(resultRelInfo, slot, estate);
1426
1427         /*
1428          * insert the tuple
1429          *
1430          * Note: heap_insert returns the tid (location) of the new tuple in the
1431          * t_self field.
1432          */
1433         newId = heap_insert(resultRelationDesc, tuple,
1434                                                 estate->es_snapshot->curcid,
1435                                                 true, true);
1436
1437         IncrAppended();
1438         (estate->es_processed)++;
1439         estate->es_lastoid = newId;
1440         setLastTid(&(tuple->t_self));
1441
1442         /*
1443          * insert index entries for tuple
1444          */
1445         if (resultRelInfo->ri_NumIndices > 0)
1446                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1447
1448         /* AFTER ROW INSERT Triggers */
1449         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1450 }
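/*
 * Illustrative notes on the BEFORE ROW INSERT trigger outcomes handled above
 * (the example column is hypothetical): a trigger function that returns NULL
 * makes ExecBRInsertTriggers return NULL here, so the row is silently
 * skipped; one that returns a modified row, say with a missing "created_at"
 * column filled in, replaces the original tuple, and it is that version
 * which is checked against constraints and passed to heap_insert.
 */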
1451
1452 /* ----------------------------------------------------------------
1453  *              ExecDelete
1454  *
1455  *              DELETE is like UPDATE, except that we delete the tuple and no
1456  *              index modifications are needed
1457  * ----------------------------------------------------------------
1458  */
1459 static void
1460 ExecDelete(TupleTableSlot *slot,
1461                    ItemPointer tupleid,
1462                    EState *estate)
1463 {
1464         ResultRelInfo *resultRelInfo;
1465         Relation        resultRelationDesc;
1466         HTSU_Result result;
1467         ItemPointerData update_ctid;
1468         TransactionId update_xmax;
1469
1470         /*
1471          * get information on the (current) result relation
1472          */
1473         resultRelInfo = estate->es_result_relation_info;
1474         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1475
1476         /* BEFORE ROW DELETE Triggers */
1477         if (resultRelInfo->ri_TrigDesc &&
1478                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1479         {
1480                 bool            dodelete;
1481
1482                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1483                                                                                 estate->es_snapshot->curcid);
1484
1485                 if (!dodelete)                  /* "do nothing" */
1486                         return;
1487         }
1488
1489         /*
1490          * delete the tuple
1491          *
1492          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1493          * the row to be deleted is visible to that snapshot, and throw a can't-
1494          * serialize error if not.      This is a special-case behavior needed for
1495          * referential integrity updates in serializable transactions.
1496          */
1497 ldelete:;
1498         result = heap_delete(resultRelationDesc, tupleid,
1499                                                  &update_ctid, &update_xmax,
1500                                                  estate->es_snapshot->curcid,
1501                                                  estate->es_crosscheck_snapshot,
1502                                                  true /* wait for commit */ );
1503         switch (result)
1504         {
1505                 case HeapTupleSelfUpdated:
1506                         /* already deleted by self; nothing to do */
1507                         return;
1508
1509                 case HeapTupleMayBeUpdated:
1510                         break;
1511
1512                 case HeapTupleUpdated:
1513                         if (IsXactIsoLevelSerializable)
1514                                 ereport(ERROR,
1515                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1516                                                  errmsg("could not serialize access due to concurrent update")));
1517                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1518                         {
1519                                 TupleTableSlot *epqslot;
1520
1521                                 epqslot = EvalPlanQual(estate,
1522                                                                            resultRelInfo->ri_RangeTableIndex,
1523                                                                            &update_ctid,
1524                                                                            update_xmax);
1525                                 if (!TupIsNull(epqslot))
1526                                 {
1527                                         *tupleid = update_ctid;
1528                                         goto ldelete;
1529                                 }
1530                         }
1531                         /* tuple already deleted; nothing to do */
1532                         return;
1533
1534                 default:
1535                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1536                         return;
1537         }
1538
1539         IncrDeleted();
1540         (estate->es_processed)++;
1541
1542         /*
1543          * Note: Normally one would think that we have to delete index tuples
1544          * associated with the heap tuple now...
1545          *
1546          * ... but in POSTGRES, we have no need to do this because VACUUM will
1547          * take care of it later.  We can't delete index tuples immediately
1548          * anyway, since the tuple is still visible to other transactions.
1549          */
1550
1551         /* AFTER ROW DELETE Triggers */
1552         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1553 }
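/*
 * A sketch of the HeapTupleUpdated case above under READ COMMITTED (the
 * session numbering is illustrative): if session 1 deletes a row and commits
 * while session 2's DELETE of the same row waits inside heap_delete,
 * session 2 then finds update_ctid still equal to the original tupleid
 * (there is no newer version), so it simply skips the row.  Had session 1
 * updated the row instead, session 2 would run EvalPlanQual on the new
 * version and, if it still satisfies the quals, retry via "goto ldelete".
 */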
1554
1555 /* ----------------------------------------------------------------
1556  *              ExecUpdate
1557  *
1558  *              note: we can't run UPDATE queries with transactions
1559  *              off because UPDATEs are actually INSERTs and our
1560  *              scan will mistakenly loop forever, updating the tuple
1561  *              it just inserted.  This should be fixed, but until it
1562  *              is, we don't want to get stuck in an infinite loop
1563  *              that corrupts your database.
1564  * ----------------------------------------------------------------
1565  */
1566 static void
1567 ExecUpdate(TupleTableSlot *slot,
1568                    ItemPointer tupleid,
1569                    EState *estate)
1570 {
1571         HeapTuple       tuple;
1572         ResultRelInfo *resultRelInfo;
1573         Relation        resultRelationDesc;
1574         HTSU_Result result;
1575         ItemPointerData update_ctid;
1576         TransactionId update_xmax;
1577
1578         /*
1579          * abort the operation if not running transactions
1580          */
1581         if (IsBootstrapProcessingMode())
1582                 elog(ERROR, "cannot UPDATE during bootstrap");
1583
1584         /*
1585          * get the heap tuple out of the tuple table slot, making sure we have a
1586          * writable copy
1587          */
1588         tuple = ExecMaterializeSlot(slot);
1589
1590         /*
1591          * get information on the (current) result relation
1592          */
1593         resultRelInfo = estate->es_result_relation_info;
1594         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1595
1596         /* BEFORE ROW UPDATE Triggers */
1597         if (resultRelInfo->ri_TrigDesc &&
1598                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1599         {
1600                 HeapTuple       newtuple;
1601
1602                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1603                                                                                 tupleid, tuple,
1604                                                                                 estate->es_snapshot->curcid);
1605
1606                 if (newtuple == NULL)   /* "do nothing" */
1607                         return;
1608
1609                 if (newtuple != tuple)  /* modified by Trigger(s) */
1610                 {
1611                         /*
1612                          * Put the modified tuple into a slot for convenience of routines
1613                          * below.  We assume the tuple was allocated in the per-tuple memory
1614                          * context, and therefore will go away by itself. The tuple table
1615                          * slot should not try to clear it.
1616                          */
1617                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1618
1619                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1620                                 ExecSetSlotDescriptor(newslot,
1621                                                                           slot->tts_tupleDescriptor,
1622                                                                           false);
1623                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1624                         slot = newslot;
1625                         tuple = newtuple;
1626                 }
1627         }
1628
1629         /*
1630          * Check the constraints of the tuple
1631          *
1632          * If we generate a new candidate tuple after EvalPlanQual testing, we
1633          * must loop back here and recheck constraints.  (We don't need to redo
1634          * triggers, however.  If there are any BEFORE triggers then trigger.c
1635          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1636          * need to do them again.)
1637          */
1638 lreplace:;
1639         if (resultRelationDesc->rd_att->constr)
1640                 ExecConstraints(resultRelInfo, slot, estate);
1641
1642         /*
1643          * replace the heap tuple
1644          *
1645          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1646          * the row to be updated is visible to that snapshot, and throw a can't-
1647          * serialize error if not.      This is a special-case behavior needed for
1648          * referential integrity updates in serializable transactions.
1649          */
1650         result = heap_update(resultRelationDesc, tupleid, tuple,
1651                                                  &update_ctid, &update_xmax,
1652                                                  estate->es_snapshot->curcid,
1653                                                  estate->es_crosscheck_snapshot,
1654                                                  true /* wait for commit */ );
1655         switch (result)
1656         {
1657                 case HeapTupleSelfUpdated:
1658                         /* already deleted by self; nothing to do */
1659                         return;
1660
1661                 case HeapTupleMayBeUpdated:
1662                         break;
1663
1664                 case HeapTupleUpdated:
1665                         if (IsXactIsoLevelSerializable)
1666                                 ereport(ERROR,
1667                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1668                                                  errmsg("could not serialize access due to concurrent update")));
1669                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1670                         {
1671                                 TupleTableSlot *epqslot;
1672
1673                                 epqslot = EvalPlanQual(estate,
1674                                                                            resultRelInfo->ri_RangeTableIndex,
1675                                                                            &update_ctid,
1676                                                                            update_xmax);
1677                                 if (!TupIsNull(epqslot))
1678                                 {
1679                                         *tupleid = update_ctid;
1680                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1681                                         tuple = ExecMaterializeSlot(slot);
1682                                         goto lreplace;
1683                                 }
1684                         }
1685                         /* tuple already deleted; nothing to do */
1686                         return;
1687
1688                 default:
1689                         elog(ERROR, "unrecognized heap_update status: %u", result);
1690                         return;
1691         }
1692
1693         IncrReplaced();
1694         (estate->es_processed)++;
1695
1696         /*
1697          * Note: instead of having to update the old index tuples associated with
1698          * the heap tuple, all we do is form and insert new index tuples. This is
1699          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1700          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1701          * here is insert new index tuples.  -cim 9/27/89
1702          */
1703
1704         /*
1705          * insert index entries for tuple
1706          *
1707          * Note: heap_update returns the tid (location) of the new tuple in the
1708          * t_self field.
1709          */
1710         if (resultRelInfo->ri_NumIndices > 0)
1711                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1712
1713         /* AFTER ROW UPDATE Triggers */
1714         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1715 }
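/*
 * A sketch of the corresponding UPDATE behavior under READ COMMITTED (table
 * and values are illustrative), with two sessions updating the same row:
 *
 *              session 1:  UPDATE accounts SET balance = balance - 100 WHERE id = 1;
 *              session 2:  UPDATE accounts SET balance = balance + 10 WHERE id = 1;
 *
 * Session 2 blocks in heap_update, sees HeapTupleUpdated once session 1
 * commits, re-evaluates its quals against session 1's row version through
 * EvalPlanQual, and retries via "goto lreplace", so both changes are applied.
 * Under SERIALIZABLE the same situation instead raises the
 * "could not serialize access due to concurrent update" error above.
 */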
1716
1717 static const char *
1718 ExecRelCheck(ResultRelInfo *resultRelInfo,
1719                          TupleTableSlot *slot, EState *estate)
1720 {
1721         Relation        rel = resultRelInfo->ri_RelationDesc;
1722         int                     ncheck = rel->rd_att->constr->num_check;
1723         ConstrCheck *check = rel->rd_att->constr->check;
1724         ExprContext *econtext;
1725         MemoryContext oldContext;
1726         List       *qual;
1727         int                     i;
1728
1729         /*
1730          * If first time through for this result relation, build expression
1731          * node trees for rel's constraint expressions.  Keep them in the per-query
1732          * memory context so they'll survive throughout the query.
1733          */
1734         if (resultRelInfo->ri_ConstraintExprs == NULL)
1735         {
1736                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1737                 resultRelInfo->ri_ConstraintExprs =
1738                         (List **) palloc(ncheck * sizeof(List *));
1739                 for (i = 0; i < ncheck; i++)
1740                 {
1741                         /* ExecQual wants implicit-AND form */
1742                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1743                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1744                                 ExecPrepareExpr((Expr *) qual, estate);
1745                 }
1746                 MemoryContextSwitchTo(oldContext);
1747         }
1748
1749         /*
1750          * We will use the EState's per-tuple context for evaluating constraint
1751          * expressions (creating it if it's not already there).
1752          */
1753         econtext = GetPerTupleExprContext(estate);
1754
1755         /* Arrange for econtext's scan tuple to be the tuple under test */
1756         econtext->ecxt_scantuple = slot;
1757
1758         /* And evaluate the constraints */
1759         for (i = 0; i < ncheck; i++)
1760         {
1761                 qual = resultRelInfo->ri_ConstraintExprs[i];
1762
1763                 /*
1764                  * NOTE: SQL92 specifies that a NULL result from a constraint
1765                  * expression is not to be treated as a failure.  Therefore, tell
1766                  * ExecQual to return TRUE for NULL.
1767                  */
1768                 if (!ExecQual(qual, econtext, true))
1769                         return check[i].ccname;
1770         }
1771
1772         /* NULL result means no error */
1773         return NULL;
1774 }
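/*
 * A brief illustration of the NULL-is-not-failure rule above (the column name
 * is hypothetical): for a constraint CHECK (price > 0), a row with a NULL
 * price makes the expression evaluate to NULL rather than false, so ExecQual,
 * having been told to return TRUE for NULL, treats the constraint as
 * satisfied and ExecRelCheck returns NULL for that row.
 */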
1775
1776 void
1777 ExecConstraints(ResultRelInfo *resultRelInfo,
1778                                 TupleTableSlot *slot, EState *estate)
1779 {
1780         Relation        rel = resultRelInfo->ri_RelationDesc;
1781         TupleConstr *constr = rel->rd_att->constr;
1782
1783         Assert(constr);
1784
1785         if (constr->has_not_null)
1786         {
1787                 int                     natts = rel->rd_att->natts;
1788                 int                     attrChk;
1789
1790                 for (attrChk = 1; attrChk <= natts; attrChk++)
1791                 {
1792                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1793                                 slot_attisnull(slot, attrChk))
1794                                 ereport(ERROR,
1795                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1796                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1797                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1798                 }
1799         }
1800
1801         if (constr->num_check > 0)
1802         {
1803                 const char *failed;
1804
1805                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1806                         ereport(ERROR,
1807                                         (errcode(ERRCODE_CHECK_VIOLATION),
1808                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1809                                                         RelationGetRelationName(rel), failed)));
1810         }
1811 }
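/*
 * An illustrative SQL-level view of the two checks above (table, column and
 * constraint names are hypothetical):
 *
 *              CREATE TABLE items (price int NOT NULL,
 *                                  CONSTRAINT price_positive CHECK (price > 0));
 *              INSERT INTO items VALUES (NULL);        -- rejected by the not-null loop
 *              INSERT INTO items VALUES (-1);          -- rejected by ExecRelCheck
 *
 * The first INSERT reports the not-null violation for column "price"; the
 * second reports a check-constraint violation naming "price_positive", using
 * the message formats shown above.
 */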
1812
1813 /*
1814  * Check a modified tuple to see if we want to process its updated version
1815  * under READ COMMITTED rules.
1816  *
1817  * See backend/executor/README for some info about how this works.
1818  *
1819  *      estate - executor state data
1820  *      rti - rangetable index of table containing tuple
1821  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1822  *      priorXmax - t_xmax from the outdated tuple
1823  *
1824  * *tid is also an output parameter: it's modified to hold the TID of the
1825  * latest version of the tuple (note this may be changed even on failure)
1826  *
1827  * Returns a slot containing the new candidate update/delete tuple, or
1828  * NULL if we determine we shouldn't process the row.
1829  */
1830 TupleTableSlot *
1831 EvalPlanQual(EState *estate, Index rti,
1832                          ItemPointer tid, TransactionId priorXmax)
1833 {
1834         evalPlanQual *epq;
1835         EState     *epqstate;
1836         Relation        relation;
1837         HeapTupleData tuple;
1838         HeapTuple       copyTuple = NULL;
1839         bool            endNode;
1840
1841         Assert(rti != 0);
1842
1843         /*
1844          * find relation containing target tuple
1845          */
1846         if (estate->es_result_relation_info != NULL &&
1847                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1848                 relation = estate->es_result_relation_info->ri_RelationDesc;
1849         else
1850         {
1851                 ListCell   *l;
1852
1853                 relation = NULL;
1854                 foreach(l, estate->es_rowMarks)
1855                 {
1856                         if (((ExecRowMark *) lfirst(l))->rti == rti)
1857                         {
1858                                 relation = ((ExecRowMark *) lfirst(l))->relation;
1859                                 break;
1860                         }
1861                 }
1862                 if (relation == NULL)
1863                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1864         }
1865
1866         /*
1867          * fetch tid tuple
1868          *
1869          * Loop here to deal with updated or busy tuples
1870          */
1871         tuple.t_self = *tid;
1872         for (;;)
1873         {
1874                 Buffer          buffer;
1875
1876                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
1877                 {
1878                         /*
1879                          * If xmin isn't what we're expecting, the slot must have been
1880                          * recycled and reused for an unrelated tuple.  This implies that
1881                          * the latest version of the row was deleted, so we need do
1882                          * nothing.  (Should be safe to examine xmin without getting
1883                          * buffer's content lock, since xmin never changes in an existing
1884                          * tuple.)
1885                          */
1886                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1887                                                                          priorXmax))
1888                         {
1889                                 ReleaseBuffer(buffer);
1890                                 return NULL;
1891                         }
1892
1893                         /* otherwise xmin should not be dirty... */
1894                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1895                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1896
1897                         /*
1898                          * If the tuple is being updated by another transaction, we have to
1899                          * wait for it to commit or abort.
1900                          */
1901                         if (TransactionIdIsValid(SnapshotDirty->xmax))
1902                         {
1903                                 ReleaseBuffer(buffer);
1904                                 XactLockTableWait(SnapshotDirty->xmax);
1905                                 continue;               /* loop back to repeat heap_fetch */
1906                         }
1907
1908                         /*
1909                          * We got the tuple; now copy it for use by the recheck query.
1910                          */
1911                         copyTuple = heap_copytuple(&tuple);
1912                         ReleaseBuffer(buffer);
1913                         break;
1914                 }
1915
1916                 /*
1917                  * If the referenced slot was actually empty, the latest version of
1918                  * the row must have been deleted, so we need do nothing.
1919                  */
1920                 if (tuple.t_data == NULL)
1921                 {
1922                         ReleaseBuffer(buffer);
1923                         return NULL;
1924                 }
1925
1926                 /*
1927                  * As above, if xmin isn't what we're expecting, do nothing.
1928                  */
1929                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1930                                                                  priorXmax))
1931                 {
1932                         ReleaseBuffer(buffer);
1933                         return NULL;
1934                 }
1935
1936                 /*
1937                  * If we get here, the tuple was found but failed SnapshotDirty.
1938                  * Assuming the xmin is either a committed xact or our own xact (as it
1939                  * certainly should be if we're trying to modify the tuple), this must
1940                  * mean that the row was updated or deleted by either a committed xact
1941                  * or our own xact.  If it was deleted, we can ignore it; if it was
1942                  * updated then chain up to the next version and repeat the whole
1943                  * test.
1944                  *
1945                  * As above, it should be safe to examine xmax and t_ctid without the
1946                  * buffer content lock, because they can't be changing.
1947                  */
1948                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1949                 {
1950                         /* deleted, so forget about it */
1951                         ReleaseBuffer(buffer);
1952                         return NULL;
1953                 }
1954
1955                 /* updated, so look at the updated row */
1956                 tuple.t_self = tuple.t_data->t_ctid;
1957                 /* updated row should have xmin matching this xmax */
1958                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1959                 ReleaseBuffer(buffer);
1960                 /* loop back to fetch next in chain */
1961         }
1962
1963         /*
1964          * For UPDATE/DELETE we have to return the tid of the actual row we're
1965          * executing PQ for.
1966          */
1967         *tid = tuple.t_self;
1968
1969         /*
1970          * Need to run a recheck subquery.      Find or create a PQ stack entry.
1971          */
1972         epq = estate->es_evalPlanQual;
1973         endNode = true;
1974
1975         if (epq != NULL && epq->rti == 0)
1976         {
1977                 /* Top PQ stack entry is idle, so re-use it */
1978                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1979                 epq->rti = rti;
1980                 endNode = false;
1981         }
1982
1983         /*
1984          * If this is a request for a different RTE, Ra, we have to check whether
1985          * PlanQual was already requested for Ra.  If so, Ra's row was updated
1986          * again, so we must restart the old execution for Ra and discard
1987          * everything we did after Ra was suspended.
1988          */
1989         if (epq != NULL && epq->rti != rti &&
1990                 epq->estate->es_evTuple[rti - 1] != NULL)
1991         {
1992                 do
1993                 {
1994                         evalPlanQual *oldepq;
1995
1996                         /* stop execution */
1997                         EvalPlanQualStop(epq);
1998                         /* pop previous PlanQual from the stack */
1999                         oldepq = epq->next;
2000                         Assert(oldepq && oldepq->rti != 0);
2001                         /* push current PQ to freePQ stack */
2002                         oldepq->free = epq;
2003                         epq = oldepq;
2004                         estate->es_evalPlanQual = epq;
2005                 } while (epq->rti != rti);
2006         }
2007
2008         /*
2009          * If the request is for a different RTE, we have to suspend execution of
2010          * the current PlanQual and start execution of a new one.
2011          */
2012         if (epq == NULL || epq->rti != rti)
2013         {
2014                 /* try to reuse plan used previously */
2015                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2016
2017                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2018                 {
2019                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2020                         newepq->free = NULL;
2021                         newepq->estate = NULL;
2022                         newepq->planstate = NULL;
2023                 }
2024                 else
2025                 {
2026                         /* recycle previously used PlanQual */
2027                         Assert(newepq->estate == NULL);
2028                         epq->free = NULL;
2029                 }
2030                 /* push current PQ to the stack */
2031                 newepq->next = epq;
2032                 epq = newepq;
2033                 estate->es_evalPlanQual = epq;
2034                 epq->rti = rti;
2035                 endNode = false;
2036         }
2037
2038         Assert(epq->rti == rti);
2039
2040         /*
2041          * OK, the request is for the same RTE.  Unfortunately we still have to
2042          * end and restart execution of the plan, because ExecReScan wouldn't
2043          * ensure that upper plan nodes would reset themselves.  We could make
2044          * that work if insertion of the target tuple were integrated with the
2045          * Param mechanism somehow, so that the upper plan nodes know that their
2046          * children's outputs have changed.
2047          *
2048          * Note that the stack of free evalPlanQual nodes is quite useless at the
2049          * moment, since it only saves us from pallocing/releasing the
2050          * evalPlanQual nodes themselves.  But it will be useful once we implement
2051          * ReScan instead of end/restart for re-using PlanQual nodes.
2052          */
2053         if (endNode)
2054         {
2055                 /* stop execution */
2056                 EvalPlanQualStop(epq);
2057         }
2058
2059         /*
2060          * Initialize new recheck query.
2061          *
2062          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2063          * instead copy down changeable state from the top plan (including
2064          * es_result_relation_info, es_junkFilter) and reset locally changeable
2065          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2066          */
2067         EvalPlanQualStart(epq, estate, epq->next);
2068
2069         /*
2070          * Free the old RTE's tuple, if any, and store the target tuple where the
2071          * relation's scan node will see it.
2072          */
2073         epqstate = epq->estate;
2074         if (epqstate->es_evTuple[rti - 1] != NULL)
2075                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2076         epqstate->es_evTuple[rti - 1] = copyTuple;
2077
2078         return EvalPlanQualNext(estate);
2079 }
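/*
 * Within this file, EvalPlanQual is reached from three places: the row-mark
 * (FOR UPDATE/FOR SHARE) handling in ExecutePlan, and the HeapTupleUpdated
 * cases in ExecDelete and ExecUpdate.  As an illustration (names are
 * hypothetical), a READ COMMITTED
 *
 *              SELECT * FROM queue WHERE claimed = false FOR UPDATE;
 *
 * whose candidate row has just been updated by a committed concurrent
 * transaction lands here, follows the t_ctid chain to the newest committed
 * version, and re-runs the plan against that version; the row is returned
 * and locked only if it still has claimed = false.
 */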
2080
2081 static TupleTableSlot *
2082 EvalPlanQualNext(EState *estate)
2083 {
2084         evalPlanQual *epq = estate->es_evalPlanQual;
2085         MemoryContext oldcontext;
2086         TupleTableSlot *slot;
2087
2088         Assert(epq->rti != 0);
2089
2090 lpqnext:;
2091         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2092         slot = ExecProcNode(epq->planstate);
2093         MemoryContextSwitchTo(oldcontext);
2094
2095         /*
2096          * No more tuples for this PQ; continue with the previous one.
2097          */
2098         if (TupIsNull(slot))
2099         {
2100                 evalPlanQual *oldepq;
2101
2102                 /* stop execution */
2103                 EvalPlanQualStop(epq);
2104                 /* pop old PQ from the stack */
2105                 oldepq = epq->next;
2106                 if (oldepq == NULL)
2107                 {
2108                         /* this is the first (oldest) PQ - mark as free */
2109                         epq->rti = 0;
2110                         estate->es_useEvalPlan = false;
2111                         /* and continue Query execution */
2112                         return (NULL);
2113                 }
2114                 Assert(oldepq->rti != 0);
2115                 /* push current PQ to freePQ stack */
2116                 oldepq->free = epq;
2117                 epq = oldepq;
2118                 estate->es_evalPlanQual = epq;
2119                 goto lpqnext;
2120         }
2121
2122         return (slot);
2123 }
2124
2125 static void
2126 EndEvalPlanQual(EState *estate)
2127 {
2128         evalPlanQual *epq = estate->es_evalPlanQual;
2129
2130         if (epq->rti == 0)                      /* plans already shut down */
2131         {
2132                 Assert(epq->next == NULL);
2133                 return;
2134         }
2135
2136         for (;;)
2137         {
2138                 evalPlanQual *oldepq;
2139
2140                 /* stop execution */
2141                 EvalPlanQualStop(epq);
2142                 /* pop old PQ from the stack */
2143                 oldepq = epq->next;
2144                 if (oldepq == NULL)
2145                 {
2146                         /* this is the first (oldest) PQ - mark as free */
2147                         epq->rti = 0;
2148                         estate->es_useEvalPlan = false;
2149                         break;
2150                 }
2151                 Assert(oldepq->rti != 0);
2152                 /* push current PQ to freePQ stack */
2153                 oldepq->free = epq;
2154                 epq = oldepq;
2155                 estate->es_evalPlanQual = epq;
2156         }
2157 }
2158
2159 /*
2160  * Start execution of one level of PlanQual.
2161  *
2162  * This is a cut-down version of ExecutorStart(): we copy some state from
2163  * the top-level estate rather than initializing it fresh.
2164  */
2165 static void
2166 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2167 {
2168         EState     *epqstate;
2169         int                     rtsize;
2170         MemoryContext oldcontext;
2171
2172         rtsize = list_length(estate->es_range_table);
2173
2174         epq->estate = epqstate = CreateExecutorState();
2175
2176         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2177
2178         /*
2179          * The epqstates share the top query's copy of unchanging state such as
2180          * the snapshot, rangetable, result-rel info, and external Param info.
2181          * They need their own copies of local state, including a tuple table,
2182          * es_param_exec_vals, etc.
2183          */
2184         epqstate->es_direction = ForwardScanDirection;
2185         epqstate->es_snapshot = estate->es_snapshot;
2186         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2187         epqstate->es_range_table = estate->es_range_table;
2188         epqstate->es_result_relations = estate->es_result_relations;
2189         epqstate->es_num_result_relations = estate->es_num_result_relations;
2190         epqstate->es_result_relation_info = estate->es_result_relation_info;
2191         epqstate->es_junkFilter = estate->es_junkFilter;
2192         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2193         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2194         epqstate->es_param_list_info = estate->es_param_list_info;
2195         if (estate->es_topPlan->nParamExec > 0)
2196                 epqstate->es_param_exec_vals = (ParamExecData *)
2197                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2198         epqstate->es_rowMarks = estate->es_rowMarks;
2199         epqstate->es_forUpdate = estate->es_forUpdate;
2200         epqstate->es_rowNoWait = estate->es_rowNoWait;
2201         epqstate->es_instrument = estate->es_instrument;
2202         epqstate->es_select_into = estate->es_select_into;
2203         epqstate->es_into_oids = estate->es_into_oids;
2204         epqstate->es_topPlan = estate->es_topPlan;
2205
2206         /*
2207          * Each epqstate must have its own es_evTupleNull state, but all the stack
2208          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2209          * the value being examined by an outer recheck.
2210          */
2211         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2212         if (priorepq == NULL)
2213                 /* first PQ stack entry */
2214                 epqstate->es_evTuple = (HeapTuple *)
2215                         palloc0(rtsize * sizeof(HeapTuple));
2216         else
2217                 /* later stack entries share the same storage */
2218                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2219
2220         epqstate->es_tupleTable =
2221                 ExecCreateTupleTable(estate->es_tupleTable->size);
2222
2223         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2224
2225         MemoryContextSwitchTo(oldcontext);
2226 }
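/*
 * Broadly speaking, the es_evTuple array set up above is how the substituted
 * row reaches the recheck plan: a scan node whose range-table index rti has
 * es_evTuple[rti - 1] set returns just that stored tuple, once (tracked via
 * es_evTupleNull), instead of scanning the underlying relation, so the
 * recheck produces at most one candidate row per target RTE.
 */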
2227
2228 /*
2229  * End execution of one level of PlanQual.
2230  *
2231  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2232  * of the normal cleanup, but *not* close result relations (which we are
2233  * just sharing from the outer query).
2234  */
2235 static void
2236 EvalPlanQualStop(evalPlanQual *epq)
2237 {
2238         EState     *epqstate = epq->estate;
2239         MemoryContext oldcontext;
2240
2241         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2242
2243         ExecEndNode(epq->planstate);
2244
2245         ExecDropTupleTable(epqstate->es_tupleTable, true);
2246         epqstate->es_tupleTable = NULL;
2247
2248         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2249         {
2250                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2251                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2252         }
2253
2254         MemoryContextSwitchTo(oldcontext);
2255
2256         FreeExecutorState(epqstate);
2257
2258         epq->estate = NULL;
2259         epq->planstate = NULL;
2260 }