/*-------------------------------------------------------------------------
 *
 * execMain.c
 *        top level executor interface routines
 *
 * INTERFACE ROUTINES
 *      ExecutorStart()
 *      ExecutorRun()
 *      ExecutorEnd()
 *
 *      The old ExecutorMain() has been replaced by ExecutorStart(),
 *      ExecutorRun() and ExecutorEnd().
 *
 *      These three procedures are the external interfaces to the executor.
 *      In each case, the query descriptor is required as an argument.
 *
 *      ExecutorStart() must be called at the beginning of execution of any
 *      query plan and ExecutorEnd() should always be called at the end of
 *      execution of a plan.
 *
 *      ExecutorRun() accepts direction and count arguments that specify
 *      whether the plan is to be executed forwards or backwards, and for how
 *      many tuples.
 *
 * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.277 2006/07/31 01:16:37 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"

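/*
 * Hypothetical usage sketch (not from the original sources): one way a
 * caller might drive the three interface routines described in the file
 * header above.  Where the QueryDesc comes from (CreateQueryDesc) and which
 * eflags are appropriate depend entirely on the caller.
 */
#ifdef NOT_USED
static void
ExampleRunQueryToCompletion(QueryDesc *queryDesc)
{
        ExecutorStart(queryDesc, 0);    /* no special executor flags */

        /* forward direction; count = 0 means run to completion */
        (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);

        ExecutorEnd(queryDesc);
}
#endif   /* NOT_USED */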

typedef struct evalPlanQual
{
        Index           rti;
        EState     *estate;
        PlanState  *planstate;
        struct evalPlanQual *next;      /* stack of active PlanQual plans */
        struct evalPlanQual *free;      /* list of free PlanQual plans */
} evalPlanQual;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
                                  Index resultRelationIndex,
                                  List *rangeTable,
                                  CmdType operation,
                                  bool doInstrument);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
                   DestReceiver *dest,
                   EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
                   EState *estate);
static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
                   EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
                   EState *estate);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(Query *parsetree);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
                                  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);

/* end of local decls */


/* ----------------------------------------------------------------
 *              ExecutorStart
 *
 *              This routine must be called at the beginning of any execution of any
 *              query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks: queryDesc must not be started already */
        Assert(queryDesc != NULL);
        Assert(queryDesc->estate == NULL);

        /*
         * If the transaction is read-only, we need to check if any writes are
         * planned to non-temporary tables.  EXPLAIN is considered read-only.
         */
        if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
                ExecCheckXactReadOnly(queryDesc->parsetree);

        /*
         * Build EState, switch into per-query memory context for startup.
         */
        estate = CreateExecutorState();
        queryDesc->estate = estate;

        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * Fill in parameters, if any, from queryDesc
         */
        estate->es_param_list_info = queryDesc->params;

        if (queryDesc->plantree->nParamExec > 0)
                estate->es_param_exec_vals = (ParamExecData *)
                        palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));

        /*
         * Copy other important information into the EState
         */
        estate->es_snapshot = queryDesc->snapshot;
        estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
        estate->es_instrument = queryDesc->doInstrument;

        /*
         * Initialize the plan state tree
         */
        InitPlan(queryDesc, eflags);

        MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *              ExecutorRun
 *
 *              This is the main routine of the executor module. It accepts
 *              the query descriptor from the traffic cop and executes the
 *              query plan.
 *
 *              ExecutorStart must have been called already.
 *
 *              If direction is NoMovementScanDirection then nothing is done
 *              except to start up/shut down the destination.  Otherwise,
 *              we retrieve up to 'count' tuples in the specified direction.
 *
 *              Note: count = 0 is interpreted as no portal limit, i.e., run to
 *              completion.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
        TupleTableSlot *result;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * extract information from the query descriptor
         */
        operation = queryDesc->operation;
        dest = queryDesc->dest;

        /*
         * startup tuple receiver
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;

        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

        /*
         * run plan
         */
        if (ScanDirectionIsNoMovement(direction))
                result = NULL;
        else
                result = ExecutePlan(estate,
                                                         queryDesc->planstate,
                                                         operation,
                                                         count,
                                                         direction,
                                                         dest);

        /*
         * shutdown receiver
         */
        (*dest->rShutdown) (dest);

        MemoryContextSwitchTo(oldcontext);

        return result;
}
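
/*
 * Hypothetical usage sketch (not from the original sources): fetching a
 * SELECT result in batches by calling ExecutorRun repeatedly with a nonzero
 * count.  It relies on the convention documented above and in ExecutePlan:
 * for a SELECT, the returned slot is the last tuple fetched, or NULL once
 * the plan is exhausted.  The batch size of 100 is arbitrary.
 */
#ifdef NOT_USED
static void
ExampleFetchInBatches(QueryDesc *queryDesc)
{
        for (;;)
        {
                TupleTableSlot *last;

                last = ExecutorRun(queryDesc, ForwardScanDirection, 100L);
                if (TupIsNull(last))
                        break;          /* plan exhausted before filling the batch */
        }
}
#endif   /* NOT_USED */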

/* ----------------------------------------------------------------
 *              ExecutorEnd
 *
 *              This routine must be called at the end of execution of any
 *              query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        ExecEndPlan(queryDesc->planstate, estate);

        /*
         * Must switch out of context before destroying it
         */
        MemoryContextSwitchTo(oldcontext);

        /*
         * Release EState and per-query memory context.  This should release
         * everything the executor has allocated.
         */
        FreeExecutorState(estate);

        /* Reset queryDesc fields that no longer point to anything */
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
}

/* ----------------------------------------------------------------
 *              ExecutorRewind
 *
 *              This routine may be called on an open queryDesc to rewind it
 *              to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /* It's probably not sensible to rescan updating queries */
        Assert(queryDesc->operation == CMD_SELECT);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * rescan plan
         */
        ExecReScan(queryDesc->planstate, NULL);

        MemoryContextSwitchTo(oldcontext);
}
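
/*
 * Hypothetical usage sketch (not from the original sources): restarting an
 * open SELECT from the beginning.  As the Assert above notes, rewinding is
 * only sensible for CMD_SELECT query descriptors.
 */
#ifdef NOT_USED
static void
ExampleReRunFromStart(QueryDesc *queryDesc)
{
        ExecutorRewind(queryDesc);
        (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
}
#endif   /* NOT_USED */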


/*
 * ExecCheckRTPerms
 *              Check access permissions for all relations listed in a range table.
 */
void
ExecCheckRTPerms(List *rangeTable)
{
        ListCell   *l;

        foreach(l, rangeTable)
        {
                RangeTblEntry *rte = lfirst(l);

                ExecCheckRTEPerms(rte);
        }
}

/*
 * ExecCheckRTEPerms
 *              Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
        AclMode         requiredPerms;
        Oid                     relOid;
        Oid                     userid;

        /*
         * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
         * checked by ExecInitSubqueryScan if the subquery is still a separate
         * subquery --- if it's been pulled up into our query level then the RTEs
         * are in our rangetable and will be checked here. Function RTEs are
         * checked by init_fcache when the function is prepared for execution.
         * Join and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
                return;

        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
                return;

        relOid = rte->relid;

        /*
         * userid to check as: current user unless we have a setuid indication.
         *
         * Note: GetUserId() is presently fast enough that there's no harm in
         * calling it separately for each RTE.  If that stops being true, we could
         * call it once in ExecCheckRTPerms and pass the userid down from there.
         * But for now, no need for the extra clutter.
         */
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

        /*
         * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
         */
        if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
                != requiredPerms)
                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                           get_rel_name(relOid));
}

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(Query *parsetree)
{
        ListCell   *l;

        /*
         * CREATE TABLE AS or SELECT INTO?
         *
         * XXX should we allow this if the destination is temp?
         */
        if (parsetree->into != NULL)
                goto fail;

        /* Fail if write permissions are requested on any non-temp table */
        foreach(l, parsetree->rtable)
        {
                RangeTblEntry *rte = lfirst(l);

                if (rte->rtekind == RTE_SUBQUERY)
                {
                        ExecCheckXactReadOnly(rte->subquery);
                        continue;
                }

                if (rte->rtekind != RTE_RELATION)
                        continue;

                if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
                        continue;

                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;

                goto fail;
        }

        return;

fail:
        ereport(ERROR,
                        (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
                         errmsg("transaction is read-only")));
}


/* ----------------------------------------------------------------
 *              InitPlan
 *
 *              Initializes the query plan: open files, allocate storage
 *              and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
        CmdType         operation = queryDesc->operation;
        Query      *parseTree = queryDesc->parsetree;
        Plan       *plan = queryDesc->plantree;
        EState     *estate = queryDesc->estate;
        PlanState  *planstate;
        List       *rangeTable;
        Relation        intoRelationDesc;
        bool            do_select_into;
        TupleDesc       tupType;
        ListCell   *l;

        /*
         * Do permissions checks.  It's sufficient to examine the query's top
         * rangetable here --- subplan RTEs will be checked during
         * ExecInitSubPlan().
         */
        ExecCheckRTPerms(parseTree->rtable);

        /*
         * get information from query descriptor
         */
        rangeTable = parseTree->rtable;

        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;

        /*
         * if there is a result relation, initialize result relation stuff
         */
        if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
        {
                List       *resultRelations = parseTree->resultRelations;
                int                     numResultRelations;
                ResultRelInfo *resultRelInfos;

                if (resultRelations != NIL)
                {
                        /*
                         * Multiple result relations (due to inheritance):
                         * parseTree->resultRelations identifies them all
                         */
                        ResultRelInfo *resultRelInfo;

                        numResultRelations = list_length(resultRelations);
                        resultRelInfos = (ResultRelInfo *)
                                palloc(numResultRelations * sizeof(ResultRelInfo));
                        resultRelInfo = resultRelInfos;
                        foreach(l, resultRelations)
                        {
                                initResultRelInfo(resultRelInfo,
                                                                  lfirst_int(l),
                                                                  rangeTable,
                                                                  operation,
                                                                  estate->es_instrument);
                                resultRelInfo++;
                        }
                }
                else
                {
                        /*
                         * Single result relation identified by parseTree->resultRelation
                         */
                        numResultRelations = 1;
                        resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
                        initResultRelInfo(resultRelInfos,
                                                          parseTree->resultRelation,
                                                          rangeTable,
                                                          operation,
                                                          estate->es_instrument);
                }

                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
                /* Initialize to first or only result rel */
                estate->es_result_relation_info = resultRelInfos;
        }
        else
        {
                /*
                 * if no result relation, then set state appropriately
                 */
                estate->es_result_relations = NULL;
                estate->es_num_result_relations = 0;
                estate->es_result_relation_info = NULL;
        }

        /*
         * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
         * flag appropriately so that the plan tree will be initialized with the
         * correct tuple descriptors.
         */
        do_select_into = false;

        if (operation == CMD_SELECT && parseTree->into != NULL)
        {
                do_select_into = true;
                estate->es_select_into = true;
                estate->es_into_oids = interpretOidsOption(parseTree->intoOptions);
        }

        /*
         * Have to lock relations selected FOR UPDATE/FOR SHARE
         */
        estate->es_rowMarks = NIL;
        foreach(l, parseTree->rowMarks)
        {
                RowMarkClause *rc = (RowMarkClause *) lfirst(l);
                Oid                     relid = getrelid(rc->rti, rangeTable);
                Relation        relation;
                ExecRowMark *erm;

                relation = heap_open(relid, RowShareLock);
                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->rti = rc->rti;
                erm->forUpdate = rc->forUpdate;
                erm->noWait = rc->noWait;
                snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rc->rti);
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }

        /*
         * initialize the executor "tuple" table.  We need slots for all the plan
         * nodes, plus possibly output slots for the junkfilter(s). At this point
         * we aren't sure if we need junkfilters, so just add slots for them
         * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
         * trigger output tuples.
         */
        {
                int                     nSlots = ExecCountSlotsNode(plan);

                if (parseTree->resultRelations != NIL)
                        nSlots += list_length(parseTree->resultRelations);
                else
                        nSlots += 1;
                if (operation != CMD_SELECT)
                        nSlots++;

                estate->es_tupleTable = ExecCreateTupleTable(nSlots);

                if (operation != CMD_SELECT)
                        estate->es_trig_tuple_slot =
                                ExecAllocTableSlot(estate->es_tupleTable);
        }

        /* mark EvalPlanQual not active */
        estate->es_topPlan = plan;
        estate->es_evalPlanQual = NULL;
        estate->es_evTupleNull = NULL;
        estate->es_evTuple = NULL;
        estate->es_useEvalPlan = false;

        /*
         * initialize the private state information for all the nodes in the query
         * tree.  This opens files, allocates storage and leaves us ready to start
         * processing tuples.
         */
        planstate = ExecInitNode(plan, estate, eflags);

        /*
         * Get the tuple descriptor describing the type of tuples to return. (this
         * is especially important if we are creating a relation with "SELECT
         * INTO")
         */
        tupType = ExecGetResultType(planstate);

        /*
         * Initialize the junk filter if needed.  SELECT and INSERT queries need a
         * filter if there are any junk attrs in the tlist.  INSERT and SELECT
         * INTO also need a filter if the plan may return raw disk tuples (else
         * heap_insert will be scribbling on the source relation!). UPDATE and
         * DELETE always need a filter, since there's always a junk 'ctid'
         * attribute present --- no need to look first.
         */
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;

                switch (operation)
                {
                        case CMD_SELECT:
                        case CMD_INSERT:
                                foreach(tlist, plan->targetlist)
                                {
                                        TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                                        if (tle->resjunk)
                                        {
                                                junk_filter_needed = true;
                                                break;
                                        }
                                }
                                if (!junk_filter_needed &&
                                        (operation == CMD_INSERT || do_select_into) &&
                                        ExecMayReturnRawTuples(planstate))
                                        junk_filter_needed = true;
                                break;
                        case CMD_UPDATE:
                        case CMD_DELETE:
                                junk_filter_needed = true;
                                break;
                        default:
                                break;
                }

                if (junk_filter_needed)
                {
                        /*
                         * If there are multiple result relations, each one needs its own
                         * junk filter.  Note this is only possible for UPDATE/DELETE, so
                         * we can't be fooled by some needing a filter and some not.
                         */
                        if (parseTree->resultRelations != NIL)
                        {
                                PlanState **appendplans;
                                int                     as_nplans;
                                ResultRelInfo *resultRelInfo;
                                int                     i;

                                /* Top plan had better be an Append here. */
                                Assert(IsA(plan, Append));
                                Assert(((Append *) plan)->isTarget);
                                Assert(IsA(planstate, AppendState));
                                appendplans = ((AppendState *) planstate)->appendplans;
                                as_nplans = ((AppendState *) planstate)->as_nplans;
                                Assert(as_nplans == estate->es_num_result_relations);
                                resultRelInfo = estate->es_result_relations;
                                for (i = 0; i < as_nplans; i++)
                                {
                                        PlanState  *subplan = appendplans[i];
                                        JunkFilter *j;

                                        j = ExecInitJunkFilter(subplan->plan->targetlist,
                                                        resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
                                                                  ExecAllocTableSlot(estate->es_tupleTable));
                                        resultRelInfo->ri_junkFilter = j;
                                        resultRelInfo++;
                                }

                                /*
                                 * Set active junkfilter too; at this point ExecInitAppend has
                                 * already selected an active result relation...
                                 */
                                estate->es_junkFilter =
                                        estate->es_result_relation_info->ri_junkFilter;
                        }
                        else
                        {
                                /* Normal case with just one JunkFilter */
                                JunkFilter *j;

                                j = ExecInitJunkFilter(planstate->plan->targetlist,
                                                                           tupType->tdhasoid,
                                                                  ExecAllocTableSlot(estate->es_tupleTable));
                                estate->es_junkFilter = j;
                                if (estate->es_result_relation_info)
                                        estate->es_result_relation_info->ri_junkFilter = j;

                                /* For SELECT, want to return the cleaned tuple type */
                                if (operation == CMD_SELECT)
                                        tupType = j->jf_cleanTupType;
                        }
                }
                else
                        estate->es_junkFilter = NULL;
        }

        /*
         * If doing SELECT INTO, initialize the "into" relation.  We must wait
         * till now so we have the "clean" result tuple type to create the new
         * table from.
         *
         * If EXPLAIN, skip creating the "into" relation.
         */
        intoRelationDesc = NULL;

        if (do_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        {
                char       *intoName;
                Oid                     namespaceId;
                Oid                     tablespaceId;
                Datum           reloptions;
                AclResult       aclresult;
                Oid                     intoRelationId;
                TupleDesc       tupdesc;

                /*
                 * Check consistency of arguments
                 */
                if (parseTree->intoOnCommit != ONCOMMIT_NOOP && !parseTree->into->istemp)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
                                         errmsg("ON COMMIT can only be used on temporary tables")));

                /*
                 * find namespace to create in, check permissions
                 */
                intoName = parseTree->into->relname;
                namespaceId = RangeVarGetCreationNamespace(parseTree->into);

                aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
                                                                                  ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
                                                   get_namespace_name(namespaceId));

                /*
                 * Select tablespace to use.  If not specified, use default_tablespace
                 * (which may in turn default to database's default).
                 */
                if (parseTree->intoTableSpaceName)
                {
                        tablespaceId = get_tablespace_oid(parseTree->intoTableSpaceName);
                        if (!OidIsValid(tablespaceId))
                                ereport(ERROR,
                                                (errcode(ERRCODE_UNDEFINED_OBJECT),
                                                 errmsg("tablespace \"%s\" does not exist",
                                                                parseTree->intoTableSpaceName)));
                }
                else
                {
                        tablespaceId = GetDefaultTablespace();
                        /* note InvalidOid is OK in this case */
                }

                /* Parse and validate any reloptions */
                reloptions = transformRelOptions((Datum) 0,
                                                                                 parseTree->intoOptions,
                                                                                 true,
                                                                                 false);
                (void) heap_reloptions(RELKIND_RELATION, reloptions, true);

                /* Check permissions except when using the database's default */
                if (OidIsValid(tablespaceId))
                {
                        AclResult       aclresult;

                        aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
                                                                                           ACL_CREATE);

                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
                                                           get_tablespace_name(tablespaceId));
                }

                /*
                 * have to copy tupType to get rid of constraints
                 */
                tupdesc = CreateTupleDescCopy(tupType);

                intoRelationId = heap_create_with_catalog(intoName,
                                                                                                  namespaceId,
                                                                                                  tablespaceId,
                                                                                                  InvalidOid,
                                                                                                  GetUserId(),
                                                                                                  tupdesc,
                                                                                                  RELKIND_RELATION,
                                                                                                  false,
                                                                                                  true,
                                                                                                  0,
                                                                                                  parseTree->intoOnCommit,
                                                                                                  reloptions,
                                                                                                  allowSystemTableMods);

                FreeTupleDesc(tupdesc);

                /*
                 * Advance command counter so that the newly-created relation's
                 * catalog tuples will be visible to heap_open.
                 */
                CommandCounterIncrement();

                /*
                 * If necessary, create a TOAST table for the into relation. Note that
                 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
                 * that the TOAST table will be visible for insertion.
                 */
                AlterTableCreateToastTable(intoRelationId);

                /*
                 * And open the constructed table for writing.
                 */
                intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

                /* use_wal off requires rd_targblock be initially invalid */
                Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);

                /*
                 * We can skip WAL-logging the insertions, unless PITR is in use.
                 *
                 * Note that for a non-temp INTO table, this is safe only because we
                 * know that the catalog changes above will have been WAL-logged, and
                 * so RecordTransactionCommit will think it needs to WAL-log the
                 * eventual transaction commit.  Else the commit might be lost, even
                 * though all the data is safely fsync'd ...
                 */
                estate->es_into_relation_use_wal = XLogArchivingActive();
        }

        estate->es_into_relation_descriptor = intoRelationDesc;

        queryDesc->tupDesc = tupType;
        queryDesc->planstate = planstate;
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
static void
initResultRelInfo(ResultRelInfo *resultRelInfo,
                                  Index resultRelationIndex,
                                  List *rangeTable,
                                  CmdType operation,
                                  bool doInstrument)
{
        Oid                     resultRelationOid;
        Relation        resultRelationDesc;

        resultRelationOid = getrelid(resultRelationIndex, rangeTable);
        resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);

        switch (resultRelationDesc->rd_rel->relkind)
        {
                case RELKIND_SEQUENCE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change sequence \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_TOASTVALUE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change TOAST relation \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_VIEW:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change view \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
        }

        MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
        resultRelInfo->type = T_ResultRelInfo;
        resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
        resultRelInfo->ri_RelationDesc = resultRelationDesc;
        resultRelInfo->ri_NumIndices = 0;
        resultRelInfo->ri_IndexRelationDescs = NULL;
        resultRelInfo->ri_IndexRelationInfo = NULL;
        /* make a copy so as not to depend on relcache info not changing... */
        resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
        if (resultRelInfo->ri_TrigDesc)
        {
                int                     n = resultRelInfo->ri_TrigDesc->numtriggers;

                resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
                        palloc0(n * sizeof(FmgrInfo));
                if (doInstrument)
                        resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
                else
                        resultRelInfo->ri_TrigInstrument = NULL;
        }
        else
        {
                resultRelInfo->ri_TrigFunctions = NULL;
                resultRelInfo->ri_TrigInstrument = NULL;
        }
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;

        /*
         * If there are indices on the result relation, open them and save
         * descriptors in the result relation info, so that we can add new index
         * entries for the tuples we add/update.  We need not do this for a
         * DELETE, however, since deletion doesn't affect indexes.
         */
        if (resultRelationDesc->rd_rel->relhasindex &&
                operation != CMD_DELETE)
                ExecOpenIndices(resultRelInfo);
}

/*
 *              ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
        if (planstate->state->es_select_into)
        {
                *hasoids = planstate->state->es_into_oids;
                return true;
        }
        else
        {
                ResultRelInfo *ri = planstate->state->es_result_relation_info;

                if (ri != NULL)
                {
                        Relation        rel = ri->ri_RelationDesc;

                        if (rel != NULL)
                        {
                                *hasoids = rel->rd_rel->relhasoids;
                                return true;
                        }
                }
        }

        return false;
}
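
/*
 * Hypothetical usage sketch (not from the original sources): how a node's
 * initialization code might consult ExecContextForcesOids when deciding
 * whether its result tuples need space for an OID.  The real callers
 * elsewhere in the executor differ in detail; this only illustrates the
 * contract documented above.
 */
#ifdef NOT_USED
static bool
ExampleChooseHasOids(PlanState *planstate)
{
        bool            hasoids;

        if (ExecContextForcesOids(planstate, &hasoids))
                return hasoids;         /* the context dictates the choice */

        return false;                   /* otherwise we are free to omit OID space */
}
#endif   /* NOT_USED */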

/* ----------------------------------------------------------------
 *              ExecEndPlan
 *
 *              Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
void
ExecEndPlan(PlanState *planstate, EState *estate)
{
        ResultRelInfo *resultRelInfo;
        int                     i;
        ListCell   *l;

        /*
         * shut down any PlanQual processing we were doing
         */
        if (estate->es_evalPlanQual != NULL)
                EndEvalPlanQual(estate);

        /*
         * shut down the node-type-specific query processing
         */
        ExecEndNode(planstate);

        /*
         * destroy the executor "tuple" table.
         */
        ExecDropTupleTable(estate->es_tupleTable, true);
        estate->es_tupleTable = NULL;

        /*
         * close the result relation(s) if any, but hold locks until xact commit.
         */
        resultRelInfo = estate->es_result_relations;
        for (i = estate->es_num_result_relations; i > 0; i--)
        {
                /* Close indices and then the relation itself */
                ExecCloseIndices(resultRelInfo);
                heap_close(resultRelInfo->ri_RelationDesc, NoLock);
                resultRelInfo++;
        }

        /*
         * close the "into" relation if necessary, again keeping lock
         */
        if (estate->es_into_relation_descriptor != NULL)
        {
                /*
                 * If we skipped using WAL, and it's not a temp relation, we must
                 * force the relation down to disk before it's safe to commit the
                 * transaction.  This requires forcing out any dirty buffers and then
                 * doing a forced fsync.
                 */
                if (!estate->es_into_relation_use_wal &&
                        !estate->es_into_relation_descriptor->rd_istemp)
                {
                        FlushRelationBuffers(estate->es_into_relation_descriptor);
                        /* FlushRelationBuffers will have opened rd_smgr */
                        smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
                }

                heap_close(estate->es_into_relation_descriptor, NoLock);
        }

        /*
         * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
         */
        foreach(l, estate->es_rowMarks)
        {
                ExecRowMark *erm = lfirst(l);

                heap_close(erm->relation, NoLock);
        }
}

/* ----------------------------------------------------------------
 *              ExecutePlan
 *
 *              processes the query plan to retrieve 'numberTuples' tuples in the
 *              direction specified.
 *
 *              Retrieves all tuples if numberTuples is 0
 *
 *              result is either a slot containing the last tuple in the case
 *              of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
                        PlanState *planstate,
                        CmdType operation,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest)
{
        JunkFilter *junkfilter;
        TupleTableSlot *slot;
        ItemPointer tupleid = NULL;
        ItemPointerData tuple_ctid;
        long            current_tuple_count;
        TupleTableSlot *result;

        /*
         * initialize local variables
         */
        slot = NULL;
        current_tuple_count = 0;
        result = NULL;

        /*
         * Set the direction.
         */
        estate->es_direction = direction;

        /*
         * Process BEFORE EACH STATEMENT triggers
         */
        switch (operation)
        {
                case CMD_UPDATE:
                        ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_DELETE:
                        ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_INSERT:
                        ExecBSInsertTriggers(estate, estate->es_result_relation_info);
                        break;
                default:
                        /* do nothing */
                        break;
        }

        /*
         * Loop until we've processed the proper number of tuples from the plan.
         */

1132         for (;;)
1133         {
1134                 /* Reset the per-output-tuple exprcontext */
1135                 ResetPerTupleExprContext(estate);
1136
1137                 /*
1138                  * Execute the plan and obtain a tuple
1139                  */
1140 lnext:  ;
1141                 if (estate->es_useEvalPlan)
1142                 {
1143                         slot = EvalPlanQualNext(estate);
1144                         if (TupIsNull(slot))
1145                                 slot = ExecProcNode(planstate);
1146                 }
1147                 else
1148                         slot = ExecProcNode(planstate);
1149
1150                 /*
1151                  * if the tuple is null, then we assume there is nothing more to
1152                  * process so we just return null...
1153                  */
1154                 if (TupIsNull(slot))
1155                 {
1156                         result = NULL;
1157                         break;
1158                 }
1159
1160                 /*
1161                  * if we have a junk filter, then project a new tuple with the junk
1162                  * removed.
1163                  *
1164                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1165                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1166                  * because that tuple slot has the wrong descriptor.)
1167                  *
1168                  * Also, extract all the junk information we need.
1169                  */
1170                 if ((junkfilter = estate->es_junkFilter) != NULL)
1171                 {
1172                         Datum           datum;
1173                         bool            isNull;
1174
1175                         /*
1176                          * extract the 'ctid' junk attribute.
1177                          */
1178                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1179                         {
1180                                 if (!ExecGetJunkAttribute(junkfilter,
1181                                                                                   slot,
1182                                                                                   "ctid",
1183                                                                                   &datum,
1184                                                                                   &isNull))
1185                                         elog(ERROR, "could not find junk ctid column");
1186
1187                                 /* shouldn't ever get a null result... */
1188                                 if (isNull)
1189                                         elog(ERROR, "ctid is NULL");
1190
1191                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1192                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1193                                 tupleid = &tuple_ctid;
1194                         }
1195
1196                         /*
1197                          * Process any FOR UPDATE or FOR SHARE locking requested.
1198                          */
1199                         else if (estate->es_rowMarks != NIL)
1200                         {
1201                                 ListCell   *l;
1202
1203                 lmark:  ;
1204                                 foreach(l, estate->es_rowMarks)
1205                                 {
1206                                         ExecRowMark *erm = lfirst(l);
1207                                         HeapTupleData tuple;
1208                                         Buffer          buffer;
1209                                         ItemPointerData update_ctid;
1210                                         TransactionId update_xmax;
1211                                         TupleTableSlot *newSlot;
1212                                         LockTupleMode lockmode;
1213                                         HTSU_Result test;
1214
1215                                         if (!ExecGetJunkAttribute(junkfilter,
1216                                                                                           slot,
1217                                                                                           erm->resname,
1218                                                                                           &datum,
1219                                                                                           &isNull))
1220                                                 elog(ERROR, "could not find junk \"%s\" column",
1221                                                          erm->resname);
1222
1223                                         /* shouldn't ever get a null result... */
1224                                         if (isNull)
1225                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1226
1227                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1228
1229                                         if (erm->forUpdate)
1230                                                 lockmode = LockTupleExclusive;
1231                                         else
1232                                                 lockmode = LockTupleShared;
1233
1234                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1235                                                                                    &update_ctid, &update_xmax,
1236                                                                                    estate->es_snapshot->curcid,
1237                                                                                    lockmode, erm->noWait);
1238                                         ReleaseBuffer(buffer);
1239                                         switch (test)
1240                                         {
1241                                                 case HeapTupleSelfUpdated:
1242                                                         /* treat it as deleted; do not process */
1243                                                         goto lnext;
1244
1245                                                 case HeapTupleMayBeUpdated:
1246                                                         break;
1247
1248                                                 case HeapTupleUpdated:
1249                                                         if (IsXactIsoLevelSerializable)
1250                                                                 ereport(ERROR,
1251                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1252                                                                   errmsg("could not serialize access due to concurrent update")));
1253                                                         if (!ItemPointerEquals(&update_ctid,
1254                                                                                                    &tuple.t_self))
1255                                                         {
1256                                                                 /* updated, so look at updated version */
1257                                                                 newSlot = EvalPlanQual(estate,
1258                                                                                                            erm->rti,
1259                                                                                                            &update_ctid,
1260                                                                                                            update_xmax,
1261                                                                                                            estate->es_snapshot->curcid);
1262                                                                 if (!TupIsNull(newSlot))
1263                                                                 {
1264                                                                         slot = newSlot;
1265                                                                         estate->es_useEvalPlan = true;
1266                                                                         goto lmark;
1267                                                                 }
1268                                                         }
1269
1270                                                         /*
1271                                                          * if the tuple was deleted, or PlanQual failed for the
1272                                                          * updated tuple, we must not return this tuple!
1273                                                          */
1274                                                         goto lnext;
1275
1276                                                 default:
1277                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1278                                                                  test);
1279                                                         return NULL;
1280                                         }
1281                                 }
1282                         }
1283
1284                         /*
1285                          * Finally create a new "clean" tuple with all junk attributes
1286                          * removed
1287                          */
1288                         slot = ExecFilterJunk(junkfilter, slot);
1289                 }
1290
1291                 /*
1292                  * now that we have a tuple, do the appropriate thing with it:
1293                  * return it to the user, add it to a relation somewhere, delete it
1294                  * from a relation, or modify some of its attributes.
1295                  */
1296                 switch (operation)
1297                 {
1298                         case CMD_SELECT:
1299                                 ExecSelect(slot,        /* slot containing tuple */
1300                                                    dest,        /* destination's tuple-receiver obj */
1301                                                    estate);
1302                                 result = slot;
1303                                 break;
1304
1305                         case CMD_INSERT:
1306                                 ExecInsert(slot, tupleid, estate);
1307                                 result = NULL;
1308                                 break;
1309
1310                         case CMD_DELETE:
1311                                 ExecDelete(slot, tupleid, estate);
1312                                 result = NULL;
1313                                 break;
1314
1315                         case CMD_UPDATE:
1316                                 ExecUpdate(slot, tupleid, estate);
1317                                 result = NULL;
1318                                 break;
1319
1320                         default:
1321                                 elog(ERROR, "unrecognized operation code: %d",
1322                                          (int) operation);
1323                                 result = NULL;
1324                                 break;
1325                 }
1326
1327                 /*
1328                  * check our tuple count: if we've processed the requested number,
1329                  * quit; otherwise loop again and process more tuples.  Zero
1330                  * numberTuples means no limit.
1331                  */
1332                 current_tuple_count++;
1333                 if (numberTuples && numberTuples == current_tuple_count)
1334                         break;
1335         }
1336
1337         /*
1338          * Process AFTER EACH STATEMENT triggers
1339          */
1340         switch (operation)
1341         {
1342                 case CMD_UPDATE:
1343                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1344                         break;
1345                 case CMD_DELETE:
1346                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1347                         break;
1348                 case CMD_INSERT:
1349                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1350                         break;
1351                 default:
1352                         /* do nothing */
1353                         break;
1354         }
1355
1356         /*
1357          * here, result is either a slot containing a tuple in the case of a
1358          * SELECT or NULL otherwise.
1359          */
1360         return result;
1361 }
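
/*
 * In short, each iteration of the loop above: locks any FOR UPDATE/SHARE
 * target rows with heap_lock_tuple (looping through EvalPlanQual when a row
 * was concurrently updated), strips junk attributes with ExecFilterJunk, and
 * then dispatches on the operation: SELECT hands the tuple to the
 * destination, while INSERT/UPDATE/DELETE call the per-row routines below.
 * The loop stops once numberTuples rows have been processed; zero means run
 * to completion.
 */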
1362
1363 /* ----------------------------------------------------------------
1364  *              ExecSelect
1365  *
1366  *              SELECTs are easy: we just pass the tuple to the destination's
1367  *              receive function.  The only complexity is a "SELECT INTO",
1368  *              in which case we also insert the tuple into the target
1369  *              relation (note: this is a newly created relation, so we
1370  *              don't need to worry about indexes or locks).
1371  * ----------------------------------------------------------------
1372  */
1373 static void
1374 ExecSelect(TupleTableSlot *slot,
1375                    DestReceiver *dest,
1376                    EState *estate)
1377 {
1378         /*
1379          * insert the tuple into the "into relation"
1380          *
1381          * XXX this probably ought to be replaced by a separate destination
1382          */
1383         if (estate->es_into_relation_descriptor != NULL)
1384         {
1385                 HeapTuple       tuple;
1386
1387                 tuple = ExecCopySlotTuple(slot);
1388                 heap_insert(estate->es_into_relation_descriptor, tuple,
1389                                         estate->es_snapshot->curcid,
1390                                         estate->es_into_relation_use_wal,
1391                                         false);         /* never any point in using FSM */
1392                 /* we know there are no indexes to update */
1393                 heap_freetuple(tuple);
1394                 IncrAppended();
1395         }
1396
1397         /*
1398          * send the tuple to the destination
1399          */
1400         (*dest->receiveSlot) (slot, dest);
1401         IncrRetrieved();
1402         (estate->es_processed)++;
1403 }
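
/*
 * The receiveSlot call above is an ordinary function-pointer dispatch through
 * the DestReceiver object.  A minimal stand-alone sketch of the same shape,
 * using illustrative names only (ExampleReceiver, example_receive_fn and
 * example_dispatch are not PostgreSQL types or functions):
 */
struct ExampleReceiver;
typedef void (*example_receive_fn) (int value, struct ExampleReceiver *self);

typedef struct ExampleReceiver
{
        example_receive_fn receive;             /* plays the role of receiveSlot */
} ExampleReceiver;

static void
example_dispatch(ExampleReceiver *r, int value)
{
        /* same call shape as (*dest->receiveSlot) (slot, dest) above */
        (*r->receive) (value, r);
}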
1404
1405 /* ----------------------------------------------------------------
1406  *              ExecInsert
1407  *
1408  *              INSERTs are trickier: we have to insert the tuple into
1409  *              the base relation and insert appropriate tuples into the
1410  *              index relations.
1411  * ----------------------------------------------------------------
1412  */
1413 static void
1414 ExecInsert(TupleTableSlot *slot,
1415                    ItemPointer tupleid,
1416                    EState *estate)
1417 {
1418         HeapTuple       tuple;
1419         ResultRelInfo *resultRelInfo;
1420         Relation        resultRelationDesc;
1421         Oid                     newId;
1422
1423         /*
1424          * get the heap tuple out of the tuple table slot, making sure we have a
1425          * writable copy
1426          */
1427         tuple = ExecMaterializeSlot(slot);
1428
1429         /*
1430          * get information on the (current) result relation
1431          */
1432         resultRelInfo = estate->es_result_relation_info;
1433         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1434
1435         /* BEFORE ROW INSERT Triggers */
1436         if (resultRelInfo->ri_TrigDesc &&
1437                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1438         {
1439                 HeapTuple       newtuple;
1440
1441                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1442
1443                 if (newtuple == NULL)   /* "do nothing" */
1444                         return;
1445
1446                 if (newtuple != tuple)  /* modified by Trigger(s) */
1447                 {
1448                         /*
1449                          * Put the modified tuple into a slot for convenience of routines
1450                          * below.  We assume the tuple was allocated in per-tuple memory
1451                          * context, and therefore will go away by itself. The tuple table
1452                          * slot should not try to clear it.
1453                          */
1454                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1455
1456                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1457                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1458                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1459                         slot = newslot;
1460                         tuple = newtuple;
1461                 }
1462         }
1463
1464         /*
1465          * Check the constraints of the tuple
1466          */
1467         if (resultRelationDesc->rd_att->constr)
1468                 ExecConstraints(resultRelInfo, slot, estate);
1469
1470         /*
1471          * insert the tuple
1472          *
1473          * Note: heap_insert returns the tid (location) of the new tuple in the
1474          * t_self field.
1475          */
1476         newId = heap_insert(resultRelationDesc, tuple,
1477                                                 estate->es_snapshot->curcid,
1478                                                 true, true);
1479
1480         IncrAppended();
1481         (estate->es_processed)++;
1482         estate->es_lastoid = newId;
1483         setLastTid(&(tuple->t_self));
1484
1485         /*
1486          * insert index entries for tuple
1487          */
1488         if (resultRelInfo->ri_NumIndices > 0)
1489                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1490
1491         /* AFTER ROW INSERT Triggers */
1492         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1493 }
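
/*
 * In short, the insert path above is: fire BEFORE ROW INSERT triggers (which
 * may suppress the insert or substitute a modified tuple), check NOT NULL and
 * CHECK constraints, heap_insert the tuple (its TID comes back in t_self and
 * its OID in newId), add any index entries, and finally call
 * ExecARInsertTriggers for the AFTER ROW INSERT triggers.
 */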
1494
1495 /* ----------------------------------------------------------------
1496  *              ExecDelete
1497  *
1498  *              DELETE is like UPDATE, except that we delete the tuple and no
1499  *              index modifications are needed
1500  * ----------------------------------------------------------------
1501  */
1502 static void
1503 ExecDelete(TupleTableSlot *slot,
1504                    ItemPointer tupleid,
1505                    EState *estate)
1506 {
1507         ResultRelInfo *resultRelInfo;
1508         Relation        resultRelationDesc;
1509         HTSU_Result result;
1510         ItemPointerData update_ctid;
1511         TransactionId update_xmax;
1512
1513         /*
1514          * get information on the (current) result relation
1515          */
1516         resultRelInfo = estate->es_result_relation_info;
1517         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1518
1519         /* BEFORE ROW DELETE Triggers */
1520         if (resultRelInfo->ri_TrigDesc &&
1521                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1522         {
1523                 bool            dodelete;
1524
1525                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1526                                                                                 estate->es_snapshot->curcid);
1527
1528                 if (!dodelete)                  /* "do nothing" */
1529                         return;
1530         }
1531
1532         /*
1533          * delete the tuple
1534          *
1535          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1536          * the row to be deleted is visible to that snapshot, and throw a can't-
1537          * serialize error if not.      This is a special-case behavior needed for
1538          * referential integrity updates in serializable transactions.
1539          */
1540 ldelete:;
1541         result = heap_delete(resultRelationDesc, tupleid,
1542                                                  &update_ctid, &update_xmax,
1543                                                  estate->es_snapshot->curcid,
1544                                                  estate->es_crosscheck_snapshot,
1545                                                  true /* wait for commit */ );
1546         switch (result)
1547         {
1548                 case HeapTupleSelfUpdated:
1549                         /* already deleted by self; nothing to do */
1550                         return;
1551
1552                 case HeapTupleMayBeUpdated:
1553                         break;
1554
1555                 case HeapTupleUpdated:
1556                         if (IsXactIsoLevelSerializable)
1557                                 ereport(ERROR,
1558                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1559                                                  errmsg("could not serialize access due to concurrent update")));
1560                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1561                         {
1562                                 TupleTableSlot *epqslot;
1563
1564                                 epqslot = EvalPlanQual(estate,
1565                                                                            resultRelInfo->ri_RangeTableIndex,
1566                                                                            &update_ctid,
1567                                                                            update_xmax,
1568                                                                            estate->es_snapshot->curcid);
1569                                 if (!TupIsNull(epqslot))
1570                                 {
1571                                         *tupleid = update_ctid;
1572                                         goto ldelete;
1573                                 }
1574                         }
1575                         /* tuple already deleted; nothing to do */
1576                         return;
1577
1578                 default:
1579                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1580                         return;
1581         }
1582
1583         IncrDeleted();
1584         (estate->es_processed)++;
1585
1586         /*
1587          * Note: Normally one would think that we have to delete index tuples
1588          * associated with the heap tuple now...
1589          *
1590          * ... but in POSTGRES, we have no need to do this because VACUUM will
1591          * take care of it later.  We can't delete index tuples immediately
1592          * anyway, since the tuple is still visible to other transactions.
1593          */
1594
1595         /* AFTER ROW DELETE Triggers */
1596         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1597 }
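
/*
 * When heap_delete reports HeapTupleUpdated under READ COMMITTED, the code
 * above distinguishes two cases: if the row was outright deleted (update_ctid
 * still equals the original TID) the DELETE is silently skipped, while if it
 * was updated, EvalPlanQual rechecks the quals against the newest version; a
 * non-null result means the new version still qualifies, so the delete is
 * retried on update_ctid via the ldelete label.  Index entries are never
 * removed here; VACUUM reclaims them later.
 */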
1598
1599 /* ----------------------------------------------------------------
1600  *              ExecUpdate
1601  *
1602  *              note: we can't run UPDATE queries with transactions
1603  *              off, because an UPDATE is actually a DELETE plus an INSERT,
1604  *              and our scan would mistakenly loop forever, updating the
1605  *              tuple it just inserted.  This should be fixed, but until it
1606  *              is we don't want to risk getting stuck in an infinite loop
1607  *              that corrupts the database.
1608  * ----------------------------------------------------------------
1609  */
1610 static void
1611 ExecUpdate(TupleTableSlot *slot,
1612                    ItemPointer tupleid,
1613                    EState *estate)
1614 {
1615         HeapTuple       tuple;
1616         ResultRelInfo *resultRelInfo;
1617         Relation        resultRelationDesc;
1618         HTSU_Result result;
1619         ItemPointerData update_ctid;
1620         TransactionId update_xmax;
1621
1622         /*
1623          * abort the operation if not running transactions
1624          */
1625         if (IsBootstrapProcessingMode())
1626                 elog(ERROR, "cannot UPDATE during bootstrap");
1627
1628         /*
1629          * get the heap tuple out of the tuple table slot, making sure we have a
1630          * writable copy
1631          */
1632         tuple = ExecMaterializeSlot(slot);
1633
1634         /*
1635          * get information on the (current) result relation
1636          */
1637         resultRelInfo = estate->es_result_relation_info;
1638         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1639
1640         /* BEFORE ROW UPDATE Triggers */
1641         if (resultRelInfo->ri_TrigDesc &&
1642                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1643         {
1644                 HeapTuple       newtuple;
1645
1646                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1647                                                                                 tupleid, tuple,
1648                                                                                 estate->es_snapshot->curcid);
1649
1650                 if (newtuple == NULL)   /* "do nothing" */
1651                         return;
1652
1653                 if (newtuple != tuple)  /* modified by Trigger(s) */
1654                 {
1655                         /*
1656                          * Put the modified tuple into a slot for convenience of routines
1657                          * below.  We assume the tuple was allocated in per-tuple memory
1658                          * context, and therefore will go away by itself. The tuple table
1659                          * slot should not try to clear it.
1660                          */
1661                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1662
1663                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1664                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1665                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1666                         slot = newslot;
1667                         tuple = newtuple;
1668                 }
1669         }
1670
1671         /*
1672          * Check the constraints of the tuple
1673          *
1674          * If we generate a new candidate tuple after EvalPlanQual testing, we
1675          * must loop back here and recheck constraints.  (We don't need to redo
1676          * triggers, however.  If there are any BEFORE triggers then trigger.c
1677          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1678          * need to do them again.)
1679          */
1680 lreplace:;
1681         if (resultRelationDesc->rd_att->constr)
1682                 ExecConstraints(resultRelInfo, slot, estate);
1683
1684         /*
1685          * replace the heap tuple
1686          *
1687          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1688          * the row to be updated is visible to that snapshot, and throw a can't-
1689          * serialize error if not.      This is a special-case behavior needed for
1690          * referential integrity updates in serializable transactions.
1691          */
1692         result = heap_update(resultRelationDesc, tupleid, tuple,
1693                                                  &update_ctid, &update_xmax,
1694                                                  estate->es_snapshot->curcid,
1695                                                  estate->es_crosscheck_snapshot,
1696                                                  true /* wait for commit */ );
1697         switch (result)
1698         {
1699                 case HeapTupleSelfUpdated:
1700                         /* already deleted by self; nothing to do */
1701                         return;
1702
1703                 case HeapTupleMayBeUpdated:
1704                         break;
1705
1706                 case HeapTupleUpdated:
1707                         if (IsXactIsoLevelSerializable)
1708                                 ereport(ERROR,
1709                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1710                                                  errmsg("could not serialize access due to concurrent update")));
1711                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1712                         {
1713                                 TupleTableSlot *epqslot;
1714
1715                                 epqslot = EvalPlanQual(estate,
1716                                                                            resultRelInfo->ri_RangeTableIndex,
1717                                                                            &update_ctid,
1718                                                                            update_xmax,
1719                                                                            estate->es_snapshot->curcid);
1720                                 if (!TupIsNull(epqslot))
1721                                 {
1722                                         *tupleid = update_ctid;
1723                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1724                                         tuple = ExecMaterializeSlot(slot);
1725                                         goto lreplace;
1726                                 }
1727                         }
1728                         /* tuple already deleted; nothing to do */
1729                         return;
1730
1731                 default:
1732                         elog(ERROR, "unrecognized heap_update status: %u", result);
1733                         return;
1734         }
1735
1736         IncrReplaced();
1737         (estate->es_processed)++;
1738
1739         /*
1740          * Note: instead of having to update the old index tuples associated with
1741          * the heap tuple, all we do is form and insert new index tuples. This is
1742          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1743          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1744          * here is insert new index tuples.  -cim 9/27/89
1745          */
1746
1747         /*
1748          * insert index entries for tuple
1749          *
1750          * Note: heap_update returns the tid (location) of the new tuple in the
1751          * t_self field.
1752          */
1753         if (resultRelInfo->ri_NumIndices > 0)
1754                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1755
1756         /* AFTER ROW UPDATE Triggers */
1757         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1758 }
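
/*
 * Unlike ExecDelete, a successful EvalPlanQual here does not simply retry:
 * the returned candidate tuple must first be passed through es_junkFilter and
 * re-materialized, and the constraints are rechecked by looping back to
 * lreplace.  Because heap_update always creates a brand-new tuple version,
 * new index entries are inserted for it (when the relation has indexes); the
 * old version's entries are left for VACUUM, as in ExecDelete.
 */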
1759
1760 static const char *
1761 ExecRelCheck(ResultRelInfo *resultRelInfo,
1762                          TupleTableSlot *slot, EState *estate)
1763 {
1764         Relation        rel = resultRelInfo->ri_RelationDesc;
1765         int                     ncheck = rel->rd_att->constr->num_check;
1766         ConstrCheck *check = rel->rd_att->constr->check;
1767         ExprContext *econtext;
1768         MemoryContext oldContext;
1769         List       *qual;
1770         int                     i;
1771
1772         /*
1773          * If first time through for this result relation, build expression
1774          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1775          * memory context so they'll survive throughout the query.
1776          */
1777         if (resultRelInfo->ri_ConstraintExprs == NULL)
1778         {
1779                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1780                 resultRelInfo->ri_ConstraintExprs =
1781                         (List **) palloc(ncheck * sizeof(List *));
1782                 for (i = 0; i < ncheck; i++)
1783                 {
1784                         /* ExecQual wants implicit-AND form */
1785                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1786                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1787                                 ExecPrepareExpr((Expr *) qual, estate);
1788                 }
1789                 MemoryContextSwitchTo(oldContext);
1790         }
1791
1792         /*
1793          * We will use the EState's per-tuple context for evaluating constraint
1794          * expressions (creating it if it's not already there).
1795          */
1796         econtext = GetPerTupleExprContext(estate);
1797
1798         /* Arrange for econtext's scan tuple to be the tuple under test */
1799         econtext->ecxt_scantuple = slot;
1800
1801         /* And evaluate the constraints */
1802         for (i = 0; i < ncheck; i++)
1803         {
1804                 qual = resultRelInfo->ri_ConstraintExprs[i];
1805
1806                 /*
1807                  * NOTE: SQL92 specifies that a NULL result from a constraint
1808                  * expression is not to be treated as a failure.  Therefore, tell
1809                  * ExecQual to return TRUE for NULL.
1810                  */
1811                 if (!ExecQual(qual, econtext, true))
1812                         return check[i].ccname;
1813         }
1814
1815         /* NULL result means no error */
1816         return NULL;
1817 }
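
/*
 * The resultForNull = true argument passed to ExecQual above encodes the SQL
 * rule that an unknown (NULL) CHECK result does not reject the row: only an
 * explicit FALSE is a violation.  A minimal stand-alone sketch of that rule,
 * using illustrative names only (TriBool and check_result_accepts_row are not
 * PostgreSQL types or functions):
 */
typedef enum TriBool
{
        TRI_FALSE,
        TRI_TRUE,
        TRI_NULL                                /* SQL "unknown" */
} TriBool;

static int
check_result_accepts_row(TriBool check_result)
{
        /* TRUE and NULL both accept the row; only FALSE rejects it */
        return check_result != TRI_FALSE;
}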
1818
1819 void
1820 ExecConstraints(ResultRelInfo *resultRelInfo,
1821                                 TupleTableSlot *slot, EState *estate)
1822 {
1823         Relation        rel = resultRelInfo->ri_RelationDesc;
1824         TupleConstr *constr = rel->rd_att->constr;
1825
1826         Assert(constr);
1827
1828         if (constr->has_not_null)
1829         {
1830                 int                     natts = rel->rd_att->natts;
1831                 int                     attrChk;
1832
1833                 for (attrChk = 1; attrChk <= natts; attrChk++)
1834                 {
1835                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1836                                 slot_attisnull(slot, attrChk))
1837                                 ereport(ERROR,
1838                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1839                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1840                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1841                 }
1842         }
1843
1844         if (constr->num_check > 0)
1845         {
1846                 const char *failed;
1847
1848                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1849                         ereport(ERROR,
1850                                         (errcode(ERRCODE_CHECK_VIOLATION),
1851                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1852                                                         RelationGetRelationName(rel), failed)));
1853         }
1854 }
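
/*
 * For example (the table is hypothetical): given
 *
 *              CREATE TABLE t (a int NOT NULL, b int CHECK (b > 0));
 *
 * inserting (NULL, 1) is rejected by the has_not_null loop above with
 * ERRCODE_NOT_NULL_VIOLATION; inserting (1, NULL) is accepted, because the
 * CHECK expression evaluates to NULL and ExecRelCheck treats that as a pass;
 * and inserting (1, -1) fails with ERRCODE_CHECK_VIOLATION.
 */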
1855
1856 /*
1857  * Check a modified tuple to see if we want to process its updated version
1858  * under READ COMMITTED rules.
1859  *
1860  * See backend/executor/README for some info about how this works.
1861  *
1862  *      estate - executor state data
1863  *      rti - rangetable index of table containing tuple
1864  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1865  *      priorXmax - t_xmax from the outdated tuple
1866  *      curCid - command ID of current command of my transaction
1867  *
1868  * *tid is also an output parameter: it's modified to hold the TID of the
1869  * latest version of the tuple (note this may be changed even on failure)
1870  *
1871  * Returns a slot containing the new candidate update/delete tuple, or
1872  * NULL if we determine we shouldn't process the row.
1873  */
1874 TupleTableSlot *
1875 EvalPlanQual(EState *estate, Index rti,
1876                          ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1877 {
1878         evalPlanQual *epq;
1879         EState     *epqstate;
1880         Relation        relation;
1881         HeapTupleData tuple;
1882         HeapTuple       copyTuple = NULL;
1883         bool            endNode;
1884
1885         Assert(rti != 0);
1886
1887         /*
1888          * find relation containing target tuple
1889          */
1890         if (estate->es_result_relation_info != NULL &&
1891                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1892                 relation = estate->es_result_relation_info->ri_RelationDesc;
1893         else
1894         {
1895                 ListCell   *l;
1896
1897                 relation = NULL;
1898                 foreach(l, estate->es_rowMarks)
1899                 {
1900                         if (((ExecRowMark *) lfirst(l))->rti == rti)
1901                         {
1902                                 relation = ((ExecRowMark *) lfirst(l))->relation;
1903                                 break;
1904                         }
1905                 }
1906                 if (relation == NULL)
1907                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1908         }
1909
1910         /*
1911          * fetch tid tuple
1912          *
1913          * Loop here to deal with updated or busy tuples
1914          */
1915         tuple.t_self = *tid;
1916         for (;;)
1917         {
1918                 Buffer          buffer;
1919
1920                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
1921                 {
1922                         /*
1923                          * If xmin isn't what we're expecting, the slot must have been
1924                          * recycled and reused for an unrelated tuple.  This implies that
1925                          * the latest version of the row was deleted, so we need do
1926                          * nothing.  (Should be safe to examine xmin without getting
1927                          * buffer's content lock, since xmin never changes in an existing
1928                          * tuple.)
1929                          */
1930                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1931                                                                          priorXmax))
1932                         {
1933                                 ReleaseBuffer(buffer);
1934                                 return NULL;
1935                         }
1936
1937                         /* otherwise xmin should not be dirty... */
1938                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1939                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1940
1941                         /*
1942                          * If the tuple is being updated by another transaction, we have
1943                          * to wait for its commit/abort.
1944                          */
1945                         if (TransactionIdIsValid(SnapshotDirty->xmax))
1946                         {
1947                                 ReleaseBuffer(buffer);
1948                                 XactLockTableWait(SnapshotDirty->xmax);
1949                                 continue;               /* loop back to repeat heap_fetch */
1950                         }
1951
1952                         /*
1953                          * If tuple was inserted by our own transaction, we have to check
1954                          * cmin against curCid: cmin >= curCid means our command cannot
1955                          * see the tuple, so we should ignore it.  Without this we are
1956                          * open to the "Halloween problem" of indefinitely re-updating
1957                          * the same tuple.  (We need not check cmax because
1958                          * HeapTupleSatisfiesDirty will consider a tuple deleted by
1959                          * our transaction dead, regardless of cmax.)  We just checked
1960                          * that priorXmax == xmin, so we can test that variable instead
1961                          * of doing HeapTupleHeaderGetXmin again.
1962                          */
1963                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1964                                 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
1965                         {
1966                                 ReleaseBuffer(buffer);
1967                                 return NULL;
1968                         }
1969
1970                         /*
1971                          * We got the tuple; now copy it for use by the recheck query.
1972                          */
1973                         copyTuple = heap_copytuple(&tuple);
1974                         ReleaseBuffer(buffer);
1975                         break;
1976                 }
1977
1978                 /*
1979                  * If the referenced slot was actually empty, the latest version of
1980                  * the row must have been deleted, so we need do nothing.
1981                  */
1982                 if (tuple.t_data == NULL)
1983                 {
1984                         ReleaseBuffer(buffer);
1985                         return NULL;
1986                 }
1987
1988                 /*
1989                  * As above, if xmin isn't what we're expecting, do nothing.
1990                  */
1991                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1992                                                                  priorXmax))
1993                 {
1994                         ReleaseBuffer(buffer);
1995                         return NULL;
1996                 }
1997
1998                 /*
1999                  * If we get here, the tuple was found but failed SnapshotDirty.
2000                  * Assuming the xmin is either a committed xact or our own xact (as it
2001                  * certainly should be if we're trying to modify the tuple), this must
2002                  * mean that the row was updated or deleted by either a committed xact
2003                  * or our own xact.  If it was deleted, we can ignore it; if it was
2004                  * updated then chain up to the next version and repeat the whole
2005                  * test.
2006                  *
2007                  * As above, it should be safe to examine xmax and t_ctid without the
2008                  * buffer content lock, because they can't be changing.
2009                  */
2010                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2011                 {
2012                         /* deleted, so forget about it */
2013                         ReleaseBuffer(buffer);
2014                         return NULL;
2015                 }
2016
2017                 /* updated, so look at the updated row */
2018                 tuple.t_self = tuple.t_data->t_ctid;
2019                 /* updated row should have xmin matching this xmax */
2020                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2021                 ReleaseBuffer(buffer);
2022                 /* loop back to fetch next in chain */
2023         }
2024
2025         /*
2026          * For UPDATE/DELETE we have to return the tid of the actual row we're
2027          * executing PQ for.
2028          */
2029         *tid = tuple.t_self;
2030
2031         /*
2032          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2033          */
2034         epq = estate->es_evalPlanQual;
2035         endNode = true;
2036
2037         if (epq != NULL && epq->rti == 0)
2038         {
2039                 /* Top PQ stack entry is idle, so re-use it */
2040                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2041                 epq->rti = rti;
2042                 endNode = false;
2043         }
2044
2045         /*
2046          * If this request is for a different RTE (call it Ra), check whether
2047          * PlanQual was already requested for Ra.  If so, Ra's row has been
2048          * updated again, so we must restart the old execution for Ra and
2049          * discard everything done since it was suspended.
2050          */
2051         if (epq != NULL && epq->rti != rti &&
2052                 epq->estate->es_evTuple[rti - 1] != NULL)
2053         {
2054                 do
2055                 {
2056                         evalPlanQual *oldepq;
2057
2058                         /* stop execution */
2059                         EvalPlanQualStop(epq);
2060                         /* pop previous PlanQual from the stack */
2061                         oldepq = epq->next;
2062                         Assert(oldepq && oldepq->rti != 0);
2063                         /* push current PQ to freePQ stack */
2064                         oldepq->free = epq;
2065                         epq = oldepq;
2066                         estate->es_evalPlanQual = epq;
2067                 } while (epq->rti != rti);
2068         }
2069
2070         /*
2071          * If we are requested for another RTE, we have to suspend execution of
2072          * the current PlanQual and start execution of a new one.
2073          */
2074         if (epq == NULL || epq->rti != rti)
2075         {
2076                 /* try to reuse plan used previously */
2077                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2078
2079                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2080                 {
2081                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2082                         newepq->free = NULL;
2083                         newepq->estate = NULL;
2084                         newepq->planstate = NULL;
2085                 }
2086                 else
2087                 {
2088                         /* recycle previously used PlanQual */
2089                         Assert(newepq->estate == NULL);
2090                         epq->free = NULL;
2091                 }
2092                 /* push current PQ to the stack */
2093                 newepq->next = epq;
2094                 epq = newepq;
2095                 estate->es_evalPlanQual = epq;
2096                 epq->rti = rti;
2097                 endNode = false;
2098         }
2099
2100         Assert(epq->rti == rti);
2101
2102         /*
2103          * Ok - we're requested for the same RTE.  Unfortunately we still have to
2104          * end and restart execution of the plan, because ExecReScan wouldn't
2105          * ensure that upper plan nodes would reset themselves.  We could make
2106          * that work if insertion of the target tuple were integrated with the
2107          * Param mechanism somehow, so that the upper plan nodes know that their
2108          * children's outputs have changed.
2109          *
2110          * Note that the stack of free evalPlanQual nodes is quite useless at the
2111          * moment, since it only saves us from pallocing/releasing the
2112          * evalPlanQual nodes themselves.  But it will be useful once we implement
2113          * ReScan instead of end/restart for re-using PlanQual nodes.
2114          */
2115         if (endNode)
2116         {
2117                 /* stop execution */
2118                 EvalPlanQualStop(epq);
2119         }
2120
2121         /*
2122          * Initialize new recheck query.
2123          *
2124          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2125          * instead copy down changeable state from the top plan (including
2126          * es_result_relation_info, es_junkFilter) and reset locally changeable
2127          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2128          */
2129         EvalPlanQualStart(epq, estate, epq->next);
2130
2131         /*
2132          * free the old tuple for this RTE, if any, and store the target tuple
2133          * where the relation's scan node will see it
2134          */
2135         epqstate = epq->estate;
2136         if (epqstate->es_evTuple[rti - 1] != NULL)
2137                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2138         epqstate->es_evTuple[rti - 1] = copyTuple;
2139
2140         return EvalPlanQualNext(estate);
2141 }
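
/*
 * A worked example of the READ COMMITTED recheck above (the table and values
 * are hypothetical, used only for illustration).  Suppose session A runs
 *
 *              UPDATE accounts SET balance = balance - 100
 *              WHERE id = 1 AND balance >= 100;
 *
 * while session B has already updated and committed a new version of that
 * row.  A's heap_update (or heap_delete/heap_lock_tuple) reports
 * HeapTupleUpdated with update_ctid pointing at B's new version.  The caller
 * then invokes EvalPlanQual, which follows the t_ctid chain to the latest
 * committed version, stores a copy of it in es_evTuple for this RTE, and
 * re-runs the plan with that tuple substituted for the scan of the RTE.  If
 * the WHERE clause still holds, a candidate tuple comes back and the caller
 * retries its operation against the new TID; if not, NULL is returned and the
 * row is skipped.  Under SERIALIZABLE the callers instead raise "could not
 * serialize access due to concurrent update", as seen above.
 */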
2142
2143 static TupleTableSlot *
2144 EvalPlanQualNext(EState *estate)
2145 {
2146         evalPlanQual *epq = estate->es_evalPlanQual;
2147         MemoryContext oldcontext;
2148         TupleTableSlot *slot;
2149
2150         Assert(epq->rti != 0);
2151
2152 lpqnext:;
2153         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2154         slot = ExecProcNode(epq->planstate);
2155         MemoryContextSwitchTo(oldcontext);
2156
2157         /*
2158          * No more tuples for this PQ.  Continue with the previous one.
2159          */
2160         if (TupIsNull(slot))
2161         {
2162                 evalPlanQual *oldepq;
2163
2164                 /* stop execution */
2165                 EvalPlanQualStop(epq);
2166                 /* pop old PQ from the stack */
2167                 oldepq = epq->next;
2168                 if (oldepq == NULL)
2169                 {
2170                         /* this is the first (oldest) PQ - mark as free */
2171                         epq->rti = 0;
2172                         estate->es_useEvalPlan = false;
2173                         /* and continue Query execution */
2174                         return NULL;
2175                 }
2176                 Assert(oldepq->rti != 0);
2177                 /* push current PQ to freePQ stack */
2178                 oldepq->free = epq;
2179                 epq = oldepq;
2180                 estate->es_evalPlanQual = epq;
2181                 goto lpqnext;
2182         }
2183
2184         return slot;
2185 }
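
/*
 * EvalPlanQualNext pulls the next tuple from the innermost recheck plan.
 * When that plan is exhausted, the level is stopped and pushed onto the free
 * list and the loop continues with the enclosing recheck level; once the
 * bottom of the stack is reached (epq->next == NULL), the entry is marked
 * idle (rti = 0) and NULL is returned so the main plan can resume.
 */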
2186
2187 static void
2188 EndEvalPlanQual(EState *estate)
2189 {
2190         evalPlanQual *epq = estate->es_evalPlanQual;
2191
2192         if (epq->rti == 0)                      /* plans already shut down */
2193         {
2194                 Assert(epq->next == NULL);
2195                 return;
2196         }
2197
2198         for (;;)
2199         {
2200                 evalPlanQual *oldepq;
2201
2202                 /* stop execution */
2203                 EvalPlanQualStop(epq);
2204                 /* pop old PQ from the stack */
2205                 oldepq = epq->next;
2206                 if (oldepq == NULL)
2207                 {
2208                         /* this is the first (oldest) PQ - mark as free */
2209                         epq->rti = 0;
2210                         estate->es_useEvalPlan = false;
2211                         break;
2212                 }
2213                 Assert(oldepq->rti != 0);
2214                 /* push current PQ to freePQ stack */
2215                 oldepq->free = epq;
2216                 epq = oldepq;
2217                 estate->es_evalPlanQual = epq;
2218         }
2219 }
2220
2221 /*
2222  * Start execution of one level of PlanQual.
2223  *
2224  * This is a cut-down version of ExecutorStart(): we copy some state from
2225  * the top-level estate rather than initializing it fresh.
2226  */
2227 static void
2228 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2229 {
2230         EState     *epqstate;
2231         int                     rtsize;
2232         MemoryContext oldcontext;
2233
2234         rtsize = list_length(estate->es_range_table);
2235
2236         epq->estate = epqstate = CreateExecutorState();
2237
2238         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2239
2240         /*
2241          * The epqstates share the top query's copy of unchanging state such as
2242          * the snapshot, rangetable, result-rel info, and external Param info.
2243          * They need their own copies of local state, including a tuple table,
2244          * es_param_exec_vals, etc.
2245          */
2246         epqstate->es_direction = ForwardScanDirection;
2247         epqstate->es_snapshot = estate->es_snapshot;
2248         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2249         epqstate->es_range_table = estate->es_range_table;
2250         epqstate->es_result_relations = estate->es_result_relations;
2251         epqstate->es_num_result_relations = estate->es_num_result_relations;
2252         epqstate->es_result_relation_info = estate->es_result_relation_info;
2253         epqstate->es_junkFilter = estate->es_junkFilter;
2254         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2255         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2256         epqstate->es_param_list_info = estate->es_param_list_info;
2257         if (estate->es_topPlan->nParamExec > 0)
2258                 epqstate->es_param_exec_vals = (ParamExecData *)
2259                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2260         epqstate->es_rowMarks = estate->es_rowMarks;
2261         epqstate->es_instrument = estate->es_instrument;
2262         epqstate->es_select_into = estate->es_select_into;
2263         epqstate->es_into_oids = estate->es_into_oids;
2264         epqstate->es_topPlan = estate->es_topPlan;
2265
2266         /*
2267          * Each epqstate must have its own es_evTupleNull state, but all the stack
2268          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2269          * the value being examined by an outer recheck.
2270          */
2271         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2272         if (priorepq == NULL)
2273                 /* first PQ stack entry */
2274                 epqstate->es_evTuple = (HeapTuple *)
2275                         palloc0(rtsize * sizeof(HeapTuple));
2276         else
2277                 /* later stack entries share the same storage */
2278                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2279
2280         epqstate->es_tupleTable =
2281                 ExecCreateTupleTable(estate->es_tupleTable->size);
2282
2283         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate, 0);
2284
2285         MemoryContextSwitchTo(oldcontext);
2286 }
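
/*
 * To summarize the sharing rules above: the snapshot, range table,
 * result-relation info, junk filter, row marks, external parameters, and
 * (for all but the first stack level) the es_evTuple array are shared by
 * reference with the top-level EState, while es_evTupleNull,
 * es_param_exec_vals (when the plan uses exec params), the tuple table, and
 * the plan-state tree built by ExecInitNode are allocated fresh for each
 * recheck level.
 */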
2287
2288 /*
2289  * End execution of one level of PlanQual.
2290  *
2291  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2292  * of the normal cleanup, but *not* close result relations (which we are
2293  * just sharing from the outer query).
2294  */
2295 static void
2296 EvalPlanQualStop(evalPlanQual *epq)
2297 {
2298         EState     *epqstate = epq->estate;
2299         MemoryContext oldcontext;
2300
2301         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2302
2303         ExecEndNode(epq->planstate);
2304
2305         ExecDropTupleTable(epqstate->es_tupleTable, true);
2306         epqstate->es_tupleTable = NULL;
2307
2308         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2309         {
2310                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2311                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2312         }
2313
2314         MemoryContextSwitchTo(oldcontext);
2315
2316         FreeExecutorState(epqstate);
2317
2318         epq->estate = NULL;
2319         epq->planstate = NULL;
2320 }
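
/*
 * EvalPlanQualStop undoes only what EvalPlanQualStart built for this level:
 * it shuts down the plan-state tree, drops the private tuple table, frees the
 * test tuple stored for this level's RTE, and releases the per-level EState.
 * Structures merely borrowed from the outer query (result relations,
 * snapshot, range table) are left alone, which is why this is not a full
 * ExecutorEnd.
 */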