1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forward or backward, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.251 2005/06/28 05:08:55 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/xlog.h"
37 #include "catalog/heap.h"
38 #include "catalog/namespace.h"
39 #include "commands/tablecmds.h"
40 #include "commands/trigger.h"
41 #include "executor/execdebug.h"
42 #include "executor/execdefs.h"
43 #include "executor/instrument.h"
44 #include "miscadmin.h"
45 #include "optimizer/clauses.h"
46 #include "optimizer/var.h"
47 #include "parser/parsetree.h"
48 #include "storage/smgr.h"
49 #include "utils/acl.h"
50 #include "utils/guc.h"
51 #include "utils/lsyscache.h"
52 #include "utils/memutils.h"
53
54
55 typedef struct execRowMark
56 {
57         Relation        relation;
58         Index           rti;
59         char            resname[32];
60 } execRowMark;
61
62 typedef struct evalPlanQual
63 {
64         Index           rti;
65         EState     *estate;
66         PlanState  *planstate;
67         struct evalPlanQual *next;      /* stack of active PlanQual plans */
68         struct evalPlanQual *free;      /* list of free PlanQual plans */
69 } evalPlanQual;
70
71 /* decls for local routines only used within this module */
72 static void InitPlan(QueryDesc *queryDesc, bool explainOnly);
73 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
74                                   Index resultRelationIndex,
75                                   List *rangeTable,
76                                   CmdType operation,
77                                   bool doInstrument);
78 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
79                         CmdType operation,
80                         long numberTuples,
81                         ScanDirection direction,
82                         DestReceiver *dest);
83 static void ExecSelect(TupleTableSlot *slot,
84                    DestReceiver *dest,
85                    EState *estate);
86 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
87                    EState *estate);
88 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
89                    EState *estate);
90 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
91                    EState *estate);
92 static TupleTableSlot *EvalPlanQualNext(EState *estate);
93 static void EndEvalPlanQual(EState *estate);
94 static void ExecCheckRTEPerms(RangeTblEntry *rte);
95 static void ExecCheckXactReadOnly(Query *parsetree);
96 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
97                                   evalPlanQual *priorepq);
98 static void EvalPlanQualStop(evalPlanQual *epq);
99
100 /* end of local decls */
101
102
103 /* ----------------------------------------------------------------
104  *              ExecutorStart
105  *
106  *              This routine must be called at the beginning of any execution of any
107  *              query plan
108  *
109  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
110  * clear why we bother to separate the two functions, but...).  The tupDesc
111  * field of the QueryDesc is filled in to describe the tuples that will be
112  * returned, and the internal fields (estate and planstate) are set up.
113  *
114  * If explainOnly is true, we are not actually intending to run the plan,
115  * only to set up for EXPLAIN; so skip unwanted side-effects.
116  *
117  * NB: the CurrentMemoryContext when this is called will become the parent
118  * of the per-query context used for this Executor invocation.
119  * ----------------------------------------------------------------
120  */
121 void
122 ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
123 {
124         EState     *estate;
125         MemoryContext oldcontext;
126
127         /* sanity checks: queryDesc must not be started already */
128         Assert(queryDesc != NULL);
129         Assert(queryDesc->estate == NULL);
130
131         /*
132          * If the transaction is read-only, we need to check if any writes are
133          * planned to non-temporary tables.
134          */
135         if (XactReadOnly && !explainOnly)
136                 ExecCheckXactReadOnly(queryDesc->parsetree);
137
138         /*
139          * Build EState, switch into per-query memory context for startup.
140          */
141         estate = CreateExecutorState();
142         queryDesc->estate = estate;
143
144         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
145
146         /*
147          * Fill in parameters, if any, from queryDesc
148          */
149         estate->es_param_list_info = queryDesc->params;
150
151         if (queryDesc->plantree->nParamExec > 0)
152                 estate->es_param_exec_vals = (ParamExecData *)
153                         palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
154
155         /*
156          * Copy other important information into the EState
157          */
158         estate->es_snapshot = queryDesc->snapshot;
159         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
160         estate->es_instrument = queryDesc->doInstrument;
161
162         /*
163          * Initialize the plan state tree
164          */
165         InitPlan(queryDesc, explainOnly);
166
167         MemoryContextSwitchTo(oldcontext);
168 }
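/*
 * Sketch of the memory-context point made in the header comment above:
 * whatever context is current at ExecutorStart time becomes the parent of
 * the per-query context.  StartQueryInPortalContext and portalContext are
 * hypothetical names used only for illustration.
 */
#ifdef NOT_USED
static void
StartQueryInPortalContext(QueryDesc *queryDesc, MemoryContext portalContext)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(portalContext);

	/* es_query_cxt will be created as a child of portalContext */
	ExecutorStart(queryDesc, false);
	MemoryContextSwitchTo(oldcontext);
}
#endif   /* NOT_USED */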
169
170 /* ----------------------------------------------------------------
171  *              ExecutorRun
172  *
173  *              This is the main routine of the executor module. It accepts
174  *              the query descriptor from the traffic cop and executes the
175  *              query plan.
176  *
177  *              ExecutorStart must have been called already.
178  *
179  *              If direction is NoMovementScanDirection then nothing is done
180  *              except to start up/shut down the destination.  Otherwise,
181  *              we retrieve up to 'count' tuples in the specified direction.
182  *
183  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
184  *              completion.
185  *
186  * ----------------------------------------------------------------
187  */
188 TupleTableSlot *
189 ExecutorRun(QueryDesc *queryDesc,
190                         ScanDirection direction, long count)
191 {
192         EState     *estate;
193         CmdType         operation;
194         DestReceiver *dest;
195         TupleTableSlot *result;
196         MemoryContext oldcontext;
197
198         /* sanity checks */
199         Assert(queryDesc != NULL);
200
201         estate = queryDesc->estate;
202
203         Assert(estate != NULL);
204
205         /*
206          * Switch into per-query memory context
207          */
208         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
209
210         /*
211          * extract the operation and destination receiver from the query
212          * descriptor.
213          */
214         operation = queryDesc->operation;
215         dest = queryDesc->dest;
216
217         /*
218          * startup tuple receiver
219          */
220         estate->es_processed = 0;
221         estate->es_lastoid = InvalidOid;
222
223         (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
224
225         /*
226          * run plan
227          */
228         if (direction == NoMovementScanDirection)
229                 result = NULL;
230         else
231                 result = ExecutePlan(estate,
232                                                          queryDesc->planstate,
233                                                          operation,
234                                                          count,
235                                                          direction,
236                                                          dest);
237
238         /*
239          * shutdown receiver
240          */
241         (*dest->rShutdown) (dest);
242
243         MemoryContextSwitchTo(oldcontext);
244
245         return result;
246 }
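/*
 * Usage sketch for the direction and count arguments (FetchInPieces is a
 * hypothetical helper; it assumes ExecutorStart has already been called on
 * queryDesc):
 */
#ifdef NOT_USED
static void
FetchInPieces(QueryDesc *queryDesc)
{
	/* fetch up to ten tuples moving forward */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 10L);

	/* back up over those tuples (sensible only if the plan can run backwards) */
	(void) ExecutorRun(queryDesc, BackwardScanDirection, 10L);

	/* count = 0 means no limit: run the rest of the plan to completion */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* NoMovementScanDirection only starts up and shuts down the destination */
	(void) ExecutorRun(queryDesc, NoMovementScanDirection, 0L);
}
#endif   /* NOT_USED */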
247
248 /* ----------------------------------------------------------------
249  *              ExecutorEnd
250  *
251  *              This routine must be called at the end of execution of any
252  *              query plan
253  * ----------------------------------------------------------------
254  */
255 void
256 ExecutorEnd(QueryDesc *queryDesc)
257 {
258         EState     *estate;
259         MemoryContext oldcontext;
260
261         /* sanity checks */
262         Assert(queryDesc != NULL);
263
264         estate = queryDesc->estate;
265
266         Assert(estate != NULL);
267
268         /*
269          * Switch into per-query memory context to run ExecEndPlan
270          */
271         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
272
273         ExecEndPlan(queryDesc->planstate, estate);
274
275         /*
276          * Must switch out of context before destroying it
277          */
278         MemoryContextSwitchTo(oldcontext);
279
280         /*
281          * Release EState and per-query memory context.  This should release
282          * everything the executor has allocated.
283          */
284         FreeExecutorState(estate);
285
286         /* Reset queryDesc fields that no longer point to anything */
287         queryDesc->tupDesc = NULL;
288         queryDesc->estate = NULL;
289         queryDesc->planstate = NULL;
290 }
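/*
 * Illustrative calling protocol for the three interface routines above
 * (a minimal sketch; RunQueryToCompletion is a hypothetical helper, not
 * part of this file, and it assumes the caller built the QueryDesc and
 * frees it afterwards):
 */
#ifdef NOT_USED
static void
RunQueryToCompletion(QueryDesc *queryDesc)
{
	/* set up executor state; false = we really intend to run the plan */
	ExecutorStart(queryDesc, false);

	/* run forward with count = 0, i.e. no tuple limit */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* release all executor resources */
	ExecutorEnd(queryDesc);
}
#endif   /* NOT_USED */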
291
292 /* ----------------------------------------------------------------
293  *              ExecutorRewind
294  *
295  *              This routine may be called on an open queryDesc to rewind it
296  *              to the start.
297  * ----------------------------------------------------------------
298  */
299 void
300 ExecutorRewind(QueryDesc *queryDesc)
301 {
302         EState     *estate;
303         MemoryContext oldcontext;
304
305         /* sanity checks */
306         Assert(queryDesc != NULL);
307
308         estate = queryDesc->estate;
309
310         Assert(estate != NULL);
311
312         /* It's probably not sensible to rescan updating queries */
313         Assert(queryDesc->operation == CMD_SELECT);
314
315         /*
316          * Switch into per-query memory context
317          */
318         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
319
320         /*
321          * rescan plan
322          */
323         ExecReScan(queryDesc->planstate, NULL);
324
325         MemoryContextSwitchTo(oldcontext);
326 }
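/*
 * Typical use of ExecutorRewind (sketch; RunTwice is a hypothetical
 * helper): re-execute an open SELECT plan from the beginning without
 * tearing it down and rebuilding it.
 */
#ifdef NOT_USED
static void
RunTwice(QueryDesc *queryDesc)
{
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* first pass */
	ExecutorRewind(queryDesc);									/* back to the start */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* second pass */
}
#endif   /* NOT_USED */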
327
328
329 /*
330  * ExecCheckRTPerms
331  *              Check access permissions for all relations listed in a range table.
332  */
333 void
334 ExecCheckRTPerms(List *rangeTable)
335 {
336         ListCell   *l;
337
338         foreach(l, rangeTable)
339         {
340                 RangeTblEntry *rte = lfirst(l);
341
342                 ExecCheckRTEPerms(rte);
343         }
344 }
345
346 /*
347  * ExecCheckRTEPerms
348  *              Check access permissions for a single RTE.
349  */
350 static void
351 ExecCheckRTEPerms(RangeTblEntry *rte)
352 {
353         AclMode         requiredPerms;
354         Oid                     relOid;
355         Oid             userid;
356
357         /*
358          * Only plain-relation RTEs need to be checked here.  Subquery RTEs
359          * are checked by ExecInitSubqueryScan if the subquery is still a
360          * separate subquery --- if it's been pulled up into our query level
361          * then the RTEs are in our rangetable and will be checked here.
362          * Function RTEs are checked by init_fcache when the function is
363          * prepared for execution. Join and special RTEs need no checks.
364          */
365         if (rte->rtekind != RTE_RELATION)
366                 return;
367
368         /*
369          * No work if requiredPerms is empty.
370          */
371         requiredPerms = rte->requiredPerms;
372         if (requiredPerms == 0)
373                 return;
374
375         relOid = rte->relid;
376
377         /*
378          * userid to check as: current user unless we have a setuid
379          * indication.
380          *
381          * Note: GetUserId() is presently fast enough that there's no harm in
382          * calling it separately for each RTE.  If that stops being true, we
383          * could call it once in ExecCheckRTPerms and pass the userid down
384          * from there.  But for now, no need for the extra clutter.
385          */
386         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
387
388         /*
389          * We must have *all* the requiredPerms bits, so use aclmask not
390          * aclcheck.
391          */
392         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
393                 != requiredPerms)
394                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
395                                            get_rel_name(relOid));
396 }
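/*
 * Illustration of the "all bits" point above (hypothetical helper): an
 * UPDATE that also reads its target relation needs both ACL_SELECT and
 * ACL_UPDATE, and a user holding only one of the two must be rejected,
 * which is why the aclmask result is compared against the full bitmask.
 */
#ifdef NOT_USED
static void
CheckSelectAndUpdatePerms(Oid relOid, Oid userid)
{
	AclMode		need = ACL_SELECT | ACL_UPDATE;

	if (pg_class_aclmask(relOid, userid, need, ACLMASK_ALL) != need)
		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, get_rel_name(relOid));
}
#endif   /* NOT_USED */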
397
398 /*
399  * Check that the query does not imply any writes to non-temp tables.
400  */
401 static void
402 ExecCheckXactReadOnly(Query *parsetree)
403 {
404         ListCell   *l;
405
406         /*
407          * CREATE TABLE AS or SELECT INTO?
408          *
409          * XXX should we allow this if the destination is temp?
410          */
411         if (parsetree->into != NULL)
412                 goto fail;
413
414         /* Fail if write permissions are requested on any non-temp table */
415         foreach(l, parsetree->rtable)
416         {
417                 RangeTblEntry *rte = lfirst(l);
418
419                 if (rte->rtekind == RTE_SUBQUERY)
420                 {
421                         ExecCheckXactReadOnly(rte->subquery);
422                         continue;
423                 }
424
425                 if (rte->rtekind != RTE_RELATION)
426                         continue;
427
428                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
429                         continue;
430
431                 if (isTempNamespace(get_rel_namespace(rte->relid)))
432                         continue;
433
434                 goto fail;
435         }
436
437         return;
438
439 fail:
440         ereport(ERROR,
441                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
442                          errmsg("transaction is read-only")));
443 }
444
445
446 /* ----------------------------------------------------------------
447  *              InitPlan
448  *
449  *              Initializes the query plan: open files, allocate storage
450  *              and start up the rule manager
451  * ----------------------------------------------------------------
452  */
453 static void
454 InitPlan(QueryDesc *queryDesc, bool explainOnly)
455 {
456         CmdType         operation = queryDesc->operation;
457         Query      *parseTree = queryDesc->parsetree;
458         Plan       *plan = queryDesc->plantree;
459         EState     *estate = queryDesc->estate;
460         PlanState  *planstate;
461         List       *rangeTable;
462         Relation        intoRelationDesc;
463         bool            do_select_into;
464         TupleDesc       tupType;
465
466         /*
467          * Do permissions checks.  It's sufficient to examine the query's top
468          * rangetable here --- subplan RTEs will be checked during
469          * ExecInitSubPlan().
470          */
471         ExecCheckRTPerms(parseTree->rtable);
472
473         /*
474          * get information from query descriptor
475          */
476         rangeTable = parseTree->rtable;
477
478         /*
479          * initialize the node's execution state
480          */
481         estate->es_range_table = rangeTable;
482
483         /*
484          * if there is a result relation, initialize result relation stuff
485          */
486         if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
487         {
488                 List       *resultRelations = parseTree->resultRelations;
489                 int                     numResultRelations;
490                 ResultRelInfo *resultRelInfos;
491
492                 if (resultRelations != NIL)
493                 {
494                         /*
495                          * Multiple result relations (due to inheritance)
496                          * parseTree->resultRelations identifies them all
497                          */
498                         ResultRelInfo *resultRelInfo;
499                         ListCell   *l;
500
501                         numResultRelations = list_length(resultRelations);
502                         resultRelInfos = (ResultRelInfo *)
503                                 palloc(numResultRelations * sizeof(ResultRelInfo));
504                         resultRelInfo = resultRelInfos;
505                         foreach(l, resultRelations)
506                         {
507                                 initResultRelInfo(resultRelInfo,
508                                                                   lfirst_int(l),
509                                                                   rangeTable,
510                                                                   operation,
511                                                                   estate->es_instrument);
512                                 resultRelInfo++;
513                         }
514                 }
515                 else
516                 {
517                         /*
518                          * Single result relation identified by
519                          * parseTree->resultRelation
520                          */
521                         numResultRelations = 1;
522                         resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
523                         initResultRelInfo(resultRelInfos,
524                                                           parseTree->resultRelation,
525                                                           rangeTable,
526                                                           operation,
527                                                           estate->es_instrument);
528                 }
529
530                 estate->es_result_relations = resultRelInfos;
531                 estate->es_num_result_relations = numResultRelations;
532                 /* Initialize to first or only result rel */
533                 estate->es_result_relation_info = resultRelInfos;
534         }
535         else
536         {
537                 /*
538                  * if no result relation, then set state appropriately
539                  */
540                 estate->es_result_relations = NULL;
541                 estate->es_num_result_relations = 0;
542                 estate->es_result_relation_info = NULL;
543         }
544
545         /*
546          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
547          * flag appropriately so that the plan tree will be initialized with
548          * the correct tuple descriptors.
549          */
550         do_select_into = false;
551
552         if (operation == CMD_SELECT && parseTree->into != NULL)
553         {
554                 do_select_into = true;
555                 estate->es_select_into = true;
556                 estate->es_into_oids = parseTree->intoHasOids;
557         }
558
559         /*
560          * Have to lock relations selected FOR UPDATE/FOR SHARE
561          */
562         estate->es_rowMark = NIL;
563         estate->es_forUpdate = parseTree->forUpdate;
564         if (parseTree->rowMarks != NIL)
565         {
566                 ListCell   *l;
567
568                 foreach(l, parseTree->rowMarks)
569                 {
570                         Index           rti = lfirst_int(l);
571                         Oid                     relid = getrelid(rti, rangeTable);
572                         Relation        relation;
573                         execRowMark *erm;
574
575                         relation = heap_open(relid, RowShareLock);
576                         erm = (execRowMark *) palloc(sizeof(execRowMark));
577                         erm->relation = relation;
578                         erm->rti = rti;
579                         snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
580                         estate->es_rowMark = lappend(estate->es_rowMark, erm);
581                 }
582         }
583
584         /*
585          * initialize the executor "tuple" table.  We need slots for all the
586          * plan nodes, plus possibly output slots for the junkfilter(s). At
587          * this point we aren't sure if we need junkfilters, so just add slots
588          * for them unconditionally.
589          */
590         {
591                 int                     nSlots = ExecCountSlotsNode(plan);
592
593                 if (parseTree->resultRelations != NIL)
594                         nSlots += list_length(parseTree->resultRelations);
595                 else
596                         nSlots += 1;
597                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
598         }
599
600         /* mark EvalPlanQual not active */
601         estate->es_topPlan = plan;
602         estate->es_evalPlanQual = NULL;
603         estate->es_evTupleNull = NULL;
604         estate->es_evTuple = NULL;
605         estate->es_useEvalPlan = false;
606
607         /*
608          * initialize the private state information for all the nodes in the
609          * query tree.  This opens files, allocates storage and leaves us
610          * ready to start processing tuples.
611          */
612         planstate = ExecInitNode(plan, estate);
613
614         /*
615          * Get the tuple descriptor describing the type of tuples to return.
616          * (this is especially important if we are creating a relation with
617          * "SELECT INTO")
618          */
619         tupType = ExecGetResultType(planstate);
620
621         /*
622          * Initialize the junk filter if needed.  SELECT and INSERT queries
623          * need a filter if there are any junk attrs in the tlist.      INSERT and
624          * SELECT INTO also need a filter if the plan may return raw disk
625          * tuples (else heap_insert will be scribbling on the source
626          * relation!). UPDATE and DELETE always need a filter, since there's
627          * always a junk 'ctid' attribute present --- no need to look first.
628          */
629         {
630                 bool            junk_filter_needed = false;
631                 ListCell   *tlist;
632
633                 switch (operation)
634                 {
635                         case CMD_SELECT:
636                         case CMD_INSERT:
637                                 foreach(tlist, plan->targetlist)
638                                 {
639                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
640
641                                         if (tle->resjunk)
642                                         {
643                                                 junk_filter_needed = true;
644                                                 break;
645                                         }
646                                 }
647                                 if (!junk_filter_needed &&
648                                         (operation == CMD_INSERT || do_select_into) &&
649                                         ExecMayReturnRawTuples(planstate))
650                                         junk_filter_needed = true;
651                                 break;
652                         case CMD_UPDATE:
653                         case CMD_DELETE:
654                                 junk_filter_needed = true;
655                                 break;
656                         default:
657                                 break;
658                 }
659
660                 if (junk_filter_needed)
661                 {
662                         /*
663                          * If there are multiple result relations, each one needs its
664                          * own junk filter.  Note this is only possible for
665                          * UPDATE/DELETE, so we can't be fooled by some needing a
666                          * filter and some not.
667                          */
668                         if (parseTree->resultRelations != NIL)
669                         {
670                                 PlanState **appendplans;
671                                 int                     as_nplans;
672                                 ResultRelInfo *resultRelInfo;
673                                 int                     i;
674
675                                 /* Top plan had better be an Append here. */
676                                 Assert(IsA(plan, Append));
677                                 Assert(((Append *) plan)->isTarget);
678                                 Assert(IsA(planstate, AppendState));
679                                 appendplans = ((AppendState *) planstate)->appendplans;
680                                 as_nplans = ((AppendState *) planstate)->as_nplans;
681                                 Assert(as_nplans == estate->es_num_result_relations);
682                                 resultRelInfo = estate->es_result_relations;
683                                 for (i = 0; i < as_nplans; i++)
684                                 {
685                                         PlanState  *subplan = appendplans[i];
686                                         JunkFilter *j;
687
688                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
689                                                                                    resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
690                                                                                    ExecAllocTableSlot(estate->es_tupleTable));
691                                         resultRelInfo->ri_junkFilter = j;
692                                         resultRelInfo++;
693                                 }
694
695                                 /*
696                                  * Set active junkfilter too; at this point ExecInitAppend
697                                  * has already selected an active result relation...
698                                  */
699                                 estate->es_junkFilter =
700                                         estate->es_result_relation_info->ri_junkFilter;
701                         }
702                         else
703                         {
704                                 /* Normal case with just one JunkFilter */
705                                 JunkFilter *j;
706
707                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
708                                                                            tupType->tdhasoid,
709                                                           ExecAllocTableSlot(estate->es_tupleTable));
710                                 estate->es_junkFilter = j;
711                                 if (estate->es_result_relation_info)
712                                         estate->es_result_relation_info->ri_junkFilter = j;
713
714                                 /* For SELECT, want to return the cleaned tuple type */
715                                 if (operation == CMD_SELECT)
716                                         tupType = j->jf_cleanTupType;
717                         }
718                 }
719                 else
720                         estate->es_junkFilter = NULL;
721         }
722
723         /*
724          * If doing SELECT INTO, initialize the "into" relation.  We must wait
725          * till now so we have the "clean" result tuple type to create the new
726          * table from.
727          *
728          * If EXPLAIN, skip creating the "into" relation.
729          */
730         intoRelationDesc = NULL;
731
732         if (do_select_into && !explainOnly)
733         {
734                 char       *intoName;
735                 Oid                     namespaceId;
736                 AclResult       aclresult;
737                 Oid                     intoRelationId;
738                 TupleDesc       tupdesc;
739
740                 /*
741                  * find namespace to create in, check permissions
742                  */
743                 intoName = parseTree->into->relname;
744                 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
745
746                 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
747                                                                                   ACL_CREATE);
748                 if (aclresult != ACLCHECK_OK)
749                         aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
750                                                    get_namespace_name(namespaceId));
751
752                 /*
753                  * have to copy tupType to get rid of constraints
754                  */
755                 tupdesc = CreateTupleDescCopy(tupType);
756
757                 intoRelationId = heap_create_with_catalog(intoName,
758                                                                                                   namespaceId,
759                                                                                                   InvalidOid,
760                                                                                                   InvalidOid,
761                                                                                                   tupdesc,
762                                                                                                   RELKIND_RELATION,
763                                                                                                   false,
764                                                                                                   true,
765                                                                                                   0,
766                                                                                                   ONCOMMIT_NOOP,
767                                                                                                   allowSystemTableMods);
768
769                 FreeTupleDesc(tupdesc);
770
771                 /*
772                  * Advance command counter so that the newly-created relation's
773                  * catalog tuples will be visible to heap_open.
774                  */
775                 CommandCounterIncrement();
776
777                 /*
778                  * If necessary, create a TOAST table for the into relation. Note
779                  * that AlterTableCreateToastTable ends with
780                  * CommandCounterIncrement(), so that the TOAST table will be
781                  * visible for insertion.
782                  */
783                 AlterTableCreateToastTable(intoRelationId, true);
784
785                 /*
786                  * And open the constructed table for writing.
787                  */
788                 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
789
790                 /* use_wal off requires rd_targblock be initially invalid */
791                 Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
792
793                 /*
794                  * We can skip WAL-logging the insertions, unless PITR is in use.
795                  *
796                  * Note that for a non-temp INTO table, this is safe only because
797                  * we know that the catalog changes above will have been WAL-logged,
798                  * and so RecordTransactionCommit will think it needs to WAL-log the
799                  * eventual transaction commit.  Else the commit might be lost, even
800                  * though all the data is safely fsync'd ...
801                  */
802                 estate->es_into_relation_use_wal = XLogArchivingActive();
803         }
804
805         estate->es_into_relation_descriptor = intoRelationDesc;
806
807         queryDesc->tupDesc = tupType;
808         queryDesc->planstate = planstate;
809 }
810
811 /*
812  * Initialize ResultRelInfo data for one result relation
813  */
814 static void
815 initResultRelInfo(ResultRelInfo *resultRelInfo,
816                                   Index resultRelationIndex,
817                                   List *rangeTable,
818                                   CmdType operation,
819                                   bool doInstrument)
820 {
821         Oid                     resultRelationOid;
822         Relation        resultRelationDesc;
823
824         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
825         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
826
827         switch (resultRelationDesc->rd_rel->relkind)
828         {
829                 case RELKIND_SEQUENCE:
830                         ereport(ERROR,
831                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
832                                          errmsg("cannot change sequence \"%s\"",
833                                                   RelationGetRelationName(resultRelationDesc))));
834                         break;
835                 case RELKIND_TOASTVALUE:
836                         ereport(ERROR,
837                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
838                                          errmsg("cannot change TOAST relation \"%s\"",
839                                                   RelationGetRelationName(resultRelationDesc))));
840                         break;
841                 case RELKIND_VIEW:
842                         ereport(ERROR,
843                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
844                                          errmsg("cannot change view \"%s\"",
845                                                   RelationGetRelationName(resultRelationDesc))));
846                         break;
847         }
848
849         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
850         resultRelInfo->type = T_ResultRelInfo;
851         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
852         resultRelInfo->ri_RelationDesc = resultRelationDesc;
853         resultRelInfo->ri_NumIndices = 0;
854         resultRelInfo->ri_IndexRelationDescs = NULL;
855         resultRelInfo->ri_IndexRelationInfo = NULL;
856         /* make a copy so as not to depend on relcache info not changing... */
857         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
858         if (resultRelInfo->ri_TrigDesc)
859         {
860                 int             n = resultRelInfo->ri_TrigDesc->numtriggers;
861
862                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
863                         palloc0(n * sizeof(FmgrInfo));
864                 if (doInstrument)
865                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
866                 else
867                         resultRelInfo->ri_TrigInstrument = NULL;
868         }
869         else
870         {
871                 resultRelInfo->ri_TrigFunctions = NULL;
872                 resultRelInfo->ri_TrigInstrument = NULL;
873         }
874         resultRelInfo->ri_ConstraintExprs = NULL;
875         resultRelInfo->ri_junkFilter = NULL;
876
877         /*
878          * If there are indices on the result relation, open them and save
879          * descriptors in the result relation info, so that we can add new
880          * index entries for the tuples we add/update.  We need not do this
881          * for a DELETE, however, since deletion doesn't affect indexes.
882          */
883         if (resultRelationDesc->rd_rel->relhasindex &&
884                 operation != CMD_DELETE)
885                 ExecOpenIndices(resultRelInfo);
886 }
887
888 /*
889  *              ExecContextForcesOids
890  *
891  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
892  * we need to ensure that result tuples have space for an OID iff they are
893  * going to be stored into a relation that has OIDs.  In other contexts
894  * we are free to choose whether to leave space for OIDs in result tuples
895  * (we generally don't want to, but we do if a physical-tlist optimization
896  * is possible).  This routine checks the plan context and returns TRUE if the
897  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
898  * *hasoids is set to the required value.
899  *
900  * One reason this is ugly is that all plan nodes in the plan tree will emit
901  * tuples with space for an OID, though we really only need the topmost node
902  * to do so.  However, node types like Sort don't project new tuples but just
903  * return their inputs, and in those cases the requirement propagates down
904  * to the input node.  Eventually we might make this code smart enough to
905  * recognize how far down the requirement really goes, but for now we just
906  * make all plan nodes do the same thing if the top level forces the choice.
907  *
908  * We assume that estate->es_result_relation_info is already set up to
909  * describe the target relation.  Note that in an UPDATE that spans an
910  * inheritance tree, some of the target relations may have OIDs and some not.
911  * We have to make the decisions on a per-relation basis as we initialize
912  * each of the child plans of the topmost Append plan.
913  *
914  * SELECT INTO is even uglier, because we don't have the INTO relation's
915  * descriptor available when this code runs; we have to look aside at a
916  * flag set by InitPlan().
917  */
918 bool
919 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
920 {
921         if (planstate->state->es_select_into)
922         {
923                 *hasoids = planstate->state->es_into_oids;
924                 return true;
925         }
926         else
927         {
928                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
929
930                 if (ri != NULL)
931                 {
932                         Relation        rel = ri->ri_RelationDesc;
933
934                         if (rel != NULL)
935                         {
936                                 *hasoids = rel->rd_rel->relhasoids;
937                                 return true;
938                         }
939                 }
940         }
941
942         return false;
943 }
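/*
 * Sketch of how plan-node initialization code typically consults this
 * routine when deciding whether its result tuples need OID space
 * (DecideHasOids is a hypothetical helper used only for illustration):
 */
#ifdef NOT_USED
static bool
DecideHasOids(PlanState *planstate)
{
	bool		hasoids;

	if (!ExecContextForcesOids(planstate, &hasoids))
		hasoids = false;		/* choice not forced; usually omit OID space */
	return hasoids;
}
#endif   /* NOT_USED */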
944
945 /* ----------------------------------------------------------------
946  *              ExecEndPlan
947  *
948  *              Cleans up the query plan -- closes files and frees up storage
949  *
950  * NOTE: we are no longer very worried about freeing storage per se
951  * in this code; FreeExecutorState should be guaranteed to release all
952  * memory that needs to be released.  What we are worried about doing
953  * is closing relations and dropping buffer pins.  Thus, for example,
954  * tuple tables must be cleared or dropped to ensure pins are released.
955  * ----------------------------------------------------------------
956  */
957 void
958 ExecEndPlan(PlanState *planstate, EState *estate)
959 {
960         ResultRelInfo *resultRelInfo;
961         int                     i;
962         ListCell   *l;
963
964         /*
965          * shut down any PlanQual processing we were doing
966          */
967         if (estate->es_evalPlanQual != NULL)
968                 EndEvalPlanQual(estate);
969
970         /*
971          * shut down the node-type-specific query processing
972          */
973         ExecEndNode(planstate);
974
975         /*
976          * destroy the executor "tuple" table.
977          */
978         ExecDropTupleTable(estate->es_tupleTable, true);
979         estate->es_tupleTable = NULL;
980
981         /*
982          * close the result relation(s) if any, but hold locks until xact
983          * commit.
984          */
985         resultRelInfo = estate->es_result_relations;
986         for (i = estate->es_num_result_relations; i > 0; i--)
987         {
988                 /* Close indices and then the relation itself */
989                 ExecCloseIndices(resultRelInfo);
990                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
991                 resultRelInfo++;
992         }
993
994         /*
995          * close the "into" relation if necessary, again keeping lock
996          */
997         if (estate->es_into_relation_descriptor != NULL)
998         {
999                 /*
1000                  * If we skipped using WAL, and it's not a temp relation,
1001                  * we must force the relation down to disk before it's
1002                  * safe to commit the transaction.  This requires forcing
1003                  * out any dirty buffers and then doing a forced fsync.
1004                  */
1005                 if (!estate->es_into_relation_use_wal &&
1006                         !estate->es_into_relation_descriptor->rd_istemp)
1007                 {
1008                         FlushRelationBuffers(estate->es_into_relation_descriptor);
1009                         smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
1010                 }
1011
1012                 heap_close(estate->es_into_relation_descriptor, NoLock);
1013         }
1014
1015         /*
1016          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1017          */
1018         foreach(l, estate->es_rowMark)
1019         {
1020                 execRowMark *erm = lfirst(l);
1021
1022                 heap_close(erm->relation, NoLock);
1023         }
1024 }
1025
1026 /* ----------------------------------------------------------------
1027  *              ExecutePlan
1028  *
1029  *              processes the query plan to retrieve 'numberTuples' tuples in the
1030  *              direction specified.
1031  *
1032  *              Retrieves all tuples if numberTuples is 0
1033  *
1034  *              result is either a slot containing the last tuple in the case
1035  *              of a SELECT or NULL otherwise.
1036  *
1037  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1038  * user can see it
1039  * ----------------------------------------------------------------
1040  */
1041 static TupleTableSlot *
1042 ExecutePlan(EState *estate,
1043                         PlanState *planstate,
1044                         CmdType operation,
1045                         long numberTuples,
1046                         ScanDirection direction,
1047                         DestReceiver *dest)
1048 {
1049         JunkFilter *junkfilter;
1050         TupleTableSlot *slot;
1051         ItemPointer tupleid = NULL;
1052         ItemPointerData tuple_ctid;
1053         long            current_tuple_count;
1054         TupleTableSlot *result;
1055
1056         /*
1057          * initialize local variables
1058          */
1059         slot = NULL;
1060         current_tuple_count = 0;
1061         result = NULL;
1062
1063         /*
1064          * Set the direction.
1065          */
1066         estate->es_direction = direction;
1067
1068         /*
1069          * Process BEFORE EACH STATEMENT triggers
1070          */
1071         switch (operation)
1072         {
1073                 case CMD_UPDATE:
1074                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1075                         break;
1076                 case CMD_DELETE:
1077                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1078                         break;
1079                 case CMD_INSERT:
1080                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1081                         break;
1082                 default:
1083                         /* do nothing */
1084                         break;
1085         }
1086
1087         /*
1088          * Loop until we've processed the proper number of tuples from the
1089          * plan.
1090          */
1091
1092         for (;;)
1093         {
1094                 /* Reset the per-output-tuple exprcontext */
1095                 ResetPerTupleExprContext(estate);
1096
1097                 /*
1098                  * Execute the plan and obtain a tuple
1099                  */
1100 lnext:  ;
1101                 if (estate->es_useEvalPlan)
1102                 {
1103                         slot = EvalPlanQualNext(estate);
1104                         if (TupIsNull(slot))
1105                                 slot = ExecProcNode(planstate);
1106                 }
1107                 else
1108                         slot = ExecProcNode(planstate);
1109
1110                 /*
1111                  * if the tuple is null, then we assume there is nothing more to
1112                  * process so we just return null...
1113                  */
1114                 if (TupIsNull(slot))
1115                 {
1116                         result = NULL;
1117                         break;
1118                 }
1119
1120                 /*
1121                  * if we have a junk filter, then project a new tuple with the
1122                  * junk removed.
1123                  *
1124                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1125                  * (Formerly, we stored it back over the "dirty" tuple, which is
1126                  * WRONG because that tuple slot has the wrong descriptor.)
1127                  *
1128                  * Also, extract all the junk information we need.
1129                  */
1130                 if ((junkfilter = estate->es_junkFilter) != NULL)
1131                 {
1132                         Datum           datum;
1133                         bool            isNull;
1134
1135                         /*
1136                          * extract the 'ctid' junk attribute.
1137                          */
1138                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1139                         {
1140                                 if (!ExecGetJunkAttribute(junkfilter,
1141                                                                                   slot,
1142                                                                                   "ctid",
1143                                                                                   &datum,
1144                                                                                   &isNull))
1145                                         elog(ERROR, "could not find junk ctid column");
1146
1147                                 /* shouldn't ever get a null result... */
1148                                 if (isNull)
1149                                         elog(ERROR, "ctid is NULL");
1150
1151                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1152                                 tuple_ctid = *tupleid;  /* make sure we don't free the
1153                                                                                  * ctid!! */
1154                                 tupleid = &tuple_ctid;
1155                         }
1156                         /*
1157                          * Process any FOR UPDATE or FOR SHARE locking requested.
1158                          */
1159                         else if (estate->es_rowMark != NIL)
1160                         {
1161                                 ListCell   *l;
1162
1163                 lmark:  ;
1164                                 foreach(l, estate->es_rowMark)
1165                                 {
1166                                         execRowMark *erm = lfirst(l);
1167                                         Buffer          buffer;
1168                                         HeapTupleData tuple;
1169                                         TupleTableSlot *newSlot;
1170                                         LockTupleMode   lockmode;
1171                                         HTSU_Result             test;
1172
1173                                         if (!ExecGetJunkAttribute(junkfilter,
1174                                                                                           slot,
1175                                                                                           erm->resname,
1176                                                                                           &datum,
1177                                                                                           &isNull))
1178                                                 elog(ERROR, "could not find junk \"%s\" column",
1179                                                          erm->resname);
1180
1181                                         /* shouldn't ever get a null result... */
1182                                         if (isNull)
1183                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1184
1185                                         if (estate->es_forUpdate)
1186                                                 lockmode = LockTupleExclusive;
1187                                         else
1188                                                 lockmode = LockTupleShared;
1189
1190                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1191                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1192                                                                                   estate->es_snapshot->curcid,
1193                                                                                   lockmode);
1194                                         ReleaseBuffer(buffer);
1195                                         switch (test)
1196                                         {
1197                                                 case HeapTupleSelfUpdated:
1198                                                         /* treat it as deleted; do not process */
1199                                                         goto lnext;
1200
1201                                                 case HeapTupleMayBeUpdated:
1202                                                         break;
1203
1204                                                 case HeapTupleUpdated:
1205                                                         if (IsXactIsoLevelSerializable)
1206                                                                 ereport(ERROR,
1207                                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1208                                                                                  errmsg("could not serialize access due to concurrent update")));
1209                                                         if (!(ItemPointerEquals(&(tuple.t_self),
1210                                                                   (ItemPointer) DatumGetPointer(datum))))
1211                                                         {
1212                                                                 newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
1213                                                                 if (!(TupIsNull(newSlot)))
1214                                                                 {
1215                                                                         slot = newSlot;
1216                                                                         estate->es_useEvalPlan = true;
1217                                                                         goto lmark;
1218                                                                 }
1219                                                         }
1220
1221                                                         /*
1222                                                          * if tuple was deleted or PlanQual failed for
1223                                                          * updated tuple - we must not return this
1224                                                          * tuple!
1225                                                          */
1226                                                         goto lnext;
1227
1228                                                 default:
1229                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1230                                                                  test);
1231                                                         return (NULL);
1232                                         }
1233                                 }
1234                         }
1235
1236                         /*
1237                          * Finally create a new "clean" tuple with all junk attributes
1238                          * removed
1239                          */
1240                         slot = ExecFilterJunk(junkfilter, slot);
1241                 }
1242
1243                 /*
1244                  * now that we have a tuple, do the appropriate thing with it..
1245                  * either return it to the user, add it to a relation someplace,
1246                  * delete it from a relation, or modify some of its attributes.
1247                  */
1248                 switch (operation)
1249                 {
1250                         case CMD_SELECT:
1251                                 ExecSelect(slot,        /* slot containing tuple */
1252                                                    dest,        /* destination's tuple-receiver obj */
1253                                                    estate);
1254                                 result = slot;
1255                                 break;
1256
1257                         case CMD_INSERT:
1258                                 ExecInsert(slot, tupleid, estate);
1259                                 result = NULL;
1260                                 break;
1261
1262                         case CMD_DELETE:
1263                                 ExecDelete(slot, tupleid, estate);
1264                                 result = NULL;
1265                                 break;
1266
1267                         case CMD_UPDATE:
1268                                 ExecUpdate(slot, tupleid, estate);
1269                                 result = NULL;
1270                                 break;
1271
1272                         default:
1273                                 elog(ERROR, "unrecognized operation code: %d",
1274                                          (int) operation);
1275                                 result = NULL;
1276                                 break;
1277                 }
1278
1279                 /*
1280                  * check our tuple count.. if we've processed the proper number
1281                  * then quit, else loop again and process more tuples.  Zero
1282                  * numberTuples means no limit.
1283                  */
1284                 current_tuple_count++;
1285                 if (numberTuples && numberTuples == current_tuple_count)
1286                         break;
1287         }
1288
1289         /*
1290          * Process AFTER EACH STATEMENT triggers
1291          */
1292         switch (operation)
1293         {
1294                 case CMD_UPDATE:
1295                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1296                         break;
1297                 case CMD_DELETE:
1298                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1299                         break;
1300                 case CMD_INSERT:
1301                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1302                         break;
1303                 default:
1304                         /* do nothing */
1305                         break;
1306         }
1307
1308         /*
1309          * here, result is either a slot containing a tuple in the case of a
1310          * SELECT or NULL otherwise.
1311          */
1312         return result;
1313 }
1314
1315 /* ----------------------------------------------------------------
1316  *              ExecSelect
1317  *
1318  *              SELECTs are easy.. we just pass the tuple to the appropriate
1319  *              print function.  The only complexity is when we do a
1320  *              "SELECT INTO", in which case we insert the tuple into
1321  *              the appropriate relation (note: this is a newly created relation
1322  *              so we don't need to worry about indices or locks.)
1323  * ----------------------------------------------------------------
1324  */
1325 static void
1326 ExecSelect(TupleTableSlot *slot,
1327                    DestReceiver *dest,
1328                    EState *estate)
1329 {
1330         /*
1331          * insert the tuple into the "into relation"
1332          *
1333          * XXX this probably ought to be replaced by a separate destination
1334          */
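              /*
               * For example, a query such as
               *              SELECT * INTO newtab FROM oldtab;
               * arrives here with es_into_relation_descriptor pointing at the
               * newly created target relation ("newtab" is just an illustrative
               * name).
               */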
1335         if (estate->es_into_relation_descriptor != NULL)
1336         {
1337                 HeapTuple       tuple;
1338
1339                 tuple = ExecCopySlotTuple(slot);
1340                 heap_insert(estate->es_into_relation_descriptor, tuple,
1341                                         estate->es_snapshot->curcid,
1342                                         estate->es_into_relation_use_wal,
1343                                         false);         /* never any point in using FSM */
1344                 /* we know there are no indexes to update */
1345                 heap_freetuple(tuple);
1346                 IncrAppended();
1347         }
1348
1349         /*
1350          * send the tuple to the destination
1351          */
1352         (*dest->receiveSlot) (slot, dest);
1353         IncrRetrieved();
1354         (estate->es_processed)++;
1355 }
1356
1357 /* ----------------------------------------------------------------
1358  *              ExecInsert
1359  *
1360  *              INSERTs are trickier.. we have to insert the tuple into
1361  *              the base relation and insert appropriate tuples into the
1362  *              index relations.
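       *
       *              The tuple is run through any BEFORE ROW INSERT triggers and
       *              constraint checks, inserted with heap_insert, and then index
       *              entries and AFTER ROW INSERT triggers are processed.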
1363  * ----------------------------------------------------------------
1364  */
1365 static void
1366 ExecInsert(TupleTableSlot *slot,
1367                    ItemPointer tupleid,
1368                    EState *estate)
1369 {
1370         HeapTuple       tuple;
1371         ResultRelInfo *resultRelInfo;
1372         Relation        resultRelationDesc;
1373         int                     numIndices;
1374         Oid                     newId;
1375
1376         /*
1377          * get the heap tuple out of the tuple table slot, making sure
1378          * we have a writable copy
1379          */
1380         tuple = ExecMaterializeSlot(slot);
1381
1382         /*
1383          * get information on the (current) result relation
1384          */
1385         resultRelInfo = estate->es_result_relation_info;
1386         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1387
1388         /* BEFORE ROW INSERT Triggers */
1389         if (resultRelInfo->ri_TrigDesc &&
1390           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1391         {
1392                 HeapTuple       newtuple;
1393
1394                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1395
1396                 if (newtuple == NULL)   /* "do nothing" */
1397                         return;
1398
1399                 if (newtuple != tuple)  /* modified by Trigger(s) */
1400                 {
1401                         /*
1402                          * Insert modified tuple into tuple table slot, replacing the
1403                          * original.  We assume that it was allocated in per-tuple
1404                          * memory context, and therefore will go away by itself. The
1405                          * tuple table slot should not try to clear it.
1406                          */
1407                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1408                         tuple = newtuple;
1409                 }
1410         }
1411
1412         /*
1413          * Check the constraints of the tuple
1414          */
1415         if (resultRelationDesc->rd_att->constr)
1416                 ExecConstraints(resultRelInfo, slot, estate);
1417
1418         /*
1419          * insert the tuple
1420          */
1421         newId = heap_insert(resultRelationDesc, tuple,
1422                                                 estate->es_snapshot->curcid,
1423                                                 true, true);
1424
1425         IncrAppended();
1426         (estate->es_processed)++;
1427         estate->es_lastoid = newId;
1428         setLastTid(&(tuple->t_self));
1429
1430         /*
1431          * process indices
1432          *
1433          * Note: heap_insert adds a new tuple to a relation.  As a side effect,
1434          * the tupleid of the new tuple is placed in the new tuple's t_ctid
1435          * field.
1436          */
1437         numIndices = resultRelInfo->ri_NumIndices;
1438         if (numIndices > 0)
1439                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1440
1441         /* AFTER ROW INSERT Triggers */
1442         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1443 }
1444
1445 /* ----------------------------------------------------------------
1446  *              ExecDelete
1447  *
1448  *              DELETE is like UPDATE, except that we delete the tuple;
1449  *              no index modifications are needed here (see note below).
1450  * ----------------------------------------------------------------
1451  */
1452 static void
1453 ExecDelete(TupleTableSlot *slot,
1454                    ItemPointer tupleid,
1455                    EState *estate)
1456 {
1457         ResultRelInfo *resultRelInfo;
1458         Relation        resultRelationDesc;
1459         ItemPointerData ctid;
1460         HTSU_Result     result;
1461
1462         /*
1463          * get information on the (current) result relation
1464          */
1465         resultRelInfo = estate->es_result_relation_info;
1466         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1467
1468         /* BEFORE ROW DELETE Triggers */
1469         if (resultRelInfo->ri_TrigDesc &&
1470           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1471         {
1472                 bool            dodelete;
1473
1474                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1475                                                                                 estate->es_snapshot->curcid);
1476
1477                 if (!dodelete)                  /* "do nothing" */
1478                         return;
1479         }
1480
1481         /*
1482          * delete the tuple
1483          *
1484          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1485          * the row to be deleted is visible to that snapshot, and throw a can't-
1486          * serialize error if not.  This is a special-case behavior needed for
1487          * referential integrity updates in serializable transactions.
1488          */
1489 ldelete:;
1490         result = heap_delete(resultRelationDesc, tupleid,
1491                                                  &ctid,
1492                                                  estate->es_snapshot->curcid,
1493                                                  estate->es_crosscheck_snapshot,
1494                                                  true /* wait for commit */ );
1495         switch (result)
1496         {
1497                 case HeapTupleSelfUpdated:
1498                         /* already deleted by self; nothing to do */
1499                         return;
1500
1501                 case HeapTupleMayBeUpdated:
1502                         break;
1503
1504                 case HeapTupleUpdated:
1505                         if (IsXactIsoLevelSerializable)
1506                                 ereport(ERROR,
1507                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1508                                                  errmsg("could not serialize access due to concurrent update")));
1509                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1510                         {
1511                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1512                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1513
1514                                 if (!TupIsNull(epqslot))
1515                                 {
1516                                         *tupleid = ctid;
1517                                         goto ldelete;
1518                                 }
1519                         }
1520                         /* tuple already deleted; nothing to do */
1521                         return;
1522
1523                 default:
1524                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1525                         return;
1526         }
1527
1528         IncrDeleted();
1529         (estate->es_processed)++;
1530
1531         /*
1532          * Note: Normally one would think that we have to delete index tuples
1533          * associated with the heap tuple now..
1534          *
1535          * ... but in POSTGRES, we have no need to do this because the vacuum
1536          * daemon automatically opens an index scan and deletes index tuples
1537          * when it finds deleted heap tuples. -cim 9/27/89
1538          */
1539
1540         /* AFTER ROW DELETE Triggers */
1541         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1542 }
1543
1544 /* ----------------------------------------------------------------
1545  *              ExecUpdate
1546  *
1547  *              note: we can't run UPDATE queries with transactions
1548  *              off, because UPDATEs are actually INSERTs and our
1549  *              scan would mistakenly loop forever, updating the tuple
1550  *              it just inserted.  This should be fixed, but until it
1551  *              is, we don't want to get stuck in an infinite loop
1552  *              that corrupts your database.
1553  * ----------------------------------------------------------------
1554  */
1555 static void
1556 ExecUpdate(TupleTableSlot *slot,
1557                    ItemPointer tupleid,
1558                    EState *estate)
1559 {
1560         HeapTuple       tuple;
1561         ResultRelInfo *resultRelInfo;
1562         Relation        resultRelationDesc;
1563         ItemPointerData ctid;
1564         HTSU_Result     result;
1565         int                     numIndices;
1566
1567         /*
1568          * abort the operation if not running transactions
1569          */
1570         if (IsBootstrapProcessingMode())
1571                 elog(ERROR, "cannot UPDATE during bootstrap");
1572
1573         /*
1574          * get the heap tuple out of the tuple table slot, making sure
1575          * we have a writable copy
1576          */
1577         tuple = ExecMaterializeSlot(slot);
1578
1579         /*
1580          * get information on the (current) result relation
1581          */
1582         resultRelInfo = estate->es_result_relation_info;
1583         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1584
1585         /* BEFORE ROW UPDATE Triggers */
1586         if (resultRelInfo->ri_TrigDesc &&
1587           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1588         {
1589                 HeapTuple       newtuple;
1590
1591                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1592                                                                                 tupleid, tuple,
1593                                                                                 estate->es_snapshot->curcid);
1594
1595                 if (newtuple == NULL)   /* "do nothing" */
1596                         return;
1597
1598                 if (newtuple != tuple)  /* modified by Trigger(s) */
1599                 {
1600                         /*
1601                          * Insert modified tuple into tuple table slot, replacing the
1602                          * original.  We assume that it was allocated in per-tuple
1603                          * memory context, and therefore will go away by itself. The
1604                          * tuple table slot should not try to clear it.
1605                          */
1606                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1607                         tuple = newtuple;
1608                 }
1609         }
1610
1611         /*
1612          * Check the constraints of the tuple
1613          *
1614          * If we generate a new candidate tuple after EvalPlanQual testing, we
1615          * must loop back here and recheck constraints.  (We don't need to
1616          * redo triggers, however.      If there are any BEFORE triggers then
1617          * trigger.c will have done heap_lock_tuple to lock the correct tuple,
1618          * so there's no need to do them again.)
1619          */
1620 lreplace:;
1621         if (resultRelationDesc->rd_att->constr)
1622                 ExecConstraints(resultRelInfo, slot, estate);
1623
1624         /*
1625          * replace the heap tuple
1626          *
1627          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1628          * the row to be updated is visible to that snapshot, and throw a can't-
1629          * serialize error if not.  This is a special-case behavior needed for
1630          * referential integrity updates in serializable transactions.
1631          */
1632         result = heap_update(resultRelationDesc, tupleid, tuple,
1633                                                  &ctid,
1634                                                  estate->es_snapshot->curcid,
1635                                                  estate->es_crosscheck_snapshot,
1636                                                  true /* wait for commit */ );
1637         switch (result)
1638         {
1639                 case HeapTupleSelfUpdated:
1640                         /* already deleted by self; nothing to do */
1641                         return;
1642
1643                 case HeapTupleMayBeUpdated:
1644                         break;
1645
1646                 case HeapTupleUpdated:
1647                         if (IsXactIsoLevelSerializable)
1648                                 ereport(ERROR,
1649                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1650                                                  errmsg("could not serialize access due to concurrent update")));
1651                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1652                         {
1653                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1654                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1655
1656                                 if (!TupIsNull(epqslot))
1657                                 {
1658                                         *tupleid = ctid;
1659                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1660                                         tuple = ExecMaterializeSlot(slot);
1661                                         goto lreplace;
1662                                 }
1663                         }
1664                         /* tuple already deleted; nothing to do */
1665                         return;
1666
1667                 default:
1668                         elog(ERROR, "unrecognized heap_update status: %u", result);
1669                         return;
1670         }
1671
1672         IncrReplaced();
1673         (estate->es_processed)++;
1674
1675         /*
1676          * Note: instead of having to update the old index tuples associated
1677          * with the heap tuple, all we do is form and insert new index tuples.
1678          * This is because UPDATEs are actually DELETEs and INSERTs and index
1679          * tuple deletion is done automagically by the vacuum daemon. All we
1680          * do is insert new index tuples.  -cim 9/27/89
1681          */
1682
1683         /*
1684          * process indices
1685          *
1686          * heap_update updates a tuple in the base relation by invalidating it
1687          * and then inserting a new tuple into the relation.  As a side effect,
1688          * the tupleid of the new tuple is placed in the new tuple's t_ctid
1689          * field.  So we now insert index tuples using the new tupleid stored
1690          * there.
1691          */
1692
1693         numIndices = resultRelInfo->ri_NumIndices;
1694         if (numIndices > 0)
1695                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1696
1697         /* AFTER ROW UPDATE Triggers */
1698         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1699 }
1700
1701 static const char *
1702 ExecRelCheck(ResultRelInfo *resultRelInfo,
1703                          TupleTableSlot *slot, EState *estate)
1704 {
1705         Relation        rel = resultRelInfo->ri_RelationDesc;
1706         int                     ncheck = rel->rd_att->constr->num_check;
1707         ConstrCheck *check = rel->rd_att->constr->check;
1708         ExprContext *econtext;
1709         MemoryContext oldContext;
1710         List       *qual;
1711         int                     i;
1712
1713         /*
1714          * If first time through for this result relation, build expression
1715          * nodetrees for rel's constraint expressions.  Keep them in the
1716          * per-query memory context so they'll survive throughout the query.
1717          */
1718         if (resultRelInfo->ri_ConstraintExprs == NULL)
1719         {
1720                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1721                 resultRelInfo->ri_ConstraintExprs =
1722                         (List **) palloc(ncheck * sizeof(List *));
1723                 for (i = 0; i < ncheck; i++)
1724                 {
1725                         /* ExecQual wants implicit-AND form */
1726                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1727                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1728                                 ExecPrepareExpr((Expr *) qual, estate);
1729                 }
1730                 MemoryContextSwitchTo(oldContext);
1731         }
1732
1733         /*
1734          * We will use the EState's per-tuple context for evaluating
1735          * constraint expressions (creating it if it's not already there).
1736          */
1737         econtext = GetPerTupleExprContext(estate);
1738
1739         /* Arrange for econtext's scan tuple to be the tuple under test */
1740         econtext->ecxt_scantuple = slot;
1741
1742         /* And evaluate the constraints */
1743         for (i = 0; i < ncheck; i++)
1744         {
1745                 qual = resultRelInfo->ri_ConstraintExprs[i];
1746
1747                 /*
1748                  * NOTE: SQL92 specifies that a NULL result from a constraint
1749                  * expression is not to be treated as a failure.  Therefore, tell
1750                  * ExecQual to return TRUE for NULL.
1751                  */
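                      /*
                       * For example, with a constraint such as CHECK (x > 0), a row
                       * with x = NULL makes the expression evaluate to NULL and is
                       * accepted; only a definitely-false result (say, x = -1) is
                       * reported as a violation.
                       */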
1752                 if (!ExecQual(qual, econtext, true))
1753                         return check[i].ccname;
1754         }
1755
1756         /* NULL result means no error */
1757         return NULL;
1758 }
1759
1760 void
1761 ExecConstraints(ResultRelInfo *resultRelInfo,
1762                                 TupleTableSlot *slot, EState *estate)
1763 {
1764         Relation        rel = resultRelInfo->ri_RelationDesc;
1765         TupleConstr *constr = rel->rd_att->constr;
1766
1767         Assert(constr);
1768
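              /*
               * Enforce NOT NULL column constraints first, then evaluate any
               * CHECK constraints via ExecRelCheck.
               */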
1769         if (constr->has_not_null)
1770         {
1771                 int                     natts = rel->rd_att->natts;
1772                 int                     attrChk;
1773
1774                 for (attrChk = 1; attrChk <= natts; attrChk++)
1775                 {
1776                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1777                                 slot_attisnull(slot, attrChk))
1778                                 ereport(ERROR,
1779                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1780                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1781                                         NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1782                 }
1783         }
1784
1785         if (constr->num_check > 0)
1786         {
1787                 const char *failed;
1788
1789                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1790                         ereport(ERROR,
1791                                         (errcode(ERRCODE_CHECK_VIOLATION),
1792                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1793                                                         RelationGetRelationName(rel), failed)));
1794         }
1795 }
1796
1797 /*
1798  * Check a modified tuple to see if we want to process its updated version
1799  * under READ COMMITTED rules.
1800  *
1801  * See backend/executor/README for some info about how this works.
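       *
       * For example, if two sessions running under READ COMMITTED both execute
       *              UPDATE accounts SET balance = balance - 100 WHERE id = 1;
       * (an illustrative statement), the second one blocks until the first
       * commits and then finds the row already updated.  EvalPlanQual fetches
       * the newest committed version of the row and re-runs the plan quals
       * against it, so the second update is applied to that version (see the
       * HeapTupleUpdated cases in ExecUpdate and ExecDelete).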
1802  */
1803 TupleTableSlot *
1804 EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
1805 {
1806         evalPlanQual *epq;
1807         EState     *epqstate;
1808         Relation        relation;
1809         HeapTupleData tuple;
1810         HeapTuple       copyTuple = NULL;
1811         bool            endNode;
1812
1813         Assert(rti != 0);
1814
1815         /*
1816          * find relation containing target tuple
1817          */
1818         if (estate->es_result_relation_info != NULL &&
1819                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1820                 relation = estate->es_result_relation_info->ri_RelationDesc;
1821         else
1822         {
1823                 ListCell   *l;
1824
1825                 relation = NULL;
1826                 foreach(l, estate->es_rowMark)
1827                 {
1828                         if (((execRowMark *) lfirst(l))->rti == rti)
1829                         {
1830                                 relation = ((execRowMark *) lfirst(l))->relation;
1831                                 break;
1832                         }
1833                 }
1834                 if (relation == NULL)
1835                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1836         }
1837
1838         /*
1839          * fetch tid tuple
1840          *
1841          * Loop here to deal with updated or busy tuples
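               *
               * Each pass either copies the tuple (if it is visible to
               * SnapshotDirty), waits for an in-progress updater and retries,
               * follows t_ctid to a newer version of the row, or gives up
               * because the row is gone.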
1842          */
1843         tuple.t_self = *tid;
1844         for (;;)
1845         {
1846                 Buffer          buffer;
1847
1848                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
1849                 {
1850                         TransactionId xwait = SnapshotDirty->xmax;
1851
1852                         /* xmin should not be dirty... */
1853                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1854                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1855
1856                         /*
1857                          * If the tuple is being updated by another transaction, we have
1858                          * to wait for it to commit or abort.
1859                          */
1860                         if (TransactionIdIsValid(xwait))
1861                         {
1862                                 ReleaseBuffer(buffer);
1863                                 XactLockTableWait(xwait);
1864                                 continue;
1865                         }
1866
1867                         /*
1868                          * We got the tuple - now copy it for use by the recheck query.
1869                          */
1870                         copyTuple = heap_copytuple(&tuple);
1871                         ReleaseBuffer(buffer);
1872                         break;
1873                 }
1874
1875                 /*
1876                  * Oops! The tuple is no longer valid; check whether it was updated or
1877                  * deleted.  Note that it's possible to get an invalid SnapshotDirty->tid
1878                  * if the tuple was updated by this transaction.  Should we check for that?
1879                  */
1880                 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
1881                         !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
1882                 {
1883                         /* updated, so look at the updated copy */
1884                         tuple.t_self = SnapshotDirty->tid;
1885                         continue;
1886                 }
1887
1888                 /*
1889                  * Deleted or updated by this transaction; forget it.
1890                  */
1891                 return NULL;
1892         }
1893
1894         /*
1895          * For UPDATE/DELETE we have to return the tid of the actual row
1896          * we're executing PQ for.
1897          */
1898         *tid = tuple.t_self;
1899
1900         /*
1901          * Need to run a recheck subquery.      Find or create a PQ stack entry.
1902          */
1903         epq = estate->es_evalPlanQual;
1904         endNode = true;
1905
1906         if (epq != NULL && epq->rti == 0)
1907         {
1908                 /* Top PQ stack entry is idle, so re-use it */
1909                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1910                 epq->rti = rti;
1911                 endNode = false;
1912         }
1913
1914         /*
1915          * If this is a request for another RTE, Ra, then we have to check
1916          * whether PlanQual was already requested for Ra.  If so, Ra's row was
1917          * updated again, and we have to restart the old execution for Ra,
1918          * forgetting everything we did after Ra was suspended.
1919          */
1920         if (epq != NULL && epq->rti != rti &&
1921                 epq->estate->es_evTuple[rti - 1] != NULL)
1922         {
1923                 do
1924                 {
1925                         evalPlanQual *oldepq;
1926
1927                         /* stop execution */
1928                         EvalPlanQualStop(epq);
1929                         /* pop previous PlanQual from the stack */
1930                         oldepq = epq->next;
1931                         Assert(oldepq && oldepq->rti != 0);
1932                         /* push current PQ to freePQ stack */
1933                         oldepq->free = epq;
1934                         epq = oldepq;
1935                         estate->es_evalPlanQual = epq;
1936                 } while (epq->rti != rti);
1937         }
1938
1939         /*
1940          * If we are requested for another RTE, we have to suspend execution
1941          * of the current PlanQual and start execution of a new one.
1942          */
1943         if (epq == NULL || epq->rti != rti)
1944         {
1945                 /* try to reuse plan used previously */
1946                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
1947
1948                 if (newepq == NULL)             /* first call or freePQ stack is empty */
1949                 {
1950                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
1951                         newepq->free = NULL;
1952                         newepq->estate = NULL;
1953                         newepq->planstate = NULL;
1954                 }
1955                 else
1956                 {
1957                         /* recycle previously used PlanQual */
1958                         Assert(newepq->estate == NULL);
1959                         epq->free = NULL;
1960                 }
1961                 /* push current PQ to the stack */
1962                 newepq->next = epq;
1963                 epq = newepq;
1964                 estate->es_evalPlanQual = epq;
1965                 epq->rti = rti;
1966                 endNode = false;
1967         }
1968
1969         Assert(epq->rti == rti);
1970
1971         /*
1972          * Ok - we're requested for the same RTE.  Unfortunately we still have
1973          * to end and restart execution of the plan, because ExecReScan
1974          * wouldn't ensure that upper plan nodes would reset themselves.  We
1975          * could make that work if insertion of the target tuple were
1976          * integrated with the Param mechanism somehow, so that the upper plan
1977          * nodes know that their children's outputs have changed.
1978          *
1979          * Note that the stack of free evalPlanQual nodes is quite useless at the
1980          * moment, since it only saves us from pallocing/releasing the
1981          * evalPlanQual nodes themselves.  But it will be useful once we
1982          * implement ReScan instead of end/restart for re-using PlanQual
1983          * nodes.
1984          */
1985         if (endNode)
1986         {
1987                 /* stop execution */
1988                 EvalPlanQualStop(epq);
1989         }
1990
1991         /*
1992          * Initialize new recheck query.
1993          *
1994          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
1995          * instead copy down changeable state from the top plan (including
1996          * es_result_relation_info, es_junkFilter) and reset locally
1997          * changeable state in the epq (including es_param_exec_vals,
1998          * es_evTupleNull).
1999          */
2000         EvalPlanQualStart(epq, estate, epq->next);
2001
2002         /*
2003          * free the old tuple for this RTE, if any, and store the target
2004          * tuple where the relation's scan node will see it
2005          */
2006         epqstate = epq->estate;
2007         if (epqstate->es_evTuple[rti - 1] != NULL)
2008                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2009         epqstate->es_evTuple[rti - 1] = copyTuple;
2010
2011         return EvalPlanQualNext(estate);
2012 }
2013
2014 static TupleTableSlot *
2015 EvalPlanQualNext(EState *estate)
2016 {
2017         evalPlanQual *epq = estate->es_evalPlanQual;
2018         MemoryContext oldcontext;
2019         TupleTableSlot *slot;
2020
2021         Assert(epq->rti != 0);
2022
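              /*
               * Pull the next tuple from the recheck plan.  If this level is
               * exhausted, pop back to the previous PlanQual level, or return
               * NULL if there is none.
               */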
2023 lpqnext:;
2024         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2025         slot = ExecProcNode(epq->planstate);
2026         MemoryContextSwitchTo(oldcontext);
2027
2028         /*
2029          * No more tuples for this PQ. Continue previous one.
2030          */
2031         if (TupIsNull(slot))
2032         {
2033                 evalPlanQual *oldepq;
2034
2035                 /* stop execution */
2036                 EvalPlanQualStop(epq);
2037                 /* pop old PQ from the stack */
2038                 oldepq = epq->next;
2039                 if (oldepq == NULL)
2040                 {
2041                         /* this is the first (oldest) PQ - mark as free */
2042                         epq->rti = 0;
2043                         estate->es_useEvalPlan = false;
2044                         /* and continue Query execution */
2045                         return (NULL);
2046                 }
2047                 Assert(oldepq->rti != 0);
2048                 /* push current PQ to freePQ stack */
2049                 oldepq->free = epq;
2050                 epq = oldepq;
2051                 estate->es_evalPlanQual = epq;
2052                 goto lpqnext;
2053         }
2054
2055         return (slot);
2056 }
2057
2058 static void
2059 EndEvalPlanQual(EState *estate)
2060 {
2061         evalPlanQual *epq = estate->es_evalPlanQual;
2062
2063         if (epq->rti == 0)                      /* plans already shut down */
2064         {
2065                 Assert(epq->next == NULL);
2066                 return;
2067         }
2068
2069         for (;;)
2070         {
2071                 evalPlanQual *oldepq;
2072
2073                 /* stop execution */
2074                 EvalPlanQualStop(epq);
2075                 /* pop old PQ from the stack */
2076                 oldepq = epq->next;
2077                 if (oldepq == NULL)
2078                 {
2079                         /* this is the first (oldest) PQ - mark as free */
2080                         epq->rti = 0;
2081                         estate->es_useEvalPlan = false;
2082                         break;
2083                 }
2084                 Assert(oldepq->rti != 0);
2085                 /* push current PQ to freePQ stack */
2086                 oldepq->free = epq;
2087                 epq = oldepq;
2088                 estate->es_evalPlanQual = epq;
2089         }
2090 }
2091
2092 /*
2093  * Start execution of one level of PlanQual.
2094  *
2095  * This is a cut-down version of ExecutorStart(): we copy some state from
2096  * the top-level estate rather than initializing it fresh.
2097  */
2098 static void
2099 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2100 {
2101         EState     *epqstate;
2102         int                     rtsize;
2103         MemoryContext oldcontext;
2104
2105         rtsize = list_length(estate->es_range_table);
2106
2107         epq->estate = epqstate = CreateExecutorState();
2108
2109         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2110
2111         /*
2112          * The epqstates share the top query's copy of unchanging state such
2113          * as the snapshot, rangetable, result-rel info, and external Param
2114          * info. They need their own copies of local state, including a tuple
2115          * table, es_param_exec_vals, etc.
2116          */
2117         epqstate->es_direction = ForwardScanDirection;
2118         epqstate->es_snapshot = estate->es_snapshot;
2119         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2120         epqstate->es_range_table = estate->es_range_table;
2121         epqstate->es_result_relations = estate->es_result_relations;
2122         epqstate->es_num_result_relations = estate->es_num_result_relations;
2123         epqstate->es_result_relation_info = estate->es_result_relation_info;
2124         epqstate->es_junkFilter = estate->es_junkFilter;
2125         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2126         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2127         epqstate->es_param_list_info = estate->es_param_list_info;
2128         if (estate->es_topPlan->nParamExec > 0)
2129                 epqstate->es_param_exec_vals = (ParamExecData *)
2130                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2131         epqstate->es_rowMark = estate->es_rowMark;
2132         epqstate->es_forUpdate = estate->es_forUpdate;
2133         epqstate->es_instrument = estate->es_instrument;
2134         epqstate->es_select_into = estate->es_select_into;
2135         epqstate->es_into_oids = estate->es_into_oids;
2136         epqstate->es_topPlan = estate->es_topPlan;
2137
2138         /*
2139          * Each epqstate must have its own es_evTupleNull state, but all the
2140          * stack entries share es_evTuple state.  This allows sub-rechecks to
2141          * inherit the value being examined by an outer recheck.
2142          */
2143         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2144         if (priorepq == NULL)
2145                 /* first PQ stack entry */
2146                 epqstate->es_evTuple = (HeapTuple *)
2147                         palloc0(rtsize * sizeof(HeapTuple));
2148         else
2149                 /* later stack entries share the same storage */
2150                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2151
2152         epqstate->es_tupleTable =
2153                 ExecCreateTupleTable(estate->es_tupleTable->size);
2154
2155         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2156
2157         MemoryContextSwitchTo(oldcontext);
2158 }
2159
2160 /*
2161  * End execution of one level of PlanQual.
2162  *
2163  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2164  * of the normal cleanup, but *not* close result relations (which we are
2165  * just sharing from the outer query).
2166  */
2167 static void
2168 EvalPlanQualStop(evalPlanQual *epq)
2169 {
2170         EState     *epqstate = epq->estate;
2171         MemoryContext oldcontext;
2172
2173         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2174
2175         ExecEndNode(epq->planstate);
2176
2177         ExecDropTupleTable(epqstate->es_tupleTable, true);
2178         epqstate->es_tupleTable = NULL;
2179
2180         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2181         {
2182                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2183                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2184         }
2185
2186         MemoryContextSwitchTo(oldcontext);
2187
2188         FreeExecutorState(epqstate);
2189
2190         epq->estate = NULL;
2191         epq->planstate = NULL;
2192 }