/*
 * Source provenance: PostgreSQL git (granicus.if.org mirror),
 * src/backend/executor/execMain.c, as of the "pgindent run for 8.3" commit.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.299 2007/11/15 21:14:34 momjian Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "optimizer/clauses.h"
49 #include "parser/parse_clause.h"
50 #include "parser/parsetree.h"
51 #include "storage/smgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55
56
/*
 * Bookkeeping for one EvalPlanQual recheck level.  Entries form a stack
 * (via 'next') of currently-active rechecks, plus a freelist (via 'free')
 * of entries kept around for reuse; see the EvalPlanQualStart/Next/Stop
 * routines declared below.
 */
typedef struct evalPlanQual
{
	Index		rti;			/* range-table index of the target relation
								 * being rechecked */
	EState	   *estate;			/* private executor state for this recheck */
	PlanState  *planstate;		/* plan tree initialized for this recheck */
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
65
66 /* decls for local routines only used within this module */
67 static void InitPlan(QueryDesc *queryDesc, int eflags);
68 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
69                                   Relation resultRelationDesc,
70                                   Index resultRelationIndex,
71                                   CmdType operation,
72                                   bool doInstrument);
73 static void ExecEndPlan(PlanState *planstate, EState *estate);
74 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
75                         CmdType operation,
76                         long numberTuples,
77                         ScanDirection direction,
78                         DestReceiver *dest);
79 static void ExecSelect(TupleTableSlot *slot,
80                    DestReceiver *dest, EState *estate);
81 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
82                    TupleTableSlot *planSlot,
83                    DestReceiver *dest, EState *estate);
84 static void ExecDelete(ItemPointer tupleid,
85                    TupleTableSlot *planSlot,
86                    DestReceiver *dest, EState *estate);
87 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
88                    TupleTableSlot *planSlot,
89                    DestReceiver *dest, EState *estate);
90 static void ExecProcessReturning(ProjectionInfo *projectReturning,
91                                          TupleTableSlot *tupleSlot,
92                                          TupleTableSlot *planSlot,
93                                          DestReceiver *dest);
94 static TupleTableSlot *EvalPlanQualNext(EState *estate);
95 static void EndEvalPlanQual(EState *estate);
96 static void ExecCheckRTPerms(List *rangeTable);
97 static void ExecCheckRTEPerms(RangeTblEntry *rte);
98 static void ExecCheckXactReadOnly(PlannedStmt * plannedstmt);
99 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
100                                   evalPlanQual *priorepq);
101 static void EvalPlanQualStop(evalPlanQual *epq);
102 static void OpenIntoRel(QueryDesc *queryDesc);
103 static void CloseIntoRel(QueryDesc *queryDesc);
104 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
105 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
106 static void intorel_shutdown(DestReceiver *self);
107 static void intorel_destroy(DestReceiver *self);
108
109 /* end of local decls */
110
111
112 /* ----------------------------------------------------------------
113  *              ExecutorStart
114  *
115  *              This routine must be called at the beginning of any execution of any
116  *              query plan
117  *
118  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
119  * clear why we bother to separate the two functions, but...).  The tupDesc
120  * field of the QueryDesc is filled in to describe the tuples that will be
121  * returned, and the internal fields (estate and planstate) are set up.
122  *
123  * eflags contains flag bits as described in executor.h.
124  *
125  * NB: the CurrentMemoryContext when this is called will become the parent
126  * of the per-query context used for this Executor invocation.
127  * ----------------------------------------------------------------
128  */
129 void
130 ExecutorStart(QueryDesc *queryDesc, int eflags)
131 {
132         EState     *estate;
133         MemoryContext oldcontext;
134
135         /* sanity checks: queryDesc must not be started already */
136         Assert(queryDesc != NULL);
137         Assert(queryDesc->estate == NULL);
138
139         /*
140          * If the transaction is read-only, we need to check if any writes are
141          * planned to non-temporary tables.  EXPLAIN is considered read-only.
142          */
143         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
144                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
145
146         /*
147          * Build EState, switch into per-query memory context for startup.
148          */
149         estate = CreateExecutorState();
150         queryDesc->estate = estate;
151
152         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
153
154         /*
155          * Fill in parameters, if any, from queryDesc
156          */
157         estate->es_param_list_info = queryDesc->params;
158
159         if (queryDesc->plannedstmt->nParamExec > 0)
160                 estate->es_param_exec_vals = (ParamExecData *)
161                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
162
163         /*
164          * Copy other important information into the EState
165          */
166         estate->es_snapshot = queryDesc->snapshot;
167         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
168         estate->es_instrument = queryDesc->doInstrument;
169
170         /*
171          * Initialize the plan state tree
172          */
173         InitPlan(queryDesc, eflags);
174
175         MemoryContextSwitchTo(oldcontext);
176 }
177
178 /* ----------------------------------------------------------------
179  *              ExecutorRun
180  *
181  *              This is the main routine of the executor module. It accepts
182  *              the query descriptor from the traffic cop and executes the
183  *              query plan.
184  *
185  *              ExecutorStart must have been called already.
186  *
187  *              If direction is NoMovementScanDirection then nothing is done
188  *              except to start up/shut down the destination.  Otherwise,
189  *              we retrieve up to 'count' tuples in the specified direction.
190  *
191  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
192  *              completion.
193  *
194  * ----------------------------------------------------------------
195  */
196 TupleTableSlot *
197 ExecutorRun(QueryDesc *queryDesc,
198                         ScanDirection direction, long count)
199 {
200         EState     *estate;
201         CmdType         operation;
202         DestReceiver *dest;
203         bool            sendTuples;
204         TupleTableSlot *result;
205         MemoryContext oldcontext;
206
207         /* sanity checks */
208         Assert(queryDesc != NULL);
209
210         estate = queryDesc->estate;
211
212         Assert(estate != NULL);
213
214         /*
215          * Switch into per-query memory context
216          */
217         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
218
219         /*
220          * extract information from the query descriptor and the query feature.
221          */
222         operation = queryDesc->operation;
223         dest = queryDesc->dest;
224
225         /*
226          * startup tuple receiver, if we will be emitting tuples
227          */
228         estate->es_processed = 0;
229         estate->es_lastoid = InvalidOid;
230
231         sendTuples = (operation == CMD_SELECT ||
232                                   queryDesc->plannedstmt->returningLists);
233
234         if (sendTuples)
235                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
236
237         /*
238          * run plan
239          */
240         if (ScanDirectionIsNoMovement(direction))
241                 result = NULL;
242         else
243                 result = ExecutePlan(estate,
244                                                          queryDesc->planstate,
245                                                          operation,
246                                                          count,
247                                                          direction,
248                                                          dest);
249
250         /*
251          * shutdown tuple receiver, if we started it
252          */
253         if (sendTuples)
254                 (*dest->rShutdown) (dest);
255
256         MemoryContextSwitchTo(oldcontext);
257
258         return result;
259 }
260
261 /* ----------------------------------------------------------------
262  *              ExecutorEnd
263  *
264  *              This routine must be called at the end of execution of any
265  *              query plan
266  * ----------------------------------------------------------------
267  */
268 void
269 ExecutorEnd(QueryDesc *queryDesc)
270 {
271         EState     *estate;
272         MemoryContext oldcontext;
273
274         /* sanity checks */
275         Assert(queryDesc != NULL);
276
277         estate = queryDesc->estate;
278
279         Assert(estate != NULL);
280
281         /*
282          * Switch into per-query memory context to run ExecEndPlan
283          */
284         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
285
286         ExecEndPlan(queryDesc->planstate, estate);
287
288         /*
289          * Close the SELECT INTO relation if any
290          */
291         if (estate->es_select_into)
292                 CloseIntoRel(queryDesc);
293
294         /*
295          * Must switch out of context before destroying it
296          */
297         MemoryContextSwitchTo(oldcontext);
298
299         /*
300          * Release EState and per-query memory context.  This should release
301          * everything the executor has allocated.
302          */
303         FreeExecutorState(estate);
304
305         /* Reset queryDesc fields that no longer point to anything */
306         queryDesc->tupDesc = NULL;
307         queryDesc->estate = NULL;
308         queryDesc->planstate = NULL;
309 }
310
311 /* ----------------------------------------------------------------
312  *              ExecutorRewind
313  *
314  *              This routine may be called on an open queryDesc to rewind it
315  *              to the start.
316  * ----------------------------------------------------------------
317  */
318 void
319 ExecutorRewind(QueryDesc *queryDesc)
320 {
321         EState     *estate;
322         MemoryContext oldcontext;
323
324         /* sanity checks */
325         Assert(queryDesc != NULL);
326
327         estate = queryDesc->estate;
328
329         Assert(estate != NULL);
330
331         /* It's probably not sensible to rescan updating queries */
332         Assert(queryDesc->operation == CMD_SELECT);
333
334         /*
335          * Switch into per-query memory context
336          */
337         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
338
339         /*
340          * rescan plan
341          */
342         ExecReScan(queryDesc->planstate, NULL);
343
344         MemoryContextSwitchTo(oldcontext);
345 }
346
347
348 /*
349  * ExecCheckRTPerms
350  *              Check access permissions for all relations listed in a range table.
351  */
352 static void
353 ExecCheckRTPerms(List *rangeTable)
354 {
355         ListCell   *l;
356
357         foreach(l, rangeTable)
358         {
359                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
360         }
361 }
362
363 /*
364  * ExecCheckRTEPerms
365  *              Check access permissions for a single RTE.
366  */
367 static void
368 ExecCheckRTEPerms(RangeTblEntry *rte)
369 {
370         AclMode         requiredPerms;
371         Oid                     relOid;
372         Oid                     userid;
373
374         /*
375          * Only plain-relation RTEs need to be checked here.  Function RTEs are
376          * checked by init_fcache when the function is prepared for execution.
377          * Join, subquery, and special RTEs need no checks.
378          */
379         if (rte->rtekind != RTE_RELATION)
380                 return;
381
382         /*
383          * No work if requiredPerms is empty.
384          */
385         requiredPerms = rte->requiredPerms;
386         if (requiredPerms == 0)
387                 return;
388
389         relOid = rte->relid;
390
391         /*
392          * userid to check as: current user unless we have a setuid indication.
393          *
394          * Note: GetUserId() is presently fast enough that there's no harm in
395          * calling it separately for each RTE.  If that stops being true, we could
396          * call it once in ExecCheckRTPerms and pass the userid down from there.
397          * But for now, no need for the extra clutter.
398          */
399         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
400
401         /*
402          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
403          */
404         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
405                 != requiredPerms)
406                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
407                                            get_rel_name(relOid));
408 }
409
410 /*
411  * Check that the query does not imply any writes to non-temp tables.
412  */
413 static void
414 ExecCheckXactReadOnly(PlannedStmt * plannedstmt)
415 {
416         ListCell   *l;
417
418         /*
419          * CREATE TABLE AS or SELECT INTO?
420          *
421          * XXX should we allow this if the destination is temp?
422          */
423         if (plannedstmt->intoClause != NULL)
424                 goto fail;
425
426         /* Fail if write permissions are requested on any non-temp table */
427         foreach(l, plannedstmt->rtable)
428         {
429                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
430
431                 if (rte->rtekind != RTE_RELATION)
432                         continue;
433
434                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
435                         continue;
436
437                 if (isTempNamespace(get_rel_namespace(rte->relid)))
438                         continue;
439
440                 goto fail;
441         }
442
443         return;
444
445 fail:
446         ereport(ERROR,
447                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
448                          errmsg("transaction is read-only")));
449 }
450
451
452 /* ----------------------------------------------------------------
453  *              InitPlan
454  *
455  *              Initializes the query plan: open files, allocate storage
456  *              and start up the rule manager
457  * ----------------------------------------------------------------
458  */
459 static void
460 InitPlan(QueryDesc *queryDesc, int eflags)
461 {
462         CmdType         operation = queryDesc->operation;
463         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
464         Plan       *plan = plannedstmt->planTree;
465         List       *rangeTable = plannedstmt->rtable;
466         EState     *estate = queryDesc->estate;
467         PlanState  *planstate;
468         TupleDesc       tupType;
469         ListCell   *l;
470         int                     i;
471
472         /*
473          * Do permissions checks
474          */
475         ExecCheckRTPerms(rangeTable);
476
477         /*
478          * initialize the node's execution state
479          */
480         estate->es_range_table = rangeTable;
481
482         /*
483          * initialize result relation stuff
484          */
485         if (plannedstmt->resultRelations)
486         {
487                 List       *resultRelations = plannedstmt->resultRelations;
488                 int                     numResultRelations = list_length(resultRelations);
489                 ResultRelInfo *resultRelInfos;
490                 ResultRelInfo *resultRelInfo;
491
492                 resultRelInfos = (ResultRelInfo *)
493                         palloc(numResultRelations * sizeof(ResultRelInfo));
494                 resultRelInfo = resultRelInfos;
495                 foreach(l, resultRelations)
496                 {
497                         Index           resultRelationIndex = lfirst_int(l);
498                         Oid                     resultRelationOid;
499                         Relation        resultRelation;
500
501                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
502                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
503                         initResultRelInfo(resultRelInfo,
504                                                           resultRelation,
505                                                           resultRelationIndex,
506                                                           operation,
507                                                           estate->es_instrument);
508                         resultRelInfo++;
509                 }
510                 estate->es_result_relations = resultRelInfos;
511                 estate->es_num_result_relations = numResultRelations;
512                 /* Initialize to first or only result rel */
513                 estate->es_result_relation_info = resultRelInfos;
514         }
515         else
516         {
517                 /*
518                  * if no result relation, then set state appropriately
519                  */
520                 estate->es_result_relations = NULL;
521                 estate->es_num_result_relations = 0;
522                 estate->es_result_relation_info = NULL;
523         }
524
525         /*
526          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
527          * flag appropriately so that the plan tree will be initialized with the
528          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
529          */
530         estate->es_select_into = false;
531         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
532         {
533                 estate->es_select_into = true;
534                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
535         }
536
537         /*
538          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
539          * initialize the plan tree, else we'd be doing a lock upgrade. While we
540          * are at it, build the ExecRowMark list.
541          */
542         estate->es_rowMarks = NIL;
543         foreach(l, plannedstmt->rowMarks)
544         {
545                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
546                 Oid                     relid = getrelid(rc->rti, rangeTable);
547                 Relation        relation;
548                 ExecRowMark *erm;
549
550                 relation = heap_open(relid, RowShareLock);
551                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
552                 erm->relation = relation;
553                 erm->rti = rc->rti;
554                 erm->forUpdate = rc->forUpdate;
555                 erm->noWait = rc->noWait;
556                 /* We'll set up ctidAttno below */
557                 erm->ctidAttNo = InvalidAttrNumber;
558                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
559         }
560
561         /*
562          * Initialize the executor "tuple" table.  We need slots for all the plan
563          * nodes, plus possibly output slots for the junkfilter(s). At this point
564          * we aren't sure if we need junkfilters, so just add slots for them
565          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
566          * trigger output tuples.  Also, one for RETURNING-list evaluation.
567          */
568         {
569                 int                     nSlots;
570
571                 /* Slots for the main plan tree */
572                 nSlots = ExecCountSlotsNode(plan);
573                 /* Add slots for subplans and initplans */
574                 foreach(l, plannedstmt->subplans)
575                 {
576                         Plan       *subplan = (Plan *) lfirst(l);
577
578                         nSlots += ExecCountSlotsNode(subplan);
579                 }
580                 /* Add slots for junkfilter(s) */
581                 if (plannedstmt->resultRelations != NIL)
582                         nSlots += list_length(plannedstmt->resultRelations);
583                 else
584                         nSlots += 1;
585                 if (operation != CMD_SELECT)
586                         nSlots++;                       /* for es_trig_tuple_slot */
587                 if (plannedstmt->returningLists)
588                         nSlots++;                       /* for RETURNING projection */
589
590                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
591
592                 if (operation != CMD_SELECT)
593                         estate->es_trig_tuple_slot =
594                                 ExecAllocTableSlot(estate->es_tupleTable);
595         }
596
597         /* mark EvalPlanQual not active */
598         estate->es_plannedstmt = plannedstmt;
599         estate->es_evalPlanQual = NULL;
600         estate->es_evTupleNull = NULL;
601         estate->es_evTuple = NULL;
602         estate->es_useEvalPlan = false;
603
604         /*
605          * Initialize private state information for each SubPlan.  We must do this
606          * before running ExecInitNode on the main query tree, since
607          * ExecInitSubPlan expects to be able to find these entries.
608          */
609         Assert(estate->es_subplanstates == NIL);
610         i = 1;                                          /* subplan indices count from 1 */
611         foreach(l, plannedstmt->subplans)
612         {
613                 Plan       *subplan = (Plan *) lfirst(l);
614                 PlanState  *subplanstate;
615                 int                     sp_eflags;
616
617                 /*
618                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
619                  * it is a parameterless subplan (not initplan), we suggest that it be
620                  * prepared to handle REWIND efficiently; otherwise there is no need.
621                  */
622                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
623                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
624                         sp_eflags |= EXEC_FLAG_REWIND;
625
626                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
627
628                 estate->es_subplanstates = lappend(estate->es_subplanstates,
629                                                                                    subplanstate);
630
631                 i++;
632         }
633
634         /*
635          * Initialize the private state information for all the nodes in the query
636          * tree.  This opens files, allocates storage and leaves us ready to start
637          * processing tuples.
638          */
639         planstate = ExecInitNode(plan, estate, eflags);
640
641         /*
642          * Get the tuple descriptor describing the type of tuples to return. (this
643          * is especially important if we are creating a relation with "SELECT
644          * INTO")
645          */
646         tupType = ExecGetResultType(planstate);
647
648         /*
649          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
650          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
651          * INTO also need a filter if the plan may return raw disk tuples (else
652          * heap_insert will be scribbling on the source relation!). UPDATE and
653          * DELETE always need a filter, since there's always a junk 'ctid'
654          * attribute present --- no need to look first.
655          */
656         {
657                 bool            junk_filter_needed = false;
658                 ListCell   *tlist;
659
660                 switch (operation)
661                 {
662                         case CMD_SELECT:
663                         case CMD_INSERT:
664                                 foreach(tlist, plan->targetlist)
665                                 {
666                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
667
668                                         if (tle->resjunk)
669                                         {
670                                                 junk_filter_needed = true;
671                                                 break;
672                                         }
673                                 }
674                                 if (!junk_filter_needed &&
675                                         (operation == CMD_INSERT || estate->es_select_into) &&
676                                         ExecMayReturnRawTuples(planstate))
677                                         junk_filter_needed = true;
678                                 break;
679                         case CMD_UPDATE:
680                         case CMD_DELETE:
681                                 junk_filter_needed = true;
682                                 break;
683                         default:
684                                 break;
685                 }
686
687                 if (junk_filter_needed)
688                 {
689                         /*
690                          * If there are multiple result relations, each one needs its own
691                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
692                          * we can't be fooled by some needing a filter and some not.
693                          */
694                         if (list_length(plannedstmt->resultRelations) > 1)
695                         {
696                                 PlanState **appendplans;
697                                 int                     as_nplans;
698                                 ResultRelInfo *resultRelInfo;
699
700                                 /* Top plan had better be an Append here. */
701                                 Assert(IsA(plan, Append));
702                                 Assert(((Append *) plan)->isTarget);
703                                 Assert(IsA(planstate, AppendState));
704                                 appendplans = ((AppendState *) planstate)->appendplans;
705                                 as_nplans = ((AppendState *) planstate)->as_nplans;
706                                 Assert(as_nplans == estate->es_num_result_relations);
707                                 resultRelInfo = estate->es_result_relations;
708                                 for (i = 0; i < as_nplans; i++)
709                                 {
710                                         PlanState  *subplan = appendplans[i];
711                                         JunkFilter *j;
712
713                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
714                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
715                                                                   ExecAllocTableSlot(estate->es_tupleTable));
716
717                                         /*
718                                          * Since it must be UPDATE/DELETE, there had better be a
719                                          * "ctid" junk attribute in the tlist ... but ctid could
720                                          * be at a different resno for each result relation. We
721                                          * look up the ctid resnos now and save them in the
722                                          * junkfilters.
723                                          */
724                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
725                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
726                                                 elog(ERROR, "could not find junk ctid column");
727                                         resultRelInfo->ri_junkFilter = j;
728                                         resultRelInfo++;
729                                 }
730
731                                 /*
732                                  * Set active junkfilter too; at this point ExecInitAppend has
733                                  * already selected an active result relation...
734                                  */
735                                 estate->es_junkFilter =
736                                         estate->es_result_relation_info->ri_junkFilter;
737                         }
738                         else
739                         {
740                                 /* Normal case with just one JunkFilter */
741                                 JunkFilter *j;
742
743                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
744                                                                            tupType->tdhasoid,
745                                                                   ExecAllocTableSlot(estate->es_tupleTable));
746                                 estate->es_junkFilter = j;
747                                 if (estate->es_result_relation_info)
748                                         estate->es_result_relation_info->ri_junkFilter = j;
749
750                                 if (operation == CMD_SELECT)
751                                 {
752                                         /* For SELECT, want to return the cleaned tuple type */
753                                         tupType = j->jf_cleanTupType;
754                                         /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
755                                         foreach(l, estate->es_rowMarks)
756                                         {
757                                                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
758                                                 char            resname[32];
759
760                                                 snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
761                                                 erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
762                                                 if (!AttributeNumberIsValid(erm->ctidAttNo))
763                                                         elog(ERROR, "could not find junk \"%s\" column",
764                                                                  resname);
765                                         }
766                                 }
767                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
768                                 {
769                                         /* For UPDATE/DELETE, find the ctid junk attr now */
770                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
771                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
772                                                 elog(ERROR, "could not find junk ctid column");
773                                 }
774                         }
775                 }
776                 else
777                         estate->es_junkFilter = NULL;
778         }
779
780         /*
781          * Initialize RETURNING projections if needed.
782          */
783         if (plannedstmt->returningLists)
784         {
785                 TupleTableSlot *slot;
786                 ExprContext *econtext;
787                 ResultRelInfo *resultRelInfo;
788
789                 /*
790                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
791                  * We assume all the sublists will generate the same output tupdesc.
792                  */
793                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
794                                                                  false);
795
796                 /* Set up a slot for the output of the RETURNING projection(s) */
797                 slot = ExecAllocTableSlot(estate->es_tupleTable);
798                 ExecSetSlotDescriptor(slot, tupType);
799                 /* Need an econtext too */
800                 econtext = CreateExprContext(estate);
801
802                 /*
803                  * Build a projection for each result rel.      Note that any SubPlans in
804                  * the RETURNING lists get attached to the topmost plan node.
805                  */
806                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
807                 resultRelInfo = estate->es_result_relations;
808                 foreach(l, plannedstmt->returningLists)
809                 {
810                         List       *rlist = (List *) lfirst(l);
811                         List       *rliststate;
812
813                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
814                         resultRelInfo->ri_projectReturning =
815                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
816                                                                          resultRelInfo->ri_RelationDesc->rd_att);
817                         resultRelInfo++;
818                 }
819         }
820
821         queryDesc->tupDesc = tupType;
822         queryDesc->planstate = planstate;
823
824         /*
825          * If doing SELECT INTO, initialize the "into" relation.  We must wait
826          * till now so we have the "clean" result tuple type to create the new
827          * table from.
828          *
829          * If EXPLAIN, skip creating the "into" relation.
830          */
831         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
832                 OpenIntoRel(queryDesc);
833 }
834
835 /*
836  * Initialize ResultRelInfo data for one result relation
837  */
838 static void
839 initResultRelInfo(ResultRelInfo *resultRelInfo,
840                                   Relation resultRelationDesc,
841                                   Index resultRelationIndex,
842                                   CmdType operation,
843                                   bool doInstrument)
844 {
845         /*
846          * Check valid relkind ... parser and/or planner should have noticed this
847          * already, but let's make sure.
848          */
849         switch (resultRelationDesc->rd_rel->relkind)
850         {
851                 case RELKIND_RELATION:
852                         /* OK */
853                         break;
854                 case RELKIND_SEQUENCE:
855                         ereport(ERROR,
856                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
857                                          errmsg("cannot change sequence \"%s\"",
858                                                         RelationGetRelationName(resultRelationDesc))));
859                         break;
860                 case RELKIND_TOASTVALUE:
861                         ereport(ERROR,
862                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
863                                          errmsg("cannot change TOAST relation \"%s\"",
864                                                         RelationGetRelationName(resultRelationDesc))));
865                         break;
866                 case RELKIND_VIEW:
867                         ereport(ERROR,
868                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
869                                          errmsg("cannot change view \"%s\"",
870                                                         RelationGetRelationName(resultRelationDesc))));
871                         break;
872                 default:
873                         ereport(ERROR,
874                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
875                                          errmsg("cannot change relation \"%s\"",
876                                                         RelationGetRelationName(resultRelationDesc))));
877                         break;
878         }
879
880         /* OK, fill in the node */
881         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
882         resultRelInfo->type = T_ResultRelInfo;
883         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
884         resultRelInfo->ri_RelationDesc = resultRelationDesc;
885         resultRelInfo->ri_NumIndices = 0;
886         resultRelInfo->ri_IndexRelationDescs = NULL;
887         resultRelInfo->ri_IndexRelationInfo = NULL;
888         /* make a copy so as not to depend on relcache info not changing... */
889         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
890         if (resultRelInfo->ri_TrigDesc)
891         {
892                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
893
894                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
895                         palloc0(n * sizeof(FmgrInfo));
896                 if (doInstrument)
897                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
898                 else
899                         resultRelInfo->ri_TrigInstrument = NULL;
900         }
901         else
902         {
903                 resultRelInfo->ri_TrigFunctions = NULL;
904                 resultRelInfo->ri_TrigInstrument = NULL;
905         }
906         resultRelInfo->ri_ConstraintExprs = NULL;
907         resultRelInfo->ri_junkFilter = NULL;
908         resultRelInfo->ri_projectReturning = NULL;
909
910         /*
911          * If there are indices on the result relation, open them and save
912          * descriptors in the result relation info, so that we can add new index
913          * entries for the tuples we add/update.  We need not do this for a
914          * DELETE, however, since deletion doesn't affect indexes.
915          */
916         if (resultRelationDesc->rd_rel->relhasindex &&
917                 operation != CMD_DELETE)
918                 ExecOpenIndices(resultRelInfo);
919 }
920
921 /*
922  *              ExecGetTriggerResultRel
923  *
924  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
925  * triggers are fired on one of the result relations of the query, and so
926  * we can just return a member of the es_result_relations array.  (Note: in
927  * self-join situations there might be multiple members with the same OID;
928  * if so it doesn't matter which one we pick.)  However, it is sometimes
929  * necessary to fire triggers on other relations; this happens mainly when an
930  * RI update trigger queues additional triggers on other relations, which will
931  * be processed in the context of the outer query.      For efficiency's sake,
932  * we want to have a ResultRelInfo for those triggers too; that can avoid
933  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
934  * ANALYZE to report the runtimes of such triggers.)  So we make additional
935  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
936  */
937 ResultRelInfo *
938 ExecGetTriggerResultRel(EState *estate, Oid relid)
939 {
940         ResultRelInfo *rInfo;
941         int                     nr;
942         ListCell   *l;
943         Relation        rel;
944         MemoryContext oldcontext;
945
946         /* First, search through the query result relations */
947         rInfo = estate->es_result_relations;
948         nr = estate->es_num_result_relations;
949         while (nr > 0)
950         {
951                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
952                         return rInfo;
953                 rInfo++;
954                 nr--;
955         }
956         /* Nope, but maybe we already made an extra ResultRelInfo for it */
957         foreach(l, estate->es_trig_target_relations)
958         {
959                 rInfo = (ResultRelInfo *) lfirst(l);
960                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
961                         return rInfo;
962         }
963         /* Nope, so we need a new one */
964
965         /*
966          * Open the target relation's relcache entry.  We assume that an
967          * appropriate lock is still held by the backend from whenever the trigger
968          * event got queued, so we need take no new lock here.
969          */
970         rel = heap_open(relid, NoLock);
971
972         /*
973          * Make the new entry in the right context.  Currently, we don't need any
974          * index information in ResultRelInfos used only for triggers, so tell
975          * initResultRelInfo it's a DELETE.
976          */
977         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
978         rInfo = makeNode(ResultRelInfo);
979         initResultRelInfo(rInfo,
980                                           rel,
981                                           0,            /* dummy rangetable index */
982                                           CMD_DELETE,
983                                           estate->es_instrument);
984         estate->es_trig_target_relations =
985                 lappend(estate->es_trig_target_relations, rInfo);
986         MemoryContextSwitchTo(oldcontext);
987
988         return rInfo;
989 }
990
991 /*
992  *              ExecContextForcesOids
993  *
994  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
995  * we need to ensure that result tuples have space for an OID iff they are
996  * going to be stored into a relation that has OIDs.  In other contexts
997  * we are free to choose whether to leave space for OIDs in result tuples
998  * (we generally don't want to, but we do if a physical-tlist optimization
999  * is possible).  This routine checks the plan context and returns TRUE if the
1000  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1001  * *hasoids is set to the required value.
1002  *
1003  * One reason this is ugly is that all plan nodes in the plan tree will emit
1004  * tuples with space for an OID, though we really only need the topmost node
1005  * to do so.  However, node types like Sort don't project new tuples but just
1006  * return their inputs, and in those cases the requirement propagates down
1007  * to the input node.  Eventually we might make this code smart enough to
1008  * recognize how far down the requirement really goes, but for now we just
1009  * make all plan nodes do the same thing if the top level forces the choice.
1010  *
1011  * We assume that estate->es_result_relation_info is already set up to
1012  * describe the target relation.  Note that in an UPDATE that spans an
1013  * inheritance tree, some of the target relations may have OIDs and some not.
1014  * We have to make the decisions on a per-relation basis as we initialize
1015  * each of the child plans of the topmost Append plan.
1016  *
1017  * SELECT INTO is even uglier, because we don't have the INTO relation's
1018  * descriptor available when this code runs; we have to look aside at a
1019  * flag set by InitPlan().
1020  */
1021 bool
1022 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1023 {
1024         if (planstate->state->es_select_into)
1025         {
1026                 *hasoids = planstate->state->es_into_oids;
1027                 return true;
1028         }
1029         else
1030         {
1031                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1032
1033                 if (ri != NULL)
1034                 {
1035                         Relation        rel = ri->ri_RelationDesc;
1036
1037                         if (rel != NULL)
1038                         {
1039                                 *hasoids = rel->rd_rel->relhasoids;
1040                                 return true;
1041                         }
1042                 }
1043         }
1044
1045         return false;
1046 }
1047
1048 /* ----------------------------------------------------------------
1049  *              ExecEndPlan
1050  *
1051  *              Cleans up the query plan -- closes files and frees up storage
1052  *
1053  * NOTE: we are no longer very worried about freeing storage per se
1054  * in this code; FreeExecutorState should be guaranteed to release all
1055  * memory that needs to be released.  What we are worried about doing
1056  * is closing relations and dropping buffer pins.  Thus, for example,
1057  * tuple tables must be cleared or dropped to ensure pins are released.
1058  * ----------------------------------------------------------------
1059  */
1060 static void
1061 ExecEndPlan(PlanState *planstate, EState *estate)
1062 {
1063         ResultRelInfo *resultRelInfo;
1064         int                     i;
1065         ListCell   *l;
1066
1067         /*
1068          * shut down any PlanQual processing we were doing
1069          */
1070         if (estate->es_evalPlanQual != NULL)
1071                 EndEvalPlanQual(estate);
1072
1073         /*
1074          * shut down the node-type-specific query processing
1075          */
1076         ExecEndNode(planstate);
1077
1078         /*
1079          * for subplans too
1080          */
1081         foreach(l, estate->es_subplanstates)
1082         {
1083                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1084
1085                 ExecEndNode(subplanstate);
1086         }
1087
1088         /*
1089          * destroy the executor "tuple" table.
1090          */
1091         ExecDropTupleTable(estate->es_tupleTable, true);
1092         estate->es_tupleTable = NULL;
1093
1094         /*
1095          * close the result relation(s) if any, but hold locks until xact commit.
1096          */
1097         resultRelInfo = estate->es_result_relations;
1098         for (i = estate->es_num_result_relations; i > 0; i--)
1099         {
1100                 /* Close indices and then the relation itself */
1101                 ExecCloseIndices(resultRelInfo);
1102                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1103                 resultRelInfo++;
1104         }
1105
1106         /*
1107          * likewise close any trigger target relations
1108          */
1109         foreach(l, estate->es_trig_target_relations)
1110         {
1111                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1112                 /* Close indices and then the relation itself */
1113                 ExecCloseIndices(resultRelInfo);
1114                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1115         }
1116
1117         /*
1118          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1119          */
1120         foreach(l, estate->es_rowMarks)
1121         {
1122                 ExecRowMark *erm = lfirst(l);
1123
1124                 heap_close(erm->relation, NoLock);
1125         }
1126 }
1127
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		processes the query plan to retrieve 'numberTuples' tuples in the
 *		direction specified.
 *
 *		Retrieves all tuples if numberTuples is 0
 *
 *		result is either a slot containing the last tuple in the case
 *		of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;	/* raw tuple from the plan tree */
	TupleTableSlot *slot;		/* possibly junk-filtered version of planSlot */
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;
	TupleTableSlot *result;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
	result = NULL;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */

	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple.  If an EvalPlanQual recheck
		 * is in progress (es_useEvalPlan), drain substitute tuples from it
		 * first; once it is exhausted, fall back to the regular plan.
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			planSlot = EvalPlanQualNext(estate);
			if (TupIsNull(planSlot))
				planSlot = ExecProcNode(planstate);
		}
		else
			planSlot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(planSlot))
		{
			result = NULL;
			break;
		}
		slot = planSlot;

		/*
		 * if we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * Also, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			Datum		datum;
			bool		isNull;

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			else if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

				/*
				 * lmark is re-entered (goto below) when EvalPlanQual hands
				 * back an updated tuple version whose row marks must be
				 * re-checked from the top of the list.
				 */
		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_snapshot->curcid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							/* got the lock; go on to the next row mark */
							break;

						case HeapTupleUpdated:
							/* concurrently updated: error out if serializable */
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
								 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								  errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax,
												estate->es_snapshot->curcid);
								if (!TupIsNull(newSlot))
								{
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
							return NULL;	/* not reached; keeps compiler quiet */
					}
				}
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * return it to the user, add it to a relation someplace, delete it
		 * from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				result = slot;
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * SELECT or NULL otherwise.
	 */
	return result;
}
1414
1415 /* ----------------------------------------------------------------
1416  *              ExecSelect
1417  *
1418  *              SELECTs are easy.. we just pass the tuple to the appropriate
1419  *              output function.
1420  * ----------------------------------------------------------------
1421  */
1422 static void
1423 ExecSelect(TupleTableSlot *slot,
1424                    DestReceiver *dest,
1425                    EState *estate)
1426 {
1427         (*dest->receiveSlot) (slot, dest);
1428         IncrRetrieved();
1429         (estate->es_processed)++;
1430 }
1431
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 *
 *		slot: slot holding the candidate tuple; may be replaced below by
 *			the trigger tuple slot if a BEFORE ROW trigger returns a
 *			different tuple
 *		tupleid: not referenced by INSERT; present so the ExecXXX DML
 *			routines share a similar parameter list
 *		planSlot: output tuple of the top plan node, used only for the
 *			RETURNING projection
 *		dest: receiver for RETURNING output, if any
 *		estate: working state (result relation, snapshot, counters)
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple (NOT NULL and CHECK); errors out on
	 * violation, so reaching the insert below implies the tuple is valid.
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_snapshot->curcid,
						true, true);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;	/* save the OID returned by heap_insert */
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1528
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 *
 *		tupleid: TID of the tuple to delete; may be updated in place if an
 *			EvalPlanQual recheck redirects us to a newer tuple version
 *		planSlot: output tuple of the top plan node, used only for the
 *			RETURNING projection
 *		dest: receiver for RETURNING output, if any
 *		estate: working state (result relation, snapshot, counters)
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
										estate->es_snapshot->curcid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 *
	 * We may loop back here (via goto) after an EvalPlanQual recheck
	 * substitutes a newer version of the target tuple.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Concurrent update/delete: under SERIALIZABLE we must fail;
			 * under READ COMMITTED we chase the update chain with
			 * EvalPlanQual and retry against the latest version, if any.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the deleted tuple into a slot, which means we must
		 * first re-fetch it from the heap (using SnapshotAny, since we just
		 * deleted it ourselves).  The trigger tuple slot is available for
		 * this purpose.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer pin it depends on */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1660
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..      This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 *
 *		slot: slot holding the replacement tuple; may be replaced below by
 *			the trigger tuple slot if a BEFORE ROW trigger returns a
 *			different tuple
 *		tupleid: TID of the tuple being replaced; may be updated in place
 *			if an EvalPlanQual recheck redirects us to a newer version
 *		planSlot: output tuple of the top plan node, used only for the
 *			RETURNING projection
 *		dest: receiver for RETURNING output, if any
 *		estate: working state (result relation, snapshot, counters)
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Concurrent update/delete: under SERIALIZABLE we must fail;
			 * under READ COMMITTED we chase the update chain with
			 * EvalPlanQual, re-filter junk columns from the requalified
			 * row, and retry the update against the latest version.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1830
1831 /*
1832  * ExecRelCheck --- check that tuple meets constraints for result relation
1833  */
1834 static const char *
1835 ExecRelCheck(ResultRelInfo *resultRelInfo,
1836                          TupleTableSlot *slot, EState *estate)
1837 {
1838         Relation        rel = resultRelInfo->ri_RelationDesc;
1839         int                     ncheck = rel->rd_att->constr->num_check;
1840         ConstrCheck *check = rel->rd_att->constr->check;
1841         ExprContext *econtext;
1842         MemoryContext oldContext;
1843         List       *qual;
1844         int                     i;
1845
1846         /*
1847          * If first time through for this result relation, build expression
1848          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1849          * memory context so they'll survive throughout the query.
1850          */
1851         if (resultRelInfo->ri_ConstraintExprs == NULL)
1852         {
1853                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1854                 resultRelInfo->ri_ConstraintExprs =
1855                         (List **) palloc(ncheck * sizeof(List *));
1856                 for (i = 0; i < ncheck; i++)
1857                 {
1858                         /* ExecQual wants implicit-AND form */
1859                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1860                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1861                                 ExecPrepareExpr((Expr *) qual, estate);
1862                 }
1863                 MemoryContextSwitchTo(oldContext);
1864         }
1865
1866         /*
1867          * We will use the EState's per-tuple context for evaluating constraint
1868          * expressions (creating it if it's not already there).
1869          */
1870         econtext = GetPerTupleExprContext(estate);
1871
1872         /* Arrange for econtext's scan tuple to be the tuple under test */
1873         econtext->ecxt_scantuple = slot;
1874
1875         /* And evaluate the constraints */
1876         for (i = 0; i < ncheck; i++)
1877         {
1878                 qual = resultRelInfo->ri_ConstraintExprs[i];
1879
1880                 /*
1881                  * NOTE: SQL92 specifies that a NULL result from a constraint
1882                  * expression is not to be treated as a failure.  Therefore, tell
1883                  * ExecQual to return TRUE for NULL.
1884                  */
1885                 if (!ExecQual(qual, econtext, true))
1886                         return check[i].ccname;
1887         }
1888
1889         /* NULL result means no error */
1890         return NULL;
1891 }
1892
1893 void
1894 ExecConstraints(ResultRelInfo *resultRelInfo,
1895                                 TupleTableSlot *slot, EState *estate)
1896 {
1897         Relation        rel = resultRelInfo->ri_RelationDesc;
1898         TupleConstr *constr = rel->rd_att->constr;
1899
1900         Assert(constr);
1901
1902         if (constr->has_not_null)
1903         {
1904                 int                     natts = rel->rd_att->natts;
1905                 int                     attrChk;
1906
1907                 for (attrChk = 1; attrChk <= natts; attrChk++)
1908                 {
1909                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1910                                 slot_attisnull(slot, attrChk))
1911                                 ereport(ERROR,
1912                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1913                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1914                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1915                 }
1916         }
1917
1918         if (constr->num_check > 0)
1919         {
1920                 const char *failed;
1921
1922                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1923                         ereport(ERROR,
1924                                         (errcode(ERRCODE_CHECK_VIOLATION),
1925                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1926                                                         RelationGetRelationName(rel), failed)));
1927         }
1928 }
1929
1930 /*
1931  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1932  *
1933  * projectReturning: RETURNING projection info for current result rel
1934  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1935  * planSlot: slot holding tuple returned by top plan node
1936  * dest: where to send the output
1937  */
1938 static void
1939 ExecProcessReturning(ProjectionInfo *projectReturning,
1940                                          TupleTableSlot *tupleSlot,
1941                                          TupleTableSlot *planSlot,
1942                                          DestReceiver *dest)
1943 {
1944         ExprContext *econtext = projectReturning->pi_exprContext;
1945         TupleTableSlot *retSlot;
1946
1947         /*
1948          * Reset per-tuple memory context to free any expression evaluation
1949          * storage allocated in the previous cycle.
1950          */
1951         ResetExprContext(econtext);
1952
1953         /* Make tuple and any needed join variables available to ExecProject */
1954         econtext->ecxt_scantuple = tupleSlot;
1955         econtext->ecxt_outertuple = planSlot;
1956
1957         /* Compute the RETURNING expressions */
1958         retSlot = ExecProject(projectReturning, NULL);
1959
1960         /* Send to dest */
1961         (*dest->receiveSlot) (retSlot, dest);
1962
1963         ExecClearTuple(retSlot);
1964 }
1965
1966 /*
1967  * Check a modified tuple to see if we want to process its updated version
1968  * under READ COMMITTED rules.
1969  *
1970  * See backend/executor/README for some info about how this works.
1971  *
1972  *      estate - executor state data
1973  *      rti - rangetable index of table containing tuple
1974  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1975  *      priorXmax - t_xmax from the outdated tuple
1976  *      curCid - command ID of current command of my transaction
1977  *
1978  * *tid is also an output parameter: it's modified to hold the TID of the
1979  * latest version of the tuple (note this may be changed even on failure)
1980  *
1981  * Returns a slot containing the new candidate update/delete tuple, or
1982  * NULL if we determine we shouldn't process the row.
1983  */
1984 TupleTableSlot *
1985 EvalPlanQual(EState *estate, Index rti,
1986                          ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1987 {
1988         evalPlanQual *epq;
1989         EState     *epqstate;
1990         Relation        relation;
1991         HeapTupleData tuple;
1992         HeapTuple       copyTuple = NULL;
1993         SnapshotData SnapshotDirty;
1994         bool            endNode;
1995
1996         Assert(rti != 0);
1997
1998         /*
1999          * find relation containing target tuple
2000          */
2001         if (estate->es_result_relation_info != NULL &&
2002                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2003                 relation = estate->es_result_relation_info->ri_RelationDesc;
2004         else
2005         {
2006                 ListCell   *l;
2007
2008                 relation = NULL;
2009                 foreach(l, estate->es_rowMarks)
2010                 {
2011                         if (((ExecRowMark *) lfirst(l))->rti == rti)
2012                         {
2013                                 relation = ((ExecRowMark *) lfirst(l))->relation;
2014                                 break;
2015                         }
2016                 }
2017                 if (relation == NULL)
2018                         elog(ERROR, "could not find RowMark for RT index %u", rti);
2019         }
2020
2021         /*
2022          * fetch tid tuple
2023          *
2024          * Loop here to deal with updated or busy tuples
2025          */
2026         InitDirtySnapshot(SnapshotDirty);
2027         tuple.t_self = *tid;
2028         for (;;)
2029         {
2030                 Buffer          buffer;
2031
2032                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2033                 {
2034                         /*
2035                          * If xmin isn't what we're expecting, the slot must have been
2036                          * recycled and reused for an unrelated tuple.  This implies that
2037                          * the latest version of the row was deleted, so we need do
2038                          * nothing.  (Should be safe to examine xmin without getting
2039                          * buffer's content lock, since xmin never changes in an existing
2040                          * tuple.)
2041                          */
2042                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2043                                                                          priorXmax))
2044                         {
2045                                 ReleaseBuffer(buffer);
2046                                 return NULL;
2047                         }
2048
2049                         /* otherwise xmin should not be dirty... */
2050                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2051                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2052
2053                         /*
2054                          * If tuple is being updated by other transaction then we have to
2055                          * wait for its commit/abort.
2056                          */
2057                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2058                         {
2059                                 ReleaseBuffer(buffer);
2060                                 XactLockTableWait(SnapshotDirty.xmax);
2061                                 continue;               /* loop back to repeat heap_fetch */
2062                         }
2063
2064                         /*
2065                          * If tuple was inserted by our own transaction, we have to check
2066                          * cmin against curCid: cmin >= curCid means our command cannot
2067                          * see the tuple, so we should ignore it.  Without this we are
2068                          * open to the "Halloween problem" of indefinitely re-updating the
2069                          * same tuple.  (We need not check cmax because
2070                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2071                          * transaction dead, regardless of cmax.)  We just checked that
2072                          * priorXmax == xmin, so we can test that variable instead of
2073                          * doing HeapTupleHeaderGetXmin again.
2074                          */
2075                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2076                                 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
2077                         {
2078                                 ReleaseBuffer(buffer);
2079                                 return NULL;
2080                         }
2081
2082                         /*
2083                          * We got tuple - now copy it for use by recheck query.
2084                          */
2085                         copyTuple = heap_copytuple(&tuple);
2086                         ReleaseBuffer(buffer);
2087                         break;
2088                 }
2089
2090                 /*
2091                  * If the referenced slot was actually empty, the latest version of
2092                  * the row must have been deleted, so we need do nothing.
2093                  */
2094                 if (tuple.t_data == NULL)
2095                 {
2096                         ReleaseBuffer(buffer);
2097                         return NULL;
2098                 }
2099
2100                 /*
2101                  * As above, if xmin isn't what we're expecting, do nothing.
2102                  */
2103                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2104                                                                  priorXmax))
2105                 {
2106                         ReleaseBuffer(buffer);
2107                         return NULL;
2108                 }
2109
2110                 /*
2111                  * If we get here, the tuple was found but failed SnapshotDirty.
2112                  * Assuming the xmin is either a committed xact or our own xact (as it
2113                  * certainly should be if we're trying to modify the tuple), this must
2114                  * mean that the row was updated or deleted by either a committed xact
2115                  * or our own xact.  If it was deleted, we can ignore it; if it was
2116                  * updated then chain up to the next version and repeat the whole
2117                  * test.
2118                  *
2119                  * As above, it should be safe to examine xmax and t_ctid without the
2120                  * buffer content lock, because they can't be changing.
2121                  */
2122                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2123                 {
2124                         /* deleted, so forget about it */
2125                         ReleaseBuffer(buffer);
2126                         return NULL;
2127                 }
2128
2129                 /* updated, so look at the updated row */
2130                 tuple.t_self = tuple.t_data->t_ctid;
2131                 /* updated row should have xmin matching this xmax */
2132                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2133                 ReleaseBuffer(buffer);
2134                 /* loop back to fetch next in chain */
2135         }
2136
2137         /*
2138          * For UPDATE/DELETE we have to return tid of actual row we're executing
2139          * PQ for.
2140          */
2141         *tid = tuple.t_self;
2142
2143         /*
2144          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2145          */
2146         epq = estate->es_evalPlanQual;
2147         endNode = true;
2148
2149         if (epq != NULL && epq->rti == 0)
2150         {
2151                 /* Top PQ stack entry is idle, so re-use it */
2152                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2153                 epq->rti = rti;
2154                 endNode = false;
2155         }
2156
2157         /*
2158          * If this is request for another RTE - Ra, - then we have to check wasn't
2159          * PlanQual requested for Ra already and if so then Ra' row was updated
2160          * again and we have to re-start old execution for Ra and forget all what
2161          * we done after Ra was suspended. Cool? -:))
2162          */
2163         if (epq != NULL && epq->rti != rti &&
2164                 epq->estate->es_evTuple[rti - 1] != NULL)
2165         {
2166                 do
2167                 {
2168                         evalPlanQual *oldepq;
2169
2170                         /* stop execution */
2171                         EvalPlanQualStop(epq);
2172                         /* pop previous PlanQual from the stack */
2173                         oldepq = epq->next;
2174                         Assert(oldepq && oldepq->rti != 0);
2175                         /* push current PQ to freePQ stack */
2176                         oldepq->free = epq;
2177                         epq = oldepq;
2178                         estate->es_evalPlanQual = epq;
2179                 } while (epq->rti != rti);
2180         }
2181
2182         /*
2183          * If we are requested for another RTE then we have to suspend execution
2184          * of current PlanQual and start execution for new one.
2185          */
2186         if (epq == NULL || epq->rti != rti)
2187         {
2188                 /* try to reuse plan used previously */
2189                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2190
2191                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2192                 {
2193                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2194                         newepq->free = NULL;
2195                         newepq->estate = NULL;
2196                         newepq->planstate = NULL;
2197                 }
2198                 else
2199                 {
2200                         /* recycle previously used PlanQual */
2201                         Assert(newepq->estate == NULL);
2202                         epq->free = NULL;
2203                 }
2204                 /* push current PQ to the stack */
2205                 newepq->next = epq;
2206                 epq = newepq;
2207                 estate->es_evalPlanQual = epq;
2208                 epq->rti = rti;
2209                 endNode = false;
2210         }
2211
2212         Assert(epq->rti == rti);
2213
2214         /*
2215          * Ok - we're requested for the same RTE.  Unfortunately we still have to
2216          * end and restart execution of the plan, because ExecReScan wouldn't
2217          * ensure that upper plan nodes would reset themselves.  We could make
2218          * that work if insertion of the target tuple were integrated with the
2219          * Param mechanism somehow, so that the upper plan nodes know that their
2220          * children's outputs have changed.
2221          *
2222          * Note that the stack of free evalPlanQual nodes is quite useless at the
2223          * moment, since it only saves us from pallocing/releasing the
2224          * evalPlanQual nodes themselves.  But it will be useful once we implement
2225          * ReScan instead of end/restart for re-using PlanQual nodes.
2226          */
2227         if (endNode)
2228         {
2229                 /* stop execution */
2230                 EvalPlanQualStop(epq);
2231         }
2232
2233         /*
2234          * Initialize new recheck query.
2235          *
2236          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2237          * instead copy down changeable state from the top plan (including
2238          * es_result_relation_info, es_junkFilter) and reset locally changeable
2239          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2240          */
2241         EvalPlanQualStart(epq, estate, epq->next);
2242
2243         /*
2244          * free old RTE' tuple, if any, and store target tuple where relation's
2245          * scan node will see it
2246          */
2247         epqstate = epq->estate;
2248         if (epqstate->es_evTuple[rti - 1] != NULL)
2249                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2250         epqstate->es_evTuple[rti - 1] = copyTuple;
2251
2252         return EvalPlanQualNext(estate);
2253 }
2254
2255 static TupleTableSlot *
2256 EvalPlanQualNext(EState *estate)
2257 {
2258         evalPlanQual *epq = estate->es_evalPlanQual;
2259         MemoryContext oldcontext;
2260         TupleTableSlot *slot;
2261
2262         Assert(epq->rti != 0);
2263
2264 lpqnext:;
2265         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2266         slot = ExecProcNode(epq->planstate);
2267         MemoryContextSwitchTo(oldcontext);
2268
2269         /*
2270          * No more tuples for this PQ. Continue previous one.
2271          */
2272         if (TupIsNull(slot))
2273         {
2274                 evalPlanQual *oldepq;
2275
2276                 /* stop execution */
2277                 EvalPlanQualStop(epq);
2278                 /* pop old PQ from the stack */
2279                 oldepq = epq->next;
2280                 if (oldepq == NULL)
2281                 {
2282                         /* this is the first (oldest) PQ - mark as free */
2283                         epq->rti = 0;
2284                         estate->es_useEvalPlan = false;
2285                         /* and continue Query execution */
2286                         return NULL;
2287                 }
2288                 Assert(oldepq->rti != 0);
2289                 /* push current PQ to freePQ stack */
2290                 oldepq->free = epq;
2291                 epq = oldepq;
2292                 estate->es_evalPlanQual = epq;
2293                 goto lpqnext;
2294         }
2295
2296         return slot;
2297 }
2298
2299 static void
2300 EndEvalPlanQual(EState *estate)
2301 {
2302         evalPlanQual *epq = estate->es_evalPlanQual;
2303
2304         if (epq->rti == 0)                      /* plans already shutdowned */
2305         {
2306                 Assert(epq->next == NULL);
2307                 return;
2308         }
2309
2310         for (;;)
2311         {
2312                 evalPlanQual *oldepq;
2313
2314                 /* stop execution */
2315                 EvalPlanQualStop(epq);
2316                 /* pop old PQ from the stack */
2317                 oldepq = epq->next;
2318                 if (oldepq == NULL)
2319                 {
2320                         /* this is the first (oldest) PQ - mark as free */
2321                         epq->rti = 0;
2322                         estate->es_useEvalPlan = false;
2323                         break;
2324                 }
2325                 Assert(oldepq->rti != 0);
2326                 /* push current PQ to freePQ stack */
2327                 oldepq->free = epq;
2328                 epq = oldepq;
2329                 estate->es_evalPlanQual = epq;
2330         }
2331 }
2332
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * epq:       stack entry to initialize (its estate/planstate are built here)
 * estate:    the top-level executor state we borrow shared state from
 * priorepq:  next-lower stack entry, or NULL if this is the first entry;
 *            later entries share the first entry's es_evTuple array.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	/* one es_evTupleNull/es_evTuple slot per range-table entry */
	rtsize = list_length(estate->es_range_table);

	/* build a fresh executor state for this recheck level */
	epq->estate = epqstate = CreateExecutorState();

	/* all allocations below go into the new estate's per-query context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
	epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
	epqstate->es_param_list_info = estate->es_param_list_info;
	/* fresh (zeroed) exec-Param array, since these are locally changeable */
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.      This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2426
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 *
 * On return, epq->estate and epq->planstate are NULL; the entry may be
 * reused by a later EvalPlanQualStart.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	/* do the shutdown work inside this level's per-query context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/* shut down the main recheck plan tree ... */
	ExecEndNode(epq->planstate);

	/* ... and every subplan that EvalPlanQualStart initialized */
	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* release this level's tuple table (true => also drop the slots) */
	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	/* free the target tuple stored for this level's RTE, if any */
	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	/* trigger target relations are not shared, so close them here */
	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	/* this destroys the per-query context we just switched out of */
	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}
2478
2479 /*
2480  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2481  *
2482  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2483  * are looking at a row returned by the EvalPlanQual machinery, we need
2484  * to look at the subsidiary state instead.
2485  */
2486 PlanState *
2487 ExecGetActivePlanTree(QueryDesc *queryDesc)
2488 {
2489         EState     *estate = queryDesc->estate;
2490
2491         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2492                 return estate->es_evalPlanQual->planstate;
2493         else
2494                 return queryDesc->planstate;
2495 }
2496
2497
2498 /*
2499  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2500  *
2501  * We implement SELECT INTO by diverting SELECT's normal output with
2502  * a specialized DestReceiver type.
2503  *
2504  * TODO: remove some of the INTO-specific cruft from EState, and keep
2505  * it in the DestReceiver instead.
2506  */
2507
/*
 * Private DestReceiver subclass for SELECT INTO: the standard DestReceiver
 * header must come first so the struct can be cast to/from DestReceiver *.
 * The estate field is filled in by OpenIntoRel and consulted by
 * intorel_receive to find the target relation and snapshot.
 */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
} DR_intorel;
2513
2514 /*
2515  * OpenIntoRel --- actually create the SELECT INTO target relation
2516  *
2517  * This also replaces QueryDesc->dest with the special DestReceiver for
2518  * SELECT INTO.  We assume that the correct result tuple type has already
2519  * been placed in queryDesc->tupDesc.
2520  */
2521 static void
2522 OpenIntoRel(QueryDesc *queryDesc)
2523 {
2524         IntoClause *into = queryDesc->plannedstmt->intoClause;
2525         EState     *estate = queryDesc->estate;
2526         Relation        intoRelationDesc;
2527         char       *intoName;
2528         Oid                     namespaceId;
2529         Oid                     tablespaceId;
2530         Datum           reloptions;
2531         AclResult       aclresult;
2532         Oid                     intoRelationId;
2533         TupleDesc       tupdesc;
2534         DR_intorel *myState;
2535
2536         Assert(into);
2537
2538         /*
2539          * Check consistency of arguments
2540          */
2541         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2542                 ereport(ERROR,
2543                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2544                                  errmsg("ON COMMIT can only be used on temporary tables")));
2545
2546         /*
2547          * Find namespace to create in, check its permissions
2548          */
2549         intoName = into->rel->relname;
2550         namespaceId = RangeVarGetCreationNamespace(into->rel);
2551
2552         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2553                                                                           ACL_CREATE);
2554         if (aclresult != ACLCHECK_OK)
2555                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2556                                            get_namespace_name(namespaceId));
2557
2558         /*
2559          * Select tablespace to use.  If not specified, use default tablespace
2560          * (which may in turn default to database's default).
2561          */
2562         if (into->tableSpaceName)
2563         {
2564                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2565                 if (!OidIsValid(tablespaceId))
2566                         ereport(ERROR,
2567                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2568                                          errmsg("tablespace \"%s\" does not exist",
2569                                                         into->tableSpaceName)));
2570         }
2571         else
2572         {
2573                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2574                 /* note InvalidOid is OK in this case */
2575         }
2576
2577         /* Check permissions except when using the database's default space */
2578         if (OidIsValid(tablespaceId))
2579         {
2580                 AclResult       aclresult;
2581
2582                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2583                                                                                    ACL_CREATE);
2584
2585                 if (aclresult != ACLCHECK_OK)
2586                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2587                                                    get_tablespace_name(tablespaceId));
2588         }
2589
2590         /* Parse and validate any reloptions */
2591         reloptions = transformRelOptions((Datum) 0,
2592                                                                          into->options,
2593                                                                          true,
2594                                                                          false);
2595         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2596
2597         /* have to copy the actual tupdesc to get rid of any constraints */
2598         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2599
2600         /* Now we can actually create the new relation */
2601         intoRelationId = heap_create_with_catalog(intoName,
2602                                                                                           namespaceId,
2603                                                                                           tablespaceId,
2604                                                                                           InvalidOid,
2605                                                                                           GetUserId(),
2606                                                                                           tupdesc,
2607                                                                                           RELKIND_RELATION,
2608                                                                                           false,
2609                                                                                           true,
2610                                                                                           0,
2611                                                                                           into->onCommit,
2612                                                                                           reloptions,
2613                                                                                           allowSystemTableMods);
2614
2615         FreeTupleDesc(tupdesc);
2616
2617         /*
2618          * Advance command counter so that the newly-created relation's catalog
2619          * tuples will be visible to heap_open.
2620          */
2621         CommandCounterIncrement();
2622
2623         /*
2624          * If necessary, create a TOAST table for the INTO relation. Note that
2625          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2626          * the TOAST table will be visible for insertion.
2627          */
2628         AlterTableCreateToastTable(intoRelationId);
2629
2630         /*
2631          * And open the constructed table for writing.
2632          */
2633         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2634
2635         /* use_wal off requires rd_targblock be initially invalid */
2636         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2637
2638         /*
2639          * We can skip WAL-logging the insertions, unless PITR is in use.
2640          */
2641         estate->es_into_relation_use_wal = XLogArchivingActive();
2642         estate->es_into_relation_descriptor = intoRelationDesc;
2643
2644         /*
2645          * Now replace the query's DestReceiver with one for SELECT INTO
2646          */
2647         queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
2648         myState = (DR_intorel *) queryDesc->dest;
2649         Assert(myState->pub.mydest == DestIntoRel);
2650         myState->estate = estate;
2651 }
2652
2653 /*
2654  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2655  */
2656 static void
2657 CloseIntoRel(QueryDesc *queryDesc)
2658 {
2659         EState     *estate = queryDesc->estate;
2660
2661         /* OpenIntoRel might never have gotten called */
2662         if (estate->es_into_relation_descriptor)
2663         {
2664                 /* If we skipped using WAL, must heap_sync before commit */
2665                 if (!estate->es_into_relation_use_wal)
2666                         heap_sync(estate->es_into_relation_descriptor);
2667
2668                 /* close rel, but keep lock until commit */
2669                 heap_close(estate->es_into_relation_descriptor, NoLock);
2670
2671                 estate->es_into_relation_descriptor = NULL;
2672         }
2673 }
2674
2675 /*
2676  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2677  *
2678  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2679  * we just leave the private fields empty here.  OpenIntoRel will
2680  * fill them in.
2681  */
2682 DestReceiver *
2683 CreateIntoRelDestReceiver(void)
2684 {
2685         DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2686
2687         self->pub.receiveSlot = intorel_receive;
2688         self->pub.rStartup = intorel_startup;
2689         self->pub.rShutdown = intorel_shutdown;
2690         self->pub.rDestroy = intorel_destroy;
2691         self->pub.mydest = DestIntoRel;
2692
2693         self->estate = NULL;
2694
2695         return (DestReceiver *) self;
2696 }
2697
/*
 * intorel_startup --- executor startup
 *
 * rStartup callback for the SELECT INTO receiver.  All setup (creating and
 * opening the target relation) is done by OpenIntoRel before execution
 * begins, so there is nothing to do here.
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}
2706
2707 /*
2708  * intorel_receive --- receive one tuple
2709  */
2710 static void
2711 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2712 {
2713         DR_intorel *myState = (DR_intorel *) self;
2714         EState     *estate = myState->estate;
2715         HeapTuple       tuple;
2716
2717         tuple = ExecCopySlotTuple(slot);
2718
2719         heap_insert(estate->es_into_relation_descriptor,
2720                                 tuple,
2721                                 estate->es_snapshot->curcid,
2722                                 estate->es_into_relation_use_wal,
2723                                 false);                 /* never any point in using FSM */
2724
2725         /* We know this is a newly created relation, so there are no indexes */
2726
2727         heap_freetuple(tuple);
2728
2729         IncrAppended();
2730 }
2731
/*
 * intorel_shutdown --- executor end
 *
 * rShutdown callback for the SELECT INTO receiver.  Per-query cleanup
 * (heap_sync and closing the relation) is handled by CloseIntoRel, so
 * there is nothing to do here.
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}
2740
/*
 * intorel_destroy --- release DestReceiver object
 *
 * rDestroy callback: frees only the receiver struct itself; the EState it
 * references belongs to the executor and is not freed here.
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}