/*-------------------------------------------------------------------------
 *
 * execMain.c
 *      top level executor interface routines
 *
 * INTERFACE ROUTINES
 *      ExecutorStart()
 *      ExecutorRun()
 *      ExecutorEnd()
 *
 *      The old ExecutorMain() has been replaced by ExecutorStart(),
 *      ExecutorRun() and ExecutorEnd()
 *
 *      These three procedures are the external interfaces to the executor.
 *      In each case, the query descriptor is required as an argument.
 *
 *      ExecutorStart() must be called at the beginning of execution of any
 *      query plan and ExecutorEnd() should always be called at the end of
 *      execution of a plan.
 *
 *      ExecutorRun accepts direction and count arguments that specify whether
 *      the plan is to be executed forwards or backwards, and for how many tuples.
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.296 2007/08/15 21:39:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
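
/*
 * Illustrative sketch (not part of this file): callers such as the
 * portal/traffic-cop code drive these entry points in roughly this
 * sequence (error handling and the elided "..." arguments omitted):
 *
 *      QueryDesc  *qd = CreateQueryDesc(...);
 *
 *      ExecutorStart(qd, 0);
 *      (void) ExecutorRun(qd, ForwardScanDirection, 0L);
 *      ExecutorEnd(qd);
 *      FreeQueryDesc(qd);
 *
 * A count of 0 in ExecutorRun means "run to completion"; a nonzero count
 * stops after that many tuples, as a portal FETCH does.
 */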
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"

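/*
 * evalPlanQual state supports READ COMMITTED update rechecking: when a
 * tuple to be updated/deleted/locked turns out to have been concurrently
 * updated, we re-evaluate the plan quals against the newest version of
 * the row.  Rechecks can nest (one per range-table entry), so active
 * entries form a stack linked by "next"; finished entries are kept on
 * the "free" list for reuse rather than being rebuilt from scratch.
 */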
typedef struct evalPlanQual
{
    Index       rti;
    EState     *estate;
    PlanState  *planstate;
    struct evalPlanQual *next;  /* stack of active PlanQual plans */
    struct evalPlanQual *free;  /* list of free PlanQual plans */
} evalPlanQual;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  bool doInstrument);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
           DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
                     TupleTableSlot *tupleSlot,
                     TupleTableSlot *planSlot,
                     DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
                  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *      ExecutorStart
 *
 *      This routine must be called at the beginning of any execution of any
 *      query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in parameters, if any, from queryDesc
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = queryDesc->snapshot;
    estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
    estate->es_instrument = queryDesc->doInstrument;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *      ExecutorRun
 *
 *      This is the main routine of the executor module. It accepts
 *      the query descriptor from the traffic cop and executes the
 *      query plan.
 *
 *      ExecutorStart must have been called already.
 *
 *      If direction is NoMovementScanDirection then nothing is done
 *      except to start up/shut down the destination.  Otherwise,
 *      we retrieve up to 'count' tuples in the specified direction.
 *
 *      Note: count = 0 is interpreted as no portal limit, i.e., run to
 *      completion.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    TupleTableSlot *result;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->returningLists);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (ScanDirectionIsNoMovement(direction))
        result = NULL;
    else
        result = ExecutePlan(estate,
                             queryDesc->planstate,
                             operation,
                             count,
                             direction,
                             dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    MemoryContextSwitchTo(oldcontext);

    return result;
}
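
/*
 * Usage note (illustrative): a portal fetching in batches might call
 *
 *      ExecutorRun(qd, ForwardScanDirection, 10L);
 *
 * repeatedly, receiving up to 10 more tuples per call; the executor keeps
 * its position in the EState across calls.  BackwardScanDirection is also
 * possible for scrollable cursors, provided the plan supports backward
 * scan (see EXEC_FLAG_BACKWARD in executor.h).
 */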

/* ----------------------------------------------------------------
 *      ExecutorEnd
 *
 *      This routine must be called at the end of execution of any
 *      query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
}

/* ----------------------------------------------------------------
 *      ExecutorRewind
 *
 *      This routine may be called on an open queryDesc to rewind it
 *      to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate, NULL);

    MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *      Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
    }
}

/*
 * ExecCheckRTEPerms
 *      Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    Oid         relOid;
    Oid         userid;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
     */
    if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
        != requiredPerms)
        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                       get_rel_name(relOid));
}
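
/*
 * Example of the ACLMASK_ALL test above: if an UPDATE's RTE has
 * requiredPerms = ACL_SELECT | ACL_UPDATE but the user holds only
 * ACL_SELECT, pg_class_aclmask returns just the ACL_SELECT bit, the
 * equality test fails, and aclcheck_error reports a permission-denied
 * error naming the relation.
 */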

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?
     */
    if (plannedstmt->intoClause != NULL)
        goto fail;

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        goto fail;
    }

    return;

fail:
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("transaction is read-only")));
}
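
/*
 * For example, an UPDATE on an ordinary (non-temp) table marks its RTE
 * with ACL_UPDATE, so the loop above reaches "goto fail"; a plain SELECT
 * requires only ACL_SELECT and passes.  Writes to temp tables are exempt
 * since their contents are local to this session.
 */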

/* ----------------------------------------------------------------
 *      InitPlan
 *
 *      Initializes the query plan: open files, allocate storage
 *      and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;

    /*
     * initialize result relation stuff
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            initResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* Initialize to first or only result rel */
        estate->es_result_relation_info = resultRelInfos;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Have to lock relations selected FOR UPDATE/FOR SHARE before we
     * initialize the plan tree, else we'd be doing a lock upgrade.
     * While we are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        RowMarkClause *rc = (RowMarkClause *) lfirst(l);
        Oid         relid = getrelid(rc->rti, rangeTable);
        Relation    relation;
        ExecRowMark *erm;

        relation = heap_open(relid, RowShareLock);
        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->forUpdate = rc->forUpdate;
        erm->noWait = rc->noWait;
        /* We'll set up ctidAttNo below */
        erm->ctidAttNo = InvalidAttrNumber;
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Initialize the executor "tuple" table.  We need slots for all the plan
     * nodes, plus possibly output slots for the junkfilter(s).  At this point
     * we aren't sure if we need junkfilters, so just add slots for them
     * unconditionally.  Also, if it's not a SELECT, set up a slot to hold
     * trigger output tuples; and if there are RETURNING lists, one more slot
     * for RETURNING-list evaluation.
     */
    {
        int         nSlots;

        /* Slots for the main plan tree */
        nSlots = ExecCountSlotsNode(plan);
        /* Add slots for subplans and initplans */
        foreach(l, plannedstmt->subplans)
        {
            Plan       *subplan = (Plan *) lfirst(l);

            nSlots += ExecCountSlotsNode(subplan);
        }
        /* Add slots for junkfilter(s) */
        if (plannedstmt->resultRelations != NIL)
            nSlots += list_length(plannedstmt->resultRelations);
        else
            nSlots += 1;
        if (operation != CMD_SELECT)
            nSlots++;           /* for es_trig_tuple_slot */
        if (plannedstmt->returningLists)
            nSlots++;           /* for RETURNING projection */

        estate->es_tupleTable = ExecCreateTupleTable(nSlots);

        if (operation != CMD_SELECT)
            estate->es_trig_tuple_slot =
                ExecAllocTableSlot(estate->es_tupleTable);
    }

    /* mark EvalPlanQual not active */
    estate->es_plannedstmt = plannedstmt;
    estate->es_evalPlanQual = NULL;
    estate->es_evTupleNull = NULL;
    estate->es_evTuple = NULL;
    estate->es_useEvalPlan = false;

    /*
     * Initialize private state information for each SubPlan.  We must do
     * this before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.
         * If it is a parameterless subplan (not initplan), we suggest that it
         * be prepared to handle REWIND efficiently; otherwise there is no
         * need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the query
     * tree.  This opens files, allocates storage and leaves us ready to start
     * processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return. (this
     * is especially important if we are creating a relation with "SELECT
     * INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT and INSERT queries need a
     * filter if there are any junk attrs in the tlist.  INSERT and SELECT
     * INTO also need a filter if the plan may return raw disk tuples (else
     * heap_insert will be scribbling on the source relation!). UPDATE and
     * DELETE always need a filter, since there's always a junk 'ctid'
     * attribute present --- no need to look first.
     */
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        switch (operation)
        {
            case CMD_SELECT:
            case CMD_INSERT:
                foreach(tlist, plan->targetlist)
                {
                    TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                    if (tle->resjunk)
                    {
                        junk_filter_needed = true;
                        break;
                    }
                }
                if (!junk_filter_needed &&
                    (operation == CMD_INSERT || estate->es_select_into) &&
                    ExecMayReturnRawTuples(planstate))
                    junk_filter_needed = true;
                break;
            case CMD_UPDATE:
            case CMD_DELETE:
                junk_filter_needed = true;
                break;
            default:
                break;
        }

        if (junk_filter_needed)
        {
            /*
             * If there are multiple result relations, each one needs its own
             * junk filter.  Note this is only possible for UPDATE/DELETE, so
             * we can't be fooled by some needing a filter and some not.
             */
            if (list_length(plannedstmt->resultRelations) > 1)
            {
                PlanState **appendplans;
                int         as_nplans;
                ResultRelInfo *resultRelInfo;

                /* Top plan had better be an Append here. */
                Assert(IsA(plan, Append));
                Assert(((Append *) plan)->isTarget);
                Assert(IsA(planstate, AppendState));
                appendplans = ((AppendState *) planstate)->appendplans;
                as_nplans = ((AppendState *) planstate)->as_nplans;
                Assert(as_nplans == estate->es_num_result_relations);
                resultRelInfo = estate->es_result_relations;
                for (i = 0; i < as_nplans; i++)
                {
                    PlanState  *subplan = appendplans[i];
                    JunkFilter *j;

                    j = ExecInitJunkFilter(subplan->plan->targetlist,
                                           resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
                                           ExecAllocTableSlot(estate->es_tupleTable));

                    /*
                     * Since it must be UPDATE/DELETE, there had better be a
                     * "ctid" junk attribute in the tlist ... but ctid could
                     * be at a different resno for each result relation.  We
                     * look up the ctid resnos now and save them in the
                     * junkfilters.
                     */
                    j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                    if (!AttributeNumberIsValid(j->jf_junkAttNo))
                        elog(ERROR, "could not find junk ctid column");
                    resultRelInfo->ri_junkFilter = j;
                    resultRelInfo++;
                }

                /*
                 * Set active junkfilter too; at this point ExecInitAppend has
                 * already selected an active result relation...
                 */
                estate->es_junkFilter =
                    estate->es_result_relation_info->ri_junkFilter;
            }
            else
            {
                /* Normal case with just one JunkFilter */
                JunkFilter *j;

                j = ExecInitJunkFilter(planstate->plan->targetlist,
                                       tupType->tdhasoid,
                                       ExecAllocTableSlot(estate->es_tupleTable));
                estate->es_junkFilter = j;
                if (estate->es_result_relation_info)
                    estate->es_result_relation_info->ri_junkFilter = j;

                if (operation == CMD_SELECT)
                {
                    /* For SELECT, want to return the cleaned tuple type */
                    tupType = j->jf_cleanTupType;
                    /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
                    foreach(l, estate->es_rowMarks)
                    {
                        ExecRowMark *erm = (ExecRowMark *) lfirst(l);
                        char        resname[32];

                        snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
                        erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
                        if (!AttributeNumberIsValid(erm->ctidAttNo))
                            elog(ERROR, "could not find junk \"%s\" column",
                                 resname);
                    }
                }
                else if (operation == CMD_UPDATE || operation == CMD_DELETE)
                {
                    /* For UPDATE/DELETE, find the ctid junk attr now */
                    j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                    if (!AttributeNumberIsValid(j->jf_junkAttNo))
                        elog(ERROR, "could not find junk ctid column");
                }
            }
        }
        else
            estate->es_junkFilter = NULL;
    }
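
    /*
     * To illustrate the junk-filter machinery (a sketch; "foo" is a
     * hypothetical table): for
     *
     *      UPDATE foo SET x = x + 1;
     *
     * the planner appends a resjunk "ctid" column to the targetlist.  At
     * runtime, ExecGetJunkAttribute extracts that ctid from each plan tuple
     * to identify the row being replaced, and the junk filter projects a
     * clean tuple without the junk column for heap_update to store.
     */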

    /*
     * Initialize RETURNING projections if needed.
     */
    if (plannedstmt->returningLists)
    {
        TupleTableSlot *slot;
        ExprContext *econtext;
        ResultRelInfo *resultRelInfo;

        /*
         * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
         * We assume all the sublists will generate the same output tupdesc.
         */
        tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
                                 false);

        /* Set up a slot for the output of the RETURNING projection(s) */
        slot = ExecAllocTableSlot(estate->es_tupleTable);
        ExecSetSlotDescriptor(slot, tupType);
        /* Need an econtext too */
        econtext = CreateExprContext(estate);

        /*
         * Build a projection for each result rel.  Note that any SubPlans in
         * the RETURNING lists get attached to the topmost plan node.
         */
        Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
        resultRelInfo = estate->es_result_relations;
        foreach(l, plannedstmt->returningLists)
        {
            List       *rlist = (List *) lfirst(l);
            List       *rliststate;

            rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
            resultRelInfo->ri_projectReturning =
                ExecBuildProjectionInfo(rliststate, econtext, slot,
                                        resultRelInfo->ri_RelationDesc->rd_att);
            resultRelInfo++;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
static void
initResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  bool doInstrument)
{
    /*
     * Check valid relkind ... parser and/or planner should have noticed
     * this already, but let's make sure.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change view \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        if (doInstrument)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
        else
            resultRelInfo->ri_TrigInstrument = NULL;
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new index
     * entries for the tuples we add/update.  We need not do this for a
     * DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}

/*
 *      ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the
     * trigger event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need
     * any index information in ResultRelInfos used only for triggers,
     * so tell initResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    initResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}
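
/*
 * Concrete case (illustrative): DELETE FROM pk_table fires an RI trigger
 * that cascades to fk_table; fk_table is not among the query's result
 * relations, so the trigger code arrives here and we build and cache a
 * ResultRelInfo for it in es_trig_target_relations.  ("pk_table" and
 * "fk_table" are hypothetical names.)
 */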

/*
 *      ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }
    else
    {
        ResultRelInfo *ri = planstate->state->es_result_relation_info;

        if (ri != NULL)
        {
            Relation    rel = ri->ri_RelationDesc;

            if (rel != NULL)
            {
                *hasoids = rel->rd_rel->relhasoids;
                return true;
            }
        }
    }

    return false;
}
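
/*
 * Example: when inserting into a table created WITH OIDS, this returns
 * true with *hasoids = true, so every node in the plan tree builds tuples
 * with room for an OID; for a table WITHOUT OIDS it returns true with
 * *hasoids = false.  For a plain SELECT with no result relation it returns
 * false, and each node may choose freely (e.g. whether to use a physical
 * tlist).
 */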

/* ----------------------------------------------------------------
 *      ExecEndPlan
 *
 *      Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down any PlanQual processing we were doing
     */
    if (estate->es_evalPlanQual != NULL)
        EndEvalPlanQual(estate);

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor "tuple" table.
     */
    ExecDropTupleTable(estate->es_tupleTable, true);
    estate->es_tupleTable = NULL;

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = lfirst(l);

        heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *      ExecutePlan
 *
 *      processes the query plan to retrieve 'numberTuples' tuples in the
 *      direction specified.
 *
 *      Retrieves all tuples if numberTuples is 0
 *
 *      result is either a slot containing the last tuple in the case
 *      of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    JunkFilter *junkfilter;
    TupleTableSlot *planSlot;
    TupleTableSlot *slot;
    ItemPointer tupleid = NULL;
    ItemPointerData tuple_ctid;
    long        current_tuple_count;
    TupleTableSlot *result;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;
    result = NULL;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Process BEFORE EACH STATEMENT triggers
     */
    switch (operation)
    {
        case CMD_UPDATE:
            ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_DELETE:
            ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_INSERT:
            ExecBSInsertTriggers(estate, estate->es_result_relation_info);
            break;
        default:
            /* do nothing */
            break;
    }

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */

    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
lnext:  ;
        if (estate->es_useEvalPlan)
        {
            planSlot = EvalPlanQualNext(estate);
            if (TupIsNull(planSlot))
                planSlot = ExecProcNode(planstate);
        }
        else
            planSlot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just return null...
         */
        if (TupIsNull(planSlot))
        {
            result = NULL;
            break;
        }
        slot = planSlot;

        /*
         * if we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         *
         * Also, extract all the junk information we need.
         */
        if ((junkfilter = estate->es_junkFilter) != NULL)
        {
            Datum       datum;
            bool        isNull;

            /*
             * extract the 'ctid' junk attribute.
             */
            if (operation == CMD_UPDATE || operation == CMD_DELETE)
            {
                datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
                                             &isNull);
                /* shouldn't ever get a null result... */
                if (isNull)
                    elog(ERROR, "ctid is NULL");

                tupleid = (ItemPointer) DatumGetPointer(datum);
                tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
                tupleid = &tuple_ctid;
            }

            /*
             * Process any FOR UPDATE or FOR SHARE locking requested.
             */
            else if (estate->es_rowMarks != NIL)
            {
                ListCell   *l;

        lmark:  ;
                foreach(l, estate->es_rowMarks)
                {
                    ExecRowMark *erm = lfirst(l);
                    HeapTupleData tuple;
                    Buffer      buffer;
                    ItemPointerData update_ctid;
                    TransactionId update_xmax;
                    TupleTableSlot *newSlot;
                    LockTupleMode lockmode;
                    HTSU_Result test;

                    datum = ExecGetJunkAttribute(slot,
                                                 erm->ctidAttNo,
                                                 &isNull);
                    /* shouldn't ever get a null result... */
                    if (isNull)
                        elog(ERROR, "ctid is NULL");

                    tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

                    if (erm->forUpdate)
                        lockmode = LockTupleExclusive;
                    else
                        lockmode = LockTupleShared;

                    test = heap_lock_tuple(erm->relation, &tuple, &buffer,
                                           &update_ctid, &update_xmax,
                                           estate->es_snapshot->curcid,
                                           lockmode, erm->noWait);
                    ReleaseBuffer(buffer);
                    switch (test)
                    {
                        case HeapTupleSelfUpdated:
                            /* treat it as deleted; do not process */
                            goto lnext;

                        case HeapTupleMayBeUpdated:
                            break;

                        case HeapTupleUpdated:
                            if (IsXactIsoLevelSerializable)
                                ereport(ERROR,
                                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                         errmsg("could not serialize access due to concurrent update")));
                            if (!ItemPointerEquals(&update_ctid,
                                                   &tuple.t_self))
                            {
                                /* updated, so look at updated version */
                                newSlot = EvalPlanQual(estate,
                                                       erm->rti,
                                                       &update_ctid,
                                                       update_xmax,
                                                       estate->es_snapshot->curcid);
                                if (!TupIsNull(newSlot))
                                {
                                    slot = planSlot = newSlot;
                                    estate->es_useEvalPlan = true;
                                    goto lmark;
                                }
                            }

                            /*
                             * if tuple was deleted or PlanQual failed for
                             * updated tuple - we must not return this tuple!
                             */
                            goto lnext;

                        default:
                            elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                                 test);
                            return NULL;
                    }
                }
            }
1335
1336                         /*
1337                          * Create a new "clean" tuple with all junk attributes removed. We
1338                          * don't need to do this for DELETE, however (there will in fact
1339                          * be no non-junk attributes in a DELETE!)
1340                          */
1341                         if (operation != CMD_DELETE)
1342                                 slot = ExecFilterJunk(junkfilter, slot);
1343                 }
1344
1345                 /*
1346                  * now that we have a tuple, do the appropriate thing with it: either
1347                  * return it to the user, add it to a relation, delete it from a
1348                  * relation, or modify some of its attributes.
1349                  */
1350                 switch (operation)
1351                 {
1352                         case CMD_SELECT:
1353                                 ExecSelect(slot, dest, estate);
1354                                 result = slot;
1355                                 break;
1356
1357                         case CMD_INSERT:
1358                                 ExecInsert(slot, tupleid, planSlot, dest, estate);
1359                                 result = NULL;
1360                                 break;
1361
1362                         case CMD_DELETE:
1363                                 ExecDelete(tupleid, planSlot, dest, estate);
1364                                 result = NULL;
1365                                 break;
1366
1367                         case CMD_UPDATE:
1368                                 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1369                                 result = NULL;
1370                                 break;
1371
1372                         default:
1373                                 elog(ERROR, "unrecognized operation code: %d",
1374                                          (int) operation);
1375                                 result = NULL;
1376                                 break;
1377                 }
1378
1379                 /*
1380                  * check our tuple count: if we've processed the proper number then
1381                  * quit, else loop again and process more tuples.  Zero numberTuples
1382                  * means no limit.
1383                  */
1384                 current_tuple_count++;
1385                 if (numberTuples && numberTuples == current_tuple_count)
1386                         break;
1387         }
1388
1389         /*
1390          * Process AFTER EACH STATEMENT triggers
1391          */
1392         switch (operation)
1393         {
1394                 case CMD_UPDATE:
1395                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1396                         break;
1397                 case CMD_DELETE:
1398                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1399                         break;
1400                 case CMD_INSERT:
1401                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1402                         break;
1403                 default:
1404                         /* do nothing */
1405                         break;
1406         }
1407
1408         /*
1409          * here, result is either a slot containing a tuple in the case of a
1410          * SELECT or NULL otherwise.
1411          */
1412         return result;
1413 }
1414
1415 /* ----------------------------------------------------------------
1416  *              ExecSelect
1417  *
1418  *              SELECTs are easy: we just pass the tuple to the appropriate
1419  *              output function.
1420  * ----------------------------------------------------------------
1421  */
1422 static void
1423 ExecSelect(TupleTableSlot *slot,
1424                    DestReceiver *dest,
1425                    EState *estate)
1426 {
1427         (*dest->receiveSlot) (slot, dest);
1428         IncrRetrieved();
1429         (estate->es_processed)++;
1430 }
1431
1432 /* ----------------------------------------------------------------
1433  *              ExecInsert
1434  *
1435  *              INSERTs are trickier: we have to insert the tuple into
1436  *              the base relation and insert appropriate tuples into the
1437  *              index relations.
1438  * ----------------------------------------------------------------
1439  */
1440 static void
1441 ExecInsert(TupleTableSlot *slot,
1442                    ItemPointer tupleid,
1443                    TupleTableSlot *planSlot,
1444                    DestReceiver *dest,
1445                    EState *estate)
1446 {
1447         HeapTuple       tuple;
1448         ResultRelInfo *resultRelInfo;
1449         Relation        resultRelationDesc;
1450         Oid                     newId;
1451
1452         /*
1453          * get the heap tuple out of the tuple table slot, making sure we have a
1454          * writable copy
1455          */
1456         tuple = ExecMaterializeSlot(slot);
1457
1458         /*
1459          * get information on the (current) result relation
1460          */
1461         resultRelInfo = estate->es_result_relation_info;
1462         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1463
1464         /* BEFORE ROW INSERT Triggers */
1465         if (resultRelInfo->ri_TrigDesc &&
1466                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1467         {
1468                 HeapTuple       newtuple;
1469
1470                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1471
1472                 if (newtuple == NULL)   /* "do nothing" */
1473                         return;
1474
1475                 if (newtuple != tuple)  /* modified by Trigger(s) */
1476                 {
1477                         /*
1478                          * Put the modified tuple into a slot for convenience of routines
1479                          * below.  We assume the tuple was allocated in per-tuple memory
1480                          * context, and therefore will go away by itself. The tuple table
1481                          * slot should not try to clear it.
1482                          */
1483                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1484
1485                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1486                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1487                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1488                         slot = newslot;
1489                         tuple = newtuple;
1490                 }
1491         }
1492
1493         /*
1494          * Check the constraints of the tuple
1495          */
1496         if (resultRelationDesc->rd_att->constr)
1497                 ExecConstraints(resultRelInfo, slot, estate);
1498
1499         /*
1500          * insert the tuple
1501          *
1502          * Note: heap_insert returns the tid (location) of the new tuple in the
1503          * t_self field.
1504          */
1505         newId = heap_insert(resultRelationDesc, tuple,
1506                                                 estate->es_snapshot->curcid,
1507                                                 true, true);
1508
1509         IncrAppended();
1510         (estate->es_processed)++;
1511         estate->es_lastoid = newId;
1512         setLastTid(&(tuple->t_self));
1513
1514         /*
1515          * insert index entries for tuple
1516          */
1517         if (resultRelInfo->ri_NumIndices > 0)
1518                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1519
1520         /* AFTER ROW INSERT Triggers */
1521         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1522
1523         /* Process RETURNING if present */
1524         if (resultRelInfo->ri_projectReturning)
1525                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1526                                                          slot, planSlot, dest);
1527 }
1528
1529 /* ----------------------------------------------------------------
1530  *              ExecDelete
1531  *
1532  *              DELETE is like UPDATE, except that we delete the tuple and no
1533  *              index modifications are needed
1534  * ----------------------------------------------------------------
1535  */
1536 static void
1537 ExecDelete(ItemPointer tupleid,
1538                    TupleTableSlot *planSlot,
1539                    DestReceiver *dest,
1540                    EState *estate)
1541 {
1542         ResultRelInfo *resultRelInfo;
1543         Relation        resultRelationDesc;
1544         HTSU_Result result;
1545         ItemPointerData update_ctid;
1546         TransactionId update_xmax;
1547
1548         /*
1549          * get information on the (current) result relation
1550          */
1551         resultRelInfo = estate->es_result_relation_info;
1552         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1553
1554         /* BEFORE ROW DELETE Triggers */
1555         if (resultRelInfo->ri_TrigDesc &&
1556                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1557         {
1558                 bool            dodelete;
1559
1560                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1561                                                                                 estate->es_snapshot->curcid);
1562
1563                 if (!dodelete)                  /* "do nothing" */
1564                         return;
1565         }
1566
1567         /*
1568          * delete the tuple
1569          *
1570          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1571          * the row to be deleted is visible to that snapshot, and throw a can't-
1572          * serialize error if not.      This is a special-case behavior needed for
1573          * referential integrity updates in serializable transactions.
1574          */
1575 ldelete:;
1576         result = heap_delete(resultRelationDesc, tupleid,
1577                                                  &update_ctid, &update_xmax,
1578                                                  estate->es_snapshot->curcid,
1579                                                  estate->es_crosscheck_snapshot,
1580                                                  true /* wait for commit */ );
1581         switch (result)
1582         {
1583                 case HeapTupleSelfUpdated:
1584                         /* already deleted by self; nothing to do */
1585                         return;
1586
1587                 case HeapTupleMayBeUpdated:
1588                         break;
1589
1590                 case HeapTupleUpdated:
1591                         if (IsXactIsoLevelSerializable)
1592                                 ereport(ERROR,
1593                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1594                                                  errmsg("could not serialize access due to concurrent update")));
1595                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1596                         {
1597                                 TupleTableSlot *epqslot;
1598
1599                                 epqslot = EvalPlanQual(estate,
1600                                                                            resultRelInfo->ri_RangeTableIndex,
1601                                                                            &update_ctid,
1602                                                                            update_xmax,
1603                                                                            estate->es_snapshot->curcid);
1604                                 if (!TupIsNull(epqslot))
1605                                 {
1606                                         *tupleid = update_ctid;
1607                                         goto ldelete;
1608                                 }
1609                         }
1610                         /* tuple already deleted; nothing to do */
1611                         return;
1612
1613                 default:
1614                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1615                         return;
1616         }
1617
1618         IncrDeleted();
1619         (estate->es_processed)++;
1620
1621         /*
1622          * Note: Normally one would think that we have to delete index tuples
1623          * associated with the heap tuple now...
1624          *
1625          * ... but in POSTGRES, we have no need to do this because VACUUM will
1626          * take care of it later.  We can't delete index tuples immediately
1627          * anyway, since the tuple is still visible to other transactions.
1628          */
1629
1630         /* AFTER ROW DELETE Triggers */
1631         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1632
1633         /* Process RETURNING if present */
1634         if (resultRelInfo->ri_projectReturning)
1635         {
1636                 /*
1637                  * We have to put the target tuple into a slot, which means we must
1638                  * first fetch it.  We can use the trigger tuple slot.
1639                  */
1640                 TupleTableSlot *slot = estate->es_trig_tuple_slot;
1641                 HeapTupleData deltuple;
1642                 Buffer          delbuffer;
1643
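                /*
                 * Fetch the just-deleted tuple using SnapshotAny: no visibility
                 * test is wanted, since we already know exactly which tuple
                 * version we need, by TID.
                 */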
1644                 deltuple.t_self = *tupleid;
1645                 if (!heap_fetch(resultRelationDesc, SnapshotAny,
1646                                                 &deltuple, &delbuffer, false, NULL))
1647                         elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1648
1649                 if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
1650                         ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
1651                 ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
1652
1653                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1654                                                          slot, planSlot, dest);
1655
1656                 ExecClearTuple(slot);
1657                 ReleaseBuffer(delbuffer);
1658         }
1659 }
1660
1661 /* ----------------------------------------------------------------
1662  *              ExecUpdate
1663  *
1664  *              note: we can't run UPDATE queries with transactions
1665  *              off, because UPDATEs are actually INSERTs and our
1666  *              scan will mistakenly loop forever, updating the tuple
1667  *              it just inserted.  This should be fixed, but until it
1668  *              is we don't want to get stuck in an infinite loop
1669  *              that corrupts the database.
1670  * ----------------------------------------------------------------
1671  */
1672 static void
1673 ExecUpdate(TupleTableSlot *slot,
1674                    ItemPointer tupleid,
1675                    TupleTableSlot *planSlot,
1676                    DestReceiver *dest,
1677                    EState *estate)
1678 {
1679         HeapTuple       tuple;
1680         ResultRelInfo *resultRelInfo;
1681         Relation        resultRelationDesc;
1682         HTSU_Result result;
1683         ItemPointerData update_ctid;
1684         TransactionId update_xmax;
1685
1686         /*
1687          * abort the operation if not running transactions
1688          */
1689         if (IsBootstrapProcessingMode())
1690                 elog(ERROR, "cannot UPDATE during bootstrap");
1691
1692         /*
1693          * get the heap tuple out of the tuple table slot, making sure we have a
1694          * writable copy
1695          */
1696         tuple = ExecMaterializeSlot(slot);
1697
1698         /*
1699          * get information on the (current) result relation
1700          */
1701         resultRelInfo = estate->es_result_relation_info;
1702         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1703
1704         /* BEFORE ROW UPDATE Triggers */
1705         if (resultRelInfo->ri_TrigDesc &&
1706                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1707         {
1708                 HeapTuple       newtuple;
1709
1710                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1711                                                                                 tupleid, tuple,
1712                                                                                 estate->es_snapshot->curcid);
1713
1714                 if (newtuple == NULL)   /* "do nothing" */
1715                         return;
1716
1717                 if (newtuple != tuple)  /* modified by Trigger(s) */
1718                 {
1719                         /*
1720                          * Put the modified tuple into a slot for convenience of routines
1721                          * below.  We assume the tuple was allocated in per-tuple memory
1722                          * context, and therefore will go away by itself. The tuple table
1723                          * slot should not try to clear it.
1724                          */
1725                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1726
1727                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1728                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1729                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1730                         slot = newslot;
1731                         tuple = newtuple;
1732                 }
1733         }
1734
1735         /*
1736          * Check the constraints of the tuple
1737          *
1738          * If we generate a new candidate tuple after EvalPlanQual testing, we
1739          * must loop back here and recheck constraints.  (We don't need to redo
1740          * triggers, however.  If there are any BEFORE triggers then trigger.c
1741          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1742          * need to do them again.)
1743          */
1744 lreplace:;
1745         if (resultRelationDesc->rd_att->constr)
1746                 ExecConstraints(resultRelInfo, slot, estate);
1747
1748         /*
1749          * replace the heap tuple
1750          *
1751          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1752          * the row to be updated is visible to that snapshot, and throw a can't-
1753          * serialize error if not.      This is a special-case behavior needed for
1754          * referential integrity updates in serializable transactions.
1755          */
1756         result = heap_update(resultRelationDesc, tupleid, tuple,
1757                                                  &update_ctid, &update_xmax,
1758                                                  estate->es_snapshot->curcid,
1759                                                  estate->es_crosscheck_snapshot,
1760                                                  true /* wait for commit */ );
1761         switch (result)
1762         {
1763                 case HeapTupleSelfUpdated:
1764                         /* already deleted by self; nothing to do */
1765                         return;
1766
1767                 case HeapTupleMayBeUpdated:
1768                         break;
1769
1770                 case HeapTupleUpdated:
1771                         if (IsXactIsoLevelSerializable)
1772                                 ereport(ERROR,
1773                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1774                                                  errmsg("could not serialize access due to concurrent update")));
1775                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1776                         {
1777                                 TupleTableSlot *epqslot;
1778
1779                                 epqslot = EvalPlanQual(estate,
1780                                                                            resultRelInfo->ri_RangeTableIndex,
1781                                                                            &update_ctid,
1782                                                                            update_xmax,
1783                                                                            estate->es_snapshot->curcid);
1784                                 if (!TupIsNull(epqslot))
1785                                 {
1786                                         *tupleid = update_ctid;
1787                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1788                                         tuple = ExecMaterializeSlot(slot);
1789                                         goto lreplace;
1790                                 }
1791                         }
1792                         /* tuple already deleted; nothing to do */
1793                         return;
1794
1795                 default:
1796                         elog(ERROR, "unrecognized heap_update status: %u", result);
1797                         return;
1798         }
1799
1800         IncrReplaced();
1801         (estate->es_processed)++;
1802
1803         /*
1804          * Note: instead of having to update the old index tuples associated with
1805          * the heap tuple, all we do is form and insert new index tuples. This is
1806          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1807          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1808          * here is insert new index tuples.  -cim 9/27/89
1809          */
1810
1811         /*
1812          * insert index entries for tuple
1813          *
1814          * Note: heap_update returns the tid (location) of the new tuple in the
1815          * t_self field.
1816          */
1817         if (resultRelInfo->ri_NumIndices > 0)
1818                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1819
1820         /* AFTER ROW UPDATE Triggers */
1821         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1822
1823         /* Process RETURNING if present */
1824         if (resultRelInfo->ri_projectReturning)
1825                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1826                                                          slot, planSlot, dest);
1827 }
1828
1829 /*
1830  * ExecRelCheck --- check that tuple meets constraints for result relation
1831  */
1832 static const char *
1833 ExecRelCheck(ResultRelInfo *resultRelInfo,
1834                          TupleTableSlot *slot, EState *estate)
1835 {
1836         Relation        rel = resultRelInfo->ri_RelationDesc;
1837         int                     ncheck = rel->rd_att->constr->num_check;
1838         ConstrCheck *check = rel->rd_att->constr->check;
1839         ExprContext *econtext;
1840         MemoryContext oldContext;
1841         List       *qual;
1842         int                     i;
1843
1844         /*
1845          * If first time through for this result relation, build expression
1846          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1847          * memory context so they'll survive throughout the query.
1848          */
1849         if (resultRelInfo->ri_ConstraintExprs == NULL)
1850         {
1851                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1852                 resultRelInfo->ri_ConstraintExprs =
1853                         (List **) palloc(ncheck * sizeof(List *));
1854                 for (i = 0; i < ncheck; i++)
1855                 {
1856                         /* ExecQual wants implicit-AND form */
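                        /* e.g., CHECK (a > 0 AND b < 10) becomes the list {a > 0, b < 10} */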
1857                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1858                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1859                                 ExecPrepareExpr((Expr *) qual, estate);
1860                 }
1861                 MemoryContextSwitchTo(oldContext);
1862         }
1863
1864         /*
1865          * We will use the EState's per-tuple context for evaluating constraint
1866          * expressions (creating it if it's not already there).
1867          */
1868         econtext = GetPerTupleExprContext(estate);
1869
1870         /* Arrange for econtext's scan tuple to be the tuple under test */
1871         econtext->ecxt_scantuple = slot;
1872
1873         /* And evaluate the constraints */
1874         for (i = 0; i < ncheck; i++)
1875         {
1876                 qual = resultRelInfo->ri_ConstraintExprs[i];
1877
1878                 /*
1879                  * NOTE: SQL92 specifies that a NULL result from a constraint
1880                  * expression is not to be treated as a failure.  Therefore, tell
1881                  * ExecQual to return TRUE for NULL.
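                         * (For example, CHECK (price > 0) is considered satisfied when
                         * price IS NULL.)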
1882                  */
1883                 if (!ExecQual(qual, econtext, true))
1884                         return check[i].ccname;
1885         }
1886
1887         /* NULL result means no error */
1888         return NULL;
1889 }
1890
1891 void
1892 ExecConstraints(ResultRelInfo *resultRelInfo,
1893                                 TupleTableSlot *slot, EState *estate)
1894 {
1895         Relation        rel = resultRelInfo->ri_RelationDesc;
1896         TupleConstr *constr = rel->rd_att->constr;
1897
1898         Assert(constr);
1899
1900         if (constr->has_not_null)
1901         {
1902                 int                     natts = rel->rd_att->natts;
1903                 int                     attrChk;
1904
1905                 for (attrChk = 1; attrChk <= natts; attrChk++)
1906                 {
1907                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1908                                 slot_attisnull(slot, attrChk))
1909                                 ereport(ERROR,
1910                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1911                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1912                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1913                 }
1914         }
1915
1916         if (constr->num_check > 0)
1917         {
1918                 const char *failed;
1919
1920                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1921                         ereport(ERROR,
1922                                         (errcode(ERRCODE_CHECK_VIOLATION),
1923                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1924                                                         RelationGetRelationName(rel), failed)));
1925         }
1926 }
1927
1928 /*
1929  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1930  *
1931  * projectReturning: RETURNING projection info for current result rel
1932  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1933  * planSlot: slot holding tuple returned by top plan node
1934  * dest: where to send the output
1935  */
1936 static void
1937 ExecProcessReturning(ProjectionInfo *projectReturning,
1938                                          TupleTableSlot *tupleSlot,
1939                                          TupleTableSlot *planSlot,
1940                                          DestReceiver *dest)
1941 {
1942         ExprContext *econtext = projectReturning->pi_exprContext;
1943         TupleTableSlot *retSlot;
1944
1945         /*
1946          * Reset per-tuple memory context to free any expression evaluation
1947          * storage allocated in the previous cycle.
1948          */
1949         ResetExprContext(econtext);
1950
1951         /* Make tuple and any needed join variables available to ExecProject */
1952         econtext->ecxt_scantuple = tupleSlot;
1953         econtext->ecxt_outertuple = planSlot;
1954
1955         /* Compute the RETURNING expressions */
1956         retSlot = ExecProject(projectReturning, NULL);
1957
1958         /* Send to dest */
1959         (*dest->receiveSlot) (retSlot, dest);
1960
1961         ExecClearTuple(retSlot);
1962 }
1963
1964 /*
1965  * Check a modified tuple to see if we want to process its updated version
1966  * under READ COMMITTED rules.
1967  *
1968  * See backend/executor/README for some info about how this works.
1969  *
1970  *      estate - executor state data
1971  *      rti - rangetable index of table containing tuple
1972  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1973  *      priorXmax - t_xmax from the outdated tuple
1974  *      curCid - command ID of current command of my transaction
1975  *
1976  * *tid is also an output parameter: it's modified to hold the TID of the
1977  * latest version of the tuple (note this may be changed even on failure)
1978  *
1979  * Returns a slot containing the new candidate update/delete tuple, or
1980  * NULL if we determine we shouldn't process the row.
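 *
 * Illustration: under READ COMMITTED, if our UPDATE finds its target row
 * already updated by a committed concurrent transaction, we chase the
 * update chain to the latest version, re-evaluate the query quals against
 * it, and either return that version for processing or NULL to skip it.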
1981  */
1982 TupleTableSlot *
1983 EvalPlanQual(EState *estate, Index rti,
1984                          ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1985 {
1986         evalPlanQual *epq;
1987         EState     *epqstate;
1988         Relation        relation;
1989         HeapTupleData tuple;
1990         HeapTuple       copyTuple = NULL;
1991         SnapshotData SnapshotDirty;
1992         bool            endNode;
1993
1994         Assert(rti != 0);
1995
1996         /*
1997          * find relation containing target tuple
1998          */
1999         if (estate->es_result_relation_info != NULL &&
2000                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2001                 relation = estate->es_result_relation_info->ri_RelationDesc;
2002         else
2003         {
2004                 ListCell   *l;
2005
2006                 relation = NULL;
2007                 foreach(l, estate->es_rowMarks)
2008                 {
2009                         if (((ExecRowMark *) lfirst(l))->rti == rti)
2010                         {
2011                                 relation = ((ExecRowMark *) lfirst(l))->relation;
2012                                 break;
2013                         }
2014                 }
2015                 if (relation == NULL)
2016                         elog(ERROR, "could not find RowMark for RT index %u", rti);
2017         }
2018
2019         /*
2020          * fetch tid tuple
2021          *
2022          * Loop here to deal with updated or busy tuples
2023          */
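        /*
         * Use a dirty snapshot: it sees uncommitted tuples and reports the
         * XIDs of in-progress inserters/deleters, which lets us wait out
         * concurrent updaters below.
         */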
2024         InitDirtySnapshot(SnapshotDirty);
2025         tuple.t_self = *tid;
2026         for (;;)
2027         {
2028                 Buffer          buffer;
2029
2030                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2031                 {
2032                         /*
2033                          * If xmin isn't what we're expecting, the slot must have been
2034                          * recycled and reused for an unrelated tuple.  This implies that
2035                          * the latest version of the row was deleted, so we need do
2036                          * nothing.  (Should be safe to examine xmin without getting
2037                          * buffer's content lock, since xmin never changes in an existing
2038                          * tuple.)
2039                          */
2040                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2041                                                                          priorXmax))
2042                         {
2043                                 ReleaseBuffer(buffer);
2044                                 return NULL;
2045                         }
2046
2047                         /* otherwise xmin should not be dirty... */
2048                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2049                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2050
2051                         /*
2052                          * If the tuple is being updated by another transaction, we
2053                          * have to wait for it to commit or abort.
2054                          */
2055                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2056                         {
2057                                 ReleaseBuffer(buffer);
2058                                 XactLockTableWait(SnapshotDirty.xmax);
2059                                 continue;               /* loop back to repeat heap_fetch */
2060                         }
2061
2062                         /*
2063                          * If tuple was inserted by our own transaction, we have to check
2064                          * cmin against curCid: cmin >= curCid means our command cannot
2065                          * see the tuple, so we should ignore it.  Without this we are
2066                          * open to the "Halloween problem" of indefinitely re-updating the
2067                          * same tuple.  (We need not check cmax because
2068                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2069                          * transaction dead, regardless of cmax.)  We just checked that
2070                          * priorXmax == xmin, so we can test that variable instead of
2071                          * doing HeapTupleHeaderGetXmin again.
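                         * (The classic Halloween example: an UPDATE that raises each
                         * salary by 10% must not see, and re-update, the new row
                         * versions it has already produced.)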
2072                          */
2073                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2074                                 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
2075                         {
2076                                 ReleaseBuffer(buffer);
2077                                 return NULL;
2078                         }
2079
2080                         /*
2081                          * We got tuple - now copy it for use by recheck query.
2082                          */
2083                         copyTuple = heap_copytuple(&tuple);
2084                         ReleaseBuffer(buffer);
2085                         break;
2086                 }
2087
2088                 /*
2089                  * If the referenced slot was actually empty, the latest version of
2090                  * the row must have been deleted, so we need do nothing.
2091                  */
2092                 if (tuple.t_data == NULL)
2093                 {
2094                         ReleaseBuffer(buffer);
2095                         return NULL;
2096                 }
2097
2098                 /*
2099                  * As above, if xmin isn't what we're expecting, do nothing.
2100                  */
2101                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2102                                                                  priorXmax))
2103                 {
2104                         ReleaseBuffer(buffer);
2105                         return NULL;
2106                 }
2107
2108                 /*
2109                  * If we get here, the tuple was found but failed SnapshotDirty.
2110                  * Assuming the xmin is either a committed xact or our own xact (as it
2111                  * certainly should be if we're trying to modify the tuple), this must
2112                  * mean that the row was updated or deleted by either a committed xact
2113                  * or our own xact.  If it was deleted, we can ignore it; if it was
2114                  * updated then chain up to the next version and repeat the whole
2115                  * test.
2116                  *
2117                  * As above, it should be safe to examine xmax and t_ctid without the
2118                  * buffer content lock, because they can't be changing.
2119                  */
2120                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2121                 {
2122                         /* deleted, so forget about it */
2123                         ReleaseBuffer(buffer);
2124                         return NULL;
2125                 }
2126
2127                 /* updated, so look at the updated row */
2128                 tuple.t_self = tuple.t_data->t_ctid;
2129                 /* updated row should have xmin matching this xmax */
2130                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2131                 ReleaseBuffer(buffer);
2132                 /* loop back to fetch next in chain */
2133         }
2134
2135         /*
2136          * For UPDATE/DELETE we have to return the TID of the actual row we're
2137          * executing the PlanQual check for.
2138          */
2139         *tid = tuple.t_self;
2140
2141         /*
2142          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2143          */
2144         epq = estate->es_evalPlanQual;
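        /* endNode: does this stack entry have an active plan we must shut down? */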
2145         endNode = true;
2146
2147         if (epq != NULL && epq->rti == 0)
2148         {
2149                 /* Top PQ stack entry is idle, so re-use it */
2150                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2151                 epq->rti = rti;
2152                 endNode = false;
2153         }
2154
2155         /*
2156          * If this is a request for another RTE, Ra, then we have to check
2157          * whether PlanQual was already requested for Ra.  If so, Ra's row was
2158          * updated again, and we must restart the old execution for Ra,
2159          * discarding everything done after Ra was suspended.
2160          */
2161         if (epq != NULL && epq->rti != rti &&
2162                 epq->estate->es_evTuple[rti - 1] != NULL)
2163         {
2164                 do
2165                 {
2166                         evalPlanQual *oldepq;
2167
2168                         /* stop execution */
2169                         EvalPlanQualStop(epq);
2170                         /* pop previous PlanQual from the stack */
2171                         oldepq = epq->next;
2172                         Assert(oldepq && oldepq->rti != 0);
2173                         /* push current PQ to freePQ stack */
2174                         oldepq->free = epq;
2175                         epq = oldepq;
2176                         estate->es_evalPlanQual = epq;
2177                 } while (epq->rti != rti);
2178         }
2179
2180         /*
2181          * If this is a request for a different RTE, we have to suspend execution
2182          * of the current PlanQual and start execution for the new one.
2183          */
2184         if (epq == NULL || epq->rti != rti)
2185         {
2186                 /* try to reuse plan used previously */
2187                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2188
2189                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2190                 {
2191                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2192                         newepq->free = NULL;
2193                         newepq->estate = NULL;
2194                         newepq->planstate = NULL;
2195                 }
2196                 else
2197                 {
2198                         /* recycle previously used PlanQual */
2199                         Assert(newepq->estate == NULL);
2200                         epq->free = NULL;
2201                 }
2202                 /* push current PQ to the stack */
2203                 newepq->next = epq;
2204                 epq = newepq;
2205                 estate->es_evalPlanQual = epq;
2206                 epq->rti = rti;
2207                 endNode = false;
2208         }
2209
2210         Assert(epq->rti == rti);
2211
2212         /*
2213          * OK, this is a request for the same RTE.  Unfortunately we still have to
2214          * end and restart execution of the plan, because ExecReScan wouldn't
2215          * ensure that upper plan nodes would reset themselves.  We could make
2216          * that work if insertion of the target tuple were integrated with the
2217          * Param mechanism somehow, so that the upper plan nodes know that their
2218          * children's outputs have changed.
2219          *
2220          * Note that the stack of free evalPlanQual nodes is quite useless at the
2221          * moment, since it only saves us from pallocing/releasing the
2222          * evalPlanQual nodes themselves.  But it will be useful once we implement
2223          * ReScan instead of end/restart for re-using PlanQual nodes.
2224          */
2225         if (endNode)
2226         {
2227                 /* stop execution */
2228                 EvalPlanQualStop(epq);
2229         }
2230
2231         /*
2232          * Initialize new recheck query.
2233          *
2234          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2235          * instead copy down changeable state from the top plan (including
2236          * es_result_relation_info, es_junkFilter) and reset locally changeable
2237          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2238          */
2239         EvalPlanQualStart(epq, estate, epq->next);
2240
2241         /*
2242          * free the old RTE's tuple, if any, and store the target tuple where the
2243          * relation's scan node will see it
2244          */
2245         epqstate = epq->estate;
2246         if (epqstate->es_evTuple[rti - 1] != NULL)
2247                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2248         epqstate->es_evTuple[rti - 1] = copyTuple;
2249
2250         return EvalPlanQualNext(estate);
2251 }
2252
2253 static TupleTableSlot *
2254 EvalPlanQualNext(EState *estate)
2255 {
2256         evalPlanQual *epq = estate->es_evalPlanQual;
2257         MemoryContext oldcontext;
2258         TupleTableSlot *slot;
2259
2260         Assert(epq->rti != 0);
2261
2262 lpqnext:;
2263         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2264         slot = ExecProcNode(epq->planstate);
2265         MemoryContextSwitchTo(oldcontext);
2266
2267         /*
2268          * No more tuples for this PQ.  Continue with the previous one.
2269          */
2270         if (TupIsNull(slot))
2271         {
2272                 evalPlanQual *oldepq;
2273
2274                 /* stop execution */
2275                 EvalPlanQualStop(epq);
2276                 /* pop old PQ from the stack */
2277                 oldepq = epq->next;
2278                 if (oldepq == NULL)
2279                 {
2280                         /* this is the first (oldest) PQ - mark as free */
2281                         epq->rti = 0;
2282                         estate->es_useEvalPlan = false;
2283                         /* and continue Query execution */
2284                         return NULL;
2285                 }
2286                 Assert(oldepq->rti != 0);
2287                 /* push current PQ to freePQ stack */
2288                 oldepq->free = epq;
2289                 epq = oldepq;
2290                 estate->es_evalPlanQual = epq;
2291                 goto lpqnext;
2292         }
2293
2294         return slot;
2295 }
2296
2297 static void
2298 EndEvalPlanQual(EState *estate)
2299 {
2300         evalPlanQual *epq = estate->es_evalPlanQual;
2301
2302         if (epq->rti == 0)                      /* plans already shut down */
2303         {
2304                 Assert(epq->next == NULL);
2305                 return;
2306         }
2307
2308         for (;;)
2309         {
2310                 evalPlanQual *oldepq;
2311
2312                 /* stop execution */
2313                 EvalPlanQualStop(epq);
2314                 /* pop old PQ from the stack */
2315                 oldepq = epq->next;
2316                 if (oldepq == NULL)
2317                 {
2318                         /* this is the first (oldest) PQ - mark as free */
2319                         epq->rti = 0;
2320                         estate->es_useEvalPlan = false;
2321                         break;
2322                 }
2323                 Assert(oldepq->rti != 0);
2324                 /* push current PQ to freePQ stack */
2325                 oldepq->free = epq;
2326                 epq = oldepq;
2327                 estate->es_evalPlanQual = epq;
2328         }
2329 }
2330
2331 /*
2332  * Start execution of one level of PlanQual.
2333  *
2334  * This is a cut-down version of ExecutorStart(): we copy some state from
2335  * the top-level estate rather than initializing it fresh.
2336  */
2337 static void
2338 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2339 {
2340         EState     *epqstate;
2341         int                     rtsize;
2342         MemoryContext oldcontext;
2343         ListCell   *l;
2344
2345         rtsize = list_length(estate->es_range_table);
2346
2347         epq->estate = epqstate = CreateExecutorState();
2348
2349         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2350
2351         /*
2352          * The epqstates share the top query's copy of unchanging state such as
2353          * the snapshot, rangetable, result-rel info, and external Param info.
2354          * They need their own copies of local state, including a tuple table,
2355          * es_param_exec_vals, etc.
2356          */
2357         epqstate->es_direction = ForwardScanDirection;
2358         epqstate->es_snapshot = estate->es_snapshot;
2359         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2360         epqstate->es_range_table = estate->es_range_table;
2361         epqstate->es_result_relations = estate->es_result_relations;
2362         epqstate->es_num_result_relations = estate->es_num_result_relations;
2363         epqstate->es_result_relation_info = estate->es_result_relation_info;
2364         epqstate->es_junkFilter = estate->es_junkFilter;
2365         /* es_trig_target_relations must NOT be copied */
2366         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2367         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2368         epqstate->es_param_list_info = estate->es_param_list_info;
2369         if (estate->es_plannedstmt->nParamExec > 0)
2370                 epqstate->es_param_exec_vals = (ParamExecData *)
2371                         palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2372         epqstate->es_rowMarks = estate->es_rowMarks;
2373         epqstate->es_instrument = estate->es_instrument;
2374         epqstate->es_select_into = estate->es_select_into;
2375         epqstate->es_into_oids = estate->es_into_oids;
2376         epqstate->es_plannedstmt = estate->es_plannedstmt;
2377
2378         /*
2379          * Each epqstate must have its own es_evTupleNull state, but all the stack
2380          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2381          * the value being examined by an outer recheck.
2382          */
2383         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2384         if (priorepq == NULL)
2385                 /* first PQ stack entry */
2386                 epqstate->es_evTuple = (HeapTuple *)
2387                         palloc0(rtsize * sizeof(HeapTuple));
2388         else
2389                 /* later stack entries share the same storage */
2390                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2391
2392         /*
2393          * Create sub-tuple-table; we needn't redo the CountSlots work though.
2394          */
2395         epqstate->es_tupleTable =
2396                 ExecCreateTupleTable(estate->es_tupleTable->size);
2397
2398         /*
2399          * Initialize private state information for each SubPlan.  We must do
2400          * this before running ExecInitNode on the main query tree, since
2401          * ExecInitSubPlan expects to be able to find these entries.
2402          */
2403         Assert(epqstate->es_subplanstates == NIL);
2404         foreach(l, estate->es_plannedstmt->subplans)
2405         {
2406                 Plan   *subplan = (Plan *) lfirst(l);
2407                 PlanState *subplanstate;
2408
2409                 subplanstate = ExecInitNode(subplan, epqstate, 0);
2410
2411                 epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
2412                                                                                          subplanstate);
2413         }
2414
2415         /*
2416          * Initialize the private state information for all the nodes in the query
2417          * tree.  This opens files, allocates storage and leaves us ready to start
2418          * processing tuples.
2419          */
2420         epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2421
2422         MemoryContextSwitchTo(oldcontext);
2423 }
2424
2425 /*
2426  * End execution of one level of PlanQual.
2427  *
2428  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2429  * of the normal cleanup, but *not* close result relations (which we are
2430  * just sharing from the outer query).  We do, however, have to close any
2431  * trigger target relations that got opened, since those are not shared.
2432  */
2433 static void
2434 EvalPlanQualStop(evalPlanQual *epq)
2435 {
2436         EState     *epqstate = epq->estate;
2437         MemoryContext oldcontext;
2438         ListCell   *l;
2439
2440         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2441
2442         ExecEndNode(epq->planstate);
2443
2444         foreach(l, epqstate->es_subplanstates)
2445         {
2446                 PlanState *subplanstate = (PlanState *) lfirst(l);
2447
2448                 ExecEndNode(subplanstate);
2449         }
2450
2451         ExecDropTupleTable(epqstate->es_tupleTable, true);
2452         epqstate->es_tupleTable = NULL;
2453
2454         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2455         {
2456                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2457                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2458         }
2459
2460         foreach(l, epqstate->es_trig_target_relations)
2461         {
2462                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2463
2464                 /* Close indices and then the relation itself */
2465                 ExecCloseIndices(resultRelInfo);
2466                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2467         }
2468
2469         MemoryContextSwitchTo(oldcontext);
2470
2471         FreeExecutorState(epqstate);
2472
2473         epq->estate = NULL;
2474         epq->planstate = NULL;
2475 }
2476
2477 /*
2478  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2479  *
2480  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2481  * are looking at a row returned by the EvalPlanQual machinery, we need
2482  * to look at the subsidiary state instead.
2483  */
2484 PlanState *
2485 ExecGetActivePlanTree(QueryDesc *queryDesc)
2486 {
2487         EState     *estate = queryDesc->estate;
2488
2489         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2490                 return estate->es_evalPlanQual->planstate;
2491         else
2492                 return queryDesc->planstate;
2493 }
2494
2495
2496 /*
2497  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2498  *
2499  * We implement SELECT INTO by diverting SELECT's normal output with
2500  * a specialized DestReceiver type.
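 * (Each tuple the plan produces is handed to our receiveSlot routine,
 * which inserts it into the target relation instead of sending it to
 * the client.)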
2501  *
2502  * TODO: remove some of the INTO-specific cruft from EState, and keep
2503  * it in the DestReceiver instead.
2504  */
2505
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
} DR_intorel;

/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId))
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);
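
	/*
	 * (Illustrative only: into->options carries any storage parameters
	 * the user supplied in the WITH clause, e.g. the fillfactor in
	 * CREATE TABLE newtab WITH (fillfactor = 70) AS SELECT ...)
	 */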

	/* have to copy the actual tupdesc to get rid of any constraints */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/* use_wal off requires rd_targblock to be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.
	 *
	 * Note that for a non-temp INTO table, this is safe only because we know
	 * that the catalog changes above will have been WAL-logged, and so
	 * RecordTransactionCommit will think it needs to WAL-log the eventual
	 * transaction commit.  Otherwise the commit might be lost, even though
	 * all the data is safely fsync'd ...
	 */
	estate->es_into_relation_use_wal = XLogArchivingActive();
	estate->es_into_relation_descriptor = intoRelationDesc;

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
}

/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	EState	   *estate = queryDesc->estate;

	/* OpenIntoRel might never have gotten called */
	if (estate->es_into_relation_descriptor)
	{
		/* If we skipped using WAL, must heap_sync before commit */
		if (!estate->es_into_relation_use_wal)
			heap_sync(estate->es_into_relation_descriptor);

		/* close rel, but keep lock until commit */
		heap_close(estate->es_into_relation_descriptor, NoLock);

		estate->es_into_relation_descriptor = NULL;
	}
}
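
/*
 * A minimal sketch of the WAL-skip contract illustrated above (the
 * helper name is hypothetical): when the inserts were not WAL-logged,
 * the heap must be forced to disk with heap_sync before the transaction
 * commits; otherwise a crash shortly after commit could lose rows that
 * the committed transaction claims to have created.
 */
#ifdef NOT_USED
static void
example_flush_unlogged_rel(Relation rel, bool used_wal)
{
	if (!used_wal)
		heap_sync(rel);			/* force all data pages to disk */
}
#endif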

/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 *
 * Since CreateDestReceiver doesn't accept the parameters we'd need,
 * we just leave the private fields empty here.  OpenIntoRel will
 * fill them in.
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	self->estate = NULL;

	return (DestReceiver *) self;
}
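
/*
 * Illustrative driver (hypothetical; the real caller is the executor's
 * dest-receiver machinery): the callbacks installed above are invoked
 * in roughly this order for a query producing one result tuple.
 */
#ifdef NOT_USED
static void
example_drive_receiver(DestReceiver *dest, TupleTableSlot *slot,
					   TupleDesc tupdesc)
{
	(*dest->rStartup) (dest, CMD_SELECT, tupdesc);
	(*dest->receiveSlot) (slot, dest);
	(*dest->rShutdown) (dest);
	(*dest->rDestroy) (dest);
}
#endif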

/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	EState	   *estate = myState->estate;
	HeapTuple	tuple;

	/* Materialize the slot's contents as a plain heap tuple */
	tuple = ExecCopySlotTuple(slot);

	heap_insert(estate->es_into_relation_descriptor,
				tuple,
				estate->es_snapshot->curcid,	/* current command ID */
				estate->es_into_relation_use_wal,
				false);			/* never any point in using FSM */

	/* We know this is a newly created relation, so there are no indexes */

	heap_freetuple(tuple);

	IncrAppended();
}

/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}