]> granicus.if.org Git - postgresql/blob - src/backend/executor/execMain.c
Support statement-level ON TRUNCATE triggers. Simon Riggs
[postgresql] / src / backend / executor / execMain.c
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.305 2008/03/28 00:21:55 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "optimizer/clauses.h"
49 #include "parser/parse_clause.h"
50 #include "parser/parsetree.h"
51 #include "storage/smgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/tqual.h"
56
57
/*
 * evalPlanQual -- per-relation recheck state used by the EvalPlanQual
 * machinery.  Entries are chained two ways: 'next' links the stack of
 * currently-active recheck plans, while 'free' links retired entries so
 * their state can be reused without reallocation.
 */
typedef struct evalPlanQual
{
	Index		rti;			/* range table index this recheck applies to */
	EState	   *estate;			/* executor state for running the recheck */
	PlanState  *planstate;		/* initialized plan tree for the recheck */
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
66
67 /* decls for local routines only used within this module */
68 static void InitPlan(QueryDesc *queryDesc, int eflags);
69 static void ExecEndPlan(PlanState *planstate, EState *estate);
70 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
71                         CmdType operation,
72                         long numberTuples,
73                         ScanDirection direction,
74                         DestReceiver *dest);
75 static void ExecSelect(TupleTableSlot *slot,
76                    DestReceiver *dest, EState *estate);
77 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
78                    TupleTableSlot *planSlot,
79                    DestReceiver *dest, EState *estate);
80 static void ExecDelete(ItemPointer tupleid,
81                    TupleTableSlot *planSlot,
82                    DestReceiver *dest, EState *estate);
83 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
84                    TupleTableSlot *planSlot,
85                    DestReceiver *dest, EState *estate);
86 static void ExecProcessReturning(ProjectionInfo *projectReturning,
87                                          TupleTableSlot *tupleSlot,
88                                          TupleTableSlot *planSlot,
89                                          DestReceiver *dest);
90 static TupleTableSlot *EvalPlanQualNext(EState *estate);
91 static void EndEvalPlanQual(EState *estate);
92 static void ExecCheckRTPerms(List *rangeTable);
93 static void ExecCheckRTEPerms(RangeTblEntry *rte);
94 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
95 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
96                                   evalPlanQual *priorepq);
97 static void EvalPlanQualStop(evalPlanQual *epq);
98 static void OpenIntoRel(QueryDesc *queryDesc);
99 static void CloseIntoRel(QueryDesc *queryDesc);
100 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
101 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
102 static void intorel_shutdown(DestReceiver *self);
103 static void intorel_destroy(DestReceiver *self);
104
105 /* end of local decls */
106
107
108 /* ----------------------------------------------------------------
109  *              ExecutorStart
110  *
111  *              This routine must be called at the beginning of any execution of any
112  *              query plan
113  *
114  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
115  * clear why we bother to separate the two functions, but...).  The tupDesc
116  * field of the QueryDesc is filled in to describe the tuples that will be
117  * returned, and the internal fields (estate and planstate) are set up.
118  *
119  * eflags contains flag bits as described in executor.h.
120  *
121  * NB: the CurrentMemoryContext when this is called will become the parent
122  * of the per-query context used for this Executor invocation.
123  * ----------------------------------------------------------------
124  */
125 void
126 ExecutorStart(QueryDesc *queryDesc, int eflags)
127 {
128         EState     *estate;
129         MemoryContext oldcontext;
130
131         /* sanity checks: queryDesc must not be started already */
132         Assert(queryDesc != NULL);
133         Assert(queryDesc->estate == NULL);
134
135         /*
136          * If the transaction is read-only, we need to check if any writes are
137          * planned to non-temporary tables.  EXPLAIN is considered read-only.
138          */
139         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
140                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
141
142         /*
143          * Build EState, switch into per-query memory context for startup.
144          */
145         estate = CreateExecutorState();
146         queryDesc->estate = estate;
147
148         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
149
150         /*
151          * Fill in parameters, if any, from queryDesc
152          */
153         estate->es_param_list_info = queryDesc->params;
154
155         if (queryDesc->plannedstmt->nParamExec > 0)
156                 estate->es_param_exec_vals = (ParamExecData *)
157                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
158
159         /*
160          * If non-read-only query, set the command ID to mark output tuples with
161          */
162         switch (queryDesc->operation)
163         {
164                 case CMD_SELECT:
165                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
166                         if (queryDesc->plannedstmt->intoClause != NULL ||
167                                 queryDesc->plannedstmt->rowMarks != NIL)
168                                 estate->es_output_cid = GetCurrentCommandId(true);
169                         break;
170
171                 case CMD_INSERT:
172                 case CMD_DELETE:
173                 case CMD_UPDATE:
174                         estate->es_output_cid = GetCurrentCommandId(true);
175                         break;
176
177                 default:
178                         elog(ERROR, "unrecognized operation code: %d",
179                                  (int) queryDesc->operation);
180                         break;
181         }
182
183         /*
184          * Copy other important information into the EState
185          */
186         estate->es_snapshot = queryDesc->snapshot;
187         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
188         estate->es_instrument = queryDesc->doInstrument;
189
190         /*
191          * Initialize the plan state tree
192          */
193         InitPlan(queryDesc, eflags);
194
195         MemoryContextSwitchTo(oldcontext);
196 }
197
198 /* ----------------------------------------------------------------
199  *              ExecutorRun
200  *
201  *              This is the main routine of the executor module. It accepts
202  *              the query descriptor from the traffic cop and executes the
203  *              query plan.
204  *
205  *              ExecutorStart must have been called already.
206  *
207  *              If direction is NoMovementScanDirection then nothing is done
208  *              except to start up/shut down the destination.  Otherwise,
209  *              we retrieve up to 'count' tuples in the specified direction.
210  *
211  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
212  *              completion.
213  *
214  * ----------------------------------------------------------------
215  */
216 TupleTableSlot *
217 ExecutorRun(QueryDesc *queryDesc,
218                         ScanDirection direction, long count)
219 {
220         EState     *estate;
221         CmdType         operation;
222         DestReceiver *dest;
223         bool            sendTuples;
224         TupleTableSlot *result;
225         MemoryContext oldcontext;
226
227         /* sanity checks */
228         Assert(queryDesc != NULL);
229
230         estate = queryDesc->estate;
231
232         Assert(estate != NULL);
233
234         /*
235          * Switch into per-query memory context
236          */
237         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
238
239         /*
240          * extract information from the query descriptor and the query feature.
241          */
242         operation = queryDesc->operation;
243         dest = queryDesc->dest;
244
245         /*
246          * startup tuple receiver, if we will be emitting tuples
247          */
248         estate->es_processed = 0;
249         estate->es_lastoid = InvalidOid;
250
251         sendTuples = (operation == CMD_SELECT ||
252                                   queryDesc->plannedstmt->returningLists);
253
254         if (sendTuples)
255                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
256
257         /*
258          * run plan
259          */
260         if (ScanDirectionIsNoMovement(direction))
261                 result = NULL;
262         else
263                 result = ExecutePlan(estate,
264                                                          queryDesc->planstate,
265                                                          operation,
266                                                          count,
267                                                          direction,
268                                                          dest);
269
270         /*
271          * shutdown tuple receiver, if we started it
272          */
273         if (sendTuples)
274                 (*dest->rShutdown) (dest);
275
276         MemoryContextSwitchTo(oldcontext);
277
278         return result;
279 }
280
281 /* ----------------------------------------------------------------
282  *              ExecutorEnd
283  *
284  *              This routine must be called at the end of execution of any
285  *              query plan
286  * ----------------------------------------------------------------
287  */
288 void
289 ExecutorEnd(QueryDesc *queryDesc)
290 {
291         EState     *estate;
292         MemoryContext oldcontext;
293
294         /* sanity checks */
295         Assert(queryDesc != NULL);
296
297         estate = queryDesc->estate;
298
299         Assert(estate != NULL);
300
301         /*
302          * Switch into per-query memory context to run ExecEndPlan
303          */
304         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
305
306         ExecEndPlan(queryDesc->planstate, estate);
307
308         /*
309          * Close the SELECT INTO relation if any
310          */
311         if (estate->es_select_into)
312                 CloseIntoRel(queryDesc);
313
314         /*
315          * Must switch out of context before destroying it
316          */
317         MemoryContextSwitchTo(oldcontext);
318
319         /*
320          * Release EState and per-query memory context.  This should release
321          * everything the executor has allocated.
322          */
323         FreeExecutorState(estate);
324
325         /* Reset queryDesc fields that no longer point to anything */
326         queryDesc->tupDesc = NULL;
327         queryDesc->estate = NULL;
328         queryDesc->planstate = NULL;
329 }
330
331 /* ----------------------------------------------------------------
332  *              ExecutorRewind
333  *
334  *              This routine may be called on an open queryDesc to rewind it
335  *              to the start.
336  * ----------------------------------------------------------------
337  */
338 void
339 ExecutorRewind(QueryDesc *queryDesc)
340 {
341         EState     *estate;
342         MemoryContext oldcontext;
343
344         /* sanity checks */
345         Assert(queryDesc != NULL);
346
347         estate = queryDesc->estate;
348
349         Assert(estate != NULL);
350
351         /* It's probably not sensible to rescan updating queries */
352         Assert(queryDesc->operation == CMD_SELECT);
353
354         /*
355          * Switch into per-query memory context
356          */
357         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
358
359         /*
360          * rescan plan
361          */
362         ExecReScan(queryDesc->planstate, NULL);
363
364         MemoryContextSwitchTo(oldcontext);
365 }
366
367
368 /*
369  * ExecCheckRTPerms
370  *              Check access permissions for all relations listed in a range table.
371  */
372 static void
373 ExecCheckRTPerms(List *rangeTable)
374 {
375         ListCell   *l;
376
377         foreach(l, rangeTable)
378         {
379                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
380         }
381 }
382
383 /*
384  * ExecCheckRTEPerms
385  *              Check access permissions for a single RTE.
386  */
387 static void
388 ExecCheckRTEPerms(RangeTblEntry *rte)
389 {
390         AclMode         requiredPerms;
391         Oid                     relOid;
392         Oid                     userid;
393
394         /*
395          * Only plain-relation RTEs need to be checked here.  Function RTEs are
396          * checked by init_fcache when the function is prepared for execution.
397          * Join, subquery, and special RTEs need no checks.
398          */
399         if (rte->rtekind != RTE_RELATION)
400                 return;
401
402         /*
403          * No work if requiredPerms is empty.
404          */
405         requiredPerms = rte->requiredPerms;
406         if (requiredPerms == 0)
407                 return;
408
409         relOid = rte->relid;
410
411         /*
412          * userid to check as: current user unless we have a setuid indication.
413          *
414          * Note: GetUserId() is presently fast enough that there's no harm in
415          * calling it separately for each RTE.  If that stops being true, we could
416          * call it once in ExecCheckRTPerms and pass the userid down from there.
417          * But for now, no need for the extra clutter.
418          */
419         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
420
421         /*
422          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
423          */
424         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
425                 != requiredPerms)
426                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
427                                            get_rel_name(relOid));
428 }
429
430 /*
431  * Check that the query does not imply any writes to non-temp tables.
432  */
433 static void
434 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
435 {
436         ListCell   *l;
437
438         /*
439          * CREATE TABLE AS or SELECT INTO?
440          *
441          * XXX should we allow this if the destination is temp?
442          */
443         if (plannedstmt->intoClause != NULL)
444                 goto fail;
445
446         /* Fail if write permissions are requested on any non-temp table */
447         foreach(l, plannedstmt->rtable)
448         {
449                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
450
451                 if (rte->rtekind != RTE_RELATION)
452                         continue;
453
454                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
455                         continue;
456
457                 if (isTempNamespace(get_rel_namespace(rte->relid)))
458                         continue;
459
460                 goto fail;
461         }
462
463         return;
464
465 fail:
466         ereport(ERROR,
467                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
468                          errmsg("transaction is read-only")));
469 }
470
471
472 /* ----------------------------------------------------------------
473  *              InitPlan
474  *
475  *              Initializes the query plan: open files, allocate storage
476  *              and start up the rule manager
477  * ----------------------------------------------------------------
478  */
479 static void
480 InitPlan(QueryDesc *queryDesc, int eflags)
481 {
482         CmdType         operation = queryDesc->operation;
483         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
484         Plan       *plan = plannedstmt->planTree;
485         List       *rangeTable = plannedstmt->rtable;
486         EState     *estate = queryDesc->estate;
487         PlanState  *planstate;
488         TupleDesc       tupType;
489         ListCell   *l;
490         int                     i;
491
492         /*
493          * Do permissions checks
494          */
495         ExecCheckRTPerms(rangeTable);
496
497         /*
498          * initialize the node's execution state
499          */
500         estate->es_range_table = rangeTable;
501
502         /*
503          * initialize result relation stuff
504          */
505         if (plannedstmt->resultRelations)
506         {
507                 List       *resultRelations = plannedstmt->resultRelations;
508                 int                     numResultRelations = list_length(resultRelations);
509                 ResultRelInfo *resultRelInfos;
510                 ResultRelInfo *resultRelInfo;
511
512                 resultRelInfos = (ResultRelInfo *)
513                         palloc(numResultRelations * sizeof(ResultRelInfo));
514                 resultRelInfo = resultRelInfos;
515                 foreach(l, resultRelations)
516                 {
517                         Index           resultRelationIndex = lfirst_int(l);
518                         Oid                     resultRelationOid;
519                         Relation        resultRelation;
520
521                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
522                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
523                         InitResultRelInfo(resultRelInfo,
524                                                           resultRelation,
525                                                           resultRelationIndex,
526                                                           operation,
527                                                           estate->es_instrument);
528                         resultRelInfo++;
529                 }
530                 estate->es_result_relations = resultRelInfos;
531                 estate->es_num_result_relations = numResultRelations;
532                 /* Initialize to first or only result rel */
533                 estate->es_result_relation_info = resultRelInfos;
534         }
535         else
536         {
537                 /*
538                  * if no result relation, then set state appropriately
539                  */
540                 estate->es_result_relations = NULL;
541                 estate->es_num_result_relations = 0;
542                 estate->es_result_relation_info = NULL;
543         }
544
545         /*
546          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
547          * flag appropriately so that the plan tree will be initialized with the
548          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
549          */
550         estate->es_select_into = false;
551         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
552         {
553                 estate->es_select_into = true;
554                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
555         }
556
557         /*
558          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
559          * initialize the plan tree, else we'd be doing a lock upgrade. While we
560          * are at it, build the ExecRowMark list.
561          */
562         estate->es_rowMarks = NIL;
563         foreach(l, plannedstmt->rowMarks)
564         {
565                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
566                 Oid                     relid = getrelid(rc->rti, rangeTable);
567                 Relation        relation;
568                 ExecRowMark *erm;
569
570                 relation = heap_open(relid, RowShareLock);
571                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
572                 erm->relation = relation;
573                 erm->rti = rc->rti;
574                 erm->forUpdate = rc->forUpdate;
575                 erm->noWait = rc->noWait;
576                 /* We'll set up ctidAttno below */
577                 erm->ctidAttNo = InvalidAttrNumber;
578                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
579         }
580
581         /*
582          * Initialize the executor "tuple" table.  We need slots for all the plan
583          * nodes, plus possibly output slots for the junkfilter(s). At this point
584          * we aren't sure if we need junkfilters, so just add slots for them
585          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
586          * trigger output tuples.  Also, one for RETURNING-list evaluation.
587          */
588         {
589                 int                     nSlots;
590
591                 /* Slots for the main plan tree */
592                 nSlots = ExecCountSlotsNode(plan);
593                 /* Add slots for subplans and initplans */
594                 foreach(l, plannedstmt->subplans)
595                 {
596                         Plan       *subplan = (Plan *) lfirst(l);
597
598                         nSlots += ExecCountSlotsNode(subplan);
599                 }
600                 /* Add slots for junkfilter(s) */
601                 if (plannedstmt->resultRelations != NIL)
602                         nSlots += list_length(plannedstmt->resultRelations);
603                 else
604                         nSlots += 1;
605                 if (operation != CMD_SELECT)
606                         nSlots++;                       /* for es_trig_tuple_slot */
607                 if (plannedstmt->returningLists)
608                         nSlots++;                       /* for RETURNING projection */
609
610                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
611
612                 if (operation != CMD_SELECT)
613                         estate->es_trig_tuple_slot =
614                                 ExecAllocTableSlot(estate->es_tupleTable);
615         }
616
617         /* mark EvalPlanQual not active */
618         estate->es_plannedstmt = plannedstmt;
619         estate->es_evalPlanQual = NULL;
620         estate->es_evTupleNull = NULL;
621         estate->es_evTuple = NULL;
622         estate->es_useEvalPlan = false;
623
624         /*
625          * Initialize private state information for each SubPlan.  We must do this
626          * before running ExecInitNode on the main query tree, since
627          * ExecInitSubPlan expects to be able to find these entries.
628          */
629         Assert(estate->es_subplanstates == NIL);
630         i = 1;                                          /* subplan indices count from 1 */
631         foreach(l, plannedstmt->subplans)
632         {
633                 Plan       *subplan = (Plan *) lfirst(l);
634                 PlanState  *subplanstate;
635                 int                     sp_eflags;
636
637                 /*
638                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
639                  * it is a parameterless subplan (not initplan), we suggest that it be
640                  * prepared to handle REWIND efficiently; otherwise there is no need.
641                  */
642                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
643                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
644                         sp_eflags |= EXEC_FLAG_REWIND;
645
646                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
647
648                 estate->es_subplanstates = lappend(estate->es_subplanstates,
649                                                                                    subplanstate);
650
651                 i++;
652         }
653
654         /*
655          * Initialize the private state information for all the nodes in the query
656          * tree.  This opens files, allocates storage and leaves us ready to start
657          * processing tuples.
658          */
659         planstate = ExecInitNode(plan, estate, eflags);
660
661         /*
662          * Get the tuple descriptor describing the type of tuples to return. (this
663          * is especially important if we are creating a relation with "SELECT
664          * INTO")
665          */
666         tupType = ExecGetResultType(planstate);
667
668         /*
669          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
670          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
671          * INTO also need a filter if the plan may return raw disk tuples (else
672          * heap_insert will be scribbling on the source relation!). UPDATE and
673          * DELETE always need a filter, since there's always a junk 'ctid'
674          * attribute present --- no need to look first.
675          */
676         {
677                 bool            junk_filter_needed = false;
678                 ListCell   *tlist;
679
680                 switch (operation)
681                 {
682                         case CMD_SELECT:
683                         case CMD_INSERT:
684                                 foreach(tlist, plan->targetlist)
685                                 {
686                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
687
688                                         if (tle->resjunk)
689                                         {
690                                                 junk_filter_needed = true;
691                                                 break;
692                                         }
693                                 }
694                                 if (!junk_filter_needed &&
695                                         (operation == CMD_INSERT || estate->es_select_into) &&
696                                         ExecMayReturnRawTuples(planstate))
697                                         junk_filter_needed = true;
698                                 break;
699                         case CMD_UPDATE:
700                         case CMD_DELETE:
701                                 junk_filter_needed = true;
702                                 break;
703                         default:
704                                 break;
705                 }
706
707                 if (junk_filter_needed)
708                 {
709                         /*
710                          * If there are multiple result relations, each one needs its own
711                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
712                          * we can't be fooled by some needing a filter and some not.
713                          */
714                         if (list_length(plannedstmt->resultRelations) > 1)
715                         {
716                                 PlanState **appendplans;
717                                 int                     as_nplans;
718                                 ResultRelInfo *resultRelInfo;
719
720                                 /* Top plan had better be an Append here. */
721                                 Assert(IsA(plan, Append));
722                                 Assert(((Append *) plan)->isTarget);
723                                 Assert(IsA(planstate, AppendState));
724                                 appendplans = ((AppendState *) planstate)->appendplans;
725                                 as_nplans = ((AppendState *) planstate)->as_nplans;
726                                 Assert(as_nplans == estate->es_num_result_relations);
727                                 resultRelInfo = estate->es_result_relations;
728                                 for (i = 0; i < as_nplans; i++)
729                                 {
730                                         PlanState  *subplan = appendplans[i];
731                                         JunkFilter *j;
732
733                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
734                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
735                                                                   ExecAllocTableSlot(estate->es_tupleTable));
736
737                                         /*
738                                          * Since it must be UPDATE/DELETE, there had better be a
739                                          * "ctid" junk attribute in the tlist ... but ctid could
740                                          * be at a different resno for each result relation. We
741                                          * look up the ctid resnos now and save them in the
742                                          * junkfilters.
743                                          */
744                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
745                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
746                                                 elog(ERROR, "could not find junk ctid column");
747                                         resultRelInfo->ri_junkFilter = j;
748                                         resultRelInfo++;
749                                 }
750
751                                 /*
752                                  * Set active junkfilter too; at this point ExecInitAppend has
753                                  * already selected an active result relation...
754                                  */
755                                 estate->es_junkFilter =
756                                         estate->es_result_relation_info->ri_junkFilter;
757                         }
758                         else
759                         {
760                                 /* Normal case with just one JunkFilter */
761                                 JunkFilter *j;
762
763                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
764                                                                            tupType->tdhasoid,
765                                                                   ExecAllocTableSlot(estate->es_tupleTable));
766                                 estate->es_junkFilter = j;
767                                 if (estate->es_result_relation_info)
768                                         estate->es_result_relation_info->ri_junkFilter = j;
769
770                                 if (operation == CMD_SELECT)
771                                 {
772                                         /* For SELECT, want to return the cleaned tuple type */
773                                         tupType = j->jf_cleanTupType;
774                                         /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
775                                         foreach(l, estate->es_rowMarks)
776                                         {
777                                                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
778                                                 char            resname[32];
779
780                                                 snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
781                                                 erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
782                                                 if (!AttributeNumberIsValid(erm->ctidAttNo))
783                                                         elog(ERROR, "could not find junk \"%s\" column",
784                                                                  resname);
785                                         }
786                                 }
787                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
788                                 {
789                                         /* For UPDATE/DELETE, find the ctid junk attr now */
790                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
791                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
792                                                 elog(ERROR, "could not find junk ctid column");
793                                 }
794                         }
795                 }
796                 else
797                         estate->es_junkFilter = NULL;
798         }
799
800         /*
801          * Initialize RETURNING projections if needed.
802          */
803         if (plannedstmt->returningLists)
804         {
805                 TupleTableSlot *slot;
806                 ExprContext *econtext;
807                 ResultRelInfo *resultRelInfo;
808
809                 /*
810                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
811                  * We assume all the sublists will generate the same output tupdesc.
812                  */
813                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
814                                                                  false);
815
816                 /* Set up a slot for the output of the RETURNING projection(s) */
817                 slot = ExecAllocTableSlot(estate->es_tupleTable);
818                 ExecSetSlotDescriptor(slot, tupType);
819                 /* Need an econtext too */
820                 econtext = CreateExprContext(estate);
821
822                 /*
823                  * Build a projection for each result rel.      Note that any SubPlans in
824                  * the RETURNING lists get attached to the topmost plan node.
825                  */
826                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
827                 resultRelInfo = estate->es_result_relations;
828                 foreach(l, plannedstmt->returningLists)
829                 {
830                         List       *rlist = (List *) lfirst(l);
831                         List       *rliststate;
832
833                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
834                         resultRelInfo->ri_projectReturning =
835                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
836                                                                          resultRelInfo->ri_RelationDesc->rd_att);
837                         resultRelInfo++;
838                 }
839         }
840
841         queryDesc->tupDesc = tupType;
842         queryDesc->planstate = planstate;
843
844         /*
845          * If doing SELECT INTO, initialize the "into" relation.  We must wait
846          * till now so we have the "clean" result tuple type to create the new
847          * table from.
848          *
849          * If EXPLAIN, skip creating the "into" relation.
850          */
851         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
852                 OpenIntoRel(queryDesc);
853 }
854
855 /*
856  * Initialize ResultRelInfo data for one result relation
857  */
858 void
859 InitResultRelInfo(ResultRelInfo *resultRelInfo,
860                                   Relation resultRelationDesc,
861                                   Index resultRelationIndex,
862                                   CmdType operation,
863                                   bool doInstrument)
864 {
865         /*
866          * Check valid relkind ... parser and/or planner should have noticed this
867          * already, but let's make sure.
868          */
869         switch (resultRelationDesc->rd_rel->relkind)
870         {
871                 case RELKIND_RELATION:
872                         /* OK */
873                         break;
874                 case RELKIND_SEQUENCE:
875                         ereport(ERROR,
876                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
877                                          errmsg("cannot change sequence \"%s\"",
878                                                         RelationGetRelationName(resultRelationDesc))));
879                         break;
880                 case RELKIND_TOASTVALUE:
881                         ereport(ERROR,
882                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
883                                          errmsg("cannot change TOAST relation \"%s\"",
884                                                         RelationGetRelationName(resultRelationDesc))));
885                         break;
886                 case RELKIND_VIEW:
887                         ereport(ERROR,
888                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
889                                          errmsg("cannot change view \"%s\"",
890                                                         RelationGetRelationName(resultRelationDesc))));
891                         break;
892                 default:
893                         ereport(ERROR,
894                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
895                                          errmsg("cannot change relation \"%s\"",
896                                                         RelationGetRelationName(resultRelationDesc))));
897                         break;
898         }
899
900         /* OK, fill in the node */
901         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
902         resultRelInfo->type = T_ResultRelInfo;
903         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
904         resultRelInfo->ri_RelationDesc = resultRelationDesc;
905         resultRelInfo->ri_NumIndices = 0;
906         resultRelInfo->ri_IndexRelationDescs = NULL;
907         resultRelInfo->ri_IndexRelationInfo = NULL;
908         /* make a copy so as not to depend on relcache info not changing... */
909         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
910         if (resultRelInfo->ri_TrigDesc)
911         {
912                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
913
914                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
915                         palloc0(n * sizeof(FmgrInfo));
916                 if (doInstrument)
917                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
918                 else
919                         resultRelInfo->ri_TrigInstrument = NULL;
920         }
921         else
922         {
923                 resultRelInfo->ri_TrigFunctions = NULL;
924                 resultRelInfo->ri_TrigInstrument = NULL;
925         }
926         resultRelInfo->ri_ConstraintExprs = NULL;
927         resultRelInfo->ri_junkFilter = NULL;
928         resultRelInfo->ri_projectReturning = NULL;
929
930         /*
931          * If there are indices on the result relation, open them and save
932          * descriptors in the result relation info, so that we can add new index
933          * entries for the tuples we add/update.  We need not do this for a
934          * DELETE, however, since deletion doesn't affect indexes.
935          */
936         if (resultRelationDesc->rd_rel->relhasindex &&
937                 operation != CMD_DELETE)
938                 ExecOpenIndices(resultRelInfo);
939 }
940
941 /*
942  *              ExecGetTriggerResultRel
943  *
944  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
945  * triggers are fired on one of the result relations of the query, and so
946  * we can just return a member of the es_result_relations array.  (Note: in
947  * self-join situations there might be multiple members with the same OID;
948  * if so it doesn't matter which one we pick.)  However, it is sometimes
949  * necessary to fire triggers on other relations; this happens mainly when an
950  * RI update trigger queues additional triggers on other relations, which will
951  * be processed in the context of the outer query.      For efficiency's sake,
952  * we want to have a ResultRelInfo for those triggers too; that can avoid
953  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
954  * ANALYZE to report the runtimes of such triggers.)  So we make additional
955  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
956  */
957 ResultRelInfo *
958 ExecGetTriggerResultRel(EState *estate, Oid relid)
959 {
960         ResultRelInfo *rInfo;
961         int                     nr;
962         ListCell   *l;
963         Relation        rel;
964         MemoryContext oldcontext;
965
966         /* First, search through the query result relations */
967         rInfo = estate->es_result_relations;
968         nr = estate->es_num_result_relations;
969         while (nr > 0)
970         {
971                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
972                         return rInfo;
973                 rInfo++;
974                 nr--;
975         }
976         /* Nope, but maybe we already made an extra ResultRelInfo for it */
977         foreach(l, estate->es_trig_target_relations)
978         {
979                 rInfo = (ResultRelInfo *) lfirst(l);
980                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
981                         return rInfo;
982         }
983         /* Nope, so we need a new one */
984
985         /*
986          * Open the target relation's relcache entry.  We assume that an
987          * appropriate lock is still held by the backend from whenever the trigger
988          * event got queued, so we need take no new lock here.
989          */
990         rel = heap_open(relid, NoLock);
991
992         /*
993          * Make the new entry in the right context.  Currently, we don't need any
994          * index information in ResultRelInfos used only for triggers, so tell
995          * InitResultRelInfo it's a DELETE.
996          */
997         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
998         rInfo = makeNode(ResultRelInfo);
999         InitResultRelInfo(rInfo,
1000                                           rel,
1001                                           0,            /* dummy rangetable index */
1002                                           CMD_DELETE,
1003                                           estate->es_instrument);
1004         estate->es_trig_target_relations =
1005                 lappend(estate->es_trig_target_relations, rInfo);
1006         MemoryContextSwitchTo(oldcontext);
1007
1008         return rInfo;
1009 }
1010
1011 /*
1012  *              ExecContextForcesOids
1013  *
1014  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1015  * we need to ensure that result tuples have space for an OID iff they are
1016  * going to be stored into a relation that has OIDs.  In other contexts
1017  * we are free to choose whether to leave space for OIDs in result tuples
1018  * (we generally don't want to, but we do if a physical-tlist optimization
1019  * is possible).  This routine checks the plan context and returns TRUE if the
1020  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1021  * *hasoids is set to the required value.
1022  *
1023  * One reason this is ugly is that all plan nodes in the plan tree will emit
1024  * tuples with space for an OID, though we really only need the topmost node
1025  * to do so.  However, node types like Sort don't project new tuples but just
1026  * return their inputs, and in those cases the requirement propagates down
1027  * to the input node.  Eventually we might make this code smart enough to
1028  * recognize how far down the requirement really goes, but for now we just
1029  * make all plan nodes do the same thing if the top level forces the choice.
1030  *
1031  * We assume that estate->es_result_relation_info is already set up to
1032  * describe the target relation.  Note that in an UPDATE that spans an
1033  * inheritance tree, some of the target relations may have OIDs and some not.
1034  * We have to make the decisions on a per-relation basis as we initialize
1035  * each of the child plans of the topmost Append plan.
1036  *
1037  * SELECT INTO is even uglier, because we don't have the INTO relation's
1038  * descriptor available when this code runs; we have to look aside at a
1039  * flag set by InitPlan().
1040  */
1041 bool
1042 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1043 {
1044         if (planstate->state->es_select_into)
1045         {
1046                 *hasoids = planstate->state->es_into_oids;
1047                 return true;
1048         }
1049         else
1050         {
1051                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1052
1053                 if (ri != NULL)
1054                 {
1055                         Relation        rel = ri->ri_RelationDesc;
1056
1057                         if (rel != NULL)
1058                         {
1059                                 *hasoids = rel->rd_rel->relhasoids;
1060                                 return true;
1061                         }
1062                 }
1063         }
1064
1065         return false;
1066 }
1067
1068 /* ----------------------------------------------------------------
1069  *              ExecEndPlan
1070  *
1071  *              Cleans up the query plan -- closes files and frees up storage
1072  *
1073  * NOTE: we are no longer very worried about freeing storage per se
1074  * in this code; FreeExecutorState should be guaranteed to release all
1075  * memory that needs to be released.  What we are worried about doing
1076  * is closing relations and dropping buffer pins.  Thus, for example,
1077  * tuple tables must be cleared or dropped to ensure pins are released.
1078  * ----------------------------------------------------------------
1079  */
1080 static void
1081 ExecEndPlan(PlanState *planstate, EState *estate)
1082 {
1083         ResultRelInfo *resultRelInfo;
1084         int                     i;
1085         ListCell   *l;
1086
1087         /*
1088          * shut down any PlanQual processing we were doing
1089          */
1090         if (estate->es_evalPlanQual != NULL)
1091                 EndEvalPlanQual(estate);
1092
1093         /*
1094          * shut down the node-type-specific query processing
1095          */
1096         ExecEndNode(planstate);
1097
1098         /*
1099          * for subplans too
1100          */
1101         foreach(l, estate->es_subplanstates)
1102         {
1103                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1104
1105                 ExecEndNode(subplanstate);
1106         }
1107
1108         /*
1109          * destroy the executor "tuple" table.
1110          */
1111         ExecDropTupleTable(estate->es_tupleTable, true);
1112         estate->es_tupleTable = NULL;
1113
1114         /*
1115          * close the result relation(s) if any, but hold locks until xact commit.
1116          */
1117         resultRelInfo = estate->es_result_relations;
1118         for (i = estate->es_num_result_relations; i > 0; i--)
1119         {
1120                 /* Close indices and then the relation itself */
1121                 ExecCloseIndices(resultRelInfo);
1122                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1123                 resultRelInfo++;
1124         }
1125
1126         /*
1127          * likewise close any trigger target relations
1128          */
1129         foreach(l, estate->es_trig_target_relations)
1130         {
1131                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1132                 /* Close indices and then the relation itself */
1133                 ExecCloseIndices(resultRelInfo);
1134                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1135         }
1136
1137         /*
1138          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1139          */
1140         foreach(l, estate->es_rowMarks)
1141         {
1142                 ExecRowMark *erm = lfirst(l);
1143
1144                 heap_close(erm->relation, NoLock);
1145         }
1146 }
1147
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		processes the query plan to retrieve 'numberTuples' tuples in the
 *		direction specified.
 *
 *		Retrieves all tuples if numberTuples is 0
 *
 *		result is either a slot containing the last tuple in the case
 *		of a SELECT or NULL otherwise.
 *
 *		This is the executor's main loop: it fires statement-level BEFORE
 *		triggers, then repeatedly pulls a tuple from the plan tree, strips
 *		junk attributes (extracting ctid and applying FOR UPDATE/SHARE row
 *		locks along the way), dispatches the tuple per 'operation', and
 *		finally fires statement-level AFTER triggers.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;	/* raw tuple from the plan tree */
	TupleTableSlot *slot;		/* tuple after junk filtering */
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;
	TupleTableSlot *result;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
	result = NULL;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing (SELECT has no statement-level triggers) */
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */

	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple.  'lnext' is re-entered when
		 * the current tuple must be discarded (e.g. lost a row-lock race).
		 * When es_useEvalPlan is set, tuples from a pending EvalPlanQual
		 * recheck take priority over the regular plan output.
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			planSlot = EvalPlanQualNext(estate);
			if (TupIsNull(planSlot))
				planSlot = ExecProcNode(planstate);
		}
		else
			planSlot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(planSlot))
		{
			result = NULL;
			break;
		}
		slot = planSlot;

		/*
		 * if we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * Also, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			Datum		datum;
			bool		isNull;

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				/* copy the ctid into local storage: the datum points into
				 * the slot, which may be cleared before the ctid is used */
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			else if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

				/* 'lmark' is re-entered after EvalPlanQual supplies a newer
				 * tuple version, so all row marks are re-checked against it */
		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					/* fetch this relation's "ctidN" junk attribute */
					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_output_cid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							/* got the lock successfully */
							break;

						case HeapTupleUpdated:
							/* concurrent update/delete beat us to it */
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
								 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								  errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax);
								if (!TupIsNull(newSlot))
								{
									/* recheck all row marks on the new
									 * version before accepting it */
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
							return NULL;
					}
				}
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * return it to the user, add it to a relation someplace, delete it
		 * from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				result = slot;
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing (SELECT has no statement-level triggers) */
			break;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * SELECT or NULL otherwise.
	 */
	return result;
}
1433
1434 /* ----------------------------------------------------------------
1435  *              ExecSelect
1436  *
1437  *              SELECTs are easy.. we just pass the tuple to the appropriate
1438  *              output function.
1439  * ----------------------------------------------------------------
1440  */
1441 static void
1442 ExecSelect(TupleTableSlot *slot,
1443                    DestReceiver *dest,
1444                    EState *estate)
1445 {
1446         (*dest->receiveSlot) (slot, dest);
1447         IncrRetrieved();
1448         (estate->es_processed)++;
1449 }
1450
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 *
 *		Note: tupleid is unused here; the parameter exists for
 *		signature symmetry with ExecUpdate/ExecDelete.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple.  Done after the BEFORE triggers so
	 * that trigger-substituted tuples are validated too.
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_output_cid,
						true, true);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;	/* remember OID returned by heap_insert */
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1547
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 *
	 * We may loop back here (via goto) after EvalPlanQual substitutes a
	 * newer version of the target row under READ COMMITTED rules.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: we hold the deleted tuple; proceed below */
			break;

		case HeapTupleUpdated:
			/* concurrent update/delete committed while we waited */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				/*
				 * Row was updated (not deleted): re-evaluate the query quals
				 * against the newest version, and if it still qualifies,
				 * retry the delete on that version.
				 */
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.      We can use the trigger tuple slot.
		 * SnapshotAny is used because we just deleted the tuple ourselves,
		 * so it is no longer visible to an MVCC snapshot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer pin it points into */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1677
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..      This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: update succeeded; proceed below */
			break;

		case HeapTupleUpdated:
			/* concurrent update/delete committed while we waited */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				/*
				 * Row was updated (not deleted): re-evaluate the query quals
				 * against the newest version.  If it still qualifies, build
				 * a fresh candidate tuple from the EPQ output (junk columns
				 * removed) and retry the update on that version.
				 */
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1845
1846 /*
1847  * ExecRelCheck --- check that tuple meets constraints for result relation
1848  */
1849 static const char *
1850 ExecRelCheck(ResultRelInfo *resultRelInfo,
1851                          TupleTableSlot *slot, EState *estate)
1852 {
1853         Relation        rel = resultRelInfo->ri_RelationDesc;
1854         int                     ncheck = rel->rd_att->constr->num_check;
1855         ConstrCheck *check = rel->rd_att->constr->check;
1856         ExprContext *econtext;
1857         MemoryContext oldContext;
1858         List       *qual;
1859         int                     i;
1860
1861         /*
1862          * If first time through for this result relation, build expression
1863          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1864          * memory context so they'll survive throughout the query.
1865          */
1866         if (resultRelInfo->ri_ConstraintExprs == NULL)
1867         {
1868                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1869                 resultRelInfo->ri_ConstraintExprs =
1870                         (List **) palloc(ncheck * sizeof(List *));
1871                 for (i = 0; i < ncheck; i++)
1872                 {
1873                         /* ExecQual wants implicit-AND form */
1874                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1875                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1876                                 ExecPrepareExpr((Expr *) qual, estate);
1877                 }
1878                 MemoryContextSwitchTo(oldContext);
1879         }
1880
1881         /*
1882          * We will use the EState's per-tuple context for evaluating constraint
1883          * expressions (creating it if it's not already there).
1884          */
1885         econtext = GetPerTupleExprContext(estate);
1886
1887         /* Arrange for econtext's scan tuple to be the tuple under test */
1888         econtext->ecxt_scantuple = slot;
1889
1890         /* And evaluate the constraints */
1891         for (i = 0; i < ncheck; i++)
1892         {
1893                 qual = resultRelInfo->ri_ConstraintExprs[i];
1894
1895                 /*
1896                  * NOTE: SQL92 specifies that a NULL result from a constraint
1897                  * expression is not to be treated as a failure.  Therefore, tell
1898                  * ExecQual to return TRUE for NULL.
1899                  */
1900                 if (!ExecQual(qual, econtext, true))
1901                         return check[i].ccname;
1902         }
1903
1904         /* NULL result means no error */
1905         return NULL;
1906 }
1907
1908 void
1909 ExecConstraints(ResultRelInfo *resultRelInfo,
1910                                 TupleTableSlot *slot, EState *estate)
1911 {
1912         Relation        rel = resultRelInfo->ri_RelationDesc;
1913         TupleConstr *constr = rel->rd_att->constr;
1914
1915         Assert(constr);
1916
1917         if (constr->has_not_null)
1918         {
1919                 int                     natts = rel->rd_att->natts;
1920                 int                     attrChk;
1921
1922                 for (attrChk = 1; attrChk <= natts; attrChk++)
1923                 {
1924                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1925                                 slot_attisnull(slot, attrChk))
1926                                 ereport(ERROR,
1927                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1928                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1929                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1930                 }
1931         }
1932
1933         if (constr->num_check > 0)
1934         {
1935                 const char *failed;
1936
1937                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1938                         ereport(ERROR,
1939                                         (errcode(ERRCODE_CHECK_VIOLATION),
1940                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1941                                                         RelationGetRelationName(rel), failed)));
1942         }
1943 }
1944
1945 /*
1946  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1947  *
1948  * projectReturning: RETURNING projection info for current result rel
1949  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1950  * planSlot: slot holding tuple returned by top plan node
1951  * dest: where to send the output
1952  */
1953 static void
1954 ExecProcessReturning(ProjectionInfo *projectReturning,
1955                                          TupleTableSlot *tupleSlot,
1956                                          TupleTableSlot *planSlot,
1957                                          DestReceiver *dest)
1958 {
1959         ExprContext *econtext = projectReturning->pi_exprContext;
1960         TupleTableSlot *retSlot;
1961
1962         /*
1963          * Reset per-tuple memory context to free any expression evaluation
1964          * storage allocated in the previous cycle.
1965          */
1966         ResetExprContext(econtext);
1967
1968         /* Make tuple and any needed join variables available to ExecProject */
1969         econtext->ecxt_scantuple = tupleSlot;
1970         econtext->ecxt_outertuple = planSlot;
1971
1972         /* Compute the RETURNING expressions */
1973         retSlot = ExecProject(projectReturning, NULL);
1974
1975         /* Send to dest */
1976         (*dest->receiveSlot) (retSlot, dest);
1977
1978         ExecClearTuple(retSlot);
1979 }
1980
1981 /*
1982  * Check a modified tuple to see if we want to process its updated version
1983  * under READ COMMITTED rules.
1984  *
1985  * See backend/executor/README for some info about how this works.
1986  *
1987  *      estate - executor state data
1988  *      rti - rangetable index of table containing tuple
1989  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1990  *      priorXmax - t_xmax from the outdated tuple
1991  *
1992  * *tid is also an output parameter: it's modified to hold the TID of the
1993  * latest version of the tuple (note this may be changed even on failure)
1994  *
1995  * Returns a slot containing the new candidate update/delete tuple, or
1996  * NULL if we determine we shouldn't process the row.
1997  */
1998 TupleTableSlot *
1999 EvalPlanQual(EState *estate, Index rti,
2000                          ItemPointer tid, TransactionId priorXmax)
2001 {
2002         evalPlanQual *epq;
2003         EState     *epqstate;
2004         Relation        relation;
2005         HeapTupleData tuple;
2006         HeapTuple       copyTuple = NULL;
2007         SnapshotData SnapshotDirty;
2008         bool            endNode;
2009
2010         Assert(rti != 0);
2011
2012         /*
2013          * find relation containing target tuple
2014          */
2015         if (estate->es_result_relation_info != NULL &&
2016                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2017                 relation = estate->es_result_relation_info->ri_RelationDesc;
2018         else
2019         {
2020                 ListCell   *l;
2021
2022                 relation = NULL;
2023                 foreach(l, estate->es_rowMarks)
2024                 {
2025                         if (((ExecRowMark *) lfirst(l))->rti == rti)
2026                         {
2027                                 relation = ((ExecRowMark *) lfirst(l))->relation;
2028                                 break;
2029                         }
2030                 }
2031                 if (relation == NULL)
2032                         elog(ERROR, "could not find RowMark for RT index %u", rti);
2033         }
2034
2035         /*
2036          * fetch tid tuple
2037          *
2038          * Loop here to deal with updated or busy tuples
2039          */
2040         InitDirtySnapshot(SnapshotDirty);
2041         tuple.t_self = *tid;
2042         for (;;)
2043         {
2044                 Buffer          buffer;
2045
2046                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2047                 {
2048                         /*
2049                          * If xmin isn't what we're expecting, the slot must have been
2050                          * recycled and reused for an unrelated tuple.  This implies that
2051                          * the latest version of the row was deleted, so we need do
2052                          * nothing.  (Should be safe to examine xmin without getting
2053                          * buffer's content lock, since xmin never changes in an existing
2054                          * tuple.)
2055                          */
2056                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2057                                                                          priorXmax))
2058                         {
2059                                 ReleaseBuffer(buffer);
2060                                 return NULL;
2061                         }
2062
2063                         /* otherwise xmin should not be dirty... */
2064                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2065                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2066
2067                         /*
2068                          * If tuple is being updated by other transaction then we have to
2069                          * wait for its commit/abort.
2070                          */
2071                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2072                         {
2073                                 ReleaseBuffer(buffer);
2074                                 XactLockTableWait(SnapshotDirty.xmax);
2075                                 continue;               /* loop back to repeat heap_fetch */
2076                         }
2077
2078                         /*
2079                          * If tuple was inserted by our own transaction, we have to check
2080                          * cmin against es_output_cid: cmin >= current CID means our
2081                          * command cannot see the tuple, so we should ignore it.  Without
2082                          * this we are open to the "Halloween problem" of indefinitely
2083                          * re-updating the same tuple. (We need not check cmax because
2084                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2085                          * transaction dead, regardless of cmax.)  We just checked that
2086                          * priorXmax == xmin, so we can test that variable instead of
2087                          * doing HeapTupleHeaderGetXmin again.
2088                          */
2089                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2090                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2091                         {
2092                                 ReleaseBuffer(buffer);
2093                                 return NULL;
2094                         }
2095
2096                         /*
2097                          * We got tuple - now copy it for use by recheck query.
2098                          */
2099                         copyTuple = heap_copytuple(&tuple);
2100                         ReleaseBuffer(buffer);
2101                         break;
2102                 }
2103
2104                 /*
2105                  * If the referenced slot was actually empty, the latest version of
2106                  * the row must have been deleted, so we need do nothing.
2107                  */
2108                 if (tuple.t_data == NULL)
2109                 {
2110                         ReleaseBuffer(buffer);
2111                         return NULL;
2112                 }
2113
2114                 /*
2115                  * As above, if xmin isn't what we're expecting, do nothing.
2116                  */
2117                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2118                                                                  priorXmax))
2119                 {
2120                         ReleaseBuffer(buffer);
2121                         return NULL;
2122                 }
2123
2124                 /*
2125                  * If we get here, the tuple was found but failed SnapshotDirty.
2126                  * Assuming the xmin is either a committed xact or our own xact (as it
2127                  * certainly should be if we're trying to modify the tuple), this must
2128                  * mean that the row was updated or deleted by either a committed xact
2129                  * or our own xact.  If it was deleted, we can ignore it; if it was
2130                  * updated then chain up to the next version and repeat the whole
2131                  * test.
2132                  *
2133                  * As above, it should be safe to examine xmax and t_ctid without the
2134                  * buffer content lock, because they can't be changing.
2135                  */
2136                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2137                 {
2138                         /* deleted, so forget about it */
2139                         ReleaseBuffer(buffer);
2140                         return NULL;
2141                 }
2142
2143                 /* updated, so look at the updated row */
2144                 tuple.t_self = tuple.t_data->t_ctid;
2145                 /* updated row should have xmin matching this xmax */
2146                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2147                 ReleaseBuffer(buffer);
2148                 /* loop back to fetch next in chain */
2149         }
2150
2151         /*
2152          * For UPDATE/DELETE we have to return tid of actual row we're executing
2153          * PQ for.
2154          */
2155         *tid = tuple.t_self;
2156
2157         /*
2158          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2159          */
2160         epq = estate->es_evalPlanQual;
2161         endNode = true;
2162
2163         if (epq != NULL && epq->rti == 0)
2164         {
2165                 /* Top PQ stack entry is idle, so re-use it */
2166                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2167                 epq->rti = rti;
2168                 endNode = false;
2169         }
2170
2171         /*
2172          * If this is request for another RTE - Ra, - then we have to check wasn't
2173          * PlanQual requested for Ra already and if so then Ra' row was updated
2174          * again and we have to re-start old execution for Ra and forget all what
2175          * we done after Ra was suspended. Cool? -:))
2176          */
2177         if (epq != NULL && epq->rti != rti &&
2178                 epq->estate->es_evTuple[rti - 1] != NULL)
2179         {
2180                 do
2181                 {
2182                         evalPlanQual *oldepq;
2183
2184                         /* stop execution */
2185                         EvalPlanQualStop(epq);
2186                         /* pop previous PlanQual from the stack */
2187                         oldepq = epq->next;
2188                         Assert(oldepq && oldepq->rti != 0);
2189                         /* push current PQ to freePQ stack */
2190                         oldepq->free = epq;
2191                         epq = oldepq;
2192                         estate->es_evalPlanQual = epq;
2193                 } while (epq->rti != rti);
2194         }
2195
2196         /*
2197          * If we are requested for another RTE then we have to suspend execution
2198          * of current PlanQual and start execution for new one.
2199          */
2200         if (epq == NULL || epq->rti != rti)
2201         {
2202                 /* try to reuse plan used previously */
2203                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2204
2205                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2206                 {
2207                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2208                         newepq->free = NULL;
2209                         newepq->estate = NULL;
2210                         newepq->planstate = NULL;
2211                 }
2212                 else
2213                 {
2214                         /* recycle previously used PlanQual */
2215                         Assert(newepq->estate == NULL);
2216                         epq->free = NULL;
2217                 }
2218                 /* push current PQ to the stack */
2219                 newepq->next = epq;
2220                 epq = newepq;
2221                 estate->es_evalPlanQual = epq;
2222                 epq->rti = rti;
2223                 endNode = false;
2224         }
2225
2226         Assert(epq->rti == rti);
2227
2228         /*
2229          * Ok - we're requested for the same RTE.  Unfortunately we still have to
2230          * end and restart execution of the plan, because ExecReScan wouldn't
2231          * ensure that upper plan nodes would reset themselves.  We could make
2232          * that work if insertion of the target tuple were integrated with the
2233          * Param mechanism somehow, so that the upper plan nodes know that their
2234          * children's outputs have changed.
2235          *
2236          * Note that the stack of free evalPlanQual nodes is quite useless at the
2237          * moment, since it only saves us from pallocing/releasing the
2238          * evalPlanQual nodes themselves.  But it will be useful once we implement
2239          * ReScan instead of end/restart for re-using PlanQual nodes.
2240          */
2241         if (endNode)
2242         {
2243                 /* stop execution */
2244                 EvalPlanQualStop(epq);
2245         }
2246
2247         /*
2248          * Initialize new recheck query.
2249          *
2250          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2251          * instead copy down changeable state from the top plan (including
2252          * es_result_relation_info, es_junkFilter) and reset locally changeable
2253          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2254          */
2255         EvalPlanQualStart(epq, estate, epq->next);
2256
2257         /*
2258          * free old RTE' tuple, if any, and store target tuple where relation's
2259          * scan node will see it
2260          */
2261         epqstate = epq->estate;
2262         if (epqstate->es_evTuple[rti - 1] != NULL)
2263                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2264         epqstate->es_evTuple[rti - 1] = copyTuple;
2265
2266         return EvalPlanQualNext(estate);
2267 }
2268
2269 static TupleTableSlot *
2270 EvalPlanQualNext(EState *estate)
2271 {
2272         evalPlanQual *epq = estate->es_evalPlanQual;
2273         MemoryContext oldcontext;
2274         TupleTableSlot *slot;
2275
2276         Assert(epq->rti != 0);
2277
2278 lpqnext:;
2279         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2280         slot = ExecProcNode(epq->planstate);
2281         MemoryContextSwitchTo(oldcontext);
2282
2283         /*
2284          * No more tuples for this PQ. Continue previous one.
2285          */
2286         if (TupIsNull(slot))
2287         {
2288                 evalPlanQual *oldepq;
2289
2290                 /* stop execution */
2291                 EvalPlanQualStop(epq);
2292                 /* pop old PQ from the stack */
2293                 oldepq = epq->next;
2294                 if (oldepq == NULL)
2295                 {
2296                         /* this is the first (oldest) PQ - mark as free */
2297                         epq->rti = 0;
2298                         estate->es_useEvalPlan = false;
2299                         /* and continue Query execution */
2300                         return NULL;
2301                 }
2302                 Assert(oldepq->rti != 0);
2303                 /* push current PQ to freePQ stack */
2304                 oldepq->free = epq;
2305                 epq = oldepq;
2306                 estate->es_evalPlanQual = epq;
2307                 goto lpqnext;
2308         }
2309
2310         return slot;
2311 }
2312
2313 static void
2314 EndEvalPlanQual(EState *estate)
2315 {
2316         evalPlanQual *epq = estate->es_evalPlanQual;
2317
2318         if (epq->rti == 0)                      /* plans already shutdowned */
2319         {
2320                 Assert(epq->next == NULL);
2321                 return;
2322         }
2323
2324         for (;;)
2325         {
2326                 evalPlanQual *oldepq;
2327
2328                 /* stop execution */
2329                 EvalPlanQualStop(epq);
2330                 /* pop old PQ from the stack */
2331                 oldepq = epq->next;
2332                 if (oldepq == NULL)
2333                 {
2334                         /* this is the first (oldest) PQ - mark as free */
2335                         epq->rti = 0;
2336                         estate->es_useEvalPlan = false;
2337                         break;
2338                 }
2339                 Assert(oldepq->rti != 0);
2340                 /* push current PQ to freePQ stack */
2341                 oldepq->free = epq;
2342                 epq = oldepq;
2343                 estate->es_evalPlanQual = epq;
2344         }
2345 }
2346
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * epq      - the PlanQual stack entry to initialize; on return epq->estate
 *            and epq->planstate are set up and ready to run
 * estate   - the top-level executor state whose shared state is borrowed
 * priorepq - next-lower PQ stack entry, or NULL if this is the bottom entry
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;			/* number of rangetable entries */
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(estate->es_range_table);

	/* build a fresh EState for this recheck level */
	epq->estate = epqstate = CreateExecutorState();

	/* perform all allocations below in the new estate's per-query context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
	epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
	epqstate->es_param_list_info = estate->es_param_list_info;
	/* executor-internal Params get a private, zeroed value array */
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.      This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2441
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 *
 * On return epq->estate and epq->planstate are NULL; the epq node itself
 * survives for possible reuse via the freePQ stack.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	/* do the shutdown work in the epq's own per-query context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/* shut down the main plan tree */
	ExecEndNode(epq->planstate);

	/* ... and each initialized SubPlan */
	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* release the private tuple table (true => drop contained slots too) */
	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	/* free the target tuple stored for this level's RTE, if any */
	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	/* close trigger target relations opened by this level (not shared) */
	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	/* destroy the per-level EState and everything still allocated in it */
	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}
2493
2494 /*
2495  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2496  *
2497  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2498  * are looking at a row returned by the EvalPlanQual machinery, we need
2499  * to look at the subsidiary state instead.
2500  */
2501 PlanState *
2502 ExecGetActivePlanTree(QueryDesc *queryDesc)
2503 {
2504         EState     *estate = queryDesc->estate;
2505
2506         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2507                 return estate->es_evalPlanQual->planstate;
2508         else
2509                 return queryDesc->planstate;
2510 }
2511
2512
2513 /*
2514  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2515  *
2516  * We implement SELECT INTO by diverting SELECT's normal output with
2517  * a specialized DestReceiver type.
2518  *
2519  * TODO: remove some of the INTO-specific cruft from EState, and keep
2520  * it in the DestReceiver instead.
2521  */
2522
/* Private DestReceiver subtype used to divert SELECT output into a table */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
} DR_intorel;
2528
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 *
 * Side effects: creates the target relation (and its TOAST table if
 * needed), opens it with AccessExclusiveLock, and stores the opened
 * Relation plus the WAL-usage decision in the query's EState.  Errors out
 * if permissions, tablespace, or ON COMMIT options are invalid.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* have to copy the actual tupdesc to get rid of any constraints */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	/* the catalog code copied the tupdesc, so our copy can go */
	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/* use_wal off requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.
	 */
	estate->es_into_relation_use_wal = XLogArchivingActive();
	estate->es_into_relation_descriptor = intoRelationDesc;

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
}
2667
2668 /*
2669  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2670  */
2671 static void
2672 CloseIntoRel(QueryDesc *queryDesc)
2673 {
2674         EState     *estate = queryDesc->estate;
2675
2676         /* OpenIntoRel might never have gotten called */
2677         if (estate->es_into_relation_descriptor)
2678         {
2679                 /* If we skipped using WAL, must heap_sync before commit */
2680                 if (!estate->es_into_relation_use_wal)
2681                         heap_sync(estate->es_into_relation_descriptor);
2682
2683                 /* close rel, but keep lock until commit */
2684                 heap_close(estate->es_into_relation_descriptor, NoLock);
2685
2686                 estate->es_into_relation_descriptor = NULL;
2687         }
2688 }
2689
2690 /*
2691  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2692  *
2693  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2694  * we just leave the private fields empty here.  OpenIntoRel will
2695  * fill them in.
2696  */
2697 DestReceiver *
2698 CreateIntoRelDestReceiver(void)
2699 {
2700         DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2701
2702         self->pub.receiveSlot = intorel_receive;
2703         self->pub.rStartup = intorel_startup;
2704         self->pub.rShutdown = intorel_shutdown;
2705         self->pub.rDestroy = intorel_destroy;
2706         self->pub.mydest = DestIntoRel;
2707
2708         self->estate = NULL;
2709
2710         return (DestReceiver *) self;
2711 }
2712
/*
 * intorel_startup --- executor startup
 *
 * Nothing to do: the target relation is created and opened by OpenIntoRel,
 * not here.  Parameters are ignored.
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}
2721
2722 /*
2723  * intorel_receive --- receive one tuple
2724  */
2725 static void
2726 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2727 {
2728         DR_intorel *myState = (DR_intorel *) self;
2729         EState     *estate = myState->estate;
2730         HeapTuple       tuple;
2731
2732         tuple = ExecCopySlotTuple(slot);
2733
2734         heap_insert(estate->es_into_relation_descriptor,
2735                                 tuple,
2736                                 estate->es_output_cid,
2737                                 estate->es_into_relation_use_wal,
2738                                 false);                 /* never any point in using FSM */
2739
2740         /* We know this is a newly created relation, so there are no indexes */
2741
2742         heap_freetuple(tuple);
2743
2744         IncrAppended();
2745 }
2746
/*
 * intorel_shutdown --- executor end
 *
 * Nothing to do: closing the target relation is handled by CloseIntoRel
 * at ExecutorEnd time, not here.
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}
2755
/*
 * intorel_destroy --- release DestReceiver object
 *
 * Frees the DR_intorel struct itself; it owns no other resources.
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}