/* execMain.c — capture of src/backend/executor/execMain.c from the PostgreSQL git web viewer */
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.307 2008/05/09 23:32:04 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "optimizer/clauses.h"
49 #include "parser/parse_clause.h"
50 #include "parser/parsetree.h"
51 #include "storage/smgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/tqual.h"
56
57
/*
 * State for an EvalPlanQual recheck (re-evaluating a plan's quals against
 * an updated tuple version).  These structs form both a stack of active
 * rechecks and a freelist of reusable ones.
 */
typedef struct evalPlanQual
{
	Index		rti;			/* range table index this recheck applies to */
	EState	   *estate;			/* per-recheck executor state */
	PlanState  *planstate;		/* plan state tree for the recheck */
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
66
67 /* decls for local routines only used within this module */
68 static void InitPlan(QueryDesc *queryDesc, int eflags);
69 static void ExecEndPlan(PlanState *planstate, EState *estate);
70 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
71                         CmdType operation,
72                         long numberTuples,
73                         ScanDirection direction,
74                         DestReceiver *dest);
75 static void ExecSelect(TupleTableSlot *slot,
76                    DestReceiver *dest, EState *estate);
77 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
78                    TupleTableSlot *planSlot,
79                    DestReceiver *dest, EState *estate);
80 static void ExecDelete(ItemPointer tupleid,
81                    TupleTableSlot *planSlot,
82                    DestReceiver *dest, EState *estate);
83 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
84                    TupleTableSlot *planSlot,
85                    DestReceiver *dest, EState *estate);
86 static void ExecProcessReturning(ProjectionInfo *projectReturning,
87                                          TupleTableSlot *tupleSlot,
88                                          TupleTableSlot *planSlot,
89                                          DestReceiver *dest);
90 static TupleTableSlot *EvalPlanQualNext(EState *estate);
91 static void EndEvalPlanQual(EState *estate);
92 static void ExecCheckRTPerms(List *rangeTable);
93 static void ExecCheckRTEPerms(RangeTblEntry *rte);
94 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
95 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
96                                   evalPlanQual *priorepq);
97 static void EvalPlanQualStop(evalPlanQual *epq);
98 static void OpenIntoRel(QueryDesc *queryDesc);
99 static void CloseIntoRel(QueryDesc *queryDesc);
100 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
101 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
102 static void intorel_shutdown(DestReceiver *self);
103 static void intorel_destroy(DestReceiver *self);
104
105 /* end of local decls */
106
107
108 /* ----------------------------------------------------------------
109  *              ExecutorStart
110  *
111  *              This routine must be called at the beginning of any execution of any
112  *              query plan
113  *
114  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
115  * clear why we bother to separate the two functions, but...).  The tupDesc
116  * field of the QueryDesc is filled in to describe the tuples that will be
117  * returned, and the internal fields (estate and planstate) are set up.
118  *
119  * eflags contains flag bits as described in executor.h.
120  *
121  * NB: the CurrentMemoryContext when this is called will become the parent
122  * of the per-query context used for this Executor invocation.
123  * ----------------------------------------------------------------
124  */
125 void
126 ExecutorStart(QueryDesc *queryDesc, int eflags)
127 {
128         EState     *estate;
129         MemoryContext oldcontext;
130
131         /* sanity checks: queryDesc must not be started already */
132         Assert(queryDesc != NULL);
133         Assert(queryDesc->estate == NULL);
134
135         /*
136          * If the transaction is read-only, we need to check if any writes are
137          * planned to non-temporary tables.  EXPLAIN is considered read-only.
138          */
139         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
140                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
141
142         /*
143          * Build EState, switch into per-query memory context for startup.
144          */
145         estate = CreateExecutorState();
146         queryDesc->estate = estate;
147
148         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
149
150         /*
151          * Fill in parameters, if any, from queryDesc
152          */
153         estate->es_param_list_info = queryDesc->params;
154
155         if (queryDesc->plannedstmt->nParamExec > 0)
156                 estate->es_param_exec_vals = (ParamExecData *)
157                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
158
159         /*
160          * If non-read-only query, set the command ID to mark output tuples with
161          */
162         switch (queryDesc->operation)
163         {
164                 case CMD_SELECT:
165                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
166                         if (queryDesc->plannedstmt->intoClause != NULL ||
167                                 queryDesc->plannedstmt->rowMarks != NIL)
168                                 estate->es_output_cid = GetCurrentCommandId(true);
169                         break;
170
171                 case CMD_INSERT:
172                 case CMD_DELETE:
173                 case CMD_UPDATE:
174                         estate->es_output_cid = GetCurrentCommandId(true);
175                         break;
176
177                 default:
178                         elog(ERROR, "unrecognized operation code: %d",
179                                  (int) queryDesc->operation);
180                         break;
181         }
182
183         /*
184          * Copy other important information into the EState
185          */
186         estate->es_snapshot = queryDesc->snapshot;
187         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
188         estate->es_instrument = queryDesc->doInstrument;
189
190         /*
191          * Initialize the plan state tree
192          */
193         InitPlan(queryDesc, eflags);
194
195         MemoryContextSwitchTo(oldcontext);
196 }
197
198 /* ----------------------------------------------------------------
199  *              ExecutorRun
200  *
201  *              This is the main routine of the executor module. It accepts
202  *              the query descriptor from the traffic cop and executes the
203  *              query plan.
204  *
205  *              ExecutorStart must have been called already.
206  *
207  *              If direction is NoMovementScanDirection then nothing is done
208  *              except to start up/shut down the destination.  Otherwise,
209  *              we retrieve up to 'count' tuples in the specified direction.
210  *
211  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
212  *              completion.
213  *
214  * ----------------------------------------------------------------
215  */
216 TupleTableSlot *
217 ExecutorRun(QueryDesc *queryDesc,
218                         ScanDirection direction, long count)
219 {
220         EState     *estate;
221         CmdType         operation;
222         DestReceiver *dest;
223         bool            sendTuples;
224         TupleTableSlot *result;
225         MemoryContext oldcontext;
226
227         /* sanity checks */
228         Assert(queryDesc != NULL);
229
230         estate = queryDesc->estate;
231
232         Assert(estate != NULL);
233
234         /*
235          * Switch into per-query memory context
236          */
237         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
238
239         /*
240          * extract information from the query descriptor and the query feature.
241          */
242         operation = queryDesc->operation;
243         dest = queryDesc->dest;
244
245         /*
246          * startup tuple receiver, if we will be emitting tuples
247          */
248         estate->es_processed = 0;
249         estate->es_lastoid = InvalidOid;
250
251         sendTuples = (operation == CMD_SELECT ||
252                                   queryDesc->plannedstmt->returningLists);
253
254         if (sendTuples)
255                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
256
257         /*
258          * run plan
259          */
260         if (ScanDirectionIsNoMovement(direction))
261                 result = NULL;
262         else
263                 result = ExecutePlan(estate,
264                                                          queryDesc->planstate,
265                                                          operation,
266                                                          count,
267                                                          direction,
268                                                          dest);
269
270         /*
271          * shutdown tuple receiver, if we started it
272          */
273         if (sendTuples)
274                 (*dest->rShutdown) (dest);
275
276         MemoryContextSwitchTo(oldcontext);
277
278         return result;
279 }
280
281 /* ----------------------------------------------------------------
282  *              ExecutorEnd
283  *
284  *              This routine must be called at the end of execution of any
285  *              query plan
286  * ----------------------------------------------------------------
287  */
288 void
289 ExecutorEnd(QueryDesc *queryDesc)
290 {
291         EState     *estate;
292         MemoryContext oldcontext;
293
294         /* sanity checks */
295         Assert(queryDesc != NULL);
296
297         estate = queryDesc->estate;
298
299         Assert(estate != NULL);
300
301         /*
302          * Switch into per-query memory context to run ExecEndPlan
303          */
304         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
305
306         ExecEndPlan(queryDesc->planstate, estate);
307
308         /*
309          * Close the SELECT INTO relation if any
310          */
311         if (estate->es_select_into)
312                 CloseIntoRel(queryDesc);
313
314         /*
315          * Must switch out of context before destroying it
316          */
317         MemoryContextSwitchTo(oldcontext);
318
319         /*
320          * Release EState and per-query memory context.  This should release
321          * everything the executor has allocated.
322          */
323         FreeExecutorState(estate);
324
325         /* Reset queryDesc fields that no longer point to anything */
326         queryDesc->tupDesc = NULL;
327         queryDesc->estate = NULL;
328         queryDesc->planstate = NULL;
329 }
330
331 /* ----------------------------------------------------------------
332  *              ExecutorRewind
333  *
334  *              This routine may be called on an open queryDesc to rewind it
335  *              to the start.
336  * ----------------------------------------------------------------
337  */
338 void
339 ExecutorRewind(QueryDesc *queryDesc)
340 {
341         EState     *estate;
342         MemoryContext oldcontext;
343
344         /* sanity checks */
345         Assert(queryDesc != NULL);
346
347         estate = queryDesc->estate;
348
349         Assert(estate != NULL);
350
351         /* It's probably not sensible to rescan updating queries */
352         Assert(queryDesc->operation == CMD_SELECT);
353
354         /*
355          * Switch into per-query memory context
356          */
357         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
358
359         /*
360          * rescan plan
361          */
362         ExecReScan(queryDesc->planstate, NULL);
363
364         MemoryContextSwitchTo(oldcontext);
365 }
366
367
368 /*
369  * ExecCheckRTPerms
370  *              Check access permissions for all relations listed in a range table.
371  */
372 static void
373 ExecCheckRTPerms(List *rangeTable)
374 {
375         ListCell   *l;
376
377         foreach(l, rangeTable)
378         {
379                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
380         }
381 }
382
383 /*
384  * ExecCheckRTEPerms
385  *              Check access permissions for a single RTE.
386  */
387 static void
388 ExecCheckRTEPerms(RangeTblEntry *rte)
389 {
390         AclMode         requiredPerms;
391         Oid                     relOid;
392         Oid                     userid;
393
394         /*
395          * Only plain-relation RTEs need to be checked here.  Function RTEs are
396          * checked by init_fcache when the function is prepared for execution.
397          * Join, subquery, and special RTEs need no checks.
398          */
399         if (rte->rtekind != RTE_RELATION)
400                 return;
401
402         /*
403          * No work if requiredPerms is empty.
404          */
405         requiredPerms = rte->requiredPerms;
406         if (requiredPerms == 0)
407                 return;
408
409         relOid = rte->relid;
410
411         /*
412          * userid to check as: current user unless we have a setuid indication.
413          *
414          * Note: GetUserId() is presently fast enough that there's no harm in
415          * calling it separately for each RTE.  If that stops being true, we could
416          * call it once in ExecCheckRTPerms and pass the userid down from there.
417          * But for now, no need for the extra clutter.
418          */
419         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
420
421         /*
422          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
423          */
424         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
425                 != requiredPerms)
426                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
427                                            get_rel_name(relOid));
428 }
429
430 /*
431  * Check that the query does not imply any writes to non-temp tables.
432  */
433 static void
434 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
435 {
436         ListCell   *l;
437
438         /*
439          * CREATE TABLE AS or SELECT INTO?
440          *
441          * XXX should we allow this if the destination is temp?
442          */
443         if (plannedstmt->intoClause != NULL)
444                 goto fail;
445
446         /* Fail if write permissions are requested on any non-temp table */
447         foreach(l, plannedstmt->rtable)
448         {
449                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
450
451                 if (rte->rtekind != RTE_RELATION)
452                         continue;
453
454                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
455                         continue;
456
457                 if (isTempNamespace(get_rel_namespace(rte->relid)))
458                         continue;
459
460                 goto fail;
461         }
462
463         return;
464
465 fail:
466         ereport(ERROR,
467                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
468                          errmsg("transaction is read-only")));
469 }
470
471
472 /* ----------------------------------------------------------------
473  *              InitPlan
474  *
475  *              Initializes the query plan: open files, allocate storage
476  *              and start up the rule manager
477  * ----------------------------------------------------------------
478  */
479 static void
480 InitPlan(QueryDesc *queryDesc, int eflags)
481 {
482         CmdType         operation = queryDesc->operation;
483         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
484         Plan       *plan = plannedstmt->planTree;
485         List       *rangeTable = plannedstmt->rtable;
486         EState     *estate = queryDesc->estate;
487         PlanState  *planstate;
488         TupleDesc       tupType;
489         ListCell   *l;
490         int                     i;
491
492         /*
493          * Do permissions checks
494          */
495         ExecCheckRTPerms(rangeTable);
496
497         /*
498          * initialize the node's execution state
499          */
500         estate->es_range_table = rangeTable;
501
502         /*
503          * initialize result relation stuff
504          */
505         if (plannedstmt->resultRelations)
506         {
507                 List       *resultRelations = plannedstmt->resultRelations;
508                 int                     numResultRelations = list_length(resultRelations);
509                 ResultRelInfo *resultRelInfos;
510                 ResultRelInfo *resultRelInfo;
511
512                 resultRelInfos = (ResultRelInfo *)
513                         palloc(numResultRelations * sizeof(ResultRelInfo));
514                 resultRelInfo = resultRelInfos;
515                 foreach(l, resultRelations)
516                 {
517                         Index           resultRelationIndex = lfirst_int(l);
518                         Oid                     resultRelationOid;
519                         Relation        resultRelation;
520
521                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
522                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
523                         InitResultRelInfo(resultRelInfo,
524                                                           resultRelation,
525                                                           resultRelationIndex,
526                                                           operation,
527                                                           estate->es_instrument);
528                         resultRelInfo++;
529                 }
530                 estate->es_result_relations = resultRelInfos;
531                 estate->es_num_result_relations = numResultRelations;
532                 /* Initialize to first or only result rel */
533                 estate->es_result_relation_info = resultRelInfos;
534         }
535         else
536         {
537                 /*
538                  * if no result relation, then set state appropriately
539                  */
540                 estate->es_result_relations = NULL;
541                 estate->es_num_result_relations = 0;
542                 estate->es_result_relation_info = NULL;
543         }
544
545         /*
546          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
547          * flag appropriately so that the plan tree will be initialized with the
548          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
549          */
550         estate->es_select_into = false;
551         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
552         {
553                 estate->es_select_into = true;
554                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
555         }
556
557         /*
558          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
559          * initialize the plan tree, else we'd be doing a lock upgrade. While we
560          * are at it, build the ExecRowMark list.
561          */
562         estate->es_rowMarks = NIL;
563         foreach(l, plannedstmt->rowMarks)
564         {
565                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
566                 Oid                     relid = getrelid(rc->rti, rangeTable);
567                 Relation        relation;
568                 ExecRowMark *erm;
569
570                 relation = heap_open(relid, RowShareLock);
571                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
572                 erm->relation = relation;
573                 erm->rti = rc->rti;
574                 erm->forUpdate = rc->forUpdate;
575                 erm->noWait = rc->noWait;
576                 /* We'll set up ctidAttno below */
577                 erm->ctidAttNo = InvalidAttrNumber;
578                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
579         }
580
581         /*
582          * Initialize the executor "tuple" table.  We need slots for all the plan
583          * nodes, plus possibly output slots for the junkfilter(s). At this point
584          * we aren't sure if we need junkfilters, so just add slots for them
585          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
586          * trigger output tuples.  Also, one for RETURNING-list evaluation.
587          */
588         {
589                 int                     nSlots;
590
591                 /* Slots for the main plan tree */
592                 nSlots = ExecCountSlotsNode(plan);
593                 /* Add slots for subplans and initplans */
594                 foreach(l, plannedstmt->subplans)
595                 {
596                         Plan       *subplan = (Plan *) lfirst(l);
597
598                         nSlots += ExecCountSlotsNode(subplan);
599                 }
600                 /* Add slots for junkfilter(s) */
601                 if (plannedstmt->resultRelations != NIL)
602                         nSlots += list_length(plannedstmt->resultRelations);
603                 else
604                         nSlots += 1;
605                 if (operation != CMD_SELECT)
606                         nSlots++;                       /* for es_trig_tuple_slot */
607                 if (plannedstmt->returningLists)
608                         nSlots++;                       /* for RETURNING projection */
609
610                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
611
612                 if (operation != CMD_SELECT)
613                         estate->es_trig_tuple_slot =
614                                 ExecAllocTableSlot(estate->es_tupleTable);
615         }
616
617         /* mark EvalPlanQual not active */
618         estate->es_plannedstmt = plannedstmt;
619         estate->es_evalPlanQual = NULL;
620         estate->es_evTupleNull = NULL;
621         estate->es_evTuple = NULL;
622         estate->es_useEvalPlan = false;
623
624         /*
625          * Initialize private state information for each SubPlan.  We must do this
626          * before running ExecInitNode on the main query tree, since
627          * ExecInitSubPlan expects to be able to find these entries.
628          */
629         Assert(estate->es_subplanstates == NIL);
630         i = 1;                                          /* subplan indices count from 1 */
631         foreach(l, plannedstmt->subplans)
632         {
633                 Plan       *subplan = (Plan *) lfirst(l);
634                 PlanState  *subplanstate;
635                 int                     sp_eflags;
636
637                 /*
638                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
639                  * it is a parameterless subplan (not initplan), we suggest that it be
640                  * prepared to handle REWIND efficiently; otherwise there is no need.
641                  */
642                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
643                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
644                         sp_eflags |= EXEC_FLAG_REWIND;
645
646                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
647
648                 estate->es_subplanstates = lappend(estate->es_subplanstates,
649                                                                                    subplanstate);
650
651                 i++;
652         }
653
654         /*
655          * Initialize the private state information for all the nodes in the query
656          * tree.  This opens files, allocates storage and leaves us ready to start
657          * processing tuples.
658          */
659         planstate = ExecInitNode(plan, estate, eflags);
660
661         /*
662          * Get the tuple descriptor describing the type of tuples to return. (this
663          * is especially important if we are creating a relation with "SELECT
664          * INTO")
665          */
666         tupType = ExecGetResultType(planstate);
667
668         /*
669          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
670          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
671          * INTO also need a filter if the plan may return raw disk tuples (else
672          * heap_insert will be scribbling on the source relation!). UPDATE and
673          * DELETE always need a filter, since there's always a junk 'ctid'
674          * attribute present --- no need to look first.
675          */
676         {
677                 bool            junk_filter_needed = false;
678                 ListCell   *tlist;
679
680                 switch (operation)
681                 {
682                         case CMD_SELECT:
683                         case CMD_INSERT:
684                                 foreach(tlist, plan->targetlist)
685                                 {
686                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
687
688                                         if (tle->resjunk)
689                                         {
690                                                 junk_filter_needed = true;
691                                                 break;
692                                         }
693                                 }
694                                 if (!junk_filter_needed &&
695                                         (operation == CMD_INSERT || estate->es_select_into) &&
696                                         ExecMayReturnRawTuples(planstate))
697                                         junk_filter_needed = true;
698                                 break;
699                         case CMD_UPDATE:
700                         case CMD_DELETE:
701                                 junk_filter_needed = true;
702                                 break;
703                         default:
704                                 break;
705                 }
706
707                 if (junk_filter_needed)
708                 {
709                         /*
710                          * If there are multiple result relations, each one needs its own
711                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
712                          * we can't be fooled by some needing a filter and some not.
713                          */
714                         if (list_length(plannedstmt->resultRelations) > 1)
715                         {
716                                 PlanState **appendplans;
717                                 int                     as_nplans;
718                                 ResultRelInfo *resultRelInfo;
719
720                                 /* Top plan had better be an Append here. */
721                                 Assert(IsA(plan, Append));
722                                 Assert(((Append *) plan)->isTarget);
723                                 Assert(IsA(planstate, AppendState));
724                                 appendplans = ((AppendState *) planstate)->appendplans;
725                                 as_nplans = ((AppendState *) planstate)->as_nplans;
726                                 Assert(as_nplans == estate->es_num_result_relations);
727                                 resultRelInfo = estate->es_result_relations;
728                                 for (i = 0; i < as_nplans; i++)
729                                 {
730                                         PlanState  *subplan = appendplans[i];
731                                         JunkFilter *j;
732
733                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
734                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
735                                                                   ExecAllocTableSlot(estate->es_tupleTable));
736
737                                         /*
738                                          * Since it must be UPDATE/DELETE, there had better be a
739                                          * "ctid" junk attribute in the tlist ... but ctid could
740                                          * be at a different resno for each result relation. We
741                                          * look up the ctid resnos now and save them in the
742                                          * junkfilters.
743                                          */
744                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
745                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
746                                                 elog(ERROR, "could not find junk ctid column");
747                                         resultRelInfo->ri_junkFilter = j;
748                                         resultRelInfo++;
749                                 }
750
751                                 /*
752                                  * Set active junkfilter too; at this point ExecInitAppend has
753                                  * already selected an active result relation...
754                                  */
755                                 estate->es_junkFilter =
756                                         estate->es_result_relation_info->ri_junkFilter;
757
758                                 /*
759                                  * We currently can't support rowmarks in this case, because
760                                  * the associated junk CTIDs might have different resnos in
761                                  * different subplans.
762                                  */
763                                 if (estate->es_rowMarks)
764                                         ereport(ERROR,
765                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
766                                                          errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
767                         }
768                         else
769                         {
770                                 /* Normal case with just one JunkFilter */
771                                 JunkFilter *j;
772
773                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
774                                                                            tupType->tdhasoid,
775                                                                   ExecAllocTableSlot(estate->es_tupleTable));
776                                 estate->es_junkFilter = j;
777                                 if (estate->es_result_relation_info)
778                                         estate->es_result_relation_info->ri_junkFilter = j;
779
780                                 if (operation == CMD_SELECT)
781                                 {
782                                         /* For SELECT, want to return the cleaned tuple type */
783                                         tupType = j->jf_cleanTupType;
784                                 }
785                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
786                                 {
787                                         /* For UPDATE/DELETE, find the ctid junk attr now */
788                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
789                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
790                                                 elog(ERROR, "could not find junk ctid column");
791                                 }
792
793                                 /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
794                                 foreach(l, estate->es_rowMarks)
795                                 {
796                                         ExecRowMark *erm = (ExecRowMark *) lfirst(l);
797                                         char            resname[32];
798
799                                         snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
800                                         erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
801                                         if (!AttributeNumberIsValid(erm->ctidAttNo))
802                                                 elog(ERROR, "could not find junk \"%s\" column",
803                                                          resname);
804                                 }
805                         }
806                 }
807                 else
808                 {
809                         estate->es_junkFilter = NULL;
810                         if (estate->es_rowMarks)
811                                 elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
812                 }
813         }
814
815         /*
816          * Initialize RETURNING projections if needed.
817          */
818         if (plannedstmt->returningLists)
819         {
820                 TupleTableSlot *slot;
821                 ExprContext *econtext;
822                 ResultRelInfo *resultRelInfo;
823
824                 /*
825                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
826                  * We assume all the sublists will generate the same output tupdesc.
827                  */
828                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
829                                                                  false);
830
831                 /* Set up a slot for the output of the RETURNING projection(s) */
832                 slot = ExecAllocTableSlot(estate->es_tupleTable);
833                 ExecSetSlotDescriptor(slot, tupType);
834                 /* Need an econtext too */
835                 econtext = CreateExprContext(estate);
836
837                 /*
838                  * Build a projection for each result rel.      Note that any SubPlans in
839                  * the RETURNING lists get attached to the topmost plan node.
840                  */
841                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
842                 resultRelInfo = estate->es_result_relations;
843                 foreach(l, plannedstmt->returningLists)
844                 {
845                         List       *rlist = (List *) lfirst(l);
846                         List       *rliststate;
847
848                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
849                         resultRelInfo->ri_projectReturning =
850                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
851                                                                          resultRelInfo->ri_RelationDesc->rd_att);
852                         resultRelInfo++;
853                 }
854         }
855
856         queryDesc->tupDesc = tupType;
857         queryDesc->planstate = planstate;
858
859         /*
860          * If doing SELECT INTO, initialize the "into" relation.  We must wait
861          * till now so we have the "clean" result tuple type to create the new
862          * table from.
863          *
864          * If EXPLAIN, skip creating the "into" relation.
865          */
866         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
867                 OpenIntoRel(queryDesc);
868 }
869
870 /*
871  * Initialize ResultRelInfo data for one result relation
872  */
873 void
874 InitResultRelInfo(ResultRelInfo *resultRelInfo,
875                                   Relation resultRelationDesc,
876                                   Index resultRelationIndex,
877                                   CmdType operation,
878                                   bool doInstrument)
879 {
880         /*
881          * Check valid relkind ... parser and/or planner should have noticed this
882          * already, but let's make sure.
883          */
884         switch (resultRelationDesc->rd_rel->relkind)
885         {
886                 case RELKIND_RELATION:
887                         /* OK */
888                         break;
889                 case RELKIND_SEQUENCE:
890                         ereport(ERROR,
891                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
892                                          errmsg("cannot change sequence \"%s\"",
893                                                         RelationGetRelationName(resultRelationDesc))));
894                         break;
895                 case RELKIND_TOASTVALUE:
896                         ereport(ERROR,
897                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
898                                          errmsg("cannot change TOAST relation \"%s\"",
899                                                         RelationGetRelationName(resultRelationDesc))));
900                         break;
901                 case RELKIND_VIEW:
902                         ereport(ERROR,
903                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
904                                          errmsg("cannot change view \"%s\"",
905                                                         RelationGetRelationName(resultRelationDesc))));
906                         break;
907                 default:
908                         ereport(ERROR,
909                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
910                                          errmsg("cannot change relation \"%s\"",
911                                                         RelationGetRelationName(resultRelationDesc))));
912                         break;
913         }
914
915         /* OK, fill in the node */
916         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
917         resultRelInfo->type = T_ResultRelInfo;
918         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
919         resultRelInfo->ri_RelationDesc = resultRelationDesc;
920         resultRelInfo->ri_NumIndices = 0;
921         resultRelInfo->ri_IndexRelationDescs = NULL;
922         resultRelInfo->ri_IndexRelationInfo = NULL;
923         /* make a copy so as not to depend on relcache info not changing... */
924         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
925         if (resultRelInfo->ri_TrigDesc)
926         {
927                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
928
929                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
930                         palloc0(n * sizeof(FmgrInfo));
931                 if (doInstrument)
932                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
933                 else
934                         resultRelInfo->ri_TrigInstrument = NULL;
935         }
936         else
937         {
938                 resultRelInfo->ri_TrigFunctions = NULL;
939                 resultRelInfo->ri_TrigInstrument = NULL;
940         }
941         resultRelInfo->ri_ConstraintExprs = NULL;
942         resultRelInfo->ri_junkFilter = NULL;
943         resultRelInfo->ri_projectReturning = NULL;
944
945         /*
946          * If there are indices on the result relation, open them and save
947          * descriptors in the result relation info, so that we can add new index
948          * entries for the tuples we add/update.  We need not do this for a
949          * DELETE, however, since deletion doesn't affect indexes.
950          */
951         if (resultRelationDesc->rd_rel->relhasindex &&
952                 operation != CMD_DELETE)
953                 ExecOpenIndices(resultRelInfo);
954 }
955
956 /*
957  *              ExecGetTriggerResultRel
958  *
959  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
960  * triggers are fired on one of the result relations of the query, and so
961  * we can just return a member of the es_result_relations array.  (Note: in
962  * self-join situations there might be multiple members with the same OID;
963  * if so it doesn't matter which one we pick.)  However, it is sometimes
964  * necessary to fire triggers on other relations; this happens mainly when an
965  * RI update trigger queues additional triggers on other relations, which will
966  * be processed in the context of the outer query.      For efficiency's sake,
967  * we want to have a ResultRelInfo for those triggers too; that can avoid
968  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
969  * ANALYZE to report the runtimes of such triggers.)  So we make additional
970  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
971  */
972 ResultRelInfo *
973 ExecGetTriggerResultRel(EState *estate, Oid relid)
974 {
975         ResultRelInfo *rInfo;
976         int                     nr;
977         ListCell   *l;
978         Relation        rel;
979         MemoryContext oldcontext;
980
981         /* First, search through the query result relations */
982         rInfo = estate->es_result_relations;
983         nr = estate->es_num_result_relations;
984         while (nr > 0)
985         {
986                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
987                         return rInfo;
988                 rInfo++;
989                 nr--;
990         }
991         /* Nope, but maybe we already made an extra ResultRelInfo for it */
992         foreach(l, estate->es_trig_target_relations)
993         {
994                 rInfo = (ResultRelInfo *) lfirst(l);
995                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
996                         return rInfo;
997         }
998         /* Nope, so we need a new one */
999
1000         /*
1001          * Open the target relation's relcache entry.  We assume that an
1002          * appropriate lock is still held by the backend from whenever the trigger
1003          * event got queued, so we need take no new lock here.
1004          */
1005         rel = heap_open(relid, NoLock);
1006
1007         /*
1008          * Make the new entry in the right context.  Currently, we don't need any
1009          * index information in ResultRelInfos used only for triggers, so tell
1010          * InitResultRelInfo it's a DELETE.
1011          */
1012         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1013         rInfo = makeNode(ResultRelInfo);
1014         InitResultRelInfo(rInfo,
1015                                           rel,
1016                                           0,            /* dummy rangetable index */
1017                                           CMD_DELETE,
1018                                           estate->es_instrument);
1019         estate->es_trig_target_relations =
1020                 lappend(estate->es_trig_target_relations, rInfo);
1021         MemoryContextSwitchTo(oldcontext);
1022
1023         return rInfo;
1024 }
1025
1026 /*
1027  *              ExecContextForcesOids
1028  *
1029  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1030  * we need to ensure that result tuples have space for an OID iff they are
1031  * going to be stored into a relation that has OIDs.  In other contexts
1032  * we are free to choose whether to leave space for OIDs in result tuples
1033  * (we generally don't want to, but we do if a physical-tlist optimization
1034  * is possible).  This routine checks the plan context and returns TRUE if the
1035  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1036  * *hasoids is set to the required value.
1037  *
1038  * One reason this is ugly is that all plan nodes in the plan tree will emit
1039  * tuples with space for an OID, though we really only need the topmost node
1040  * to do so.  However, node types like Sort don't project new tuples but just
1041  * return their inputs, and in those cases the requirement propagates down
1042  * to the input node.  Eventually we might make this code smart enough to
1043  * recognize how far down the requirement really goes, but for now we just
1044  * make all plan nodes do the same thing if the top level forces the choice.
1045  *
1046  * We assume that estate->es_result_relation_info is already set up to
1047  * describe the target relation.  Note that in an UPDATE that spans an
1048  * inheritance tree, some of the target relations may have OIDs and some not.
1049  * We have to make the decisions on a per-relation basis as we initialize
1050  * each of the child plans of the topmost Append plan.
1051  *
1052  * SELECT INTO is even uglier, because we don't have the INTO relation's
1053  * descriptor available when this code runs; we have to look aside at a
1054  * flag set by InitPlan().
1055  */
1056 bool
1057 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1058 {
1059         if (planstate->state->es_select_into)
1060         {
1061                 *hasoids = planstate->state->es_into_oids;
1062                 return true;
1063         }
1064         else
1065         {
1066                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1067
1068                 if (ri != NULL)
1069                 {
1070                         Relation        rel = ri->ri_RelationDesc;
1071
1072                         if (rel != NULL)
1073                         {
1074                                 *hasoids = rel->rd_rel->relhasoids;
1075                                 return true;
1076                         }
1077                 }
1078         }
1079
1080         return false;
1081 }
1082
1083 /* ----------------------------------------------------------------
1084  *              ExecEndPlan
1085  *
1086  *              Cleans up the query plan -- closes files and frees up storage
1087  *
1088  * NOTE: we are no longer very worried about freeing storage per se
1089  * in this code; FreeExecutorState should be guaranteed to release all
1090  * memory that needs to be released.  What we are worried about doing
1091  * is closing relations and dropping buffer pins.  Thus, for example,
1092  * tuple tables must be cleared or dropped to ensure pins are released.
1093  * ----------------------------------------------------------------
1094  */
1095 static void
1096 ExecEndPlan(PlanState *planstate, EState *estate)
1097 {
1098         ResultRelInfo *resultRelInfo;
1099         int                     i;
1100         ListCell   *l;
1101
1102         /*
1103          * shut down any PlanQual processing we were doing
1104          */
1105         if (estate->es_evalPlanQual != NULL)
1106                 EndEvalPlanQual(estate);
1107
1108         /*
1109          * shut down the node-type-specific query processing
1110          */
1111         ExecEndNode(planstate);
1112
1113         /*
1114          * for subplans too
1115          */
1116         foreach(l, estate->es_subplanstates)
1117         {
1118                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1119
1120                 ExecEndNode(subplanstate);
1121         }
1122
1123         /*
1124          * destroy the executor "tuple" table.
1125          */
1126         ExecDropTupleTable(estate->es_tupleTable, true);
1127         estate->es_tupleTable = NULL;
1128
1129         /*
1130          * close the result relation(s) if any, but hold locks until xact commit.
1131          */
1132         resultRelInfo = estate->es_result_relations;
1133         for (i = estate->es_num_result_relations; i > 0; i--)
1134         {
1135                 /* Close indices and then the relation itself */
1136                 ExecCloseIndices(resultRelInfo);
1137                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1138                 resultRelInfo++;
1139         }
1140
1141         /*
1142          * likewise close any trigger target relations
1143          */
1144         foreach(l, estate->es_trig_target_relations)
1145         {
1146                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1147                 /* Close indices and then the relation itself */
1148                 ExecCloseIndices(resultRelInfo);
1149                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1150         }
1151
1152         /*
1153          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1154          */
1155         foreach(l, estate->es_rowMarks)
1156         {
1157                 ExecRowMark *erm = lfirst(l);
1158
1159                 heap_close(erm->relation, NoLock);
1160         }
1161 }
1162
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		processes the query plan to retrieve 'numberTuples' tuples in the
 *		direction specified.
 *
 *		Retrieves all tuples if numberTuples is 0
 *
 *		result is either a slot containing the last tuple in the case
 *		of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 *
 * Control flow note: the 'lnext' label restarts the fetch of a source
 * tuple, and 'lmark' restarts the FOR UPDATE/SHARE row-locking loop after
 * EvalPlanQual substitutes an updated tuple version.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;	/* raw tuple from the plan tree */
	TupleTableSlot *slot;		/* tuple after junk filtering, if any */
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;	/* local copy of the target ctid */
	long		current_tuple_count;
	TupleTableSlot *result;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
	result = NULL;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* SELECT has no statement-level triggers; do nothing */
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */

	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple.  If an EvalPlanQual recheck
		 * is in progress (es_useEvalPlan), first drain substitute tuples
		 * from that machinery; fall back to the regular plan when it is
		 * exhausted.
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			planSlot = EvalPlanQualNext(estate);
			if (TupIsNull(planSlot))
				planSlot = ExecProcNode(planstate);
		}
		else
			planSlot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(planSlot))
		{
			result = NULL;
			break;
		}
		slot = planSlot;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * But first, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.  Each
			 * row mark identifies (via a per-rangetable-entry junk "ctidN"
			 * column) a tuple that must be locked before we emit this row.
			 */
			if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					Datum		datum;
					bool		isNull;
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					/* FOR UPDATE locks exclusively, FOR SHARE shared */
					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_output_cid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							/* successfully locked; go on to next row mark */
							break;

						case HeapTupleUpdated:
							/* concurrently updated by another transaction */
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
								 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								  errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax);
								if (!TupIsNull(newSlot))
								{
									/*
									 * Recheck passed: substitute the updated
									 * tuple and redo the locking loop from
									 * the top, since earlier row marks refer
									 * to the old tuple's junk ctids.
									 */
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
							return NULL;
					}
				}
			}

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				Datum		datum;
				bool		isNull;

				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * return it to the user, add it to a relation someplace, delete it
		 * from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				result = slot;
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* SELECT has no statement-level triggers; do nothing */
			break;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * SELECT or NULL otherwise.
	 */
	return result;
}
1450
1451 /* ----------------------------------------------------------------
1452  *              ExecSelect
1453  *
1454  *              SELECTs are easy.. we just pass the tuple to the appropriate
1455  *              output function.
1456  * ----------------------------------------------------------------
1457  */
1458 static void
1459 ExecSelect(TupleTableSlot *slot,
1460                    DestReceiver *dest,
1461                    EState *estate)
1462 {
1463         (*dest->receiveSlot) (slot, dest);
1464         IncrRetrieved();
1465         (estate->es_processed)++;
1466 }
1467
1468 /* ----------------------------------------------------------------
1469  *              ExecInsert
1470  *
1471  *              INSERTs are trickier.. we have to insert the tuple into
1472  *              the base relation and insert appropriate tuples into the
1473  *              index relations.
1474  * ----------------------------------------------------------------
1475  */
/*
 * slot: the candidate tuple (may be replaced below if a BEFORE ROW trigger
 * substitutes a different tuple); tupleid is unused for INSERT; planSlot
 * holds the tuple returned by the top plan node, needed by the RETURNING
 * projection; dest receives any RETURNING output.
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		/* NULL return means the trigger suppressed the insert entirely */
		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			/* shouldFree = false: the per-tuple context owns newtuple */
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_output_cid,
						true, true);	/* NOTE(review): the two bool flags
										 * appear to be use_wal/use_fsm --
										 * confirm against heap_insert */

	IncrAppended();
	(estate->es_processed)++;
	/* remember OID of inserted row for INSERT ... RETURNING-style callers */
	estate->es_lastoid = newId;
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1564
1565 /* ----------------------------------------------------------------
1566  *              ExecDelete
1567  *
1568  *              DELETE is like UPDATE, except that we delete the tuple and no
1569  *              index modifications are needed
1570  * ----------------------------------------------------------------
1571  */
/*
 * tupleid: TID of the row to delete (updated in place if we must chase a
 * concurrently-updated version); planSlot/dest serve the RETURNING clause.
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* TID of newer version on concurrent update */
	TransactionId update_xmax;	/* xmax of the outdated version */

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Concurrent update/delete: under SERIALIZABLE we must fail.
			 * Otherwise (READ COMMITTED rules) chase the update chain and
			 * recheck the quals against the newest version via EvalPlanQual.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					/* newest version still satisfies quals: retry on it */
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.      We can use the trigger tuple slot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		/*
		 * Use SnapshotAny: the row we just deleted is no longer visible to
		 * our query snapshot, but its contents are still needed here.
		 */
		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer pin it depends on */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1694
1695 /* ----------------------------------------------------------------
1696  *              ExecUpdate
1697  *
1698  *              note: we can't run UPDATE queries with transactions
1699  *              off because UPDATEs are actually INSERTs and our
1700  *              scan will mistakenly loop forever, updating the tuple
1701  *              it just inserted..      This should be fixed but until it
1702  *              is, we don't want to get stuck in an infinite loop
1703  *              which corrupts your database..
1704  * ----------------------------------------------------------------
1705  */
/*
 * slot: candidate replacement tuple; tupleid: TID of the row being replaced
 * (updated in place if we chase a concurrently-updated version);
 * planSlot/dest serve the RETURNING clause.
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* TID of newer version on concurrent update */
	TransactionId update_xmax;	/* xmax of the outdated version */

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		/* NULL return means the trigger suppressed the update entirely */
		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			/* shouldFree = false: the per-tuple context owns newtuple */
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Concurrent update: fail under SERIALIZABLE; otherwise (READ
			 * COMMITTED rules) chase the update chain and recheck the quals
			 * against the newest version via EvalPlanQual.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					/*
					 * Re-derive the candidate tuple from the EPQ result,
					 * strip junk columns, then retry the update (constraints
					 * are rechecked at lreplace).
					 */
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1862
1863 /*
1864  * ExecRelCheck --- check that tuple meets constraints for result relation
1865  */
/*
 * Returns the name of the first CHECK constraint the tuple in 'slot' fails,
 * or NULL if it satisfies all of them.  Caller must ensure the relation
 * actually has CHECK constraints (rd_att->constr->num_check > 0).
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
1924
1925 void
1926 ExecConstraints(ResultRelInfo *resultRelInfo,
1927                                 TupleTableSlot *slot, EState *estate)
1928 {
1929         Relation        rel = resultRelInfo->ri_RelationDesc;
1930         TupleConstr *constr = rel->rd_att->constr;
1931
1932         Assert(constr);
1933
1934         if (constr->has_not_null)
1935         {
1936                 int                     natts = rel->rd_att->natts;
1937                 int                     attrChk;
1938
1939                 for (attrChk = 1; attrChk <= natts; attrChk++)
1940                 {
1941                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1942                                 slot_attisnull(slot, attrChk))
1943                                 ereport(ERROR,
1944                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1945                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1946                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1947                 }
1948         }
1949
1950         if (constr->num_check > 0)
1951         {
1952                 const char *failed;
1953
1954                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1955                         ereport(ERROR,
1956                                         (errcode(ERRCODE_CHECK_VIOLATION),
1957                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1958                                                         RelationGetRelationName(rel), failed)));
1959         }
1960 }
1961
1962 /*
1963  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1964  *
1965  * projectReturning: RETURNING projection info for current result rel
1966  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1967  * planSlot: slot holding tuple returned by top plan node
1968  * dest: where to send the output
1969  */
1970 static void
1971 ExecProcessReturning(ProjectionInfo *projectReturning,
1972                                          TupleTableSlot *tupleSlot,
1973                                          TupleTableSlot *planSlot,
1974                                          DestReceiver *dest)
1975 {
1976         ExprContext *econtext = projectReturning->pi_exprContext;
1977         TupleTableSlot *retSlot;
1978
1979         /*
1980          * Reset per-tuple memory context to free any expression evaluation
1981          * storage allocated in the previous cycle.
1982          */
1983         ResetExprContext(econtext);
1984
1985         /* Make tuple and any needed join variables available to ExecProject */
1986         econtext->ecxt_scantuple = tupleSlot;
1987         econtext->ecxt_outertuple = planSlot;
1988
1989         /* Compute the RETURNING expressions */
1990         retSlot = ExecProject(projectReturning, NULL);
1991
1992         /* Send to dest */
1993         (*dest->receiveSlot) (retSlot, dest);
1994
1995         ExecClearTuple(retSlot);
1996 }
1997
1998 /*
1999  * Check a modified tuple to see if we want to process its updated version
2000  * under READ COMMITTED rules.
2001  *
2002  * See backend/executor/README for some info about how this works.
2003  *
2004  *      estate - executor state data
2005  *      rti - rangetable index of table containing tuple
2006  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2007  *      priorXmax - t_xmax from the outdated tuple
2008  *
2009  * *tid is also an output parameter: it's modified to hold the TID of the
2010  * latest version of the tuple (note this may be changed even on failure)
2011  *
2012  * Returns a slot containing the new candidate update/delete tuple, or
2013  * NULL if we determine we shouldn't process the row.
2014  */
2015 TupleTableSlot *
2016 EvalPlanQual(EState *estate, Index rti,
2017                          ItemPointer tid, TransactionId priorXmax)
2018 {
2019         evalPlanQual *epq;
2020         EState     *epqstate;
2021         Relation        relation;
2022         HeapTupleData tuple;
2023         HeapTuple       copyTuple = NULL;
2024         SnapshotData SnapshotDirty;
2025         bool            endNode;
2026
2027         Assert(rti != 0);
2028
2029         /*
2030          * find relation containing target tuple
2031          */
2032         if (estate->es_result_relation_info != NULL &&
2033                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
2034                 relation = estate->es_result_relation_info->ri_RelationDesc;
2035         else
2036         {
2037                 ListCell   *l;
2038
2039                 relation = NULL;
2040                 foreach(l, estate->es_rowMarks)
2041                 {
2042                         if (((ExecRowMark *) lfirst(l))->rti == rti)
2043                         {
2044                                 relation = ((ExecRowMark *) lfirst(l))->relation;
2045                                 break;
2046                         }
2047                 }
2048                 if (relation == NULL)
2049                         elog(ERROR, "could not find RowMark for RT index %u", rti);
2050         }
2051
2052         /*
2053          * fetch tid tuple
2054          *
2055          * Loop here to deal with updated or busy tuples
2056          */
2057         InitDirtySnapshot(SnapshotDirty);
2058         tuple.t_self = *tid;
2059         for (;;)
2060         {
2061                 Buffer          buffer;
2062
2063                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2064                 {
2065                         /*
2066                          * If xmin isn't what we're expecting, the slot must have been
2067                          * recycled and reused for an unrelated tuple.  This implies that
2068                          * the latest version of the row was deleted, so we need do
2069                          * nothing.  (Should be safe to examine xmin without getting
2070                          * buffer's content lock, since xmin never changes in an existing
2071                          * tuple.)
2072                          */
2073                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2074                                                                          priorXmax))
2075                         {
2076                                 ReleaseBuffer(buffer);
2077                                 return NULL;
2078                         }
2079
2080                         /* otherwise xmin should not be dirty... */
2081                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2082                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2083
2084                         /*
2085                          * If tuple is being updated by other transaction then we have to
2086                          * wait for its commit/abort.
2087                          */
2088                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2089                         {
2090                                 ReleaseBuffer(buffer);
2091                                 XactLockTableWait(SnapshotDirty.xmax);
2092                                 continue;               /* loop back to repeat heap_fetch */
2093                         }
2094
2095                         /*
2096                          * If tuple was inserted by our own transaction, we have to check
2097                          * cmin against es_output_cid: cmin >= current CID means our
2098                          * command cannot see the tuple, so we should ignore it.  Without
2099                          * this we are open to the "Halloween problem" of indefinitely
2100                          * re-updating the same tuple. (We need not check cmax because
2101                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
2102                          * transaction dead, regardless of cmax.)  We just checked that
2103                          * priorXmax == xmin, so we can test that variable instead of
2104                          * doing HeapTupleHeaderGetXmin again.
2105                          */
2106                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2107                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2108                         {
2109                                 ReleaseBuffer(buffer);
2110                                 return NULL;
2111                         }
2112
2113                         /*
2114                          * We got tuple - now copy it for use by recheck query.
2115                          */
2116                         copyTuple = heap_copytuple(&tuple);
2117                         ReleaseBuffer(buffer);
2118                         break;
2119                 }
2120
2121                 /*
2122                  * If the referenced slot was actually empty, the latest version of
2123                  * the row must have been deleted, so we need do nothing.
2124                  */
2125                 if (tuple.t_data == NULL)
2126                 {
2127                         ReleaseBuffer(buffer);
2128                         return NULL;
2129                 }
2130
2131                 /*
2132                  * As above, if xmin isn't what we're expecting, do nothing.
2133                  */
2134                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2135                                                                  priorXmax))
2136                 {
2137                         ReleaseBuffer(buffer);
2138                         return NULL;
2139                 }
2140
2141                 /*
2142                  * If we get here, the tuple was found but failed SnapshotDirty.
2143                  * Assuming the xmin is either a committed xact or our own xact (as it
2144                  * certainly should be if we're trying to modify the tuple), this must
2145                  * mean that the row was updated or deleted by either a committed xact
2146                  * or our own xact.  If it was deleted, we can ignore it; if it was
2147                  * updated then chain up to the next version and repeat the whole
2148                  * test.
2149                  *
2150                  * As above, it should be safe to examine xmax and t_ctid without the
2151                  * buffer content lock, because they can't be changing.
2152                  */
2153                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2154                 {
2155                         /* deleted, so forget about it */
2156                         ReleaseBuffer(buffer);
2157                         return NULL;
2158                 }
2159
2160                 /* updated, so look at the updated row */
2161                 tuple.t_self = tuple.t_data->t_ctid;
2162                 /* updated row should have xmin matching this xmax */
2163                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2164                 ReleaseBuffer(buffer);
2165                 /* loop back to fetch next in chain */
2166         }
2167
2168         /*
2169          * For UPDATE/DELETE we have to return tid of actual row we're executing
2170          * PQ for.
2171          */
2172         *tid = tuple.t_self;
2173
2174         /*
2175          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2176          */
2177         epq = estate->es_evalPlanQual;
2178         endNode = true;
2179
2180         if (epq != NULL && epq->rti == 0)
2181         {
2182                 /* Top PQ stack entry is idle, so re-use it */
2183                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2184                 epq->rti = rti;
2185                 endNode = false;
2186         }
2187
2188         /*
2189          * If this is request for another RTE - Ra, - then we have to check wasn't
2190          * PlanQual requested for Ra already and if so then Ra' row was updated
2191          * again and we have to re-start old execution for Ra and forget all what
2192          * we done after Ra was suspended. Cool? -:))
2193          */
2194         if (epq != NULL && epq->rti != rti &&
2195                 epq->estate->es_evTuple[rti - 1] != NULL)
2196         {
2197                 do
2198                 {
2199                         evalPlanQual *oldepq;
2200
2201                         /* stop execution */
2202                         EvalPlanQualStop(epq);
2203                         /* pop previous PlanQual from the stack */
2204                         oldepq = epq->next;
2205                         Assert(oldepq && oldepq->rti != 0);
2206                         /* push current PQ to freePQ stack */
2207                         oldepq->free = epq;
2208                         epq = oldepq;
2209                         estate->es_evalPlanQual = epq;
2210                 } while (epq->rti != rti);
2211         }
2212
2213         /*
2214          * If we are requested for another RTE then we have to suspend execution
2215          * of current PlanQual and start execution for new one.
2216          */
2217         if (epq == NULL || epq->rti != rti)
2218         {
2219                 /* try to reuse plan used previously */
2220                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2221
2222                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2223                 {
2224                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2225                         newepq->free = NULL;
2226                         newepq->estate = NULL;
2227                         newepq->planstate = NULL;
2228                 }
2229                 else
2230                 {
2231                         /* recycle previously used PlanQual */
2232                         Assert(newepq->estate == NULL);
2233                         epq->free = NULL;
2234                 }
2235                 /* push current PQ to the stack */
2236                 newepq->next = epq;
2237                 epq = newepq;
2238                 estate->es_evalPlanQual = epq;
2239                 epq->rti = rti;
2240                 endNode = false;
2241         }
2242
2243         Assert(epq->rti == rti);
2244
2245         /*
2246          * Ok - we're requested for the same RTE.  Unfortunately we still have to
2247          * end and restart execution of the plan, because ExecReScan wouldn't
2248          * ensure that upper plan nodes would reset themselves.  We could make
2249          * that work if insertion of the target tuple were integrated with the
2250          * Param mechanism somehow, so that the upper plan nodes know that their
2251          * children's outputs have changed.
2252          *
2253          * Note that the stack of free evalPlanQual nodes is quite useless at the
2254          * moment, since it only saves us from pallocing/releasing the
2255          * evalPlanQual nodes themselves.  But it will be useful once we implement
2256          * ReScan instead of end/restart for re-using PlanQual nodes.
2257          */
2258         if (endNode)
2259         {
2260                 /* stop execution */
2261                 EvalPlanQualStop(epq);
2262         }
2263
2264         /*
2265          * Initialize new recheck query.
2266          *
2267          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2268          * instead copy down changeable state from the top plan (including
2269          * es_result_relation_info, es_junkFilter) and reset locally changeable
2270          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2271          */
2272         EvalPlanQualStart(epq, estate, epq->next);
2273
2274         /*
2275          * free old RTE' tuple, if any, and store target tuple where relation's
2276          * scan node will see it
2277          */
2278         epqstate = epq->estate;
2279         if (epqstate->es_evTuple[rti - 1] != NULL)
2280                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2281         epqstate->es_evTuple[rti - 1] = copyTuple;
2282
2283         return EvalPlanQualNext(estate);
2284 }
2285
/*
 * EvalPlanQualNext -- fetch the next tuple from the currently active
 * PlanQual recheck level.
 *
 * Runs the recheck plan (in its own per-query memory context) until it
 * produces a tuple.  When one level of the PQ stack is exhausted, that
 * level is shut down and popped, and execution continues with the next
 * outer level; returns NULL only when the whole stack is exhausted.
 */
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	/* run the recheck plan in its own EState's query context */
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;			/* resume the outer recheck level */
	}

	return slot;
}
2329
/*
 * EndEvalPlanQual -- shut down the entire PlanQual stack.
 *
 * Stops every still-active recheck level, innermost first, pushing each
 * spent entry onto the freePQ list of its parent for later re-use.  On
 * return the top stack entry is marked idle (rti == 0).  A no-op if the
 * plans were already shut down.
 */
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shutdowned */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
2363
2364 /*
2365  * Start execution of one level of PlanQual.
2366  *
2367  * This is a cut-down version of ExecutorStart(): we copy some state from
2368  * the top-level estate rather than initializing it fresh.
2369  */
2370 static void
2371 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2372 {
2373         EState     *epqstate;
2374         int                     rtsize;
2375         MemoryContext oldcontext;
2376         ListCell   *l;
2377
2378         rtsize = list_length(estate->es_range_table);
2379
2380         epq->estate = epqstate = CreateExecutorState();
2381
2382         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2383
2384         /*
2385          * The epqstates share the top query's copy of unchanging state such as
2386          * the snapshot, rangetable, result-rel info, and external Param info.
2387          * They need their own copies of local state, including a tuple table,
2388          * es_param_exec_vals, etc.
2389          */
2390         epqstate->es_direction = ForwardScanDirection;
2391         epqstate->es_snapshot = estate->es_snapshot;
2392         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2393         epqstate->es_range_table = estate->es_range_table;
2394         epqstate->es_output_cid = estate->es_output_cid;
2395         epqstate->es_result_relations = estate->es_result_relations;
2396         epqstate->es_num_result_relations = estate->es_num_result_relations;
2397         epqstate->es_result_relation_info = estate->es_result_relation_info;
2398         epqstate->es_junkFilter = estate->es_junkFilter;
2399         /* es_trig_target_relations must NOT be copied */
2400         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2401         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2402         epqstate->es_param_list_info = estate->es_param_list_info;
2403         if (estate->es_plannedstmt->nParamExec > 0)
2404                 epqstate->es_param_exec_vals = (ParamExecData *)
2405                         palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2406         epqstate->es_rowMarks = estate->es_rowMarks;
2407         epqstate->es_instrument = estate->es_instrument;
2408         epqstate->es_select_into = estate->es_select_into;
2409         epqstate->es_into_oids = estate->es_into_oids;
2410         epqstate->es_plannedstmt = estate->es_plannedstmt;
2411
2412         /*
2413          * Each epqstate must have its own es_evTupleNull state, but all the stack
2414          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2415          * the value being examined by an outer recheck.
2416          */
2417         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2418         if (priorepq == NULL)
2419                 /* first PQ stack entry */
2420                 epqstate->es_evTuple = (HeapTuple *)
2421                         palloc0(rtsize * sizeof(HeapTuple));
2422         else
2423                 /* later stack entries share the same storage */
2424                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2425
2426         /*
2427          * Create sub-tuple-table; we needn't redo the CountSlots work though.
2428          */
2429         epqstate->es_tupleTable =
2430                 ExecCreateTupleTable(estate->es_tupleTable->size);
2431
2432         /*
2433          * Initialize private state information for each SubPlan.  We must do this
2434          * before running ExecInitNode on the main query tree, since
2435          * ExecInitSubPlan expects to be able to find these entries.
2436          */
2437         Assert(epqstate->es_subplanstates == NIL);
2438         foreach(l, estate->es_plannedstmt->subplans)
2439         {
2440                 Plan       *subplan = (Plan *) lfirst(l);
2441                 PlanState  *subplanstate;
2442
2443                 subplanstate = ExecInitNode(subplan, epqstate, 0);
2444
2445                 epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
2446                                                                                          subplanstate);
2447         }
2448
2449         /*
2450          * Initialize the private state information for all the nodes in the query
2451          * tree.  This opens files, allocates storage and leaves us ready to start
2452          * processing tuples.
2453          */
2454         epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2455
2456         MemoryContextSwitchTo(oldcontext);
2457 }
2458
2459 /*
2460  * End execution of one level of PlanQual.
2461  *
2462  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2463  * of the normal cleanup, but *not* close result relations (which we are
2464  * just sharing from the outer query).  We do, however, have to close any
2465  * trigger target relations that got opened, since those are not shared.
2466  */
2467 static void
2468 EvalPlanQualStop(evalPlanQual *epq)
2469 {
2470         EState     *epqstate = epq->estate;
2471         MemoryContext oldcontext;
2472         ListCell   *l;
2473
2474         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2475
2476         ExecEndNode(epq->planstate);
2477
2478         foreach(l, epqstate->es_subplanstates)
2479         {
2480                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2481
2482                 ExecEndNode(subplanstate);
2483         }
2484
2485         ExecDropTupleTable(epqstate->es_tupleTable, true);
2486         epqstate->es_tupleTable = NULL;
2487
2488         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2489         {
2490                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2491                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2492         }
2493
2494         foreach(l, epqstate->es_trig_target_relations)
2495         {
2496                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2497
2498                 /* Close indices and then the relation itself */
2499                 ExecCloseIndices(resultRelInfo);
2500                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2501         }
2502
2503         MemoryContextSwitchTo(oldcontext);
2504
2505         FreeExecutorState(epqstate);
2506
2507         epq->estate = NULL;
2508         epq->planstate = NULL;
2509 }
2510
2511 /*
2512  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2513  *
2514  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2515  * are looking at a row returned by the EvalPlanQual machinery, we need
2516  * to look at the subsidiary state instead.
2517  */
2518 PlanState *
2519 ExecGetActivePlanTree(QueryDesc *queryDesc)
2520 {
2521         EState     *estate = queryDesc->estate;
2522
2523         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2524                 return estate->es_evalPlanQual->planstate;
2525         else
2526                 return queryDesc->planstate;
2527 }
2528
2529
2530 /*
2531  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2532  *
2533  * We implement SELECT INTO by diverting SELECT's normal output with
2534  * a specialized DestReceiver type.
2535  *
2536  * TODO: remove some of the INTO-specific cruft from EState, and keep
2537  * it in the DestReceiver instead.
2538  */
2539
2540 typedef struct
2541 {
2542         DestReceiver pub;                       /* publicly-known function pointers */
2543         EState     *estate;                     /* EState we are working with */
2544 } DR_intorel;
2545
2546 /*
2547  * OpenIntoRel --- actually create the SELECT INTO target relation
2548  *
2549  * This also replaces QueryDesc->dest with the special DestReceiver for
2550  * SELECT INTO.  We assume that the correct result tuple type has already
2551  * been placed in queryDesc->tupDesc.
2552  */
2553 static void
2554 OpenIntoRel(QueryDesc *queryDesc)
2555 {
2556         IntoClause *into = queryDesc->plannedstmt->intoClause;
2557         EState     *estate = queryDesc->estate;
2558         Relation        intoRelationDesc;
2559         char       *intoName;
2560         Oid                     namespaceId;
2561         Oid                     tablespaceId;
2562         Datum           reloptions;
2563         AclResult       aclresult;
2564         Oid                     intoRelationId;
2565         TupleDesc       tupdesc;
2566         DR_intorel *myState;
2567
2568         Assert(into);
2569
2570         /*
2571          * Check consistency of arguments
2572          */
2573         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2574                 ereport(ERROR,
2575                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2576                                  errmsg("ON COMMIT can only be used on temporary tables")));
2577
2578         /*
2579          * Find namespace to create in, check its permissions
2580          */
2581         intoName = into->rel->relname;
2582         namespaceId = RangeVarGetCreationNamespace(into->rel);
2583
2584         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2585                                                                           ACL_CREATE);
2586         if (aclresult != ACLCHECK_OK)
2587                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2588                                            get_namespace_name(namespaceId));
2589
2590         /*
2591          * Select tablespace to use.  If not specified, use default tablespace
2592          * (which may in turn default to database's default).
2593          */
2594         if (into->tableSpaceName)
2595         {
2596                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2597                 if (!OidIsValid(tablespaceId))
2598                         ereport(ERROR,
2599                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2600                                          errmsg("tablespace \"%s\" does not exist",
2601                                                         into->tableSpaceName)));
2602         }
2603         else
2604         {
2605                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2606                 /* note InvalidOid is OK in this case */
2607         }
2608
2609         /* Check permissions except when using the database's default space */
2610         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2611         {
2612                 AclResult       aclresult;
2613
2614                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2615                                                                                    ACL_CREATE);
2616
2617                 if (aclresult != ACLCHECK_OK)
2618                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2619                                                    get_tablespace_name(tablespaceId));
2620         }
2621
2622         /* Parse and validate any reloptions */
2623         reloptions = transformRelOptions((Datum) 0,
2624                                                                          into->options,
2625                                                                          true,
2626                                                                          false);
2627         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2628
2629         /* Copy the tupdesc because heap_create_with_catalog modifies it */
2630         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2631
2632         /* Now we can actually create the new relation */
2633         intoRelationId = heap_create_with_catalog(intoName,
2634                                                                                           namespaceId,
2635                                                                                           tablespaceId,
2636                                                                                           InvalidOid,
2637                                                                                           GetUserId(),
2638                                                                                           tupdesc,
2639                                                                                           NIL,
2640                                                                                           RELKIND_RELATION,
2641                                                                                           false,
2642                                                                                           true,
2643                                                                                           0,
2644                                                                                           into->onCommit,
2645                                                                                           reloptions,
2646                                                                                           allowSystemTableMods);
2647
2648         FreeTupleDesc(tupdesc);
2649
2650         /*
2651          * Advance command counter so that the newly-created relation's catalog
2652          * tuples will be visible to heap_open.
2653          */
2654         CommandCounterIncrement();
2655
2656         /*
2657          * If necessary, create a TOAST table for the INTO relation. Note that
2658          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2659          * the TOAST table will be visible for insertion.
2660          */
2661         AlterTableCreateToastTable(intoRelationId);
2662
2663         /*
2664          * And open the constructed table for writing.
2665          */
2666         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2667
2668         /* use_wal off requires rd_targblock be initially invalid */
2669         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2670
2671         /*
2672          * We can skip WAL-logging the insertions, unless PITR is in use.
2673          */
2674         estate->es_into_relation_use_wal = XLogArchivingActive();
2675         estate->es_into_relation_descriptor = intoRelationDesc;
2676
2677         /*
2678          * Now replace the query's DestReceiver with one for SELECT INTO
2679          */
2680         queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
2681         myState = (DR_intorel *) queryDesc->dest;
2682         Assert(myState->pub.mydest == DestIntoRel);
2683         myState->estate = estate;
2684 }
2685
2686 /*
2687  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2688  */
2689 static void
2690 CloseIntoRel(QueryDesc *queryDesc)
2691 {
2692         EState     *estate = queryDesc->estate;
2693
2694         /* OpenIntoRel might never have gotten called */
2695         if (estate->es_into_relation_descriptor)
2696         {
2697                 /* If we skipped using WAL, must heap_sync before commit */
2698                 if (!estate->es_into_relation_use_wal)
2699                         heap_sync(estate->es_into_relation_descriptor);
2700
2701                 /* close rel, but keep lock until commit */
2702                 heap_close(estate->es_into_relation_descriptor, NoLock);
2703
2704                 estate->es_into_relation_descriptor = NULL;
2705         }
2706 }
2707
2708 /*
2709  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2710  *
2711  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2712  * we just leave the private fields empty here.  OpenIntoRel will
2713  * fill them in.
2714  */
2715 DestReceiver *
2716 CreateIntoRelDestReceiver(void)
2717 {
2718         DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2719
2720         self->pub.receiveSlot = intorel_receive;
2721         self->pub.rStartup = intorel_startup;
2722         self->pub.rShutdown = intorel_shutdown;
2723         self->pub.rDestroy = intorel_destroy;
2724         self->pub.mydest = DestIntoRel;
2725
2726         self->estate = NULL;
2727
2728         return (DestReceiver *) self;
2729 }
2730
2731 /*
2732  * intorel_startup --- executor startup
2733  */
2734 static void
2735 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2736 {
2737         /* no-op */
2738 }
2739
2740 /*
2741  * intorel_receive --- receive one tuple
2742  */
2743 static void
2744 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2745 {
2746         DR_intorel *myState = (DR_intorel *) self;
2747         EState     *estate = myState->estate;
2748         HeapTuple       tuple;
2749
2750         tuple = ExecCopySlotTuple(slot);
2751
2752         heap_insert(estate->es_into_relation_descriptor,
2753                                 tuple,
2754                                 estate->es_output_cid,
2755                                 estate->es_into_relation_use_wal,
2756                                 false);                 /* never any point in using FSM */
2757
2758         /* We know this is a newly created relation, so there are no indexes */
2759
2760         heap_freetuple(tuple);
2761
2762         IncrAppended();
2763 }
2764
2765 /*
2766  * intorel_shutdown --- executor end
2767  */
2768 static void
2769 intorel_shutdown(DestReceiver *self)
2770 {
2771         /* no-op */
2772 }
2773
2774 /*
2775  * intorel_destroy --- release DestReceiver object
2776  */
2777 static void
2778 intorel_destroy(DestReceiver *self)
2779 {
2780         pfree(self);
2781 }