/*
 * NOTE(review): the three lines below this comment's position were non-C
 * residue from a gitweb page capture (page title, commit subject, and
 * breadcrumb); they are preserved here as a comment so the file remains
 * valid C:
 *
 *   granicus.if.org Git - postgresql/blob - src/backend/executor/execMain.c
 *   Make SELECT FOR UPDATE/SHARE work on inheritance trees, by having the plan
 *   [postgresql] / src / backend / executor / execMain.c
 */
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.316 2008/11/15 19:43:45 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "nodes/nodeFuncs.h"
49 #include "optimizer/clauses.h"
50 #include "parser/parse_clause.h"
51 #include "parser/parsetree.h"
52 #include "storage/bufmgr.h"
53 #include "storage/lmgr.h"
54 #include "storage/smgr.h"
55 #include "utils/acl.h"
56 #include "utils/builtins.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/snapmgr.h"
60 #include "utils/tqual.h"
61
62
/*
 * Hook for plugins to get control in ExecutorRun(): when non-NULL,
 * ExecutorRun() calls the hook instead of standard_ExecutorRun().
 */
ExecutorRun_hook_type ExecutorRun_hook = NULL;
65
/*
 * Per-level state for the EvalPlanQual machinery (see EvalPlanQualStart/
 * EvalPlanQualStop below).  Entries are chained to form a stack of active
 * rechecks plus a freelist of inactive entries for reuse.
 */
typedef struct evalPlanQual
{
	Index		rti;			/* range table index this recheck level applies
								 * to (NOTE(review): usage not visible in this
								 * chunk — confirm) */
	EState	   *estate;			/* separate executor state for this recheck,
								 * set up by EvalPlanQualStart */
	PlanState  *planstate;		/* plan tree initialized under that estate */
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
74
/* decls for local routines only used within this module */

/* plan startup and shutdown */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
static void ExecEndPlan(PlanState *planstate, EState *estate);

/* main execution loop and per-row operations */
static void ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest);

/* EvalPlanQual support */
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);

/* permissions and read-only-transaction checks */
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);

/* more EvalPlanQual support */
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
				  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);

/* SELECT INTO support: target relation plus its DestReceiver callbacks */
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */
115
116
117 /* ----------------------------------------------------------------
118  *              ExecutorStart
119  *
120  *              This routine must be called at the beginning of any execution of any
121  *              query plan
122  *
123  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
124  * clear why we bother to separate the two functions, but...).  The tupDesc
125  * field of the QueryDesc is filled in to describe the tuples that will be
126  * returned, and the internal fields (estate and planstate) are set up.
127  *
128  * eflags contains flag bits as described in executor.h.
129  *
130  * NB: the CurrentMemoryContext when this is called will become the parent
131  * of the per-query context used for this Executor invocation.
132  * ----------------------------------------------------------------
133  */
134 void
135 ExecutorStart(QueryDesc *queryDesc, int eflags)
136 {
137         EState     *estate;
138         MemoryContext oldcontext;
139
140         /* sanity checks: queryDesc must not be started already */
141         Assert(queryDesc != NULL);
142         Assert(queryDesc->estate == NULL);
143
144         /*
145          * If the transaction is read-only, we need to check if any writes are
146          * planned to non-temporary tables.  EXPLAIN is considered read-only.
147          */
148         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
149                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
150
151         /*
152          * Build EState, switch into per-query memory context for startup.
153          */
154         estate = CreateExecutorState();
155         queryDesc->estate = estate;
156
157         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
158
159         /*
160          * Fill in parameters, if any, from queryDesc
161          */
162         estate->es_param_list_info = queryDesc->params;
163
164         if (queryDesc->plannedstmt->nParamExec > 0)
165                 estate->es_param_exec_vals = (ParamExecData *)
166                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
167
168         /*
169          * If non-read-only query, set the command ID to mark output tuples with
170          */
171         switch (queryDesc->operation)
172         {
173                 case CMD_SELECT:
174                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
175                         if (queryDesc->plannedstmt->intoClause != NULL ||
176                                 queryDesc->plannedstmt->rowMarks != NIL)
177                                 estate->es_output_cid = GetCurrentCommandId(true);
178                         break;
179
180                 case CMD_INSERT:
181                 case CMD_DELETE:
182                 case CMD_UPDATE:
183                         estate->es_output_cid = GetCurrentCommandId(true);
184                         break;
185
186                 default:
187                         elog(ERROR, "unrecognized operation code: %d",
188                                  (int) queryDesc->operation);
189                         break;
190         }
191
192         /*
193          * Copy other important information into the EState
194          */
195         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
196         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
197         estate->es_instrument = queryDesc->doInstrument;
198
199         /*
200          * Initialize the plan state tree
201          */
202         InitPlan(queryDesc, eflags);
203
204         MemoryContextSwitchTo(oldcontext);
205 }
206
207 /* ----------------------------------------------------------------
208  *              ExecutorRun
209  *
210  *              This is the main routine of the executor module. It accepts
211  *              the query descriptor from the traffic cop and executes the
212  *              query plan.
213  *
214  *              ExecutorStart must have been called already.
215  *
216  *              If direction is NoMovementScanDirection then nothing is done
217  *              except to start up/shut down the destination.  Otherwise,
218  *              we retrieve up to 'count' tuples in the specified direction.
219  *
220  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
221  *              completion.
222  *
223  *              There is no return value, but output tuples (if any) are sent to
224  *              the destination receiver specified in the QueryDesc; and the number
225  *              of tuples processed at the top level can be found in
226  *              estate->es_processed.
227  *
228  *              We provide a function hook variable that lets loadable plugins
229  *              get control when ExecutorRun is called.  Such a plugin would
230  *              normally call standard_ExecutorRun().
231  *
232  * ----------------------------------------------------------------
233  */
234 void
235 ExecutorRun(QueryDesc *queryDesc,
236                         ScanDirection direction, long count)
237 {
238         if (ExecutorRun_hook)
239                 (*ExecutorRun_hook) (queryDesc, direction, count);
240         else
241                 standard_ExecutorRun(queryDesc, direction, count);
242 }
243
244 void
245 standard_ExecutorRun(QueryDesc *queryDesc,
246                                          ScanDirection direction, long count)
247 {
248         EState     *estate;
249         CmdType         operation;
250         DestReceiver *dest;
251         bool            sendTuples;
252         MemoryContext oldcontext;
253
254         /* sanity checks */
255         Assert(queryDesc != NULL);
256
257         estate = queryDesc->estate;
258
259         Assert(estate != NULL);
260
261         /*
262          * Switch into per-query memory context
263          */
264         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
265
266         /*
267          * extract information from the query descriptor and the query feature.
268          */
269         operation = queryDesc->operation;
270         dest = queryDesc->dest;
271
272         /*
273          * startup tuple receiver, if we will be emitting tuples
274          */
275         estate->es_processed = 0;
276         estate->es_lastoid = InvalidOid;
277
278         sendTuples = (operation == CMD_SELECT ||
279                                   queryDesc->plannedstmt->returningLists);
280
281         if (sendTuples)
282                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
283
284         /*
285          * run plan
286          */
287         if (!ScanDirectionIsNoMovement(direction))
288                 ExecutePlan(estate,
289                                         queryDesc->planstate,
290                                         operation,
291                                         count,
292                                         direction,
293                                         dest);
294
295         /*
296          * shutdown tuple receiver, if we started it
297          */
298         if (sendTuples)
299                 (*dest->rShutdown) (dest);
300
301         MemoryContextSwitchTo(oldcontext);
302 }
303
304 /* ----------------------------------------------------------------
305  *              ExecutorEnd
306  *
307  *              This routine must be called at the end of execution of any
308  *              query plan
309  * ----------------------------------------------------------------
310  */
311 void
312 ExecutorEnd(QueryDesc *queryDesc)
313 {
314         EState     *estate;
315         MemoryContext oldcontext;
316
317         /* sanity checks */
318         Assert(queryDesc != NULL);
319
320         estate = queryDesc->estate;
321
322         Assert(estate != NULL);
323
324         /*
325          * Switch into per-query memory context to run ExecEndPlan
326          */
327         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
328
329         ExecEndPlan(queryDesc->planstate, estate);
330
331         /*
332          * Close the SELECT INTO relation if any
333          */
334         if (estate->es_select_into)
335                 CloseIntoRel(queryDesc);
336
337         /* do away with our snapshots */
338         UnregisterSnapshot(estate->es_snapshot);
339         UnregisterSnapshot(estate->es_crosscheck_snapshot);
340
341         /*
342          * Must switch out of context before destroying it
343          */
344         MemoryContextSwitchTo(oldcontext);
345
346         /*
347          * Release EState and per-query memory context.  This should release
348          * everything the executor has allocated.
349          */
350         FreeExecutorState(estate);
351
352         /* Reset queryDesc fields that no longer point to anything */
353         queryDesc->tupDesc = NULL;
354         queryDesc->estate = NULL;
355         queryDesc->planstate = NULL;
356 }
357
358 /* ----------------------------------------------------------------
359  *              ExecutorRewind
360  *
361  *              This routine may be called on an open queryDesc to rewind it
362  *              to the start.
363  * ----------------------------------------------------------------
364  */
365 void
366 ExecutorRewind(QueryDesc *queryDesc)
367 {
368         EState     *estate;
369         MemoryContext oldcontext;
370
371         /* sanity checks */
372         Assert(queryDesc != NULL);
373
374         estate = queryDesc->estate;
375
376         Assert(estate != NULL);
377
378         /* It's probably not sensible to rescan updating queries */
379         Assert(queryDesc->operation == CMD_SELECT);
380
381         /*
382          * Switch into per-query memory context
383          */
384         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
385
386         /*
387          * rescan plan
388          */
389         ExecReScan(queryDesc->planstate, NULL);
390
391         MemoryContextSwitchTo(oldcontext);
392 }
393
394
395 /*
396  * ExecCheckRTPerms
397  *              Check access permissions for all relations listed in a range table.
398  */
399 static void
400 ExecCheckRTPerms(List *rangeTable)
401 {
402         ListCell   *l;
403
404         foreach(l, rangeTable)
405         {
406                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
407         }
408 }
409
410 /*
411  * ExecCheckRTEPerms
412  *              Check access permissions for a single RTE.
413  */
414 static void
415 ExecCheckRTEPerms(RangeTblEntry *rte)
416 {
417         AclMode         requiredPerms;
418         Oid                     relOid;
419         Oid                     userid;
420
421         /*
422          * Only plain-relation RTEs need to be checked here.  Function RTEs are
423          * checked by init_fcache when the function is prepared for execution.
424          * Join, subquery, and special RTEs need no checks.
425          */
426         if (rte->rtekind != RTE_RELATION)
427                 return;
428
429         /*
430          * No work if requiredPerms is empty.
431          */
432         requiredPerms = rte->requiredPerms;
433         if (requiredPerms == 0)
434                 return;
435
436         relOid = rte->relid;
437
438         /*
439          * userid to check as: current user unless we have a setuid indication.
440          *
441          * Note: GetUserId() is presently fast enough that there's no harm in
442          * calling it separately for each RTE.  If that stops being true, we could
443          * call it once in ExecCheckRTPerms and pass the userid down from there.
444          * But for now, no need for the extra clutter.
445          */
446         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
447
448         /*
449          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
450          */
451         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
452                 != requiredPerms)
453                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
454                                            get_rel_name(relOid));
455 }
456
457 /*
458  * Check that the query does not imply any writes to non-temp tables.
459  */
460 static void
461 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
462 {
463         ListCell   *l;
464
465         /*
466          * CREATE TABLE AS or SELECT INTO?
467          *
468          * XXX should we allow this if the destination is temp?
469          */
470         if (plannedstmt->intoClause != NULL)
471                 goto fail;
472
473         /* Fail if write permissions are requested on any non-temp table */
474         foreach(l, plannedstmt->rtable)
475         {
476                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
477
478                 if (rte->rtekind != RTE_RELATION)
479                         continue;
480
481                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
482                         continue;
483
484                 if (isTempNamespace(get_rel_namespace(rte->relid)))
485                         continue;
486
487                 goto fail;
488         }
489
490         return;
491
492 fail:
493         ereport(ERROR,
494                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
495                          errmsg("transaction is read-only")));
496 }
497
498
499 /* ----------------------------------------------------------------
500  *              InitPlan
501  *
502  *              Initializes the query plan: open files, allocate storage
503  *              and start up the rule manager
504  * ----------------------------------------------------------------
505  */
506 static void
507 InitPlan(QueryDesc *queryDesc, int eflags)
508 {
509         CmdType         operation = queryDesc->operation;
510         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
511         Plan       *plan = plannedstmt->planTree;
512         List       *rangeTable = plannedstmt->rtable;
513         EState     *estate = queryDesc->estate;
514         PlanState  *planstate;
515         TupleDesc       tupType;
516         ListCell   *l;
517         int                     i;
518
519         /*
520          * Do permissions checks
521          */
522         ExecCheckRTPerms(rangeTable);
523
524         /*
525          * initialize the node's execution state
526          */
527         estate->es_range_table = rangeTable;
528
529         /*
530          * initialize result relation stuff
531          */
532         if (plannedstmt->resultRelations)
533         {
534                 List       *resultRelations = plannedstmt->resultRelations;
535                 int                     numResultRelations = list_length(resultRelations);
536                 ResultRelInfo *resultRelInfos;
537                 ResultRelInfo *resultRelInfo;
538
539                 resultRelInfos = (ResultRelInfo *)
540                         palloc(numResultRelations * sizeof(ResultRelInfo));
541                 resultRelInfo = resultRelInfos;
542                 foreach(l, resultRelations)
543                 {
544                         Index           resultRelationIndex = lfirst_int(l);
545                         Oid                     resultRelationOid;
546                         Relation        resultRelation;
547
548                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
549                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
550                         InitResultRelInfo(resultRelInfo,
551                                                           resultRelation,
552                                                           resultRelationIndex,
553                                                           operation,
554                                                           estate->es_instrument);
555                         resultRelInfo++;
556                 }
557                 estate->es_result_relations = resultRelInfos;
558                 estate->es_num_result_relations = numResultRelations;
559                 /* Initialize to first or only result rel */
560                 estate->es_result_relation_info = resultRelInfos;
561         }
562         else
563         {
564                 /*
565                  * if no result relation, then set state appropriately
566                  */
567                 estate->es_result_relations = NULL;
568                 estate->es_num_result_relations = 0;
569                 estate->es_result_relation_info = NULL;
570         }
571
572         /*
573          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
574          * flag appropriately so that the plan tree will be initialized with the
575          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
576          */
577         estate->es_select_into = false;
578         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
579         {
580                 estate->es_select_into = true;
581                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
582         }
583
584         /*
585          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
586          * initialize the plan tree, else we'd be doing a lock upgrade. While we
587          * are at it, build the ExecRowMark list.
588          */
589         estate->es_rowMarks = NIL;
590         foreach(l, plannedstmt->rowMarks)
591         {
592                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
593                 Oid                     relid;
594                 Relation        relation;
595                 ExecRowMark *erm;
596
597                 /* ignore "parent" rowmarks; they are irrelevant at runtime */
598                 if (rc->isParent)
599                         continue;
600
601                 relid = getrelid(rc->rti, rangeTable);
602                 relation = heap_open(relid, RowShareLock);
603                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
604                 erm->relation = relation;
605                 erm->rti = rc->rti;
606                 erm->prti = rc->prti;
607                 erm->forUpdate = rc->forUpdate;
608                 erm->noWait = rc->noWait;
609                 /* We'll locate the junk attrs below */
610                 erm->ctidAttNo = InvalidAttrNumber;
611                 erm->toidAttNo = InvalidAttrNumber;
612                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
613         }
614
615         /*
616          * Initialize the executor "tuple" table.  We need slots for all the plan
617          * nodes, plus possibly output slots for the junkfilter(s). At this point
618          * we aren't sure if we need junkfilters, so just add slots for them
619          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
620          * trigger output tuples.  Also, one for RETURNING-list evaluation.
621          */
622         {
623                 int                     nSlots;
624
625                 /* Slots for the main plan tree */
626                 nSlots = ExecCountSlotsNode(plan);
627                 /* Add slots for subplans and initplans */
628                 foreach(l, plannedstmt->subplans)
629                 {
630                         Plan       *subplan = (Plan *) lfirst(l);
631
632                         nSlots += ExecCountSlotsNode(subplan);
633                 }
634                 /* Add slots for junkfilter(s) */
635                 if (plannedstmt->resultRelations != NIL)
636                         nSlots += list_length(plannedstmt->resultRelations);
637                 else
638                         nSlots += 1;
639                 if (operation != CMD_SELECT)
640                         nSlots++;                       /* for es_trig_tuple_slot */
641                 if (plannedstmt->returningLists)
642                         nSlots++;                       /* for RETURNING projection */
643
644                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
645
646                 if (operation != CMD_SELECT)
647                         estate->es_trig_tuple_slot =
648                                 ExecAllocTableSlot(estate->es_tupleTable);
649         }
650
651         /* mark EvalPlanQual not active */
652         estate->es_plannedstmt = plannedstmt;
653         estate->es_evalPlanQual = NULL;
654         estate->es_evTupleNull = NULL;
655         estate->es_evTuple = NULL;
656         estate->es_useEvalPlan = false;
657
658         /*
659          * Initialize private state information for each SubPlan.  We must do this
660          * before running ExecInitNode on the main query tree, since
661          * ExecInitSubPlan expects to be able to find these entries.
662          */
663         Assert(estate->es_subplanstates == NIL);
664         i = 1;                                          /* subplan indices count from 1 */
665         foreach(l, plannedstmt->subplans)
666         {
667                 Plan       *subplan = (Plan *) lfirst(l);
668                 PlanState  *subplanstate;
669                 int                     sp_eflags;
670
671                 /*
672                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
673                  * it is a parameterless subplan (not initplan), we suggest that it be
674                  * prepared to handle REWIND efficiently; otherwise there is no need.
675                  */
676                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
677                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
678                         sp_eflags |= EXEC_FLAG_REWIND;
679
680                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
681
682                 estate->es_subplanstates = lappend(estate->es_subplanstates,
683                                                                                    subplanstate);
684
685                 i++;
686         }
687
688         /*
689          * Initialize the private state information for all the nodes in the query
690          * tree.  This opens files, allocates storage and leaves us ready to start
691          * processing tuples.
692          */
693         planstate = ExecInitNode(plan, estate, eflags);
694
695         /*
696          * Get the tuple descriptor describing the type of tuples to return. (this
697          * is especially important if we are creating a relation with "SELECT
698          * INTO")
699          */
700         tupType = ExecGetResultType(planstate);
701
702         /*
703          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
704          * filter if there are any junk attrs in the tlist.  UPDATE and
705          * DELETE always need a filter, since there's always a junk 'ctid'
706          * attribute present --- no need to look first.
707          *
708          * This section of code is also a convenient place to verify that the
709          * output of an INSERT or UPDATE matches the target table(s).
710          */
711         {
712                 bool            junk_filter_needed = false;
713                 ListCell   *tlist;
714
715                 switch (operation)
716                 {
717                         case CMD_SELECT:
718                         case CMD_INSERT:
719                                 foreach(tlist, plan->targetlist)
720                                 {
721                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
722
723                                         if (tle->resjunk)
724                                         {
725                                                 junk_filter_needed = true;
726                                                 break;
727                                         }
728                                 }
729                                 break;
730                         case CMD_UPDATE:
731                         case CMD_DELETE:
732                                 junk_filter_needed = true;
733                                 break;
734                         default:
735                                 break;
736                 }
737
738                 if (junk_filter_needed)
739                 {
740                         /*
741                          * If there are multiple result relations, each one needs its own
742                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
743                          * we can't be fooled by some needing a filter and some not.
744                          */
745                         if (list_length(plannedstmt->resultRelations) > 1)
746                         {
747                                 PlanState **appendplans;
748                                 int                     as_nplans;
749                                 ResultRelInfo *resultRelInfo;
750
751                                 /* Top plan had better be an Append here. */
752                                 Assert(IsA(plan, Append));
753                                 Assert(((Append *) plan)->isTarget);
754                                 Assert(IsA(planstate, AppendState));
755                                 appendplans = ((AppendState *) planstate)->appendplans;
756                                 as_nplans = ((AppendState *) planstate)->as_nplans;
757                                 Assert(as_nplans == estate->es_num_result_relations);
758                                 resultRelInfo = estate->es_result_relations;
759                                 for (i = 0; i < as_nplans; i++)
760                                 {
761                                         PlanState  *subplan = appendplans[i];
762                                         JunkFilter *j;
763
764                                         if (operation == CMD_UPDATE)
765                                                 ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
766                                                                                         subplan->plan->targetlist);
767
768                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
769                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
770                                                                   ExecAllocTableSlot(estate->es_tupleTable));
771
772                                         /*
773                                          * Since it must be UPDATE/DELETE, there had better be a
774                                          * "ctid" junk attribute in the tlist ... but ctid could
775                                          * be at a different resno for each result relation. We
776                                          * look up the ctid resnos now and save them in the
777                                          * junkfilters.
778                                          */
779                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
780                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
781                                                 elog(ERROR, "could not find junk ctid column");
782                                         resultRelInfo->ri_junkFilter = j;
783                                         resultRelInfo++;
784                                 }
785
786                                 /*
787                                  * Set active junkfilter too; at this point ExecInitAppend has
788                                  * already selected an active result relation...
789                                  */
790                                 estate->es_junkFilter =
791                                         estate->es_result_relation_info->ri_junkFilter;
792
793                                 /*
794                                  * We currently can't support rowmarks in this case, because
795                                  * the associated junk CTIDs might have different resnos in
796                                  * different subplans.
797                                  */
798                                 if (estate->es_rowMarks)
799                                         ereport(ERROR,
800                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
801                                                          errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
802                         }
803                         else
804                         {
805                                 /* Normal case with just one JunkFilter */
806                                 JunkFilter *j;
807
808                                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
809                                         ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
810                                                                                 planstate->plan->targetlist);
811
812                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
813                                                                            tupType->tdhasoid,
814                                                                   ExecAllocTableSlot(estate->es_tupleTable));
815                                 estate->es_junkFilter = j;
816                                 if (estate->es_result_relation_info)
817                                         estate->es_result_relation_info->ri_junkFilter = j;
818
819                                 if (operation == CMD_SELECT)
820                                 {
821                                         /* For SELECT, want to return the cleaned tuple type */
822                                         tupType = j->jf_cleanTupType;
823                                 }
824                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
825                                 {
826                                         /* For UPDATE/DELETE, find the ctid junk attr now */
827                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
828                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
829                                                 elog(ERROR, "could not find junk ctid column");
830                                 }
831
832                                 /* For SELECT FOR UPDATE/SHARE, find the junk attrs now */
833                                 foreach(l, estate->es_rowMarks)
834                                 {
835                                         ExecRowMark *erm = (ExecRowMark *) lfirst(l);
836                                         char            resname[32];
837
838                                         /* always need the ctid */
839                                         snprintf(resname, sizeof(resname), "ctid%u",
840                                                          erm->prti);
841                                         erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
842                                         if (!AttributeNumberIsValid(erm->ctidAttNo))
843                                                 elog(ERROR, "could not find junk \"%s\" column",
844                                                          resname);
845                                         /* if child relation, need tableoid too */
846                                         if (erm->rti != erm->prti)
847                                         {
848                                                 snprintf(resname, sizeof(resname), "tableoid%u",
849                                                                  erm->prti);
850                                                 erm->toidAttNo = ExecFindJunkAttribute(j, resname);
851                                                 if (!AttributeNumberIsValid(erm->toidAttNo))
852                                                         elog(ERROR, "could not find junk \"%s\" column",
853                                                                  resname);
854                                         }
855                                 }
856                         }
857                 }
858                 else
859                 {
860                         if (operation == CMD_INSERT)
861                                 ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
862                                                                         planstate->plan->targetlist);
863
864                         estate->es_junkFilter = NULL;
865                         if (estate->es_rowMarks)
866                                 elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
867                 }
868         }
869
870         /*
871          * Initialize RETURNING projections if needed.
872          */
873         if (plannedstmt->returningLists)
874         {
875                 TupleTableSlot *slot;
876                 ExprContext *econtext;
877                 ResultRelInfo *resultRelInfo;
878
879                 /*
880                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
881                  * We assume all the sublists will generate the same output tupdesc.
882                  */
883                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
884                                                                  false);
885
886                 /* Set up a slot for the output of the RETURNING projection(s) */
887                 slot = ExecAllocTableSlot(estate->es_tupleTable);
888                 ExecSetSlotDescriptor(slot, tupType);
889                 /* Need an econtext too */
890                 econtext = CreateExprContext(estate);
891
892                 /*
893                  * Build a projection for each result rel.      Note that any SubPlans in
894                  * the RETURNING lists get attached to the topmost plan node.
895                  */
896                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
897                 resultRelInfo = estate->es_result_relations;
898                 foreach(l, plannedstmt->returningLists)
899                 {
900                         List       *rlist = (List *) lfirst(l);
901                         List       *rliststate;
902
903                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
904                         resultRelInfo->ri_projectReturning =
905                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
906                                                                          resultRelInfo->ri_RelationDesc->rd_att);
907                         resultRelInfo++;
908                 }
909         }
910
911         queryDesc->tupDesc = tupType;
912         queryDesc->planstate = planstate;
913
914         /*
915          * If doing SELECT INTO, initialize the "into" relation.  We must wait
916          * till now so we have the "clean" result tuple type to create the new
917          * table from.
918          *
919          * If EXPLAIN, skip creating the "into" relation.
920          */
921         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
922                 OpenIntoRel(queryDesc);
923 }
924
925 /*
926  * Initialize ResultRelInfo data for one result relation
927  */
928 void
929 InitResultRelInfo(ResultRelInfo *resultRelInfo,
930                                   Relation resultRelationDesc,
931                                   Index resultRelationIndex,
932                                   CmdType operation,
933                                   bool doInstrument)
934 {
935         /*
936          * Check valid relkind ... parser and/or planner should have noticed this
937          * already, but let's make sure.
938          */
939         switch (resultRelationDesc->rd_rel->relkind)
940         {
941                 case RELKIND_RELATION:
942                         /* OK */
943                         break;
944                 case RELKIND_SEQUENCE:
945                         ereport(ERROR,
946                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
947                                          errmsg("cannot change sequence \"%s\"",
948                                                         RelationGetRelationName(resultRelationDesc))));
949                         break;
950                 case RELKIND_TOASTVALUE:
951                         ereport(ERROR,
952                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
953                                          errmsg("cannot change TOAST relation \"%s\"",
954                                                         RelationGetRelationName(resultRelationDesc))));
955                         break;
956                 case RELKIND_VIEW:
957                         ereport(ERROR,
958                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
959                                          errmsg("cannot change view \"%s\"",
960                                                         RelationGetRelationName(resultRelationDesc))));
961                         break;
962                 default:
963                         ereport(ERROR,
964                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
965                                          errmsg("cannot change relation \"%s\"",
966                                                         RelationGetRelationName(resultRelationDesc))));
967                         break;
968         }
969
970         /* OK, fill in the node */
971         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
972         resultRelInfo->type = T_ResultRelInfo;
973         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
974         resultRelInfo->ri_RelationDesc = resultRelationDesc;
975         resultRelInfo->ri_NumIndices = 0;
976         resultRelInfo->ri_IndexRelationDescs = NULL;
977         resultRelInfo->ri_IndexRelationInfo = NULL;
978         /* make a copy so as not to depend on relcache info not changing... */
979         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
980         if (resultRelInfo->ri_TrigDesc)
981         {
982                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
983
984                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
985                         palloc0(n * sizeof(FmgrInfo));
986                 if (doInstrument)
987                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
988                 else
989                         resultRelInfo->ri_TrigInstrument = NULL;
990         }
991         else
992         {
993                 resultRelInfo->ri_TrigFunctions = NULL;
994                 resultRelInfo->ri_TrigInstrument = NULL;
995         }
996         resultRelInfo->ri_ConstraintExprs = NULL;
997         resultRelInfo->ri_junkFilter = NULL;
998         resultRelInfo->ri_projectReturning = NULL;
999
1000         /*
1001          * If there are indices on the result relation, open them and save
1002          * descriptors in the result relation info, so that we can add new index
1003          * entries for the tuples we add/update.  We need not do this for a
1004          * DELETE, however, since deletion doesn't affect indexes.
1005          */
1006         if (resultRelationDesc->rd_rel->relhasindex &&
1007                 operation != CMD_DELETE)
1008                 ExecOpenIndices(resultRelInfo);
1009 }
1010
1011 /*
1012  * Verify that the tuples to be produced by INSERT or UPDATE match the
1013  * target relation's rowtype
1014  *
1015  * We do this to guard against stale plans.  If plan invalidation is
1016  * functioning properly then we should never get a failure here, but better
1017  * safe than sorry.  Note that this is called after we have obtained lock
1018  * on the target rel, so the rowtype can't change underneath us.
1019  *
1020  * The plan output is represented by its targetlist, because that makes
1021  * handling the dropped-column case easier.
1022  */
1023 static void
1024 ExecCheckPlanOutput(Relation resultRel, List *targetList)
1025 {
1026         TupleDesc       resultDesc = RelationGetDescr(resultRel);
1027         int                     attno = 0;
1028         ListCell   *lc;
1029
1030         foreach(lc, targetList)
1031         {
1032                 TargetEntry *tle = (TargetEntry *) lfirst(lc);
1033                 Form_pg_attribute attr;
1034
1035                 if (tle->resjunk)
1036                         continue;                       /* ignore junk tlist items */
1037
1038                 if (attno >= resultDesc->natts)
1039                         ereport(ERROR,
1040                                         (errcode(ERRCODE_DATATYPE_MISMATCH),
1041                                          errmsg("table row type and query-specified row type do not match"),
1042                                          errdetail("Query has too many columns.")));
1043                 attr = resultDesc->attrs[attno++];
1044
1045                 if (!attr->attisdropped)
1046                 {
1047                         /* Normal case: demand type match */
1048                         if (exprType((Node *) tle->expr) != attr->atttypid)
1049                                 ereport(ERROR,
1050                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1051                                                  errmsg("table row type and query-specified row type do not match"),
1052                                                  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
1053                                                                    format_type_be(attr->atttypid),
1054                                                                    attno,
1055                                                                    format_type_be(exprType((Node *) tle->expr)))));
1056                 }
1057                 else
1058                 {
1059                         /*
1060                          * For a dropped column, we can't check atttypid (it's likely 0).
1061                          * In any case the planner has most likely inserted an INT4 null.
1062                          * What we insist on is just *some* NULL constant.
1063                          */
1064                         if (!IsA(tle->expr, Const) ||
1065                                 !((Const *) tle->expr)->constisnull)
1066                                 ereport(ERROR,
1067                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1068                                                  errmsg("table row type and query-specified row type do not match"),
1069                                                  errdetail("Query provides a value for a dropped column at ordinal position %d.",
1070                                                                    attno)));
1071                 }
1072         }
1073         if (attno != resultDesc->natts)
1074                 ereport(ERROR,
1075                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1076                                  errmsg("table row type and query-specified row type do not match"),
1077                                  errdetail("Query has too few columns.")));
1078 }
1079
1080 /*
1081  *              ExecGetTriggerResultRel
1082  *
1083  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1084  * triggers are fired on one of the result relations of the query, and so
1085  * we can just return a member of the es_result_relations array.  (Note: in
1086  * self-join situations there might be multiple members with the same OID;
1087  * if so it doesn't matter which one we pick.)  However, it is sometimes
1088  * necessary to fire triggers on other relations; this happens mainly when an
1089  * RI update trigger queues additional triggers on other relations, which will
1090  * be processed in the context of the outer query.      For efficiency's sake,
1091  * we want to have a ResultRelInfo for those triggers too; that can avoid
1092  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
1093  * ANALYZE to report the runtimes of such triggers.)  So we make additional
1094  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1095  */
1096 ResultRelInfo *
1097 ExecGetTriggerResultRel(EState *estate, Oid relid)
1098 {
1099         ResultRelInfo *rInfo;
1100         int                     nr;
1101         ListCell   *l;
1102         Relation        rel;
1103         MemoryContext oldcontext;
1104
1105         /* First, search through the query result relations */
1106         rInfo = estate->es_result_relations;
1107         nr = estate->es_num_result_relations;
1108         while (nr > 0)
1109         {
1110                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1111                         return rInfo;
1112                 rInfo++;
1113                 nr--;
1114         }
1115         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1116         foreach(l, estate->es_trig_target_relations)
1117         {
1118                 rInfo = (ResultRelInfo *) lfirst(l);
1119                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1120                         return rInfo;
1121         }
1122         /* Nope, so we need a new one */
1123
1124         /*
1125          * Open the target relation's relcache entry.  We assume that an
1126          * appropriate lock is still held by the backend from whenever the trigger
1127          * event got queued, so we need take no new lock here.
1128          */
1129         rel = heap_open(relid, NoLock);
1130
1131         /*
1132          * Make the new entry in the right context.  Currently, we don't need any
1133          * index information in ResultRelInfos used only for triggers, so tell
1134          * InitResultRelInfo it's a DELETE.
1135          */
1136         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1137         rInfo = makeNode(ResultRelInfo);
1138         InitResultRelInfo(rInfo,
1139                                           rel,
1140                                           0,            /* dummy rangetable index */
1141                                           CMD_DELETE,
1142                                           estate->es_instrument);
1143         estate->es_trig_target_relations =
1144                 lappend(estate->es_trig_target_relations, rInfo);
1145         MemoryContextSwitchTo(oldcontext);
1146
1147         return rInfo;
1148 }
1149
1150 /*
1151  *              ExecContextForcesOids
1152  *
1153  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1154  * we need to ensure that result tuples have space for an OID iff they are
1155  * going to be stored into a relation that has OIDs.  In other contexts
1156  * we are free to choose whether to leave space for OIDs in result tuples
1157  * (we generally don't want to, but we do if a physical-tlist optimization
1158  * is possible).  This routine checks the plan context and returns TRUE if the
1159  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1160  * *hasoids is set to the required value.
1161  *
1162  * One reason this is ugly is that all plan nodes in the plan tree will emit
1163  * tuples with space for an OID, though we really only need the topmost node
1164  * to do so.  However, node types like Sort don't project new tuples but just
1165  * return their inputs, and in those cases the requirement propagates down
1166  * to the input node.  Eventually we might make this code smart enough to
1167  * recognize how far down the requirement really goes, but for now we just
1168  * make all plan nodes do the same thing if the top level forces the choice.
1169  *
1170  * We assume that estate->es_result_relation_info is already set up to
1171  * describe the target relation.  Note that in an UPDATE that spans an
1172  * inheritance tree, some of the target relations may have OIDs and some not.
1173  * We have to make the decisions on a per-relation basis as we initialize
1174  * each of the child plans of the topmost Append plan.
1175  *
1176  * SELECT INTO is even uglier, because we don't have the INTO relation's
1177  * descriptor available when this code runs; we have to look aside at a
1178  * flag set by InitPlan().
1179  */
1180 bool
1181 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1182 {
1183         if (planstate->state->es_select_into)
1184         {
1185                 *hasoids = planstate->state->es_into_oids;
1186                 return true;
1187         }
1188         else
1189         {
1190                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1191
1192                 if (ri != NULL)
1193                 {
1194                         Relation        rel = ri->ri_RelationDesc;
1195
1196                         if (rel != NULL)
1197                         {
1198                                 *hasoids = rel->rd_rel->relhasoids;
1199                                 return true;
1200                         }
1201                 }
1202         }
1203
1204         return false;
1205 }
1206
/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down any PlanQual processing we were doing
	 */
	if (estate->es_evalPlanQual != NULL)
		EndEvalPlanQual(estate);

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * for subplans too
	 */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor "tuple" table.  Per the NOTE above, this must
	 * happen before the relations are closed so that any buffer pins held
	 * via the slots are released.
	 */
	ExecDropTupleTable(estate->es_tupleTable, true);
	estate->es_tupleTable = NULL;

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * likewise close any trigger target relations (the extra ResultRelInfos
	 * accumulated by ExecGetTriggerResultRel); locks are kept here too
	 */
	foreach(l, estate->es_trig_target_relations)
	{
		resultRelInfo = (ResultRelInfo *) lfirst(l);
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = lfirst(l);

		heap_close(erm->relation, NoLock);
	}
}
1286
1287 /* ----------------------------------------------------------------
1288  *              ExecutePlan
1289  *
1290  *              Processes the query plan until we have processed 'numberTuples' tuples,
1291  *              moving in the specified direction.
1292  *
1293  *              Runs to completion if numberTuples is 0
1294  *
1295  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1296  * user can see it
1297  * ----------------------------------------------------------------
1298  */
1299 static void
1300 ExecutePlan(EState *estate,
1301                         PlanState *planstate,
1302                         CmdType operation,
1303                         long numberTuples,
1304                         ScanDirection direction,
1305                         DestReceiver *dest)
1306 {
1307         JunkFilter *junkfilter;
1308         TupleTableSlot *planSlot;
1309         TupleTableSlot *slot;
1310         ItemPointer tupleid = NULL;
1311         ItemPointerData tuple_ctid;
1312         long            current_tuple_count;
1313
1314         /*
1315          * initialize local variables
1316          */
1317         current_tuple_count = 0;
1318
1319         /*
1320          * Set the direction.
1321          */
1322         estate->es_direction = direction;
1323
1324         /*
1325          * Process BEFORE EACH STATEMENT triggers
1326          */
1327         switch (operation)
1328         {
1329                 case CMD_UPDATE:
1330                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1331                         break;
1332                 case CMD_DELETE:
1333                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1334                         break;
1335                 case CMD_INSERT:
1336                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1337                         break;
1338                 default:
1339                         /* do nothing */
1340                         break;
1341         }
1342
1343         /*
1344          * Loop until we've processed the proper number of tuples from the plan.
1345          */
1346         for (;;)
1347         {
1348                 /* Reset the per-output-tuple exprcontext */
1349                 ResetPerTupleExprContext(estate);
1350
1351                 /*
1352                  * Execute the plan and obtain a tuple
1353                  */
1354 lnext:  ;
1355                 if (estate->es_useEvalPlan)
1356                 {
1357                         planSlot = EvalPlanQualNext(estate);
1358                         if (TupIsNull(planSlot))
1359                                 planSlot = ExecProcNode(planstate);
1360                 }
1361                 else
1362                         planSlot = ExecProcNode(planstate);
1363
1364                 /*
1365                  * if the tuple is null, then we assume there is nothing more to
1366                  * process so we just end the loop...
1367                  */
1368                 if (TupIsNull(planSlot))
1369                         break;
1370                 slot = planSlot;
1371
1372                 /*
1373                  * If we have a junk filter, then project a new tuple with the junk
1374                  * removed.
1375                  *
1376                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1377                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1378                  * because that tuple slot has the wrong descriptor.)
1379                  *
1380                  * But first, extract all the junk information we need.
1381                  */
1382                 if ((junkfilter = estate->es_junkFilter) != NULL)
1383                 {
1384                         /*
1385                          * Process any FOR UPDATE or FOR SHARE locking requested.
1386                          */
1387                         if (estate->es_rowMarks != NIL)
1388                         {
1389                                 ListCell   *l;
1390
1391                 lmark:  ;
1392                                 foreach(l, estate->es_rowMarks)
1393                                 {
1394                                         ExecRowMark *erm = lfirst(l);
1395                                         Datum           datum;
1396                                         bool            isNull;
1397                                         HeapTupleData tuple;
1398                                         Buffer          buffer;
1399                                         ItemPointerData update_ctid;
1400                                         TransactionId update_xmax;
1401                                         TupleTableSlot *newSlot;
1402                                         LockTupleMode lockmode;
1403                                         HTSU_Result test;
1404
1405                                         /* if child rel, must check whether it produced this row */
1406                                         if (erm->rti != erm->prti)
1407                                         {
1408                                                 Oid             tableoid;
1409
1410                                                 datum = ExecGetJunkAttribute(slot,
1411                                                                                                          erm->toidAttNo,
1412                                                                                                          &isNull);
1413                                                 /* shouldn't ever get a null result... */
1414                                                 if (isNull)
1415                                                         elog(ERROR, "tableoid is NULL");
1416                                                 tableoid = DatumGetObjectId(datum);
1417
1418                                                 if (tableoid != RelationGetRelid(erm->relation))
1419                                                 {
1420                                                         /* this child is inactive right now */
1421                                                         continue;
1422                                                 }
1423                                         }
1424
1425                                         /* okay, fetch the tuple by ctid */
1426                                         datum = ExecGetJunkAttribute(slot,
1427                                                                                                  erm->ctidAttNo,
1428                                                                                                  &isNull);
1429                                         /* shouldn't ever get a null result... */
1430                                         if (isNull)
1431                                                 elog(ERROR, "ctid is NULL");
1432                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1433
1434                                         if (erm->forUpdate)
1435                                                 lockmode = LockTupleExclusive;
1436                                         else
1437                                                 lockmode = LockTupleShared;
1438
1439                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1440                                                                                    &update_ctid, &update_xmax,
1441                                                                                    estate->es_output_cid,
1442                                                                                    lockmode, erm->noWait);
1443                                         ReleaseBuffer(buffer);
1444                                         switch (test)
1445                                         {
1446                                                 case HeapTupleSelfUpdated:
1447                                                         /* treat it as deleted; do not process */
1448                                                         goto lnext;
1449
1450                                                 case HeapTupleMayBeUpdated:
1451                                                         break;
1452
1453                                                 case HeapTupleUpdated:
1454                                                         if (IsXactIsoLevelSerializable)
1455                                                                 ereport(ERROR,
1456                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1457                                                                   errmsg("could not serialize access due to concurrent update")));
1458                                                         if (!ItemPointerEquals(&update_ctid,
1459                                                                                                    &tuple.t_self))
1460                                                         {
1461                                                                 /* updated, so look at updated version */
1462                                                                 newSlot = EvalPlanQual(estate,
1463                                                                                                            erm->rti,
1464                                                                                                            &update_ctid,
1465                                                                                                            update_xmax);
1466                                                                 if (!TupIsNull(newSlot))
1467                                                                 {
1468                                                                         slot = planSlot = newSlot;
1469                                                                         estate->es_useEvalPlan = true;
1470                                                                         goto lmark;
1471                                                                 }
1472                                                         }
1473
1474                                                         /*
1475                                                          * if tuple was deleted or PlanQual failed for
1476                                                          * updated tuple - we must not return this tuple!
1477                                                          */
1478                                                         goto lnext;
1479
1480                                                 default:
1481                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1482                                                                  test);
1483                                         }
1484                                 }
1485                         }
1486
1487                         /*
1488                          * extract the 'ctid' junk attribute.
1489                          */
1490                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1491                         {
1492                                 Datum           datum;
1493                                 bool            isNull;
1494
1495                                 datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
1496                                                                                          &isNull);
1497                                 /* shouldn't ever get a null result... */
1498                                 if (isNull)
1499                                         elog(ERROR, "ctid is NULL");
1500
1501                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1502                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1503                                 tupleid = &tuple_ctid;
1504                         }
1505
1506                         /*
1507                          * Create a new "clean" tuple with all junk attributes removed. We
1508                          * don't need to do this for DELETE, however (there will in fact
1509                          * be no non-junk attributes in a DELETE!)
1510                          */
1511                         if (operation != CMD_DELETE)
1512                                 slot = ExecFilterJunk(junkfilter, slot);
1513                 }
1514
1515                 /*
1516                  * now that we have a tuple, do the appropriate thing with it.. either
1517                  * send it to the output destination, add it to a relation someplace,
1518                  * delete it from a relation, or modify some of its attributes.
1519                  */
1520                 switch (operation)
1521                 {
1522                         case CMD_SELECT:
1523                                 ExecSelect(slot, dest, estate);
1524                                 break;
1525
1526                         case CMD_INSERT:
1527                                 ExecInsert(slot, tupleid, planSlot, dest, estate);
1528                                 break;
1529
1530                         case CMD_DELETE:
1531                                 ExecDelete(tupleid, planSlot, dest, estate);
1532                                 break;
1533
1534                         case CMD_UPDATE:
1535                                 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1536                                 break;
1537
1538                         default:
1539                                 elog(ERROR, "unrecognized operation code: %d",
1540                                          (int) operation);
1541                                 break;
1542                 }
1543
1544                 /*
1545                  * check our tuple count.. if we've processed the proper number then
1546                  * quit, else loop again and process more tuples.  Zero numberTuples
1547                  * means no limit.
1548                  */
1549                 current_tuple_count++;
1550                 if (numberTuples && numberTuples == current_tuple_count)
1551                         break;
1552         }
1553
1554         /*
1555          * Process AFTER EACH STATEMENT triggers
1556          */
1557         switch (operation)
1558         {
1559                 case CMD_UPDATE:
1560                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1561                         break;
1562                 case CMD_DELETE:
1563                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1564                         break;
1565                 case CMD_INSERT:
1566                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1567                         break;
1568                 default:
1569                         /* do nothing */
1570                         break;
1571         }
1572 }
1573
1574 /* ----------------------------------------------------------------
1575  *              ExecSelect
1576  *
1577  *              SELECTs are easy.. we just pass the tuple to the appropriate
1578  *              output function.
1579  * ----------------------------------------------------------------
1580  */
1581 static void
1582 ExecSelect(TupleTableSlot *slot,
1583                    DestReceiver *dest,
1584                    EState *estate)
1585 {
1586         (*dest->receiveSlot) (slot, dest);
1587         IncrRetrieved();
1588         (estate->es_processed)++;
1589 }
1590
1591 /* ----------------------------------------------------------------
1592  *              ExecInsert
1593  *
1594  *              INSERTs are trickier.. we have to insert the tuple into
1595  *              the base relation and insert appropriate tuples into the
1596  *              index relations.
1597  * ----------------------------------------------------------------
1598  */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	/*
	 * Insert the tuple in "slot" into the current result relation: fire
	 * BEFORE ROW triggers, check constraints, do the heap insertion, make
	 * index entries, fire AFTER ROW triggers, and evaluate RETURNING.
	 *
	 * Note: "tupleid" is never referenced here; it exists only so that
	 * INSERT/UPDATE/DELETE share a common call pattern in ExecutePlan.
	 * "planSlot" is used only for RETURNING projection.
	 */
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * (The trailing 0/NULL arguments request default insert options and no
	 * bulk-insert state — TODO confirm against heap_insert's signature.)
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_output_cid, 0, NULL);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;		/* OID assigned, for currval-style use */
	setLastTid(&(tuple->t_self));	/* remember TID for currtid functions */

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1686
1687 /* ----------------------------------------------------------------
1688  *              ExecDelete
1689  *
1690  *              DELETE is like UPDATE, except that we delete the tuple and no
1691  *              index modifications are needed
1692  * ----------------------------------------------------------------
1693  */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	/*
	 * Delete the tuple identified by "tupleid" from the current result
	 * relation, firing BEFORE/AFTER ROW triggers and handling concurrent
	 * updates under READ COMMITTED rules via EvalPlanQual.  "planSlot" is
	 * used only for RETURNING projection.
	 */
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* TID of newer tuple version, if any */
	TransactionId update_xmax;		/* updating XID of the outdated version */

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Another transaction updated or deleted the row after our
			 * snapshot.  In SERIALIZABLE mode that's a hard failure; in
			 * READ COMMITTED mode, re-check the query quals against the
			 * latest row version and, if they still pass, retry the
			 * delete at the new TID.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					/* quals still pass: chase the update chain and retry */
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.      We can use the trigger tuple slot.
		 *
		 * SnapshotAny is used so the fetch succeeds even though the tuple
		 * we just deleted is no longer visible to an ordinary snapshot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer pin it points into */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1816
1817 /* ----------------------------------------------------------------
1818  *              ExecUpdate
1819  *
1820  *              note: we can't run UPDATE queries with transactions
1821  *              off because UPDATEs are actually INSERTs and our
1822  *              scan will mistakenly loop forever, updating the tuple
1823  *              it just inserted..      This should be fixed but until it
1824  *              is, we don't want to get stuck in an infinite loop
1825  *              which corrupts your database..
1826  * ----------------------------------------------------------------
1827  */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	/*
	 * Replace the tuple at "tupleid" in the current result relation with
	 * the tuple in "slot": fire BEFORE ROW triggers, check constraints,
	 * perform the heap update (retrying via EvalPlanQual under READ
	 * COMMITTED on concurrent update), insert index entries unless it was
	 * a HOT update, fire AFTER ROW triggers, and evaluate RETURNING.
	 */
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* TID of newer tuple version, if any */
	TransactionId update_xmax;		/* updating XID of the outdated version */

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.      This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:

			/*
			 * Concurrent update: hard failure in SERIALIZABLE; in READ
			 * COMMITTED, re-evaluate quals against the newest version and,
			 * if they still pass, recompute the new tuple from that
			 * version and retry the update at the new TID.
			 */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					/* strip junk attrs and rebuild the candidate tuple */
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1984
1985 /*
1986  * ExecRelCheck --- check that tuple meets constraints for result relation
1987  */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	/*
	 * Evaluate the relation's CHECK constraints against the tuple in
	 * "slot".  Returns the name of the first failing constraint, or NULL
	 * if all constraints pass.
	 */
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;		/* report first failed constraint */
	}

	/* NULL result means no error */
	return NULL;
}
2046
2047 void
2048 ExecConstraints(ResultRelInfo *resultRelInfo,
2049                                 TupleTableSlot *slot, EState *estate)
2050 {
2051         Relation        rel = resultRelInfo->ri_RelationDesc;
2052         TupleConstr *constr = rel->rd_att->constr;
2053
2054         Assert(constr);
2055
2056         if (constr->has_not_null)
2057         {
2058                 int                     natts = rel->rd_att->natts;
2059                 int                     attrChk;
2060
2061                 for (attrChk = 1; attrChk <= natts; attrChk++)
2062                 {
2063                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
2064                                 slot_attisnull(slot, attrChk))
2065                                 ereport(ERROR,
2066                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2067                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2068                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
2069                 }
2070         }
2071
2072         if (constr->num_check > 0)
2073         {
2074                 const char *failed;
2075
2076                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2077                         ereport(ERROR,
2078                                         (errcode(ERRCODE_CHECK_VIOLATION),
2079                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2080                                                         RelationGetRelationName(rel), failed)));
2081         }
2082 }
2083
2084 /*
2085  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
2086  *
2087  * projectReturning: RETURNING projection info for current result rel
2088  * tupleSlot: slot holding tuple actually inserted/updated/deleted
2089  * planSlot: slot holding tuple returned by top plan node
2090  * dest: where to send the output
2091  */
2092 static void
2093 ExecProcessReturning(ProjectionInfo *projectReturning,
2094                                          TupleTableSlot *tupleSlot,
2095                                          TupleTableSlot *planSlot,
2096                                          DestReceiver *dest)
2097 {
2098         ExprContext *econtext = projectReturning->pi_exprContext;
2099         TupleTableSlot *retSlot;
2100
2101         /*
2102          * Reset per-tuple memory context to free any expression evaluation
2103          * storage allocated in the previous cycle.
2104          */
2105         ResetExprContext(econtext);
2106
2107         /* Make tuple and any needed join variables available to ExecProject */
2108         econtext->ecxt_scantuple = tupleSlot;
2109         econtext->ecxt_outertuple = planSlot;
2110
2111         /* Compute the RETURNING expressions */
2112         retSlot = ExecProject(projectReturning, NULL);
2113
2114         /* Send to dest */
2115         (*dest->receiveSlot) (retSlot, dest);
2116
2117         ExecClearTuple(retSlot);
2118 }
2119
/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;
	SnapshotData SnapshotDirty;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 *
	 * The target is either the query's result relation, or one of the
	 * relations locked via FOR UPDATE/SHARE (tracked in es_rowMarks).
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			ExecRowMark *erm = lfirst(l);

			if (erm->rti == rti)
			{
				relation = erm->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by other transaction then we have to
			 * wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple. (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.	Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}

	/*
	 * If this is request for another RTE - Ra, - then we have to check wasn't
	 * PlanQual requested for Ra already and if so then Ra' row was updated
	 * again and we have to re-start old execution for Ra and forget all what
	 * we done after Ra was suspended. Cool? -:))
	 *
	 * (That is: pop and discard stack levels until the entry for rti is
	 * back on top, recycling each popped entry onto the free list.)
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}

	/*
	 * If we are requested for another RTE then we have to suspend execution
	 * of current PlanQual and start execution for new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);

	/*
	 * Ok - we're requested for the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we implement
	 * ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}

	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE' tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	return EvalPlanQualNext(estate);
}
2409
/*
 * EvalPlanQualNext --- fetch the next tuple from the active recheck plan.
 *
 * Runs the topmost PlanQual level's plan tree until it produces a tuple.
 * When a level is exhausted, it is shut down and popped off the stack, and
 * execution resumes with the next-older suspended level.  Returns NULL
 * once the entire PlanQual stack is exhausted.
 */
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	/* run the recheck plan in its own per-query memory context */
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;
	}

	return slot;
}
2453
/*
 * EndEvalPlanQual --- shut down all remaining PlanQual stack levels.
 *
 * Called during normal executor shutdown.  Pops and stops every active
 * level, recycling each onto the free list, and leaves the oldest entry
 * marked idle (rti == 0) for possible reuse.
 */
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shutdowned */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
2487
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * priorepq, if not NULL, is the next-older stack entry; its es_evTuple
 * array is shared with the new level (see comment below).
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	/* do all initialization work in the new estate's memory context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_param_list_info = estate->es_param_list_info;
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.	This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2580
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	/* perform cleanup in this level's own memory context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	ExecEndNode(epq->planstate);

	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	/* release this level's copy of the tuple being rechecked, if any */
	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	/* mark this level as no longer having live executor state */
	epq->estate = NULL;
	epq->planstate = NULL;
}
2632
2633 /*
2634  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2635  *
2636  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2637  * are looking at a row returned by the EvalPlanQual machinery, we need
2638  * to look at the subsidiary state instead.
2639  */
2640 PlanState *
2641 ExecGetActivePlanTree(QueryDesc *queryDesc)
2642 {
2643         EState     *estate = queryDesc->estate;
2644
2645         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2646                 return estate->es_evalPlanQual->planstate;
2647         else
2648                 return queryDesc->planstate;
2649 }
2650
2651
2652 /*
2653  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2654  *
2655  * We implement SELECT INTO by diverting SELECT's normal output with
2656  * a specialized DestReceiver type.
2657  */
2658
/*
 * DR_intorel --- DestReceiver subclass used for SELECT INTO.
 * Appends each received tuple to the target relation created by
 * OpenIntoRel.
 */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
2667
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 *
 * Raises an error if the target namespace/tablespace doesn't exist or the
 * user lacks CREATE permission on it.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.  We
	 * can skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
2810
2811 /*
2812  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2813  */
2814 static void
2815 CloseIntoRel(QueryDesc *queryDesc)
2816 {
2817         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2818
2819         /* OpenIntoRel might never have gotten called */
2820         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2821         {
2822                 FreeBulkInsertState(myState->bistate);
2823
2824                 /* If we skipped using WAL, must heap_sync before commit */
2825                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2826                         heap_sync(myState->rel);
2827
2828                 /* close rel, but keep lock until commit */
2829                 heap_close(myState->rel, NoLock);
2830
2831                 myState->rel = NULL;
2832         }
2833 }
2834
2835 /*
2836  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2837  *
2838  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2839  * we just leave the private fields zeroed here.  OpenIntoRel will
2840  * fill them in.
2841  */
2842 DestReceiver *
2843 CreateIntoRelDestReceiver(void)
2844 {
2845         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2846
2847         self->pub.receiveSlot = intorel_receive;
2848         self->pub.rStartup = intorel_startup;
2849         self->pub.rShutdown = intorel_shutdown;
2850         self->pub.rDestroy = intorel_destroy;
2851         self->pub.mydest = DestIntoRel;
2852
2853         return (DestReceiver *) self;
2854 }
2855
2856 /*
2857  * intorel_startup --- executor startup
2858  */
2859 static void
2860 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2861 {
2862         /* no-op */
2863 }
2864
2865 /*
2866  * intorel_receive --- receive one tuple
2867  */
2868 static void
2869 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2870 {
2871         DR_intorel *myState = (DR_intorel *) self;
2872         HeapTuple       tuple;
2873
2874         /*
2875          * get the heap tuple out of the tuple table slot, making sure we have a
2876          * writable copy
2877          */
2878         tuple = ExecMaterializeSlot(slot);
2879
2880         heap_insert(myState->rel,
2881                                 tuple,
2882                                 myState->estate->es_output_cid,
2883                                 myState->hi_options,
2884                                 myState->bistate);
2885
2886         /* We know this is a newly created relation, so there are no indexes */
2887
2888         IncrAppended();
2889 }
2890
2891 /*
2892  * intorel_shutdown --- executor end
2893  */
2894 static void
2895 intorel_shutdown(DestReceiver *self)
2896 {
2897         /* no-op */
2898 }
2899
2900 /*
2901  * intorel_destroy --- release DestReceiver object
2902  */
2903 static void
2904 intorel_destroy(DestReceiver *self)
2905 {
2906         pfree(self);
2907 }