]> granicus.if.org Git - postgresql/blob - src/backend/executor/execMain.c
Move exprType(), exprTypmod(), expression_tree_walker(), and related routines
[postgresql] / src / backend / executor / execMain.c
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.313 2008/08/25 22:42:32 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "nodes/nodeFuncs.h"
49 #include "optimizer/clauses.h"
50 #include "parser/parse_clause.h"
51 #include "parser/parsetree.h"
52 #include "storage/bufmgr.h"
53 #include "storage/lmgr.h"
54 #include "storage/smgr.h"
55 #include "utils/acl.h"
56 #include "utils/builtins.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/snapmgr.h"
60 #include "utils/tqual.h"
61
62
/* Hook for plugins to get control in ExecutorRun() */
ExecutorRun_hook_type ExecutorRun_hook = NULL;

/*
 * State for EvalPlanQual processing: re-evaluating a plan's quals against
 * an updated tuple version during READ COMMITTED update chasing.  Nodes are
 * kept on a stack (next) while active and recycled via a freelist (free).
 */
typedef struct evalPlanQual
{
	Index		rti;			/* range table index of the relation being
								 * rechecked -- presumably; confirm against
								 * EvalPlanQual() callers below */
	EState	   *estate;			/* private executor state for the recheck */
	PlanState  *planstate;		/* plan tree instantiated for the recheck */
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
74
75 /* decls for local routines only used within this module */
76 static void InitPlan(QueryDesc *queryDesc, int eflags);
77 static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
78 static void ExecEndPlan(PlanState *planstate, EState *estate);
79 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
80                         CmdType operation,
81                         long numberTuples,
82                         ScanDirection direction,
83                         DestReceiver *dest);
84 static void ExecSelect(TupleTableSlot *slot,
85                    DestReceiver *dest, EState *estate);
86 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
87                    TupleTableSlot *planSlot,
88                    DestReceiver *dest, EState *estate);
89 static void ExecDelete(ItemPointer tupleid,
90                    TupleTableSlot *planSlot,
91                    DestReceiver *dest, EState *estate);
92 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
93                    TupleTableSlot *planSlot,
94                    DestReceiver *dest, EState *estate);
95 static void ExecProcessReturning(ProjectionInfo *projectReturning,
96                                          TupleTableSlot *tupleSlot,
97                                          TupleTableSlot *planSlot,
98                                          DestReceiver *dest);
99 static TupleTableSlot *EvalPlanQualNext(EState *estate);
100 static void EndEvalPlanQual(EState *estate);
101 static void ExecCheckRTPerms(List *rangeTable);
102 static void ExecCheckRTEPerms(RangeTblEntry *rte);
103 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
104 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
105                                   evalPlanQual *priorepq);
106 static void EvalPlanQualStop(evalPlanQual *epq);
107 static void OpenIntoRel(QueryDesc *queryDesc);
108 static void CloseIntoRel(QueryDesc *queryDesc);
109 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
110 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
111 static void intorel_shutdown(DestReceiver *self);
112 static void intorel_destroy(DestReceiver *self);
113
114 /* end of local decls */
115
116
117 /* ----------------------------------------------------------------
118  *              ExecutorStart
119  *
120  *              This routine must be called at the beginning of any execution of any
121  *              query plan
122  *
123  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
124  * clear why we bother to separate the two functions, but...).  The tupDesc
125  * field of the QueryDesc is filled in to describe the tuples that will be
126  * returned, and the internal fields (estate and planstate) are set up.
127  *
128  * eflags contains flag bits as described in executor.h.
129  *
130  * NB: the CurrentMemoryContext when this is called will become the parent
131  * of the per-query context used for this Executor invocation.
132  * ----------------------------------------------------------------
133  */
134 void
135 ExecutorStart(QueryDesc *queryDesc, int eflags)
136 {
137         EState     *estate;
138         MemoryContext oldcontext;
139
140         /* sanity checks: queryDesc must not be started already */
141         Assert(queryDesc != NULL);
142         Assert(queryDesc->estate == NULL);
143
144         /*
145          * If the transaction is read-only, we need to check if any writes are
146          * planned to non-temporary tables.  EXPLAIN is considered read-only.
147          */
148         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
149                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
150
151         /*
152          * Build EState, switch into per-query memory context for startup.
153          */
154         estate = CreateExecutorState();
155         queryDesc->estate = estate;
156
157         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
158
159         /*
160          * Fill in parameters, if any, from queryDesc
161          */
162         estate->es_param_list_info = queryDesc->params;
163
164         if (queryDesc->plannedstmt->nParamExec > 0)
165                 estate->es_param_exec_vals = (ParamExecData *)
166                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
167
168         /*
169          * If non-read-only query, set the command ID to mark output tuples with
170          */
171         switch (queryDesc->operation)
172         {
173                 case CMD_SELECT:
174                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
175                         if (queryDesc->plannedstmt->intoClause != NULL ||
176                                 queryDesc->plannedstmt->rowMarks != NIL)
177                                 estate->es_output_cid = GetCurrentCommandId(true);
178                         break;
179
180                 case CMD_INSERT:
181                 case CMD_DELETE:
182                 case CMD_UPDATE:
183                         estate->es_output_cid = GetCurrentCommandId(true);
184                         break;
185
186                 default:
187                         elog(ERROR, "unrecognized operation code: %d",
188                                  (int) queryDesc->operation);
189                         break;
190         }
191
192         /*
193          * Copy other important information into the EState
194          */
195         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
196         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
197         estate->es_instrument = queryDesc->doInstrument;
198
199         /*
200          * Initialize the plan state tree
201          */
202         InitPlan(queryDesc, eflags);
203
204         MemoryContextSwitchTo(oldcontext);
205 }
206
207 /* ----------------------------------------------------------------
208  *              ExecutorRun
209  *
210  *              This is the main routine of the executor module. It accepts
211  *              the query descriptor from the traffic cop and executes the
212  *              query plan.
213  *
214  *              ExecutorStart must have been called already.
215  *
216  *              If direction is NoMovementScanDirection then nothing is done
217  *              except to start up/shut down the destination.  Otherwise,
218  *              we retrieve up to 'count' tuples in the specified direction.
219  *
220  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
221  *              completion.
222  *
223  *              We provide a function hook variable that lets loadable plugins
224  *              get control when ExecutorRun is called.  Such a plugin would
225  *              normally call standard_ExecutorRun().
226  *
227  * ----------------------------------------------------------------
228  */
229 TupleTableSlot *
230 ExecutorRun(QueryDesc *queryDesc,
231                         ScanDirection direction, long count)
232 {
233         TupleTableSlot *result;
234
235         if (ExecutorRun_hook)
236                 result = (*ExecutorRun_hook) (queryDesc, direction, count);
237         else
238                 result = standard_ExecutorRun(queryDesc, direction, count);
239         return result;
240 }
241
242 TupleTableSlot *
243 standard_ExecutorRun(QueryDesc *queryDesc,
244                                          ScanDirection direction, long count)
245 {
246         EState     *estate;
247         CmdType         operation;
248         DestReceiver *dest;
249         bool            sendTuples;
250         TupleTableSlot *result;
251         MemoryContext oldcontext;
252
253         /* sanity checks */
254         Assert(queryDesc != NULL);
255
256         estate = queryDesc->estate;
257
258         Assert(estate != NULL);
259
260         /*
261          * Switch into per-query memory context
262          */
263         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
264
265         /*
266          * extract information from the query descriptor and the query feature.
267          */
268         operation = queryDesc->operation;
269         dest = queryDesc->dest;
270
271         /*
272          * startup tuple receiver, if we will be emitting tuples
273          */
274         estate->es_processed = 0;
275         estate->es_lastoid = InvalidOid;
276
277         sendTuples = (operation == CMD_SELECT ||
278                                   queryDesc->plannedstmt->returningLists);
279
280         if (sendTuples)
281                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
282
283         /*
284          * run plan
285          */
286         if (ScanDirectionIsNoMovement(direction))
287                 result = NULL;
288         else
289                 result = ExecutePlan(estate,
290                                                          queryDesc->planstate,
291                                                          operation,
292                                                          count,
293                                                          direction,
294                                                          dest);
295
296         /*
297          * shutdown tuple receiver, if we started it
298          */
299         if (sendTuples)
300                 (*dest->rShutdown) (dest);
301
302         MemoryContextSwitchTo(oldcontext);
303
304         return result;
305 }
306
307 /* ----------------------------------------------------------------
308  *              ExecutorEnd
309  *
310  *              This routine must be called at the end of execution of any
311  *              query plan
312  * ----------------------------------------------------------------
313  */
314 void
315 ExecutorEnd(QueryDesc *queryDesc)
316 {
317         EState     *estate;
318         MemoryContext oldcontext;
319
320         /* sanity checks */
321         Assert(queryDesc != NULL);
322
323         estate = queryDesc->estate;
324
325         Assert(estate != NULL);
326
327         /*
328          * Switch into per-query memory context to run ExecEndPlan
329          */
330         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
331
332         ExecEndPlan(queryDesc->planstate, estate);
333
334         /*
335          * Close the SELECT INTO relation if any
336          */
337         if (estate->es_select_into)
338                 CloseIntoRel(queryDesc);
339
340         /* do away with our snapshots */
341         UnregisterSnapshot(estate->es_snapshot);
342         UnregisterSnapshot(estate->es_crosscheck_snapshot);
343
344         /*
345          * Must switch out of context before destroying it
346          */
347         MemoryContextSwitchTo(oldcontext);
348
349         /*
350          * Release EState and per-query memory context.  This should release
351          * everything the executor has allocated.
352          */
353         FreeExecutorState(estate);
354
355         /* Reset queryDesc fields that no longer point to anything */
356         queryDesc->tupDesc = NULL;
357         queryDesc->estate = NULL;
358         queryDesc->planstate = NULL;
359 }
360
361 /* ----------------------------------------------------------------
362  *              ExecutorRewind
363  *
364  *              This routine may be called on an open queryDesc to rewind it
365  *              to the start.
366  * ----------------------------------------------------------------
367  */
368 void
369 ExecutorRewind(QueryDesc *queryDesc)
370 {
371         EState     *estate;
372         MemoryContext oldcontext;
373
374         /* sanity checks */
375         Assert(queryDesc != NULL);
376
377         estate = queryDesc->estate;
378
379         Assert(estate != NULL);
380
381         /* It's probably not sensible to rescan updating queries */
382         Assert(queryDesc->operation == CMD_SELECT);
383
384         /*
385          * Switch into per-query memory context
386          */
387         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
388
389         /*
390          * rescan plan
391          */
392         ExecReScan(queryDesc->planstate, NULL);
393
394         MemoryContextSwitchTo(oldcontext);
395 }
396
397
398 /*
399  * ExecCheckRTPerms
400  *              Check access permissions for all relations listed in a range table.
401  */
402 static void
403 ExecCheckRTPerms(List *rangeTable)
404 {
405         ListCell   *l;
406
407         foreach(l, rangeTable)
408         {
409                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
410         }
411 }
412
413 /*
414  * ExecCheckRTEPerms
415  *              Check access permissions for a single RTE.
416  */
417 static void
418 ExecCheckRTEPerms(RangeTblEntry *rte)
419 {
420         AclMode         requiredPerms;
421         Oid                     relOid;
422         Oid                     userid;
423
424         /*
425          * Only plain-relation RTEs need to be checked here.  Function RTEs are
426          * checked by init_fcache when the function is prepared for execution.
427          * Join, subquery, and special RTEs need no checks.
428          */
429         if (rte->rtekind != RTE_RELATION)
430                 return;
431
432         /*
433          * No work if requiredPerms is empty.
434          */
435         requiredPerms = rte->requiredPerms;
436         if (requiredPerms == 0)
437                 return;
438
439         relOid = rte->relid;
440
441         /*
442          * userid to check as: current user unless we have a setuid indication.
443          *
444          * Note: GetUserId() is presently fast enough that there's no harm in
445          * calling it separately for each RTE.  If that stops being true, we could
446          * call it once in ExecCheckRTPerms and pass the userid down from there.
447          * But for now, no need for the extra clutter.
448          */
449         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
450
451         /*
452          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
453          */
454         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
455                 != requiredPerms)
456                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
457                                            get_rel_name(relOid));
458 }
459
460 /*
461  * Check that the query does not imply any writes to non-temp tables.
462  */
463 static void
464 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
465 {
466         ListCell   *l;
467
468         /*
469          * CREATE TABLE AS or SELECT INTO?
470          *
471          * XXX should we allow this if the destination is temp?
472          */
473         if (plannedstmt->intoClause != NULL)
474                 goto fail;
475
476         /* Fail if write permissions are requested on any non-temp table */
477         foreach(l, plannedstmt->rtable)
478         {
479                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
480
481                 if (rte->rtekind != RTE_RELATION)
482                         continue;
483
484                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
485                         continue;
486
487                 if (isTempNamespace(get_rel_namespace(rte->relid)))
488                         continue;
489
490                 goto fail;
491         }
492
493         return;
494
495 fail:
496         ereport(ERROR,
497                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
498                          errmsg("transaction is read-only")));
499 }
500
501
502 /* ----------------------------------------------------------------
503  *              InitPlan
504  *
505  *              Initializes the query plan: open files, allocate storage
506  *              and start up the rule manager
507  * ----------------------------------------------------------------
508  */
509 static void
510 InitPlan(QueryDesc *queryDesc, int eflags)
511 {
512         CmdType         operation = queryDesc->operation;
513         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
514         Plan       *plan = plannedstmt->planTree;
515         List       *rangeTable = plannedstmt->rtable;
516         EState     *estate = queryDesc->estate;
517         PlanState  *planstate;
518         TupleDesc       tupType;
519         ListCell   *l;
520         int                     i;
521
522         /*
523          * Do permissions checks
524          */
525         ExecCheckRTPerms(rangeTable);
526
527         /*
528          * initialize the node's execution state
529          */
530         estate->es_range_table = rangeTable;
531
532         /*
533          * initialize result relation stuff
534          */
535         if (plannedstmt->resultRelations)
536         {
537                 List       *resultRelations = plannedstmt->resultRelations;
538                 int                     numResultRelations = list_length(resultRelations);
539                 ResultRelInfo *resultRelInfos;
540                 ResultRelInfo *resultRelInfo;
541
542                 resultRelInfos = (ResultRelInfo *)
543                         palloc(numResultRelations * sizeof(ResultRelInfo));
544                 resultRelInfo = resultRelInfos;
545                 foreach(l, resultRelations)
546                 {
547                         Index           resultRelationIndex = lfirst_int(l);
548                         Oid                     resultRelationOid;
549                         Relation        resultRelation;
550
551                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
552                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
553                         InitResultRelInfo(resultRelInfo,
554                                                           resultRelation,
555                                                           resultRelationIndex,
556                                                           operation,
557                                                           estate->es_instrument);
558                         resultRelInfo++;
559                 }
560                 estate->es_result_relations = resultRelInfos;
561                 estate->es_num_result_relations = numResultRelations;
562                 /* Initialize to first or only result rel */
563                 estate->es_result_relation_info = resultRelInfos;
564         }
565         else
566         {
567                 /*
568                  * if no result relation, then set state appropriately
569                  */
570                 estate->es_result_relations = NULL;
571                 estate->es_num_result_relations = 0;
572                 estate->es_result_relation_info = NULL;
573         }
574
575         /*
576          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
577          * flag appropriately so that the plan tree will be initialized with the
578          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
579          */
580         estate->es_select_into = false;
581         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
582         {
583                 estate->es_select_into = true;
584                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
585         }
586
587         /*
588          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
589          * initialize the plan tree, else we'd be doing a lock upgrade. While we
590          * are at it, build the ExecRowMark list.
591          */
592         estate->es_rowMarks = NIL;
593         foreach(l, plannedstmt->rowMarks)
594         {
595                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
596                 Oid                     relid = getrelid(rc->rti, rangeTable);
597                 Relation        relation;
598                 ExecRowMark *erm;
599
600                 relation = heap_open(relid, RowShareLock);
601                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
602                 erm->relation = relation;
603                 erm->rti = rc->rti;
604                 erm->forUpdate = rc->forUpdate;
605                 erm->noWait = rc->noWait;
606                 /* We'll set up ctidAttno below */
607                 erm->ctidAttNo = InvalidAttrNumber;
608                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
609         }
610
611         /*
612          * Initialize the executor "tuple" table.  We need slots for all the plan
613          * nodes, plus possibly output slots for the junkfilter(s). At this point
614          * we aren't sure if we need junkfilters, so just add slots for them
615          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
616          * trigger output tuples.  Also, one for RETURNING-list evaluation.
617          */
618         {
619                 int                     nSlots;
620
621                 /* Slots for the main plan tree */
622                 nSlots = ExecCountSlotsNode(plan);
623                 /* Add slots for subplans and initplans */
624                 foreach(l, plannedstmt->subplans)
625                 {
626                         Plan       *subplan = (Plan *) lfirst(l);
627
628                         nSlots += ExecCountSlotsNode(subplan);
629                 }
630                 /* Add slots for junkfilter(s) */
631                 if (plannedstmt->resultRelations != NIL)
632                         nSlots += list_length(plannedstmt->resultRelations);
633                 else
634                         nSlots += 1;
635                 if (operation != CMD_SELECT)
636                         nSlots++;                       /* for es_trig_tuple_slot */
637                 if (plannedstmt->returningLists)
638                         nSlots++;                       /* for RETURNING projection */
639
640                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
641
642                 if (operation != CMD_SELECT)
643                         estate->es_trig_tuple_slot =
644                                 ExecAllocTableSlot(estate->es_tupleTable);
645         }
646
647         /* mark EvalPlanQual not active */
648         estate->es_plannedstmt = plannedstmt;
649         estate->es_evalPlanQual = NULL;
650         estate->es_evTupleNull = NULL;
651         estate->es_evTuple = NULL;
652         estate->es_useEvalPlan = false;
653
654         /*
655          * Initialize private state information for each SubPlan.  We must do this
656          * before running ExecInitNode on the main query tree, since
657          * ExecInitSubPlan expects to be able to find these entries.
658          */
659         Assert(estate->es_subplanstates == NIL);
660         i = 1;                                          /* subplan indices count from 1 */
661         foreach(l, plannedstmt->subplans)
662         {
663                 Plan       *subplan = (Plan *) lfirst(l);
664                 PlanState  *subplanstate;
665                 int                     sp_eflags;
666
667                 /*
668                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
669                  * it is a parameterless subplan (not initplan), we suggest that it be
670                  * prepared to handle REWIND efficiently; otherwise there is no need.
671                  */
672                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
673                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
674                         sp_eflags |= EXEC_FLAG_REWIND;
675
676                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
677
678                 estate->es_subplanstates = lappend(estate->es_subplanstates,
679                                                                                    subplanstate);
680
681                 i++;
682         }
683
684         /*
685          * Initialize the private state information for all the nodes in the query
686          * tree.  This opens files, allocates storage and leaves us ready to start
687          * processing tuples.
688          */
689         planstate = ExecInitNode(plan, estate, eflags);
690
691         /*
692          * Get the tuple descriptor describing the type of tuples to return. (this
693          * is especially important if we are creating a relation with "SELECT
694          * INTO")
695          */
696         tupType = ExecGetResultType(planstate);
697
698         /*
699          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
700          * filter if there are any junk attrs in the tlist.  UPDATE and
701          * DELETE always need a filter, since there's always a junk 'ctid'
702          * attribute present --- no need to look first.
703          *
704          * This section of code is also a convenient place to verify that the
705          * output of an INSERT or UPDATE matches the target table(s).
706          */
707         {
708                 bool            junk_filter_needed = false;
709                 ListCell   *tlist;
710
711                 switch (operation)
712                 {
713                         case CMD_SELECT:
714                         case CMD_INSERT:
715                                 foreach(tlist, plan->targetlist)
716                                 {
717                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
718
719                                         if (tle->resjunk)
720                                         {
721                                                 junk_filter_needed = true;
722                                                 break;
723                                         }
724                                 }
725                                 break;
726                         case CMD_UPDATE:
727                         case CMD_DELETE:
728                                 junk_filter_needed = true;
729                                 break;
730                         default:
731                                 break;
732                 }
733
734                 if (junk_filter_needed)
735                 {
736                         /*
737                          * If there are multiple result relations, each one needs its own
738                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
739                          * we can't be fooled by some needing a filter and some not.
740                          */
741                         if (list_length(plannedstmt->resultRelations) > 1)
742                         {
743                                 PlanState **appendplans;
744                                 int                     as_nplans;
745                                 ResultRelInfo *resultRelInfo;
746
747                                 /* Top plan had better be an Append here. */
748                                 Assert(IsA(plan, Append));
749                                 Assert(((Append *) plan)->isTarget);
750                                 Assert(IsA(planstate, AppendState));
751                                 appendplans = ((AppendState *) planstate)->appendplans;
752                                 as_nplans = ((AppendState *) planstate)->as_nplans;
753                                 Assert(as_nplans == estate->es_num_result_relations);
754                                 resultRelInfo = estate->es_result_relations;
755                                 for (i = 0; i < as_nplans; i++)
756                                 {
757                                         PlanState  *subplan = appendplans[i];
758                                         JunkFilter *j;
759
760                                         if (operation == CMD_UPDATE)
761                                                 ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
762                                                                                         subplan->plan->targetlist);
763
764                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
765                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
766                                                                   ExecAllocTableSlot(estate->es_tupleTable));
767
768                                         /*
769                                          * Since it must be UPDATE/DELETE, there had better be a
770                                          * "ctid" junk attribute in the tlist ... but ctid could
771                                          * be at a different resno for each result relation. We
772                                          * look up the ctid resnos now and save them in the
773                                          * junkfilters.
774                                          */
775                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
776                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
777                                                 elog(ERROR, "could not find junk ctid column");
778                                         resultRelInfo->ri_junkFilter = j;
779                                         resultRelInfo++;
780                                 }
781
782                                 /*
783                                  * Set active junkfilter too; at this point ExecInitAppend has
784                                  * already selected an active result relation...
785                                  */
786                                 estate->es_junkFilter =
787                                         estate->es_result_relation_info->ri_junkFilter;
788
789                                 /*
790                                  * We currently can't support rowmarks in this case, because
791                                  * the associated junk CTIDs might have different resnos in
792                                  * different subplans.
793                                  */
794                                 if (estate->es_rowMarks)
795                                         ereport(ERROR,
796                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
797                                                          errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
798                         }
799                         else
800                         {
801                                 /* Normal case with just one JunkFilter */
802                                 JunkFilter *j;
803
804                                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
805                                         ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
806                                                                                 planstate->plan->targetlist);
807
808                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
809                                                                            tupType->tdhasoid,
810                                                                   ExecAllocTableSlot(estate->es_tupleTable));
811                                 estate->es_junkFilter = j;
812                                 if (estate->es_result_relation_info)
813                                         estate->es_result_relation_info->ri_junkFilter = j;
814
815                                 if (operation == CMD_SELECT)
816                                 {
817                                         /* For SELECT, want to return the cleaned tuple type */
818                                         tupType = j->jf_cleanTupType;
819                                 }
820                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
821                                 {
822                                         /* For UPDATE/DELETE, find the ctid junk attr now */
823                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
824                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
825                                                 elog(ERROR, "could not find junk ctid column");
826                                 }
827
828                                 /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
829                                 foreach(l, estate->es_rowMarks)
830                                 {
831                                         ExecRowMark *erm = (ExecRowMark *) lfirst(l);
832                                         char            resname[32];
833
834                                         snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
835                                         erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
836                                         if (!AttributeNumberIsValid(erm->ctidAttNo))
837                                                 elog(ERROR, "could not find junk \"%s\" column",
838                                                          resname);
839                                 }
840                         }
841                 }
842                 else
843                 {
844                         if (operation == CMD_INSERT)
845                                 ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
846                                                                         planstate->plan->targetlist);
847
848                         estate->es_junkFilter = NULL;
849                         if (estate->es_rowMarks)
850                                 elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
851                 }
852         }
853
854         /*
855          * Initialize RETURNING projections if needed.
856          */
857         if (plannedstmt->returningLists)
858         {
859                 TupleTableSlot *slot;
860                 ExprContext *econtext;
861                 ResultRelInfo *resultRelInfo;
862
863                 /*
864                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
865                  * We assume all the sublists will generate the same output tupdesc.
866                  */
867                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
868                                                                  false);
869
870                 /* Set up a slot for the output of the RETURNING projection(s) */
871                 slot = ExecAllocTableSlot(estate->es_tupleTable);
872                 ExecSetSlotDescriptor(slot, tupType);
873                 /* Need an econtext too */
874                 econtext = CreateExprContext(estate);
875
876                 /*
877                  * Build a projection for each result rel.      Note that any SubPlans in
878                  * the RETURNING lists get attached to the topmost plan node.
879                  */
880                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
881                 resultRelInfo = estate->es_result_relations;
882                 foreach(l, plannedstmt->returningLists)
883                 {
884                         List       *rlist = (List *) lfirst(l);
885                         List       *rliststate;
886
887                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
888                         resultRelInfo->ri_projectReturning =
889                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
890                                                                          resultRelInfo->ri_RelationDesc->rd_att);
891                         resultRelInfo++;
892                 }
893         }
894
895         queryDesc->tupDesc = tupType;
896         queryDesc->planstate = planstate;
897
898         /*
899          * If doing SELECT INTO, initialize the "into" relation.  We must wait
900          * till now so we have the "clean" result tuple type to create the new
901          * table from.
902          *
903          * If EXPLAIN, skip creating the "into" relation.
904          */
905         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
906                 OpenIntoRel(queryDesc);
907 }
908
909 /*
910  * Initialize ResultRelInfo data for one result relation
911  */
912 void
913 InitResultRelInfo(ResultRelInfo *resultRelInfo,
914                                   Relation resultRelationDesc,
915                                   Index resultRelationIndex,
916                                   CmdType operation,
917                                   bool doInstrument)
918 {
919         /*
920          * Check valid relkind ... parser and/or planner should have noticed this
921          * already, but let's make sure.
922          */
923         switch (resultRelationDesc->rd_rel->relkind)
924         {
925                 case RELKIND_RELATION:
926                         /* OK */
927                         break;
928                 case RELKIND_SEQUENCE:
929                         ereport(ERROR,
930                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
931                                          errmsg("cannot change sequence \"%s\"",
932                                                         RelationGetRelationName(resultRelationDesc))));
933                         break;
934                 case RELKIND_TOASTVALUE:
935                         ereport(ERROR,
936                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
937                                          errmsg("cannot change TOAST relation \"%s\"",
938                                                         RelationGetRelationName(resultRelationDesc))));
939                         break;
940                 case RELKIND_VIEW:
941                         ereport(ERROR,
942                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
943                                          errmsg("cannot change view \"%s\"",
944                                                         RelationGetRelationName(resultRelationDesc))));
945                         break;
946                 default:
947                         ereport(ERROR,
948                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
949                                          errmsg("cannot change relation \"%s\"",
950                                                         RelationGetRelationName(resultRelationDesc))));
951                         break;
952         }
953
954         /* OK, fill in the node */
955         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
956         resultRelInfo->type = T_ResultRelInfo;
957         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
958         resultRelInfo->ri_RelationDesc = resultRelationDesc;
959         resultRelInfo->ri_NumIndices = 0;
960         resultRelInfo->ri_IndexRelationDescs = NULL;
961         resultRelInfo->ri_IndexRelationInfo = NULL;
962         /* make a copy so as not to depend on relcache info not changing... */
963         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
964         if (resultRelInfo->ri_TrigDesc)
965         {
966                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
967
968                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
969                         palloc0(n * sizeof(FmgrInfo));
970                 if (doInstrument)
971                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
972                 else
973                         resultRelInfo->ri_TrigInstrument = NULL;
974         }
975         else
976         {
977                 resultRelInfo->ri_TrigFunctions = NULL;
978                 resultRelInfo->ri_TrigInstrument = NULL;
979         }
980         resultRelInfo->ri_ConstraintExprs = NULL;
981         resultRelInfo->ri_junkFilter = NULL;
982         resultRelInfo->ri_projectReturning = NULL;
983
984         /*
985          * If there are indices on the result relation, open them and save
986          * descriptors in the result relation info, so that we can add new index
987          * entries for the tuples we add/update.  We need not do this for a
988          * DELETE, however, since deletion doesn't affect indexes.
989          */
990         if (resultRelationDesc->rd_rel->relhasindex &&
991                 operation != CMD_DELETE)
992                 ExecOpenIndices(resultRelInfo);
993 }
994
995 /*
996  * Verify that the tuples to be produced by INSERT or UPDATE match the
997  * target relation's rowtype
998  *
999  * We do this to guard against stale plans.  If plan invalidation is
1000  * functioning properly then we should never get a failure here, but better
1001  * safe than sorry.  Note that this is called after we have obtained lock
1002  * on the target rel, so the rowtype can't change underneath us.
1003  *
1004  * The plan output is represented by its targetlist, because that makes
1005  * handling the dropped-column case easier.
1006  */
1007 static void
1008 ExecCheckPlanOutput(Relation resultRel, List *targetList)
1009 {
1010         TupleDesc       resultDesc = RelationGetDescr(resultRel);
1011         int                     attno = 0;
1012         ListCell   *lc;
1013
1014         foreach(lc, targetList)
1015         {
1016                 TargetEntry *tle = (TargetEntry *) lfirst(lc);
1017                 Form_pg_attribute attr;
1018
1019                 if (tle->resjunk)
1020                         continue;                       /* ignore junk tlist items */
1021
1022                 if (attno >= resultDesc->natts)
1023                         ereport(ERROR,
1024                                         (errcode(ERRCODE_DATATYPE_MISMATCH),
1025                                          errmsg("table row type and query-specified row type do not match"),
1026                                          errdetail("Query has too many columns.")));
1027                 attr = resultDesc->attrs[attno++];
1028
1029                 if (!attr->attisdropped)
1030                 {
1031                         /* Normal case: demand type match */
1032                         if (exprType((Node *) tle->expr) != attr->atttypid)
1033                                 ereport(ERROR,
1034                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1035                                                  errmsg("table row type and query-specified row type do not match"),
1036                                                  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
1037                                                                    format_type_be(attr->atttypid),
1038                                                                    attno,
1039                                                                    format_type_be(exprType((Node *) tle->expr)))));
1040                 }
1041                 else
1042                 {
1043                         /*
1044                          * For a dropped column, we can't check atttypid (it's likely 0).
1045                          * In any case the planner has most likely inserted an INT4 null.
1046                          * What we insist on is just *some* NULL constant.
1047                          */
1048                         if (!IsA(tle->expr, Const) ||
1049                                 !((Const *) tle->expr)->constisnull)
1050                                 ereport(ERROR,
1051                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1052                                                  errmsg("table row type and query-specified row type do not match"),
1053                                                  errdetail("Query provides a value for a dropped column at ordinal position %d.",
1054                                                                    attno)));
1055                 }
1056         }
1057         if (attno != resultDesc->natts)
1058                 ereport(ERROR,
1059                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1060                                  errmsg("table row type and query-specified row type do not match"),
1061                                  errdetail("Query has too few columns.")));
1062 }
1063
1064 /*
1065  *              ExecGetTriggerResultRel
1066  *
1067  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1068  * triggers are fired on one of the result relations of the query, and so
1069  * we can just return a member of the es_result_relations array.  (Note: in
1070  * self-join situations there might be multiple members with the same OID;
1071  * if so it doesn't matter which one we pick.)  However, it is sometimes
1072  * necessary to fire triggers on other relations; this happens mainly when an
1073  * RI update trigger queues additional triggers on other relations, which will
1074  * be processed in the context of the outer query.      For efficiency's sake,
1075  * we want to have a ResultRelInfo for those triggers too; that can avoid
1076  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
1077  * ANALYZE to report the runtimes of such triggers.)  So we make additional
1078  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1079  */
1080 ResultRelInfo *
1081 ExecGetTriggerResultRel(EState *estate, Oid relid)
1082 {
1083         ResultRelInfo *rInfo;
1084         int                     nr;
1085         ListCell   *l;
1086         Relation        rel;
1087         MemoryContext oldcontext;
1088
1089         /* First, search through the query result relations */
1090         rInfo = estate->es_result_relations;
1091         nr = estate->es_num_result_relations;
1092         while (nr > 0)
1093         {
1094                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1095                         return rInfo;
1096                 rInfo++;
1097                 nr--;
1098         }
1099         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1100         foreach(l, estate->es_trig_target_relations)
1101         {
1102                 rInfo = (ResultRelInfo *) lfirst(l);
1103                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1104                         return rInfo;
1105         }
1106         /* Nope, so we need a new one */
1107
1108         /*
1109          * Open the target relation's relcache entry.  We assume that an
1110          * appropriate lock is still held by the backend from whenever the trigger
1111          * event got queued, so we need take no new lock here.
1112          */
1113         rel = heap_open(relid, NoLock);
1114
1115         /*
1116          * Make the new entry in the right context.  Currently, we don't need any
1117          * index information in ResultRelInfos used only for triggers, so tell
1118          * InitResultRelInfo it's a DELETE.
1119          */
1120         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1121         rInfo = makeNode(ResultRelInfo);
1122         InitResultRelInfo(rInfo,
1123                                           rel,
1124                                           0,            /* dummy rangetable index */
1125                                           CMD_DELETE,
1126                                           estate->es_instrument);
1127         estate->es_trig_target_relations =
1128                 lappend(estate->es_trig_target_relations, rInfo);
1129         MemoryContextSwitchTo(oldcontext);
1130
1131         return rInfo;
1132 }
1133
1134 /*
1135  *              ExecContextForcesOids
1136  *
1137  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1138  * we need to ensure that result tuples have space for an OID iff they are
1139  * going to be stored into a relation that has OIDs.  In other contexts
1140  * we are free to choose whether to leave space for OIDs in result tuples
1141  * (we generally don't want to, but we do if a physical-tlist optimization
1142  * is possible).  This routine checks the plan context and returns TRUE if the
1143  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1144  * *hasoids is set to the required value.
1145  *
1146  * One reason this is ugly is that all plan nodes in the plan tree will emit
1147  * tuples with space for an OID, though we really only need the topmost node
1148  * to do so.  However, node types like Sort don't project new tuples but just
1149  * return their inputs, and in those cases the requirement propagates down
1150  * to the input node.  Eventually we might make this code smart enough to
1151  * recognize how far down the requirement really goes, but for now we just
1152  * make all plan nodes do the same thing if the top level forces the choice.
1153  *
1154  * We assume that estate->es_result_relation_info is already set up to
1155  * describe the target relation.  Note that in an UPDATE that spans an
1156  * inheritance tree, some of the target relations may have OIDs and some not.
1157  * We have to make the decisions on a per-relation basis as we initialize
1158  * each of the child plans of the topmost Append plan.
1159  *
1160  * SELECT INTO is even uglier, because we don't have the INTO relation's
1161  * descriptor available when this code runs; we have to look aside at a
1162  * flag set by InitPlan().
1163  */
1164 bool
1165 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1166 {
1167         if (planstate->state->es_select_into)
1168         {
1169                 *hasoids = planstate->state->es_into_oids;
1170                 return true;
1171         }
1172         else
1173         {
1174                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1175
1176                 if (ri != NULL)
1177                 {
1178                         Relation        rel = ri->ri_RelationDesc;
1179
1180                         if (rel != NULL)
1181                         {
1182                                 *hasoids = rel->rd_rel->relhasoids;
1183                                 return true;
1184                         }
1185                 }
1186         }
1187
1188         return false;
1189 }
1190
1191 /* ----------------------------------------------------------------
1192  *              ExecEndPlan
1193  *
1194  *              Cleans up the query plan -- closes files and frees up storage
1195  *
1196  * NOTE: we are no longer very worried about freeing storage per se
1197  * in this code; FreeExecutorState should be guaranteed to release all
1198  * memory that needs to be released.  What we are worried about doing
1199  * is closing relations and dropping buffer pins.  Thus, for example,
1200  * tuple tables must be cleared or dropped to ensure pins are released.
1201  * ----------------------------------------------------------------
1202  */
1203 static void
1204 ExecEndPlan(PlanState *planstate, EState *estate)
1205 {
1206         ResultRelInfo *resultRelInfo;
1207         int                     i;
1208         ListCell   *l;
1209
1210         /*
1211          * shut down any PlanQual processing we were doing
1212          */
1213         if (estate->es_evalPlanQual != NULL)
1214                 EndEvalPlanQual(estate);
1215
1216         /*
1217          * shut down the node-type-specific query processing
1218          */
1219         ExecEndNode(planstate);
1220
1221         /*
1222          * for subplans too
1223          */
1224         foreach(l, estate->es_subplanstates)
1225         {
1226                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1227
1228                 ExecEndNode(subplanstate);
1229         }
1230
1231         /*
1232          * destroy the executor "tuple" table.
1233          */
1234         ExecDropTupleTable(estate->es_tupleTable, true);
1235         estate->es_tupleTable = NULL;
1236
1237         /*
1238          * close the result relation(s) if any, but hold locks until xact commit.
1239          */
1240         resultRelInfo = estate->es_result_relations;
1241         for (i = estate->es_num_result_relations; i > 0; i--)
1242         {
1243                 /* Close indices and then the relation itself */
1244                 ExecCloseIndices(resultRelInfo);
1245                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1246                 resultRelInfo++;
1247         }
1248
1249         /*
1250          * likewise close any trigger target relations
1251          */
1252         foreach(l, estate->es_trig_target_relations)
1253         {
1254                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1255                 /* Close indices and then the relation itself */
1256                 ExecCloseIndices(resultRelInfo);
1257                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1258         }
1259
1260         /*
1261          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1262          */
1263         foreach(l, estate->es_rowMarks)
1264         {
1265                 ExecRowMark *erm = lfirst(l);
1266
1267                 heap_close(erm->relation, NoLock);
1268         }
1269 }
1270
1271 /* ----------------------------------------------------------------
1272  *              ExecutePlan
1273  *
1274  *              processes the query plan to retrieve 'numberTuples' tuples in the
1275  *              direction specified.
1276  *
1277  *              Retrieves all tuples if numberTuples is 0
1278  *
1279  *              result is either a slot containing the last tuple in the case
1280  *              of a SELECT or NULL otherwise.
1281  *
1282  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1283  * user can see it
1284  * ----------------------------------------------------------------
1285  */
1286 static TupleTableSlot *
1287 ExecutePlan(EState *estate,
1288                         PlanState *planstate,
1289                         CmdType operation,
1290                         long numberTuples,
1291                         ScanDirection direction,
1292                         DestReceiver *dest)
1293 {
1294         JunkFilter *junkfilter;
1295         TupleTableSlot *planSlot;
1296         TupleTableSlot *slot;
1297         ItemPointer tupleid = NULL;
1298         ItemPointerData tuple_ctid;
1299         long            current_tuple_count;
1300         TupleTableSlot *result;
1301
1302         /*
1303          * initialize local variables
1304          */
1305         current_tuple_count = 0;
1306         result = NULL;
1307
1308         /*
1309          * Set the direction.
1310          */
1311         estate->es_direction = direction;
1312
1313         /*
1314          * Process BEFORE EACH STATEMENT triggers
1315          */
1316         switch (operation)
1317         {
1318                 case CMD_UPDATE:
1319                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1320                         break;
1321                 case CMD_DELETE:
1322                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1323                         break;
1324                 case CMD_INSERT:
1325                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1326                         break;
1327                 default:
1328                         /* do nothing */
1329                         break;
1330         }
1331
1332         /*
1333          * Loop until we've processed the proper number of tuples from the plan.
1334          */
1335
1336         for (;;)
1337         {
1338                 /* Reset the per-output-tuple exprcontext */
1339                 ResetPerTupleExprContext(estate);
1340
1341                 /*
1342                  * Execute the plan and obtain a tuple
1343                  */
1344 lnext:  ;
1345                 if (estate->es_useEvalPlan)
1346                 {
1347                         planSlot = EvalPlanQualNext(estate);
1348                         if (TupIsNull(planSlot))
1349                                 planSlot = ExecProcNode(planstate);
1350                 }
1351                 else
1352                         planSlot = ExecProcNode(planstate);
1353
1354                 /*
1355                  * if the tuple is null, then we assume there is nothing more to
1356                  * process so we just return null...
1357                  */
1358                 if (TupIsNull(planSlot))
1359                 {
1360                         result = NULL;
1361                         break;
1362                 }
1363                 slot = planSlot;
1364
1365                 /*
1366                  * If we have a junk filter, then project a new tuple with the junk
1367                  * removed.
1368                  *
1369                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1370                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1371                  * because that tuple slot has the wrong descriptor.)
1372                  *
1373                  * But first, extract all the junk information we need.
1374                  */
1375                 if ((junkfilter = estate->es_junkFilter) != NULL)
1376                 {
1377                         /*
1378                          * Process any FOR UPDATE or FOR SHARE locking requested.
1379                          */
1380                         if (estate->es_rowMarks != NIL)
1381                         {
1382                                 ListCell   *l;
1383
1384                 lmark:  ;
1385                                 foreach(l, estate->es_rowMarks)
1386                                 {
1387                                         ExecRowMark *erm = lfirst(l);
1388                                         Datum           datum;
1389                                         bool            isNull;
1390                                         HeapTupleData tuple;
1391                                         Buffer          buffer;
1392                                         ItemPointerData update_ctid;
1393                                         TransactionId update_xmax;
1394                                         TupleTableSlot *newSlot;
1395                                         LockTupleMode lockmode;
1396                                         HTSU_Result test;
1397
1398                                         datum = ExecGetJunkAttribute(slot,
1399                                                                                                  erm->ctidAttNo,
1400                                                                                                  &isNull);
1401                                         /* shouldn't ever get a null result... */
1402                                         if (isNull)
1403                                                 elog(ERROR, "ctid is NULL");
1404
1405                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1406
1407                                         if (erm->forUpdate)
1408                                                 lockmode = LockTupleExclusive;
1409                                         else
1410                                                 lockmode = LockTupleShared;
1411
1412                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1413                                                                                    &update_ctid, &update_xmax,
1414                                                                                    estate->es_output_cid,
1415                                                                                    lockmode, erm->noWait);
1416                                         ReleaseBuffer(buffer);
1417                                         switch (test)
1418                                         {
1419                                                 case HeapTupleSelfUpdated:
1420                                                         /* treat it as deleted; do not process */
1421                                                         goto lnext;
1422
1423                                                 case HeapTupleMayBeUpdated:
1424                                                         break;
1425
1426                                                 case HeapTupleUpdated:
1427                                                         if (IsXactIsoLevelSerializable)
1428                                                                 ereport(ERROR,
1429                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1430                                                                   errmsg("could not serialize access due to concurrent update")));
1431                                                         if (!ItemPointerEquals(&update_ctid,
1432                                                                                                    &tuple.t_self))
1433                                                         {
1434                                                                 /* updated, so look at updated version */
1435                                                                 newSlot = EvalPlanQual(estate,
1436                                                                                                            erm->rti,
1437                                                                                                            &update_ctid,
1438                                                                                                            update_xmax);
1439                                                                 if (!TupIsNull(newSlot))
1440                                                                 {
1441                                                                         slot = planSlot = newSlot;
1442                                                                         estate->es_useEvalPlan = true;
1443                                                                         goto lmark;
1444                                                                 }
1445                                                         }
1446
1447                                                         /*
1448                                                          * if tuple was deleted or PlanQual failed for
1449                                                          * updated tuple - we must not return this tuple!
1450                                                          */
1451                                                         goto lnext;
1452
1453                                                 default:
1454                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1455                                                                  test);
1456                                                         return NULL;
1457                                         }
1458                                 }
1459                         }
1460
1461                         /*
1462                          * extract the 'ctid' junk attribute.
1463                          */
1464                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1465                         {
1466                                 Datum           datum;
1467                                 bool            isNull;
1468
1469                                 datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
1470                                                                                          &isNull);
1471                                 /* shouldn't ever get a null result... */
1472                                 if (isNull)
1473                                         elog(ERROR, "ctid is NULL");
1474
1475                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1476                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1477                                 tupleid = &tuple_ctid;
1478                         }
1479
1480                         /*
1481                          * Create a new "clean" tuple with all junk attributes removed. We
1482                          * don't need to do this for DELETE, however (there will in fact
1483                          * be no non-junk attributes in a DELETE!)
1484                          */
1485                         if (operation != CMD_DELETE)
1486                                 slot = ExecFilterJunk(junkfilter, slot);
1487                 }
1488
1489                 /*
1490                  * now that we have a tuple, do the appropriate thing with it.. either
1491                  * return it to the user, add it to a relation someplace, delete it
1492                  * from a relation, or modify some of its attributes.
1493                  */
1494                 switch (operation)
1495                 {
1496                         case CMD_SELECT:
1497                                 ExecSelect(slot, dest, estate);
1498                                 result = slot;
1499                                 break;
1500
1501                         case CMD_INSERT:
1502                                 ExecInsert(slot, tupleid, planSlot, dest, estate);
1503                                 result = NULL;
1504                                 break;
1505
1506                         case CMD_DELETE:
1507                                 ExecDelete(tupleid, planSlot, dest, estate);
1508                                 result = NULL;
1509                                 break;
1510
1511                         case CMD_UPDATE:
1512                                 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1513                                 result = NULL;
1514                                 break;
1515
1516                         default:
1517                                 elog(ERROR, "unrecognized operation code: %d",
1518                                          (int) operation);
1519                                 result = NULL;
1520                                 break;
1521                 }
1522
1523                 /*
1524                  * check our tuple count.. if we've processed the proper number then
1525                  * quit, else loop again and process more tuples.  Zero numberTuples
1526                  * means no limit.
1527                  */
1528                 current_tuple_count++;
1529                 if (numberTuples && numberTuples == current_tuple_count)
1530                         break;
1531         }
1532
1533         /*
1534          * Process AFTER EACH STATEMENT triggers
1535          */
1536         switch (operation)
1537         {
1538                 case CMD_UPDATE:
1539                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1540                         break;
1541                 case CMD_DELETE:
1542                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1543                         break;
1544                 case CMD_INSERT:
1545                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1546                         break;
1547                 default:
1548                         /* do nothing */
1549                         break;
1550         }
1551
1552         /*
1553          * here, result is either a slot containing a tuple in the case of a
1554          * SELECT or NULL otherwise.
1555          */
1556         return result;
1557 }
1558
1559 /* ----------------------------------------------------------------
1560  *              ExecSelect
1561  *
1562  *              SELECTs are easy.. we just pass the tuple to the appropriate
1563  *              output function.
1564  * ----------------------------------------------------------------
1565  */
1566 static void
1567 ExecSelect(TupleTableSlot *slot,
1568                    DestReceiver *dest,
1569                    EState *estate)
1570 {
1571         (*dest->receiveSlot) (slot, dest);
1572         IncrRetrieved();
1573         (estate->es_processed)++;
1574 }
1575
1576 /* ----------------------------------------------------------------
1577  *              ExecInsert
1578  *
1579  *              INSERTs are trickier.. we have to insert the tuple into
1580  *              the base relation and insert appropriate tuples into the
1581  *              index relations.
1582  * ----------------------------------------------------------------
1583  */
1584 static void
1585 ExecInsert(TupleTableSlot *slot,
1586                    ItemPointer tupleid,
1587                    TupleTableSlot *planSlot,
1588                    DestReceiver *dest,
1589                    EState *estate)
1590 {
1591         HeapTuple       tuple;
1592         ResultRelInfo *resultRelInfo;
1593         Relation        resultRelationDesc;
1594         Oid                     newId;
1595
1596         /*
1597          * get the heap tuple out of the tuple table slot, making sure we have a
1598          * writable copy
1599          */
1600         tuple = ExecMaterializeSlot(slot);
1601
1602         /*
1603          * get information on the (current) result relation
1604          */
1605         resultRelInfo = estate->es_result_relation_info;
1606         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1607
1608         /* BEFORE ROW INSERT Triggers */
1609         if (resultRelInfo->ri_TrigDesc &&
1610                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1611         {
1612                 HeapTuple       newtuple;
1613
1614                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1615
1616                 if (newtuple == NULL)   /* "do nothing" */
1617                         return;
1618
1619                 if (newtuple != tuple)  /* modified by Trigger(s) */
1620                 {
1621                         /*
1622                          * Put the modified tuple into a slot for convenience of routines
1623                          * below.  We assume the tuple was allocated in per-tuple memory
1624                          * context, and therefore will go away by itself. The tuple table
1625                          * slot should not try to clear it.
1626                          */
1627                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1628
1629                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1630                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1631                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1632                         slot = newslot;
1633                         tuple = newtuple;
1634                 }
1635         }
1636
1637         /*
1638          * Check the constraints of the tuple
1639          */
1640         if (resultRelationDesc->rd_att->constr)
1641                 ExecConstraints(resultRelInfo, slot, estate);
1642
1643         /*
1644          * insert the tuple
1645          *
1646          * Note: heap_insert returns the tid (location) of the new tuple in the
1647          * t_self field.
1648          */
1649         newId = heap_insert(resultRelationDesc, tuple,
1650                                                 estate->es_output_cid,
1651                                                 true, true);
1652
1653         IncrAppended();
1654         (estate->es_processed)++;
1655         estate->es_lastoid = newId;
1656         setLastTid(&(tuple->t_self));
1657
1658         /*
1659          * insert index entries for tuple
1660          */
1661         if (resultRelInfo->ri_NumIndices > 0)
1662                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1663
1664         /* AFTER ROW INSERT Triggers */
1665         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1666
1667         /* Process RETURNING if present */
1668         if (resultRelInfo->ri_projectReturning)
1669                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1670                                                          slot, planSlot, dest);
1671 }
1672
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 *
 *		tupleid: TID of the target row (already pointing at the current
 *		result relation).  planSlot is the tuple produced by the top plan
 *		node, needed only to evaluate RETURNING expressions; dest receives
 *		any RETURNING output.
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* set by heap_delete on conflict */
	TransactionId update_xmax;		/* ditto: xmax of the conflicting update */

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers: a trigger may suppress the delete */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * We may loop back here (from the HeapTupleUpdated case) after an
	 * EvalPlanQual recheck redirects us to a newer row version.
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: we now hold the deleted tuple */
			break;

		case HeapTupleUpdated:
			/* concurrent update/delete committed while we waited */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				/*
				 * READ COMMITTED: the row was updated (not deleted), so run
				 * EvalPlanQual to see if the newest version still satisfies
				 * our quals; if so, retry the delete on that version.
				 */
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.  SnapshotAny
		 * is required because the tuple is already deleted by us and so
		 * would not be visible under any MVCC snapshot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer pin it references */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1802
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 *
 *		slot holds the new tuple value; tupleid is the TID of the row
 *		being replaced.  planSlot is the raw output of the top plan node
 *		(used for RETURNING); dest receives any RETURNING output.
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* set by heap_update on conflict */
	TransactionId update_xmax;		/* ditto: xmax of the conflicting update */

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers: may suppress or replace the new tuple */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: the update went through */
			break;

		case HeapTupleUpdated:
			/* concurrent update/delete committed while we waited */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				/*
				 * READ COMMITTED: the row was updated (not deleted).  Run
				 * EvalPlanQual against the newest version; if it still
				 * passes our quals, re-derive the new tuple from the EPQ
				 * output (junk attributes removed) and retry the update.
				 */
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1970
1971 /*
1972  * ExecRelCheck --- check that tuple meets constraints for result relation
1973  */
1974 static const char *
1975 ExecRelCheck(ResultRelInfo *resultRelInfo,
1976                          TupleTableSlot *slot, EState *estate)
1977 {
1978         Relation        rel = resultRelInfo->ri_RelationDesc;
1979         int                     ncheck = rel->rd_att->constr->num_check;
1980         ConstrCheck *check = rel->rd_att->constr->check;
1981         ExprContext *econtext;
1982         MemoryContext oldContext;
1983         List       *qual;
1984         int                     i;
1985
1986         /*
1987          * If first time through for this result relation, build expression
1988          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1989          * memory context so they'll survive throughout the query.
1990          */
1991         if (resultRelInfo->ri_ConstraintExprs == NULL)
1992         {
1993                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1994                 resultRelInfo->ri_ConstraintExprs =
1995                         (List **) palloc(ncheck * sizeof(List *));
1996                 for (i = 0; i < ncheck; i++)
1997                 {
1998                         /* ExecQual wants implicit-AND form */
1999                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
2000                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
2001                                 ExecPrepareExpr((Expr *) qual, estate);
2002                 }
2003                 MemoryContextSwitchTo(oldContext);
2004         }
2005
2006         /*
2007          * We will use the EState's per-tuple context for evaluating constraint
2008          * expressions (creating it if it's not already there).
2009          */
2010         econtext = GetPerTupleExprContext(estate);
2011
2012         /* Arrange for econtext's scan tuple to be the tuple under test */
2013         econtext->ecxt_scantuple = slot;
2014
2015         /* And evaluate the constraints */
2016         for (i = 0; i < ncheck; i++)
2017         {
2018                 qual = resultRelInfo->ri_ConstraintExprs[i];
2019
2020                 /*
2021                  * NOTE: SQL92 specifies that a NULL result from a constraint
2022                  * expression is not to be treated as a failure.  Therefore, tell
2023                  * ExecQual to return TRUE for NULL.
2024                  */
2025                 if (!ExecQual(qual, econtext, true))
2026                         return check[i].ccname;
2027         }
2028
2029         /* NULL result means no error */
2030         return NULL;
2031 }
2032
2033 void
2034 ExecConstraints(ResultRelInfo *resultRelInfo,
2035                                 TupleTableSlot *slot, EState *estate)
2036 {
2037         Relation        rel = resultRelInfo->ri_RelationDesc;
2038         TupleConstr *constr = rel->rd_att->constr;
2039
2040         Assert(constr);
2041
2042         if (constr->has_not_null)
2043         {
2044                 int                     natts = rel->rd_att->natts;
2045                 int                     attrChk;
2046
2047                 for (attrChk = 1; attrChk <= natts; attrChk++)
2048                 {
2049                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
2050                                 slot_attisnull(slot, attrChk))
2051                                 ereport(ERROR,
2052                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2053                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2054                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
2055                 }
2056         }
2057
2058         if (constr->num_check > 0)
2059         {
2060                 const char *failed;
2061
2062                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2063                         ereport(ERROR,
2064                                         (errcode(ERRCODE_CHECK_VIOLATION),
2065                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2066                                                         RelationGetRelationName(rel), failed)));
2067         }
2068 }
2069
2070 /*
2071  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
2072  *
2073  * projectReturning: RETURNING projection info for current result rel
2074  * tupleSlot: slot holding tuple actually inserted/updated/deleted
2075  * planSlot: slot holding tuple returned by top plan node
2076  * dest: where to send the output
2077  */
2078 static void
2079 ExecProcessReturning(ProjectionInfo *projectReturning,
2080                                          TupleTableSlot *tupleSlot,
2081                                          TupleTableSlot *planSlot,
2082                                          DestReceiver *dest)
2083 {
2084         ExprContext *econtext = projectReturning->pi_exprContext;
2085         TupleTableSlot *retSlot;
2086
2087         /*
2088          * Reset per-tuple memory context to free any expression evaluation
2089          * storage allocated in the previous cycle.
2090          */
2091         ResetExprContext(econtext);
2092
2093         /* Make tuple and any needed join variables available to ExecProject */
2094         econtext->ecxt_scantuple = tupleSlot;
2095         econtext->ecxt_outertuple = planSlot;
2096
2097         /* Compute the RETURNING expressions */
2098         retSlot = ExecProject(projectReturning, NULL);
2099
2100         /* Send to dest */
2101         (*dest->receiveSlot) (retSlot, dest);
2102
2103         ExecClearTuple(retSlot);
2104 }
2105
/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;	/* latest row version, once located */
	SnapshotData SnapshotDirty;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		/* not the result relation: search es_rowMarks for this RTE */
		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			if (((ExecRowMark *) lfirst(l))->rti == rti)
			{
				relation = ((ExecRowMark *) lfirst(l))->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by other transaction then we have to
			 * wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple. (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.	Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}

	/*
	 * If this is request for another RTE - Ra, - then we have to check wasn't
	 * PlanQual requested for Ra already and if so then Ra' row was updated
	 * again and we have to re-start old execution for Ra and forget all what
	 * we done after Ra was suspended. Cool? -:))
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		/* unwind the stack until the entry for rti is back on top */
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}

	/*
	 * If we are requested for another RTE then we have to suspend execution
	 * of current PlanQual and start execution for new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);

	/*
	 * Ok - we're requested for the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we implement
	 * ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}

	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE' tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	return EvalPlanQualNext(estate);
}
2393
2394 static TupleTableSlot *
2395 EvalPlanQualNext(EState *estate)
2396 {
2397         evalPlanQual *epq = estate->es_evalPlanQual;
2398         MemoryContext oldcontext;
2399         TupleTableSlot *slot;
2400
2401         Assert(epq->rti != 0);
2402
2403 lpqnext:;
2404         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2405         slot = ExecProcNode(epq->planstate);
2406         MemoryContextSwitchTo(oldcontext);
2407
2408         /*
2409          * No more tuples for this PQ. Continue previous one.
2410          */
2411         if (TupIsNull(slot))
2412         {
2413                 evalPlanQual *oldepq;
2414
2415                 /* stop execution */
2416                 EvalPlanQualStop(epq);
2417                 /* pop old PQ from the stack */
2418                 oldepq = epq->next;
2419                 if (oldepq == NULL)
2420                 {
2421                         /* this is the first (oldest) PQ - mark as free */
2422                         epq->rti = 0;
2423                         estate->es_useEvalPlan = false;
2424                         /* and continue Query execution */
2425                         return NULL;
2426                 }
2427                 Assert(oldepq->rti != 0);
2428                 /* push current PQ to freePQ stack */
2429                 oldepq->free = epq;
2430                 epq = oldepq;
2431                 estate->es_evalPlanQual = epq;
2432                 goto lpqnext;
2433         }
2434
2435         return slot;
2436 }
2437
2438 static void
2439 EndEvalPlanQual(EState *estate)
2440 {
2441         evalPlanQual *epq = estate->es_evalPlanQual;
2442
2443         if (epq->rti == 0)                      /* plans already shutdowned */
2444         {
2445                 Assert(epq->next == NULL);
2446                 return;
2447         }
2448
2449         for (;;)
2450         {
2451                 evalPlanQual *oldepq;
2452
2453                 /* stop execution */
2454                 EvalPlanQualStop(epq);
2455                 /* pop old PQ from the stack */
2456                 oldepq = epq->next;
2457                 if (oldepq == NULL)
2458                 {
2459                         /* this is the first (oldest) PQ - mark as free */
2460                         epq->rti = 0;
2461                         estate->es_useEvalPlan = false;
2462                         break;
2463                 }
2464                 Assert(oldepq->rti != 0);
2465                 /* push current PQ to freePQ stack */
2466                 oldepq->free = epq;
2467                 epq = oldepq;
2468                 estate->es_evalPlanQual = epq;
2469         }
2470 }
2471
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	/* rangetable length sizes the es_evTuple / es_evTupleNull arrays */
	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	/* all initialization happens in the child estate's query context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_param_list_info = estate->es_param_list_info;
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.	This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2564
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	/* perform cleanup within this level's own query memory context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/* shut down the main plan tree, then each subplan */
	ExecEndNode(epq->planstate);

	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* release this level's tuple table, including the tuples it holds */
	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	/* free this level's stored target tuple, if any, and clear the slot */
	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	/* mark this stack entry as no longer initialized */
	epq->estate = NULL;
	epq->planstate = NULL;
}
2616
2617 /*
2618  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2619  *
2620  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2621  * are looking at a row returned by the EvalPlanQual machinery, we need
2622  * to look at the subsidiary state instead.
2623  */
2624 PlanState *
2625 ExecGetActivePlanTree(QueryDesc *queryDesc)
2626 {
2627         EState     *estate = queryDesc->estate;
2628
2629         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2630                 return estate->es_evalPlanQual->planstate;
2631         else
2632                 return queryDesc->planstate;
2633 }
2634
2635
/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.  The intorel_* routines below
 * implement that receiver's methods; OpenIntoRel/CloseIntoRel manage
 * the target relation's lifetime.
 */

/* Private state for the SELECT INTO DestReceiver */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState     *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	bool		use_wal;		/* do we need to WAL-log our writes? */
} DR_intorel;
2650
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	/* validation only; the transformed value itself is passed on below */
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	/* done with our scratch copy of the tuple descriptor */
	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.
	 */
	myState->use_wal = XLogArchivingActive();
	myState->rel = intoRelationDesc;

	/* use_wal off requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
2790
2791 /*
2792  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2793  */
2794 static void
2795 CloseIntoRel(QueryDesc *queryDesc)
2796 {
2797         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2798
2799         /* OpenIntoRel might never have gotten called */
2800         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2801         {
2802                 /* If we skipped using WAL, must heap_sync before commit */
2803                 if (!myState->use_wal)
2804                         heap_sync(myState->rel);
2805
2806                 /* close rel, but keep lock until commit */
2807                 heap_close(myState->rel, NoLock);
2808
2809                 myState->rel = NULL;
2810         }
2811 }
2812
2813 /*
2814  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2815  *
2816  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2817  * we just leave the private fields zeroed here.  OpenIntoRel will
2818  * fill them in.
2819  */
2820 DestReceiver *
2821 CreateIntoRelDestReceiver(void)
2822 {
2823         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2824
2825         self->pub.receiveSlot = intorel_receive;
2826         self->pub.rStartup = intorel_startup;
2827         self->pub.rShutdown = intorel_shutdown;
2828         self->pub.rDestroy = intorel_destroy;
2829         self->pub.mydest = DestIntoRel;
2830
2831         return (DestReceiver *) self;
2832 }
2833
/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op: OpenIntoRel has already created and opened the target rel */
}
2842
2843 /*
2844  * intorel_receive --- receive one tuple
2845  */
2846 static void
2847 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2848 {
2849         DR_intorel *myState = (DR_intorel *) self;
2850         HeapTuple       tuple;
2851
2852         /*
2853          * get the heap tuple out of the tuple table slot, making sure we have a
2854          * writable copy
2855          */
2856         tuple = ExecMaterializeSlot(slot);
2857
2858         heap_insert(myState->rel,
2859                                 tuple,
2860                                 myState->estate->es_output_cid,
2861                                 myState->use_wal,
2862                                 false);                 /* never any point in using FSM */
2863
2864         /* We know this is a newly created relation, so there are no indexes */
2865
2866         IncrAppended();
2867 }
2868
/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op: the target relation is closed by CloseIntoRel instead */
}
2877
/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	/* receiver struct was palloc'd in CreateIntoRelDestReceiver; free it */
	pfree(self);
}