1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.287 2007/02/20 17:32:14 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "optimizer/clauses.h"
49 #include "parser/parse_clause.h"
50 #include "parser/parsetree.h"
51 #include "storage/smgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55
56
57 typedef struct evalPlanQual
58 {
59         Index           rti;
60         EState     *estate;
61         PlanState  *planstate;
62         struct evalPlanQual *next;      /* stack of active PlanQual plans */
63         struct evalPlanQual *free;      /* list of free PlanQual plans */
64 } evalPlanQual;
65
66 /* decls for local routines only used within this module */
67 static void InitPlan(QueryDesc *queryDesc, int eflags);
68 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
69                                   Index resultRelationIndex,
70                                   List *rangeTable,
71                                   CmdType operation,
72                                   bool doInstrument);
73 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
74                         CmdType operation,
75                         long numberTuples,
76                         ScanDirection direction,
77                         DestReceiver *dest);
78 static void ExecSelect(TupleTableSlot *slot,
79                    DestReceiver *dest, EState *estate);
80 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
81                    TupleTableSlot *planSlot,
82                    DestReceiver *dest, EState *estate);
83 static void ExecDelete(ItemPointer tupleid,
84                    TupleTableSlot *planSlot,
85                    DestReceiver *dest, EState *estate);
86 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
87                    TupleTableSlot *planSlot,
88                    DestReceiver *dest, EState *estate);
89 static void ExecProcessReturning(ProjectionInfo *projectReturning,
90                                          TupleTableSlot *tupleSlot,
91                                          TupleTableSlot *planSlot,
92                                          DestReceiver *dest);
93 static TupleTableSlot *EvalPlanQualNext(EState *estate);
94 static void EndEvalPlanQual(EState *estate);
95 static void ExecCheckRTEPerms(RangeTblEntry *rte);
96 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
97 static void ExecCheckRangeTblReadOnly(List *rtable);
98 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
99                                   evalPlanQual *priorepq);
100 static void EvalPlanQualStop(evalPlanQual *epq);
101 static void OpenIntoRel(QueryDesc *queryDesc);
102 static void CloseIntoRel(QueryDesc *queryDesc);
103 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
104 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
105 static void intorel_shutdown(DestReceiver *self);
106 static void intorel_destroy(DestReceiver *self);
107
108 /* end of local decls */
109
110
111 /* ----------------------------------------------------------------
112  *              ExecutorStart
113  *
114  *              This routine must be called at the beginning of any execution of any
115  *              query plan
116  *
117  * Takes a QueryDesc previously created by CreateQueryDesc (it's not entirely
118  * clear why the two functions are kept separate, but...).  The tupDesc
119  * field of the QueryDesc is filled in to describe the tuples that will be
120  * returned, and the internal fields (estate and planstate) are set up.
121  *
122  * eflags contains flag bits as described in executor.h.
123  *
124  * NB: the CurrentMemoryContext when this is called will become the parent
125  * of the per-query context used for this Executor invocation.
126  * ----------------------------------------------------------------
127  */
128 void
129 ExecutorStart(QueryDesc *queryDesc, int eflags)
130 {
131         EState     *estate;
132         MemoryContext oldcontext;
133
134         /* sanity checks: queryDesc must not be started already */
135         Assert(queryDesc != NULL);
136         Assert(queryDesc->estate == NULL);
137
138         /*
139          * If the transaction is read-only, we need to check if any writes are
140          * planned to non-temporary tables.  EXPLAIN is considered read-only.
141          */
142         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
143                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
144
145         /*
146          * Build EState, switch into per-query memory context for startup.
147          */
148         estate = CreateExecutorState();
149         queryDesc->estate = estate;
150
151         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
152
153         /*
154          * Fill in parameters, if any, from queryDesc
155          */
156         estate->es_param_list_info = queryDesc->params;
157
158         if (queryDesc->plannedstmt->nParamExec > 0)
159                 estate->es_param_exec_vals = (ParamExecData *)
160                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
161
162         /*
163          * Copy other important information into the EState
164          */
165         estate->es_snapshot = queryDesc->snapshot;
166         estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
167         estate->es_instrument = queryDesc->doInstrument;
168
169         /*
170          * Initialize the plan state tree
171          */
172         InitPlan(queryDesc, eflags);
173
174         MemoryContextSwitchTo(oldcontext);
175 }
176
177 /* ----------------------------------------------------------------
178  *              ExecutorRun
179  *
180  *              This is the main routine of the executor module. It accepts
181  *              the query descriptor from the traffic cop and executes the
182  *              query plan.
183  *
184  *              ExecutorStart must have been called already.
185  *
186  *              If direction is NoMovementScanDirection then nothing is done
187  *              except to start up/shut down the destination.  Otherwise,
188  *              we retrieve up to 'count' tuples in the specified direction.
189  *
190  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
191  *              completion.
192  *
193  * ----------------------------------------------------------------
194  */
195 TupleTableSlot *
196 ExecutorRun(QueryDesc *queryDesc,
197                         ScanDirection direction, long count)
198 {
199         EState     *estate;
200         CmdType         operation;
201         DestReceiver *dest;
202         bool            sendTuples;
203         TupleTableSlot *result;
204         MemoryContext oldcontext;
205
206         /* sanity checks */
207         Assert(queryDesc != NULL);
208
209         estate = queryDesc->estate;
210
211         Assert(estate != NULL);
212
213         /*
214          * Switch into per-query memory context
215          */
216         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
217
218         /*
219          * extract information from the query descriptor.
220          */
221         operation = queryDesc->operation;
222         dest = queryDesc->dest;
223
224         /*
225          * startup tuple receiver, if we will be emitting tuples
226          */
227         estate->es_processed = 0;
228         estate->es_lastoid = InvalidOid;
229
230         sendTuples = (operation == CMD_SELECT ||
231                                   queryDesc->plannedstmt->returningLists);
232
233         if (sendTuples)
234                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
235
236         /*
237          * run plan
238          */
239         if (ScanDirectionIsNoMovement(direction))
240                 result = NULL;
241         else
242                 result = ExecutePlan(estate,
243                                                          queryDesc->planstate,
244                                                          operation,
245                                                          count,
246                                                          direction,
247                                                          dest);
248
249         /*
250          * shutdown tuple receiver, if we started it
251          */
252         if (sendTuples)
253                 (*dest->rShutdown) (dest);
254
255         MemoryContextSwitchTo(oldcontext);
256
257         return result;
258 }
259
260 /* ----------------------------------------------------------------
261  *              ExecutorEnd
262  *
263  *              This routine must be called at the end of execution of any
264  *              query plan
265  * ----------------------------------------------------------------
266  */
267 void
268 ExecutorEnd(QueryDesc *queryDesc)
269 {
270         EState     *estate;
271         MemoryContext oldcontext;
272
273         /* sanity checks */
274         Assert(queryDesc != NULL);
275
276         estate = queryDesc->estate;
277
278         Assert(estate != NULL);
279
280         /*
281          * Switch into per-query memory context to run ExecEndPlan
282          */
283         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
284
285         ExecEndPlan(queryDesc->planstate, estate);
286
287         /*
288          * Close the SELECT INTO relation if any
289          */
290         if (estate->es_select_into)
291                 CloseIntoRel(queryDesc);
292
293         /*
294          * Must switch out of context before destroying it
295          */
296         MemoryContextSwitchTo(oldcontext);
297
298         /*
299          * Release EState and per-query memory context.  This should release
300          * everything the executor has allocated.
301          */
302         FreeExecutorState(estate);
303
304         /* Reset queryDesc fields that no longer point to anything */
305         queryDesc->tupDesc = NULL;
306         queryDesc->estate = NULL;
307         queryDesc->planstate = NULL;
308 }
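/*
 * Editor's illustrative sketch (not part of the original file): the minimal
 * caller sequence for the three interface routines above.  The helper name
 * is hypothetical; real callers such as the portal code wrap these calls in
 * snapshot and resource management.  count = 0 means "no limit", per the
 * ExecutorRun comments.
 */
#ifdef EXEC_MAIN_USAGE_SKETCH
static void
RunQueryToCompletion(QueryDesc *queryDesc)
{
	/* build per-query EState and plan state tree; eflags = 0 is normal execution */
	ExecutorStart(queryDesc, 0);

	/* pull tuples forward until the plan is exhausted */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* shut down the plan tree and free per-query memory */
	ExecutorEnd(queryDesc);
}
#endif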
309
310 /* ----------------------------------------------------------------
311  *              ExecutorRewind
312  *
313  *              This routine may be called on an open queryDesc to rewind it
314  *              to the start.
315  * ----------------------------------------------------------------
316  */
317 void
318 ExecutorRewind(QueryDesc *queryDesc)
319 {
320         EState     *estate;
321         MemoryContext oldcontext;
322
323         /* sanity checks */
324         Assert(queryDesc != NULL);
325
326         estate = queryDesc->estate;
327
328         Assert(estate != NULL);
329
330         /* It's probably not sensible to rescan updating queries */
331         Assert(queryDesc->operation == CMD_SELECT);
332
333         /*
334          * Switch into per-query memory context
335          */
336         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
337
338         /*
339          * rescan plan
340          */
341         ExecReScan(queryDesc->planstate, NULL);
342
343         MemoryContextSwitchTo(oldcontext);
344 }
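/*
 * Editor's illustrative sketch (not part of the original file): two complete
 * passes over a SELECT plan via ExecutorRewind.  Passing EXEC_FLAG_REWIND at
 * start time tells plan nodes to expect a rescan; the helper name is
 * hypothetical.
 */
#ifdef EXEC_MAIN_USAGE_SKETCH
static void
RunQueryTwice(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, EXEC_FLAG_REWIND);
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* first pass */
	ExecutorRewind(queryDesc);
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* second pass */
	ExecutorEnd(queryDesc);
}
#endif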
345
346
347 /*
348  * ExecCheckRTPerms
349  *              Check access permissions for all relations listed in a range table.
350  */
351 void
352 ExecCheckRTPerms(List *rangeTable)
353 {
354         ListCell   *l;
355
356         foreach(l, rangeTable)
357         {
358                 RangeTblEntry *rte = lfirst(l);
359
360                 ExecCheckRTEPerms(rte);
361         }
362 }
363
364 /*
365  * ExecCheckRTEPerms
366  *              Check access permissions for a single RTE.
367  */
368 static void
369 ExecCheckRTEPerms(RangeTblEntry *rte)
370 {
371         AclMode         requiredPerms;
372         Oid                     relOid;
373         Oid                     userid;
374
375         /*
376          * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
377          * checked by ExecInitSubqueryScan if the subquery is still a separate
378          * subquery --- if it's been pulled up into our query level then the RTEs
379          * are in our rangetable and will be checked here. Function RTEs are
380          * checked by init_fcache when the function is prepared for execution.
381          * Join and special RTEs need no checks.
382          */
383         if (rte->rtekind != RTE_RELATION)
384                 return;
385
386         /*
387          * No work if requiredPerms is empty.
388          */
389         requiredPerms = rte->requiredPerms;
390         if (requiredPerms == 0)
391                 return;
392
393         relOid = rte->relid;
394
395         /*
396          * userid to check as: current user unless we have a setuid indication.
397          *
398          * Note: GetUserId() is presently fast enough that there's no harm in
399          * calling it separately for each RTE.  If that stops being true, we could
400          * call it once in ExecCheckRTPerms and pass the userid down from there.
401          * But for now, no need for the extra clutter.
402          */
403         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
404
405         /*
406          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
407          */
408         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
409                 != requiredPerms)
410                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
411                                            get_rel_name(relOid));
412 }
413
414 /*
415  * Check that the query does not imply any writes to non-temp tables.
416  */
417 static void
418 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
419 {
420         /*
421          * CREATE TABLE AS or SELECT INTO?
422          *
423          * XXX should we allow this if the destination is temp?
424          */
425         if (plannedstmt->into != NULL)
426                 goto fail;
427
428         /* Fail if write permissions are requested on any non-temp table */
429         ExecCheckRangeTblReadOnly(plannedstmt->rtable);
430
431         return;
432
433 fail:
434         ereport(ERROR,
435                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
436                          errmsg("transaction is read-only")));
437 }
438
439 static void
440 ExecCheckRangeTblReadOnly(List *rtable)
441 {
442         ListCell   *l;
443
444         /* Fail if write permissions are requested on any non-temp table */
445         foreach(l, rtable)
446         {
447                 RangeTblEntry *rte = lfirst(l);
448
449                 if (rte->rtekind == RTE_SUBQUERY)
450                 {
451                         Assert(!rte->subquery->into);
452                         ExecCheckRangeTblReadOnly(rte->subquery->rtable);
453                         continue;
454                 }
455
456                 if (rte->rtekind != RTE_RELATION)
457                         continue;
458
459                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
460                         continue;
461
462                 if (isTempNamespace(get_rel_namespace(rte->relid)))
463                         continue;
464
465                 goto fail;
466         }
467
468         return;
469
470 fail:
471         ereport(ERROR,
472                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
473                          errmsg("transaction is read-only")));
474 }
475
476
477 /* ----------------------------------------------------------------
478  *              InitPlan
479  *
480  *              Initializes the query plan: open files, allocate storage
481  *              and start up the rule manager
482  * ----------------------------------------------------------------
483  */
484 static void
485 InitPlan(QueryDesc *queryDesc, int eflags)
486 {
487         CmdType         operation = queryDesc->operation;
488         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
489         Plan       *plan = plannedstmt->planTree;
490         List       *rangeTable = plannedstmt->rtable;
491         EState     *estate = queryDesc->estate;
492         PlanState  *planstate;
493         TupleDesc       tupType;
494         ListCell   *l;
495
496         /*
497          * Do permissions checks.  It's sufficient to examine the query's top
498          * rangetable here --- subplan RTEs will be checked during
499          * ExecInitSubPlan().
500          */
501         ExecCheckRTPerms(rangeTable);
502
503         /*
504          * initialize the node's execution state
505          */
506         estate->es_range_table = rangeTable;
507
508         /*
509          * initialize result relation stuff
510          */
511         if (plannedstmt->resultRelations)
512         {
513                 List       *resultRelations = plannedstmt->resultRelations;
514                 int                     numResultRelations = list_length(resultRelations);
515                 ResultRelInfo *resultRelInfos;
516                 ResultRelInfo *resultRelInfo;
517
518                 resultRelInfos = (ResultRelInfo *)
519                         palloc(numResultRelations * sizeof(ResultRelInfo));
520                 resultRelInfo = resultRelInfos;
521                 foreach(l, resultRelations)
522                 {
523                         initResultRelInfo(resultRelInfo,
524                                                           lfirst_int(l),
525                                                           rangeTable,
526                                                           operation,
527                                                           estate->es_instrument);
528                         resultRelInfo++;
529                 }
530                 estate->es_result_relations = resultRelInfos;
531                 estate->es_num_result_relations = numResultRelations;
532                 /* Initialize to first or only result rel */
533                 estate->es_result_relation_info = resultRelInfos;
534         }
535         else
536         {
537                 /*
538                  * if no result relation, then set state appropriately
539                  */
540                 estate->es_result_relations = NULL;
541                 estate->es_num_result_relations = 0;
542                 estate->es_result_relation_info = NULL;
543         }
544
545         /*
546          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
547          * flag appropriately so that the plan tree will be initialized with the
548          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
549          */
550         estate->es_select_into = false;
551         if (operation == CMD_SELECT && plannedstmt->into != NULL)
552         {
553                 estate->es_select_into = true;
554                 estate->es_into_oids = interpretOidsOption(plannedstmt->into->options);
555         }
556
557         /*
558          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
559          * initialize the plan tree, else we'd be doing a lock upgrade.
560          * While we are at it, build the ExecRowMark list.
561          */
562         estate->es_rowMarks = NIL;
563         foreach(l, plannedstmt->rowMarks)
564         {
565                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
566                 Oid                     relid = getrelid(rc->rti, rangeTable);
567                 Relation        relation;
568                 ExecRowMark *erm;
569
570                 relation = heap_open(relid, RowShareLock);
571                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
572                 erm->relation = relation;
573                 erm->rti = rc->rti;
574                 erm->forUpdate = rc->forUpdate;
575                 erm->noWait = rc->noWait;
576                 /* We'll set up ctidAttno below */
577                 erm->ctidAttNo = InvalidAttrNumber;
578                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
579         }
580
581         /*
582          * initialize the executor "tuple" table.  We need slots for all the plan
583          * nodes, plus possibly output slots for the junkfilter(s). At this point
584          * we aren't sure if we need junkfilters, so just add slots for them
585          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
586          * trigger output tuples.
587          */
588         {
589                 int                     nSlots = ExecCountSlotsNode(plan);
590
591                 if (plannedstmt->resultRelations != NIL)
592                         nSlots += list_length(plannedstmt->resultRelations);
593                 else
594                         nSlots += 1;
595                 if (operation != CMD_SELECT)
596                         nSlots++;                       /* for es_trig_tuple_slot */
597                 if (plannedstmt->returningLists)
598                         nSlots++;                       /* for RETURNING projection */
599
600                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
601
602                 if (operation != CMD_SELECT)
603                         estate->es_trig_tuple_slot =
604                                 ExecAllocTableSlot(estate->es_tupleTable);
605         }
606
607         /* mark EvalPlanQual not active */
608         estate->es_plannedstmt = plannedstmt;
609         estate->es_evalPlanQual = NULL;
610         estate->es_evTupleNull = NULL;
611         estate->es_evTuple = NULL;
612         estate->es_useEvalPlan = false;
613
614         /*
615          * initialize the private state information for all the nodes in the query
616          * tree.  This opens files, allocates storage and leaves us ready to start
617          * processing tuples.
618          */
619         planstate = ExecInitNode(plan, estate, eflags);
620
621         /*
622          * Get the tuple descriptor describing the type of tuples to return. (this
623          * is especially important if we are creating a relation with "SELECT
624          * INTO")
625          */
626         tupType = ExecGetResultType(planstate);
627
628         /*
629          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
630          * filter if there are any junk attrs in the tlist.  INSERT and SELECT
631          * INTO also need a filter if the plan may return raw disk tuples (else
632          * heap_insert will be scribbling on the source relation!). UPDATE and
633          * DELETE always need a filter, since there's always a junk 'ctid'
634          * attribute present --- no need to look first.
635          */
636         {
637                 bool            junk_filter_needed = false;
638                 ListCell   *tlist;
639
640                 switch (operation)
641                 {
642                         case CMD_SELECT:
643                         case CMD_INSERT:
644                                 foreach(tlist, plan->targetlist)
645                                 {
646                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
647
648                                         if (tle->resjunk)
649                                         {
650                                                 junk_filter_needed = true;
651                                                 break;
652                                         }
653                                 }
654                                 if (!junk_filter_needed &&
655                                         (operation == CMD_INSERT || estate->es_select_into) &&
656                                         ExecMayReturnRawTuples(planstate))
657                                         junk_filter_needed = true;
658                                 break;
659                         case CMD_UPDATE:
660                         case CMD_DELETE:
661                                 junk_filter_needed = true;
662                                 break;
663                         default:
664                                 break;
665                 }
666
667                 if (junk_filter_needed)
668                 {
669                         /*
670                          * If there are multiple result relations, each one needs its own
671                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
672                          * we can't be fooled by some needing a filter and some not.
673                          */
674                         if (list_length(plannedstmt->resultRelations) > 1)
675                         {
676                                 PlanState **appendplans;
677                                 int                     as_nplans;
678                                 ResultRelInfo *resultRelInfo;
679                                 int                     i;
680
681                                 /* Top plan had better be an Append here. */
682                                 Assert(IsA(plan, Append));
683                                 Assert(((Append *) plan)->isTarget);
684                                 Assert(IsA(planstate, AppendState));
685                                 appendplans = ((AppendState *) planstate)->appendplans;
686                                 as_nplans = ((AppendState *) planstate)->as_nplans;
687                                 Assert(as_nplans == estate->es_num_result_relations);
688                                 resultRelInfo = estate->es_result_relations;
689                                 for (i = 0; i < as_nplans; i++)
690                                 {
691                                         PlanState  *subplan = appendplans[i];
692                                         JunkFilter *j;
693
694                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
695                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
696                                                                   ExecAllocTableSlot(estate->es_tupleTable));
697                                         /*
698                                          * Since it must be UPDATE/DELETE, there had better be
699                                          * a "ctid" junk attribute in the tlist ... but ctid could
700                                          * be at a different resno for each result relation.
701                                          * We look up the ctid resnos now and save them in the
702                                          * junkfilters.
703                                          */
704                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
705                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
706                                                 elog(ERROR, "could not find junk ctid column");
707                                         resultRelInfo->ri_junkFilter = j;
708                                         resultRelInfo++;
709                                 }
710
711                                 /*
712                                  * Set active junkfilter too; at this point ExecInitAppend has
713                                  * already selected an active result relation...
714                                  */
715                                 estate->es_junkFilter =
716                                         estate->es_result_relation_info->ri_junkFilter;
717                         }
718                         else
719                         {
720                                 /* Normal case with just one JunkFilter */
721                                 JunkFilter *j;
722
723                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
724                                                                            tupType->tdhasoid,
725                                                                   ExecAllocTableSlot(estate->es_tupleTable));
726                                 estate->es_junkFilter = j;
727                                 if (estate->es_result_relation_info)
728                                         estate->es_result_relation_info->ri_junkFilter = j;
729
730                                 if (operation == CMD_SELECT)
731                                 {
732                                         /* For SELECT, want to return the cleaned tuple type */
733                                         tupType = j->jf_cleanTupType;
734                                         /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
735                                         foreach(l, estate->es_rowMarks)
736                                         {
737                                                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
738                                                 char            resname[32];
739
740                                                 snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
741                                                 erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
742                                                 if (!AttributeNumberIsValid(erm->ctidAttNo))
743                                                         elog(ERROR, "could not find junk \"%s\" column",
744                                                                  resname);
745                                         }
746                                 }
747                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
748                                 {
749                                         /* For UPDATE/DELETE, find the ctid junk attr now */
750                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
751                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
752                                                 elog(ERROR, "could not find junk ctid column");
753                                 }
754                         }
755                 }
756                 else
757                         estate->es_junkFilter = NULL;
758         }
759
760         /*
761          * Initialize RETURNING projections if needed.
762          */
763         if (plannedstmt->returningLists)
764         {
765                 TupleTableSlot *slot;
766                 ExprContext *econtext;
767                 ResultRelInfo *resultRelInfo;
768
769                 /*
770                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
771                  * We assume all the sublists will generate the same output tupdesc.
772                  */
773                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
774                                                                  false);
775
776                 /* Set up a slot for the output of the RETURNING projection(s) */
777                 slot = ExecAllocTableSlot(estate->es_tupleTable);
778                 ExecSetSlotDescriptor(slot, tupType);
779                 /* Need an econtext too */
780                 econtext = CreateExprContext(estate);
781
782                 /*
783                  * Build a projection for each result rel.      Note that any SubPlans in
784                  * the RETURNING lists get attached to the topmost plan node.
785                  */
786                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
787                 resultRelInfo = estate->es_result_relations;
788                 foreach(l, plannedstmt->returningLists)
789                 {
790                         List       *rlist = (List *) lfirst(l);
791                         List       *rliststate;
792
793                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
794                         resultRelInfo->ri_projectReturning =
795                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
796                                                                            resultRelInfo->ri_RelationDesc->rd_att);
797                         resultRelInfo++;
798                 }
799
800                 /*
801                  * Because we already ran ExecInitNode() for the top plan node, any
802                  * subplans we just attached to it won't have been initialized; so we
803                  * have to do it here.  (Ugly, but the alternatives seem worse.)
804                  */
805                 foreach(l, planstate->subPlan)
806                 {
807                         SubPlanState *sstate = (SubPlanState *) lfirst(l);
808
809                         Assert(IsA(sstate, SubPlanState));
810                         if (sstate->planstate == NULL)          /* not yet initialized? */
811                                 ExecInitSubPlan(sstate, estate, eflags);
812                 }
813         }
814
815         queryDesc->tupDesc = tupType;
816         queryDesc->planstate = planstate;
817
818         /*
819          * If doing SELECT INTO, initialize the "into" relation.  We must wait
820          * till now so we have the "clean" result tuple type to create the new
821          * table from.
822          *
823          * If EXPLAIN, skip creating the "into" relation.
824          */
825         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
826                 OpenIntoRel(queryDesc);
827 }
828
829 /*
830  * Initialize ResultRelInfo data for one result relation
831  */
832 static void
833 initResultRelInfo(ResultRelInfo *resultRelInfo,
834                                   Index resultRelationIndex,
835                                   List *rangeTable,
836                                   CmdType operation,
837                                   bool doInstrument)
838 {
839         Oid                     resultRelationOid;
840         Relation        resultRelationDesc;
841
842         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
843         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
844
845         switch (resultRelationDesc->rd_rel->relkind)
846         {
847                 case RELKIND_SEQUENCE:
848                         ereport(ERROR,
849                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
850                                          errmsg("cannot change sequence \"%s\"",
851                                                         RelationGetRelationName(resultRelationDesc))));
852                         break;
853                 case RELKIND_TOASTVALUE:
854                         ereport(ERROR,
855                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
856                                          errmsg("cannot change TOAST relation \"%s\"",
857                                                         RelationGetRelationName(resultRelationDesc))));
858                         break;
859                 case RELKIND_VIEW:
860                         ereport(ERROR,
861                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
862                                          errmsg("cannot change view \"%s\"",
863                                                         RelationGetRelationName(resultRelationDesc))));
864                         break;
865         }
866
867         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
868         resultRelInfo->type = T_ResultRelInfo;
869         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
870         resultRelInfo->ri_RelationDesc = resultRelationDesc;
871         resultRelInfo->ri_NumIndices = 0;
872         resultRelInfo->ri_IndexRelationDescs = NULL;
873         resultRelInfo->ri_IndexRelationInfo = NULL;
874         /* make a copy so as not to depend on relcache info not changing... */
875         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
876         if (resultRelInfo->ri_TrigDesc)
877         {
878                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
879
880                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
881                         palloc0(n * sizeof(FmgrInfo));
882                 if (doInstrument)
883                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
884                 else
885                         resultRelInfo->ri_TrigInstrument = NULL;
886         }
887         else
888         {
889                 resultRelInfo->ri_TrigFunctions = NULL;
890                 resultRelInfo->ri_TrigInstrument = NULL;
891         }
892         resultRelInfo->ri_ConstraintExprs = NULL;
893         resultRelInfo->ri_junkFilter = NULL;
894         resultRelInfo->ri_projectReturning = NULL;
895
896         /*
897          * If there are indices on the result relation, open them and save
898          * descriptors in the result relation info, so that we can add new index
899          * entries for the tuples we add/update.  We need not do this for a
900          * DELETE, however, since deletion doesn't affect indexes.
901          */
902         if (resultRelationDesc->rd_rel->relhasindex &&
903                 operation != CMD_DELETE)
904                 ExecOpenIndices(resultRelInfo);
905 }
906
907 /*
908  *              ExecContextForcesOids
909  *
910  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
911  * we need to ensure that result tuples have space for an OID iff they are
912  * going to be stored into a relation that has OIDs.  In other contexts
913  * we are free to choose whether to leave space for OIDs in result tuples
914  * (we generally don't want to, but we do if a physical-tlist optimization
915  * is possible).  This routine checks the plan context and returns TRUE if the
916  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
917  * *hasoids is set to the required value.
918  *
919  * One reason this is ugly is that all plan nodes in the plan tree will emit
920  * tuples with space for an OID, though we really only need the topmost node
921  * to do so.  However, node types like Sort don't project new tuples but just
922  * return their inputs, and in those cases the requirement propagates down
923  * to the input node.  Eventually we might make this code smart enough to
924  * recognize how far down the requirement really goes, but for now we just
925  * make all plan nodes do the same thing if the top level forces the choice.
926  *
927  * We assume that estate->es_result_relation_info is already set up to
928  * describe the target relation.  Note that in an UPDATE that spans an
929  * inheritance tree, some of the target relations may have OIDs and some not.
930  * We have to make the decisions on a per-relation basis as we initialize
931  * each of the child plans of the topmost Append plan.
932  *
933  * SELECT INTO is even uglier, because we don't have the INTO relation's
934  * descriptor available when this code runs; we have to look aside at a
935  * flag set by InitPlan().
936  */
937 bool
938 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
939 {
940         if (planstate->state->es_select_into)
941         {
942                 *hasoids = planstate->state->es_into_oids;
943                 return true;
944         }
945         else
946         {
947                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
948
949                 if (ri != NULL)
950                 {
951                         Relation        rel = ri->ri_RelationDesc;
952
953                         if (rel != NULL)
954                         {
955                                 *hasoids = rel->rd_rel->relhasoids;
956                                 return true;
957                         }
958                 }
959         }
960
961         return false;
962 }
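/*
 * Editor's illustrative sketch (not part of the original file): how a
 * node-initialization routine might consult ExecContextForcesOids when
 * choosing its result tuple descriptor.  The helper is hypothetical; in the
 * real code this decision is made inside the tuple-descriptor setup paths.
 */
#ifdef EXEC_MAIN_USAGE_SKETCH
static bool
ExampleChooseHasOids(PlanState *planstate)
{
	bool		hasoids;

	if (ExecContextForcesOids(planstate, &hasoids))
		return hasoids;			/* the context dictates the choice */
	return false;				/* free choice: default to no OID space */
}
#endif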
963
964 /* ----------------------------------------------------------------
965  *              ExecEndPlan
966  *
967  *              Cleans up the query plan -- closes files and frees up storage
968  *
969  * NOTE: we are no longer very worried about freeing storage per se
970  * in this code; FreeExecutorState should be guaranteed to release all
971  * memory that needs to be released.  What we are worried about doing
972  * is closing relations and dropping buffer pins.  Thus, for example,
973  * tuple tables must be cleared or dropped to ensure pins are released.
974  * ----------------------------------------------------------------
975  */
976 void
977 ExecEndPlan(PlanState *planstate, EState *estate)
978 {
979         ResultRelInfo *resultRelInfo;
980         int                     i;
981         ListCell   *l;
982
983         /*
984          * shut down any PlanQual processing we were doing
985          */
986         if (estate->es_evalPlanQual != NULL)
987                 EndEvalPlanQual(estate);
988
989         /*
990          * shut down the node-type-specific query processing
991          */
992         ExecEndNode(planstate);
993
994         /*
995          * destroy the executor "tuple" table.
996          */
997         ExecDropTupleTable(estate->es_tupleTable, true);
998         estate->es_tupleTable = NULL;
999
1000         /*
1001          * close the result relation(s) if any, but hold locks until xact commit.
1002          */
1003         resultRelInfo = estate->es_result_relations;
1004         for (i = estate->es_num_result_relations; i > 0; i--)
1005         {
1006                 /* Close indices and then the relation itself */
1007                 ExecCloseIndices(resultRelInfo);
1008                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1009                 resultRelInfo++;
1010         }
1011
1012         /*
1013          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1014          */
1015         foreach(l, estate->es_rowMarks)
1016         {
1017                 ExecRowMark *erm = lfirst(l);
1018
1019                 heap_close(erm->relation, NoLock);
1020         }
1021 }
1022
1023 /* ----------------------------------------------------------------
1024  *              ExecutePlan
1025  *
1026  *              processes the query plan to retrieve 'numberTuples' tuples in the
1027  *              direction specified.
1028  *
1029  *              Retrieves all tuples if numberTuples is 0
1030  *
1031  *              result is either a slot containing the last tuple in the case
1032  *              of a SELECT or NULL otherwise.
1033  *
1034  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1035  * user can see it
1036  * ----------------------------------------------------------------
1037  */
1038 static TupleTableSlot *
1039 ExecutePlan(EState *estate,
1040                         PlanState *planstate,
1041                         CmdType operation,
1042                         long numberTuples,
1043                         ScanDirection direction,
1044                         DestReceiver *dest)
1045 {
1046         JunkFilter *junkfilter;
1047         TupleTableSlot *planSlot;
1048         TupleTableSlot *slot;
1049         ItemPointer tupleid = NULL;
1050         ItemPointerData tuple_ctid;
1051         long            current_tuple_count;
1052         TupleTableSlot *result;
1053
1054         /*
1055          * initialize local variables
1056          */
1057         current_tuple_count = 0;
1058         result = NULL;
1059
1060         /*
1061          * Set the direction.
1062          */
1063         estate->es_direction = direction;
1064
1065         /*
1066          * Process BEFORE EACH STATEMENT triggers
1067          */
1068         switch (operation)
1069         {
1070                 case CMD_UPDATE:
1071                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1072                         break;
1073                 case CMD_DELETE:
1074                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1075                         break;
1076                 case CMD_INSERT:
1077                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1078                         break;
1079                 default:
1080                         /* do nothing */
1081                         break;
1082         }
1083
1084         /*
1085          * Loop until we've processed the proper number of tuples from the plan.
1086          */
1087
1088         for (;;)
1089         {
1090                 /* Reset the per-output-tuple exprcontext */
1091                 ResetPerTupleExprContext(estate);
1092
1093                 /*
1094                  * Execute the plan and obtain a tuple
1095                  */
1096 lnext:  ;
1097                 if (estate->es_useEvalPlan)
1098                 {
1099                         planSlot = EvalPlanQualNext(estate);
1100                         if (TupIsNull(planSlot))
1101                                 planSlot = ExecProcNode(planstate);
1102                 }
1103                 else
1104                         planSlot = ExecProcNode(planstate);
1105
1106                 /*
1107                  * if the tuple is null, then we assume there is nothing more to
1108                  * process so we just return null...
1109                  */
1110                 if (TupIsNull(planSlot))
1111                 {
1112                         result = NULL;
1113                         break;
1114                 }
1115                 slot = planSlot;
1116
1117                 /*
1118                  * if we have a junk filter, then project a new tuple with the junk
1119                  * removed.
1120                  *
1121                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1122                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1123                  * because that tuple slot has the wrong descriptor.)
1124                  *
1125                  * Also, extract all the junk information we need.
1126                  */
1127                 if ((junkfilter = estate->es_junkFilter) != NULL)
1128                 {
1129                         Datum           datum;
1130                         bool            isNull;
1131
1132                         /*
1133                          * extract the 'ctid' junk attribute.
1134                          */
1135                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1136                         {
1137                                 datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
1138                                                                                          &isNull);
1139                                 /* shouldn't ever get a null result... */
1140                                 if (isNull)
1141                                         elog(ERROR, "ctid is NULL");
1142
1143                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1144                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1145                                 tupleid = &tuple_ctid;
1146                         }
1147
1148                         /*
1149                          * Process any FOR UPDATE or FOR SHARE locking requested.
1150                          */
1151                         else if (estate->es_rowMarks != NIL)
1152                         {
1153                                 ListCell   *l;
1154
1155                 lmark:  ;
1156                                 foreach(l, estate->es_rowMarks)
1157                                 {
1158                                         ExecRowMark *erm = lfirst(l);
1159                                         HeapTupleData tuple;
1160                                         Buffer          buffer;
1161                                         ItemPointerData update_ctid;
1162                                         TransactionId update_xmax;
1163                                         TupleTableSlot *newSlot;
1164                                         LockTupleMode lockmode;
1165                                         HTSU_Result test;
1166
1167                                         datum = ExecGetJunkAttribute(slot,
1168                                                                                                  erm->ctidAttNo,
1169                                                                                                  &isNull);
1170                                         /* shouldn't ever get a null result... */
1171                                         if (isNull)
1172                                                 elog(ERROR, "ctid is NULL");
1173
1174                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1175
1176                                         if (erm->forUpdate)
1177                                                 lockmode = LockTupleExclusive;
1178                                         else
1179                                                 lockmode = LockTupleShared;
1180
1181                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1182                                                                                    &update_ctid, &update_xmax,
1183                                                                                    estate->es_snapshot->curcid,
1184                                                                                    lockmode, erm->noWait);
1185                                         ReleaseBuffer(buffer);
1186                                         switch (test)
1187                                         {
1188                                                 case HeapTupleSelfUpdated:
1189                                                         /* treat it as deleted; do not process */
1190                                                         goto lnext;
1191
1192                                                 case HeapTupleMayBeUpdated:
1193                                                         break;
1194
1195                                                 case HeapTupleUpdated:
1196                                                         if (IsXactIsoLevelSerializable)
1197                                                                 ereport(ERROR,
1198                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1199                                                                   errmsg("could not serialize access due to concurrent update")));
1200                                                         if (!ItemPointerEquals(&update_ctid,
1201                                                                                                    &tuple.t_self))
1202                                                         {
1203                                                                 /* updated, so look at updated version */
1204                                                                 newSlot = EvalPlanQual(estate,
1205                                                                                                            erm->rti,
1206                                                                                                            &update_ctid,
1207                                                                                                            update_xmax,
1208                                                                                                 estate->es_snapshot->curcid);
1209                                                                 if (!TupIsNull(newSlot))
1210                                                                 {
1211                                                                         slot = planSlot = newSlot;
1212                                                                         estate->es_useEvalPlan = true;
1213                                                                         goto lmark;
1214                                                                 }
1215                                                         }
1216
1217                                                         /*
1218                                                          * if tuple was deleted or PlanQual failed for
1219                                                          * updated tuple - we must not return this tuple!
1220                                                          */
1221                                                         goto lnext;
1222
1223                                                 default:
1224                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1225                                                                  test);
1226                                                         return NULL;
1227                                         }
1228                                 }
1229                         }
1230
1231                         /*
1232                          * Create a new "clean" tuple with all junk attributes removed. We
1233                          * don't need to do this for DELETE, however (there will in fact
1234                          * be no non-junk attributes in a DELETE!)
1235                          */
1236                         if (operation != CMD_DELETE)
1237                                 slot = ExecFilterJunk(junkfilter, slot);
1238                 }
1239
1240                 /*
1241                  * now that we have a tuple, do the appropriate thing with it.. either
1242                  * return it to the user, add it to a relation someplace, delete it
1243                  * from a relation, or modify some of its attributes.
1244                  */
1245                 switch (operation)
1246                 {
1247                         case CMD_SELECT:
1248                                 ExecSelect(slot, dest, estate);
1249                                 result = slot;
1250                                 break;
1251
1252                         case CMD_INSERT:
1253                                 ExecInsert(slot, tupleid, planSlot, dest, estate);
1254                                 result = NULL;
1255                                 break;
1256
1257                         case CMD_DELETE:
1258                                 ExecDelete(tupleid, planSlot, dest, estate);
1259                                 result = NULL;
1260                                 break;
1261
1262                         case CMD_UPDATE:
1263                                 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1264                                 result = NULL;
1265                                 break;
1266
1267                         default:
1268                                 elog(ERROR, "unrecognized operation code: %d",
1269                                          (int) operation);
1270                                 result = NULL;
1271                                 break;
1272                 }
1273
1274                 /*
1275                  * Check our tuple count.  If we've processed the proper number then
1276                  * quit, else loop again and process more tuples.  Zero numberTuples
1277                  * means no limit.
1278                  */
1279                 current_tuple_count++;
1280                 if (numberTuples && numberTuples == current_tuple_count)
1281                         break;
1282         }
1283
1284         /*
1285          * Process AFTER EACH STATEMENT triggers
1286          */
1287         switch (operation)
1288         {
1289                 case CMD_UPDATE:
1290                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1291                         break;
1292                 case CMD_DELETE:
1293                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1294                         break;
1295                 case CMD_INSERT:
1296                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1297                         break;
1298                 default:
1299                         /* do nothing */
1300                         break;
1301         }
1302
1303         /*
1304          * here, result is either a slot containing a tuple in the case of a
1305          * SELECT or NULL otherwise.
1306          */
1307         return result;
1308 }
1309
1310 /* ----------------------------------------------------------------
1311  *              ExecSelect
1312  *
1313  *              SELECTs are easy: we just pass the tuple to the appropriate
1314  *              output function.
1315  * ----------------------------------------------------------------
1316  */
1317 static void
1318 ExecSelect(TupleTableSlot *slot,
1319                    DestReceiver *dest,
1320                    EState *estate)
1321 {
1322         (*dest->receiveSlot) (slot, dest);
1323         IncrRetrieved();
1324         (estate->es_processed)++;
1325 }
1326
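/*
 * Editor's note: ExecSelect is the producer side of the DestReceiver
 * abstraction; what follows is a hedged sketch, not part of the original
 * file.  The struct-embedding pattern it assumes is the one DR_intorel
 * uses later in this file; the DR_counter type and counter_receive
 * function are hypothetical.
 */
#if 0
typedef struct
{
        DestReceiver pub;               /* callbacks; must be the first field */
        long            ntuples;        /* private state: tuples received */
} DR_counter;

static void
counter_receive(TupleTableSlot *slot, DestReceiver *self)
{
        /* ExecSelect invokes this once per result tuple */
        ((DR_counter *) self)->ntuples++;
}
#endif
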
1327 /* ----------------------------------------------------------------
1328  *              ExecInsert
1329  *
1330  *              INSERTs are trickier: we have to insert the tuple into
1331  *              the base relation and insert appropriate tuples into the
1332  *              index relations.
1333  * ----------------------------------------------------------------
1334  */
1335 static void
1336 ExecInsert(TupleTableSlot *slot,
1337                    ItemPointer tupleid,
1338                    TupleTableSlot *planSlot,
1339                    DestReceiver *dest,
1340                    EState *estate)
1341 {
1342         HeapTuple       tuple;
1343         ResultRelInfo *resultRelInfo;
1344         Relation        resultRelationDesc;
1345         Oid                     newId;
1346
1347         /*
1348          * get the heap tuple out of the tuple table slot, making sure we have a
1349          * writable copy
1350          */
1351         tuple = ExecMaterializeSlot(slot);
1352
1353         /*
1354          * get information on the (current) result relation
1355          */
1356         resultRelInfo = estate->es_result_relation_info;
1357         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1358
1359         /* BEFORE ROW INSERT Triggers */
1360         if (resultRelInfo->ri_TrigDesc &&
1361                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1362         {
1363                 HeapTuple       newtuple;
1364
1365                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1366
1367                 if (newtuple == NULL)   /* "do nothing" */
1368                         return;
1369
1370                 if (newtuple != tuple)  /* modified by Trigger(s) */
1371                 {
1372                         /*
1373                          * Put the modified tuple into a slot for convenience of routines
1374                          * below.  We assume the tuple was allocated in per-tuple memory
1375                          * context, and therefore will go away by itself. The tuple table
1376                          * slot should not try to clear it.
1377                          */
1378                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1379
1380                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1381                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1382                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1383                         slot = newslot;
1384                         tuple = newtuple;
1385                 }
1386         }
1387
1388         /*
1389          * Check the constraints of the tuple
1390          */
1391         if (resultRelationDesc->rd_att->constr)
1392                 ExecConstraints(resultRelInfo, slot, estate);
1393
1394         /*
1395          * insert the tuple
1396          *
1397          * Note: heap_insert returns the tid (location) of the new tuple in the
1398          * t_self field.
1399          */
1400         newId = heap_insert(resultRelationDesc, tuple,
1401                                                 estate->es_snapshot->curcid,
1402                                                 true, true);
1403
1404         IncrAppended();
1405         (estate->es_processed)++;
1406         estate->es_lastoid = newId;
1407         setLastTid(&(tuple->t_self));
1408
1409         /*
1410          * insert index entries for tuple
1411          */
1412         if (resultRelInfo->ri_NumIndices > 0)
1413                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1414
1415         /* AFTER ROW INSERT Triggers */
1416         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1417
1418         /* Process RETURNING if present */
1419         if (resultRelInfo->ri_projectReturning)
1420                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1421                                                          slot, planSlot, dest);
1422 }
1423
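/*
 * Editor's note (illustrative; hypothetical table and trigger names): the
 * control flow above is visible at the SQL level.  A BEFORE ROW trigger
 * function that returns NULL makes ExecInsert return before heap_insert,
 * so the row is silently skipped; RETURNING projects the tuple as
 * actually stored, i.e. after any trigger modifications:
 *
 *              CREATE TRIGGER t_bri BEFORE INSERT ON t
 *                      FOR EACH ROW EXECUTE PROCEDURE suppress_or_modify();
 *              INSERT INTO t VALUES (1) RETURNING *;   -- zero or one row back
 */
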
1424 /* ----------------------------------------------------------------
1425  *              ExecDelete
1426  *
1427  *              DELETE is like UPDATE, except that we delete the tuple and no
1428  *              index modifications are needed
1429  * ----------------------------------------------------------------
1430  */
1431 static void
1432 ExecDelete(ItemPointer tupleid,
1433                    TupleTableSlot *planSlot,
1434                    DestReceiver *dest,
1435                    EState *estate)
1436 {
1437         ResultRelInfo *resultRelInfo;
1438         Relation        resultRelationDesc;
1439         HTSU_Result result;
1440         ItemPointerData update_ctid;
1441         TransactionId update_xmax;
1442
1443         /*
1444          * get information on the (current) result relation
1445          */
1446         resultRelInfo = estate->es_result_relation_info;
1447         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1448
1449         /* BEFORE ROW DELETE Triggers */
1450         if (resultRelInfo->ri_TrigDesc &&
1451                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1452         {
1453                 bool            dodelete;
1454
1455                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1456                                                                                 estate->es_snapshot->curcid);
1457
1458                 if (!dodelete)                  /* "do nothing" */
1459                         return;
1460         }
1461
1462         /*
1463          * delete the tuple
1464          *
1465          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1466          * the row to be deleted is visible to that snapshot, and throw a can't-
1467          * serialize error if not.      This is a special-case behavior needed for
1468          * referential integrity updates in serializable transactions.
1469          */
1470 ldelete:;
1471         result = heap_delete(resultRelationDesc, tupleid,
1472                                                  &update_ctid, &update_xmax,
1473                                                  estate->es_snapshot->curcid,
1474                                                  estate->es_crosscheck_snapshot,
1475                                                  true /* wait for commit */ );
1476         switch (result)
1477         {
1478                 case HeapTupleSelfUpdated:
1479                         /* already deleted by self; nothing to do */
1480                         return;
1481
1482                 case HeapTupleMayBeUpdated:
1483                         break;
1484
1485                 case HeapTupleUpdated:
1486                         if (IsXactIsoLevelSerializable)
1487                                 ereport(ERROR,
1488                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1489                                                  errmsg("could not serialize access due to concurrent update")));
1490                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1491                         {
1492                                 TupleTableSlot *epqslot;
1493
1494                                 epqslot = EvalPlanQual(estate,
1495                                                                            resultRelInfo->ri_RangeTableIndex,
1496                                                                            &update_ctid,
1497                                                                            update_xmax,
1498                                                                            estate->es_snapshot->curcid);
1499                                 if (!TupIsNull(epqslot))
1500                                 {
1501                                         *tupleid = update_ctid;
1502                                         goto ldelete;
1503                                 }
1504                         }
1505                         /* tuple already deleted; nothing to do */
1506                         return;
1507
1508                 default:
1509                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1510                         return;
1511         }
1512
1513         IncrDeleted();
1514         (estate->es_processed)++;
1515
1516         /*
1517          * Note: Normally one would think that we have to delete index tuples
1518          * associated with the heap tuple now...
1519          *
1520          * ... but in POSTGRES, we have no need to do this because VACUUM will
1521          * take care of it later.  We can't delete index tuples immediately
1522          * anyway, since the tuple is still visible to other transactions.
1523          */
1524
1525         /* AFTER ROW DELETE Triggers */
1526         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1527
1528         /* Process RETURNING if present */
1529         if (resultRelInfo->ri_projectReturning)
1530         {
1531                 /*
1532                  * We have to put the target tuple into a slot, which means we must
1533                  * first fetch it.  We can use the trigger tuple slot.
1534                  */
1535                 TupleTableSlot *slot = estate->es_trig_tuple_slot;
1536                 HeapTupleData deltuple;
1537                 Buffer          delbuffer;
1538
1539                 deltuple.t_self = *tupleid;
1540                 if (!heap_fetch(resultRelationDesc, SnapshotAny,
1541                                                 &deltuple, &delbuffer, false, NULL))
1542                         elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1543
1544                 if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
1545                         ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
1546                 ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
1547
1548                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1549                                                          slot, planSlot, dest);
1550
1551                 ExecClearTuple(slot);
1552                 ReleaseBuffer(delbuffer);
1553         }
1554 }
1555
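/*
 * Editor's note: the HeapTupleUpdated branch above is what implements
 * READ COMMITTED behavior for concurrent writers.  An illustrative
 * timeline (hypothetical table t with an integer column a):
 *
 *              session 1:  BEGIN; UPDATE t SET a = a + 1 WHERE a = 1;
 *              session 2:  DELETE FROM t WHERE a = 1;  -- blocks on the row
 *              session 1:  COMMIT;
 *
 * heap_delete in session 2 then reports HeapTupleUpdated; EvalPlanQual
 * re-evaluates the qual against the new row version (now a = 2), and the
 * DELETE either retries at the new ctid (goto ldelete) or skips the row.
 */
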
1556 /* ----------------------------------------------------------------
1557  *              ExecUpdate
1558  *
1559  *              note: we can't run UPDATE queries with transactions
1560  *              off because UPDATEs are actually INSERTs and our
1561  *              scan will mistakenly loop forever, updating the tuple
1562  *              it just inserted.  This should be fixed, but until it
1563  *              is, we don't want to get stuck in an infinite loop
1564  *              that corrupts your database.
1565  * ----------------------------------------------------------------
1566  */
1567 static void
1568 ExecUpdate(TupleTableSlot *slot,
1569                    ItemPointer tupleid,
1570                    TupleTableSlot *planSlot,
1571                    DestReceiver *dest,
1572                    EState *estate)
1573 {
1574         HeapTuple       tuple;
1575         ResultRelInfo *resultRelInfo;
1576         Relation        resultRelationDesc;
1577         HTSU_Result result;
1578         ItemPointerData update_ctid;
1579         TransactionId update_xmax;
1580
1581         /*
1582          * abort the operation if not running transactions
1583          */
1584         if (IsBootstrapProcessingMode())
1585                 elog(ERROR, "cannot UPDATE during bootstrap");
1586
1587         /*
1588          * get the heap tuple out of the tuple table slot, making sure we have a
1589          * writable copy
1590          */
1591         tuple = ExecMaterializeSlot(slot);
1592
1593         /*
1594          * get information on the (current) result relation
1595          */
1596         resultRelInfo = estate->es_result_relation_info;
1597         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1598
1599         /* BEFORE ROW UPDATE Triggers */
1600         if (resultRelInfo->ri_TrigDesc &&
1601                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1602         {
1603                 HeapTuple       newtuple;
1604
1605                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1606                                                                                 tupleid, tuple,
1607                                                                                 estate->es_snapshot->curcid);
1608
1609                 if (newtuple == NULL)   /* "do nothing" */
1610                         return;
1611
1612                 if (newtuple != tuple)  /* modified by Trigger(s) */
1613                 {
1614                         /*
1615                          * Put the modified tuple into a slot for convenience of routines
1616                          * below.  We assume the tuple was allocated in per-tuple memory
1617                          * context, and therefore will go away by itself. The tuple table
1618                          * slot should not try to clear it.
1619                          */
1620                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1621
1622                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1623                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1624                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1625                         slot = newslot;
1626                         tuple = newtuple;
1627                 }
1628         }
1629
1630         /*
1631          * Check the constraints of the tuple
1632          *
1633          * If we generate a new candidate tuple after EvalPlanQual testing, we
1634          * must loop back here and recheck constraints.  (We don't need to redo
1635          * triggers, however.  If there are any BEFORE triggers then trigger.c
1636          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1637          * need to do them again.)
1638          */
1639 lreplace:;
1640         if (resultRelationDesc->rd_att->constr)
1641                 ExecConstraints(resultRelInfo, slot, estate);
1642
1643         /*
1644          * replace the heap tuple
1645          *
1646          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1647          * the row to be updated is visible to that snapshot, and throw a can't-
1648          * serialize error if not.      This is a special-case behavior needed for
1649          * referential integrity updates in serializable transactions.
1650          */
1651         result = heap_update(resultRelationDesc, tupleid, tuple,
1652                                                  &update_ctid, &update_xmax,
1653                                                  estate->es_snapshot->curcid,
1654                                                  estate->es_crosscheck_snapshot,
1655                                                  true /* wait for commit */ );
1656         switch (result)
1657         {
1658                 case HeapTupleSelfUpdated:
1659                         /* already deleted by self; nothing to do */
1660                         return;
1661
1662                 case HeapTupleMayBeUpdated:
1663                         break;
1664
1665                 case HeapTupleUpdated:
1666                         if (IsXactIsoLevelSerializable)
1667                                 ereport(ERROR,
1668                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1669                                                  errmsg("could not serialize access due to concurrent update")));
1670                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1671                         {
1672                                 TupleTableSlot *epqslot;
1673
1674                                 epqslot = EvalPlanQual(estate,
1675                                                                            resultRelInfo->ri_RangeTableIndex,
1676                                                                            &update_ctid,
1677                                                                            update_xmax,
1678                                                                            estate->es_snapshot->curcid);
1679                                 if (!TupIsNull(epqslot))
1680                                 {
1681                                         *tupleid = update_ctid;
1682                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1683                                         tuple = ExecMaterializeSlot(slot);
1684                                         goto lreplace;
1685                                 }
1686                         }
1687                         /* tuple already deleted; nothing to do */
1688                         return;
1689
1690                 default:
1691                         elog(ERROR, "unrecognized heap_update status: %u", result);
1692                         return;
1693         }
1694
1695         IncrReplaced();
1696         (estate->es_processed)++;
1697
1698         /*
1699          * Note: instead of having to update the old index tuples associated with
1700          * the heap tuple, all we do is form and insert new index tuples. This is
1701          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1702          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1703          * here is insert new index tuples.  -cim 9/27/89
1704          */
1705
1706         /*
1707          * insert index entries for tuple
1708          *
1709          * Note: heap_update returns the tid (location) of the new tuple in the
1710          * t_self field.
1711          */
1712         if (resultRelInfo->ri_NumIndices > 0)
1713                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1714
1715         /* AFTER ROW UPDATE Triggers */
1716         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1717
1718         /* Process RETURNING if present */
1719         if (resultRelInfo->ri_projectReturning)
1720                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1721                                                          slot, planSlot, dest);
1722 }
1723
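/*
 * Editor's note (illustrative): since an UPDATE stores a complete new
 * tuple version, the index entries inserted above point at the new
 * t_self, while entries for the old version linger until VACUUM removes
 * them.  For a hypothetical table t with an index on column a:
 *
 *              UPDATE t SET a = 2 WHERE a = 1;
 *              -- the index now holds entries for both the dead a = 1
 *              -- version and the live a = 2 version
 */
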
1724 /*
1725  * ExecRelCheck --- check that tuple meets constraints for result relation
1726  */
1727 static const char *
1728 ExecRelCheck(ResultRelInfo *resultRelInfo,
1729                          TupleTableSlot *slot, EState *estate)
1730 {
1731         Relation        rel = resultRelInfo->ri_RelationDesc;
1732         int                     ncheck = rel->rd_att->constr->num_check;
1733         ConstrCheck *check = rel->rd_att->constr->check;
1734         ExprContext *econtext;
1735         MemoryContext oldContext;
1736         List       *qual;
1737         int                     i;
1738
1739         /*
1740          * If first time through for this result relation, build expression
1741          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1742          * memory context so they'll survive throughout the query.
1743          */
1744         if (resultRelInfo->ri_ConstraintExprs == NULL)
1745         {
1746                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1747                 resultRelInfo->ri_ConstraintExprs =
1748                         (List **) palloc(ncheck * sizeof(List *));
1749                 for (i = 0; i < ncheck; i++)
1750                 {
1751                         /* ExecQual wants implicit-AND form */
1752                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1753                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1754                                 ExecPrepareExpr((Expr *) qual, estate);
1755                 }
1756                 MemoryContextSwitchTo(oldContext);
1757         }
1758
1759         /*
1760          * We will use the EState's per-tuple context for evaluating constraint
1761          * expressions (creating it if it's not already there).
1762          */
1763         econtext = GetPerTupleExprContext(estate);
1764
1765         /* Arrange for econtext's scan tuple to be the tuple under test */
1766         econtext->ecxt_scantuple = slot;
1767
1768         /* And evaluate the constraints */
1769         for (i = 0; i < ncheck; i++)
1770         {
1771                 qual = resultRelInfo->ri_ConstraintExprs[i];
1772
1773                 /*
1774                  * NOTE: SQL92 specifies that a NULL result from a constraint
1775                  * expression is not to be treated as a failure.  Therefore, tell
1776                  * ExecQual to return TRUE for NULL.
1777                  */
1778                 if (!ExecQual(qual, econtext, true))
1779                         return check[i].ccname;
1780         }
1781
1782         /* NULL result means no error */
1783         return NULL;
1784 }
1785
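/*
 * Editor's note: each CHECK constraint arrives here as a serialized node
 * tree in check[i].ccbin.  For a hypothetical table
 *
 *              CREATE TABLE t (a int CHECK (a > 0 AND a < 10));
 *
 * stringToNode() rebuilds the "a > 0 AND a < 10" expression tree, and
 * make_ands_implicit() flattens it into the two-element implicit-AND
 * list that ExecQual expects.
 */
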
1786 void
1787 ExecConstraints(ResultRelInfo *resultRelInfo,
1788                                 TupleTableSlot *slot, EState *estate)
1789 {
1790         Relation        rel = resultRelInfo->ri_RelationDesc;
1791         TupleConstr *constr = rel->rd_att->constr;
1792
1793         Assert(constr);
1794
1795         if (constr->has_not_null)
1796         {
1797                 int                     natts = rel->rd_att->natts;
1798                 int                     attrChk;
1799
1800                 for (attrChk = 1; attrChk <= natts; attrChk++)
1801                 {
1802                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1803                                 slot_attisnull(slot, attrChk))
1804                                 ereport(ERROR,
1805                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1806                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1807                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1808                 }
1809         }
1810
1811         if (constr->num_check > 0)
1812         {
1813                 const char *failed;
1814
1815                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1816                         ereport(ERROR,
1817                                         (errcode(ERRCODE_CHECK_VIOLATION),
1818                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1819                                                         RelationGetRelationName(rel), failed)));
1820         }
1821 }
1822
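/*
 * Editor's note (illustrative; hypothetical table and constraint names):
 * the two ereport calls above produce the familiar errors, e.g. for a
 * table t whose column a is declared NOT NULL CHECK (a > 0):
 *
 *              INSERT INTO t (a) VALUES (NULL);
 *              -- ERROR:  null value in column "a" violates not-null constraint
 *              INSERT INTO t (a) VALUES (-1);
 *              -- ERROR:  new row for relation "t" violates check constraint "t_a_check"
 */
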
1823 /*
1824  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1825  *
1826  * projectReturning: RETURNING projection info for current result rel
1827  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1828  * planSlot: slot holding tuple returned by top plan node
1829  * dest: where to send the output
1830  */
1831 static void
1832 ExecProcessReturning(ProjectionInfo *projectReturning,
1833                                          TupleTableSlot *tupleSlot,
1834                                          TupleTableSlot *planSlot,
1835                                          DestReceiver *dest)
1836 {
1837         ExprContext *econtext = projectReturning->pi_exprContext;
1838         TupleTableSlot *retSlot;
1839
1840         /*
1841          * Reset per-tuple memory context to free any expression evaluation
1842          * storage allocated in the previous cycle.
1843          */
1844         ResetExprContext(econtext);
1845
1846         /* Make tuple and any needed join variables available to ExecProject */
1847         econtext->ecxt_scantuple = tupleSlot;
1848         econtext->ecxt_outertuple = planSlot;
1849
1850         /* Compute the RETURNING expressions */
1851         retSlot = ExecProject(projectReturning, NULL);
1852
1853         /* Send to dest */
1854         (*dest->receiveSlot) (retSlot, dest);
1855
1856         ExecClearTuple(retSlot);
1857 }
1858
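/*
 * Editor's note (illustrative): RETURNING expressions may refer both to
 * the stored row (via ecxt_scantuple) and, for UPDATE ... FROM or
 * DELETE ... USING, to columns of the joined relations (via
 * ecxt_outertuple), which is why both slots are passed in:
 *
 *              UPDATE t SET a = s.a FROM src s WHERE t.id = s.id
 *                      RETURNING t.id, s.a;
 */
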
1859 /*
1860  * Check a modified tuple to see if we want to process its updated version
1861  * under READ COMMITTED rules.
1862  *
1863  * See backend/executor/README for some info about how this works.
1864  *
1865  *      estate - executor state data
1866  *      rti - rangetable index of table containing tuple
1867  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1868  *      priorXmax - t_xmax from the outdated tuple
1869  *      curCid - command ID of current command of my transaction
1870  *
1871  * *tid is also an output parameter: it's modified to hold the TID of the
1872  * latest version of the tuple (note this may be changed even on failure)
1873  *
1874  * Returns a slot containing the new candidate update/delete tuple, or
1875  * NULL if we determine we shouldn't process the row.
1876  */
1877 TupleTableSlot *
1878 EvalPlanQual(EState *estate, Index rti,
1879                          ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1880 {
1881         evalPlanQual *epq;
1882         EState     *epqstate;
1883         Relation        relation;
1884         HeapTupleData tuple;
1885         HeapTuple       copyTuple = NULL;
1886         bool            endNode;
1887
1888         Assert(rti != 0);
1889
1890         /*
1891          * find relation containing target tuple
1892          */
1893         if (estate->es_result_relation_info != NULL &&
1894                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1895                 relation = estate->es_result_relation_info->ri_RelationDesc;
1896         else
1897         {
1898                 ListCell   *l;
1899
1900                 relation = NULL;
1901                 foreach(l, estate->es_rowMarks)
1902                 {
1903                         if (((ExecRowMark *) lfirst(l))->rti == rti)
1904                         {
1905                                 relation = ((ExecRowMark *) lfirst(l))->relation;
1906                                 break;
1907                         }
1908                 }
1909                 if (relation == NULL)
1910                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1911         }
1912
1913         /*
1914          * fetch tid tuple
1915          *
1916          * Loop here to deal with updated or busy tuples
1917          */
1918         tuple.t_self = *tid;
1919         for (;;)
1920         {
1921                 Buffer          buffer;
1922
1923                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
1924                 {
1925                         /*
1926                          * If xmin isn't what we're expecting, the slot must have been
1927                          * recycled and reused for an unrelated tuple.  This implies that
1928                          * the latest version of the row was deleted, so we need do
1929                          * nothing.  (Should be safe to examine xmin without getting
1930                          * buffer's content lock, since xmin never changes in an existing
1931                          * tuple.)
1932                          */
1933                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1934                                                                          priorXmax))
1935                         {
1936                                 ReleaseBuffer(buffer);
1937                                 return NULL;
1938                         }
1939
1940                         /* otherwise xmin should not be dirty... */
1941                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1942                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1943
1944                         /*
1945                          * If the tuple is being updated by another transaction, we have
1946                          * to wait for its commit/abort.
1947                          */
1948                         if (TransactionIdIsValid(SnapshotDirty->xmax))
1949                         {
1950                                 ReleaseBuffer(buffer);
1951                                 XactLockTableWait(SnapshotDirty->xmax);
1952                                 continue;               /* loop back to repeat heap_fetch */
1953                         }
1954
1955                         /*
1956                          * If tuple was inserted by our own transaction, we have to check
1957                          * cmin against curCid: cmin >= curCid means our command cannot
1958                          * see the tuple, so we should ignore it.  Without this we are
1959                          * open to the "Halloween problem" of indefinitely re-updating the
1960                          * same tuple.  (We need not check cmax because
1961                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1962                          * transaction dead, regardless of cmax.)  We just checked that
1963                          * priorXmax == xmin, so we can test that variable instead of
1964                          * doing HeapTupleHeaderGetXmin again.
1965                          */
1966                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1967                                 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
1968                         {
1969                                 ReleaseBuffer(buffer);
1970                                 return NULL;
1971                         }
1972
1973                         /*
1974                          * We got tuple - now copy it for use by recheck query.
1975                          */
1976                         copyTuple = heap_copytuple(&tuple);
1977                         ReleaseBuffer(buffer);
1978                         break;
1979                 }
1980
1981                 /*
1982                  * If the referenced slot was actually empty, the latest version of
1983                  * the row must have been deleted, so we need do nothing.
1984                  */
1985                 if (tuple.t_data == NULL)
1986                 {
1987                         ReleaseBuffer(buffer);
1988                         return NULL;
1989                 }
1990
1991                 /*
1992                  * As above, if xmin isn't what we're expecting, do nothing.
1993                  */
1994                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1995                                                                  priorXmax))
1996                 {
1997                         ReleaseBuffer(buffer);
1998                         return NULL;
1999                 }
2000
2001                 /*
2002                  * If we get here, the tuple was found but failed SnapshotDirty.
2003                  * Assuming the xmin is either a committed xact or our own xact (as it
2004                  * certainly should be if we're trying to modify the tuple), this must
2005                  * mean that the row was updated or deleted by either a committed xact
2006                  * or our own xact.  If it was deleted, we can ignore it; if it was
2007                  * updated then chain up to the next version and repeat the whole
2008                  * test.
2009                  *
2010                  * As above, it should be safe to examine xmax and t_ctid without the
2011                  * buffer content lock, because they can't be changing.
2012                  */
2013                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2014                 {
2015                         /* deleted, so forget about it */
2016                         ReleaseBuffer(buffer);
2017                         return NULL;
2018                 }
2019
2020                 /* updated, so look at the updated row */
2021                 tuple.t_self = tuple.t_data->t_ctid;
2022                 /* updated row should have xmin matching this xmax */
2023                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2024                 ReleaseBuffer(buffer);
2025                 /* loop back to fetch next in chain */
2026         }
2027
2028         /*
2029          * For UPDATE/DELETE we have to return tid of actual row we're executing
2030          * PQ for.
2031          */
2032         *tid = tuple.t_self;
2033
2034         /*
2035          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2036          */
2037         epq = estate->es_evalPlanQual;
2038         endNode = true;
2039
2040         if (epq != NULL && epq->rti == 0)
2041         {
2042                 /* Top PQ stack entry is idle, so re-use it */
2043                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2044                 epq->rti = rti;
2045                 endNode = false;
2046         }
2047
2048         /*
2049          * If this is a request for a different RTE, Ra, we must check whether
2050          * PlanQual was already requested for Ra.  If so, Ra's row was updated
2051          * again, and we must restart the old execution for Ra, discarding
2052          * everything done after Ra was suspended.
2053          */
2054         if (epq != NULL && epq->rti != rti &&
2055                 epq->estate->es_evTuple[rti - 1] != NULL)
2056         {
2057                 do
2058                 {
2059                         evalPlanQual *oldepq;
2060
2061                         /* stop execution */
2062                         EvalPlanQualStop(epq);
2063                         /* pop previous PlanQual from the stack */
2064                         oldepq = epq->next;
2065                         Assert(oldepq && oldepq->rti != 0);
2066                         /* push current PQ to freePQ stack */
2067                         oldepq->free = epq;
2068                         epq = oldepq;
2069                         estate->es_evalPlanQual = epq;
2070                 } while (epq->rti != rti);
2071         }
2072
2073         /*
2074          * If we are asked about a different RTE, we must suspend execution of
2075          * the current PlanQual and start execution for the new one.
2076          */
2077         if (epq == NULL || epq->rti != rti)
2078         {
2079                 /* try to reuse plan used previously */
2080                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2081
2082                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2083                 {
2084                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2085                         newepq->free = NULL;
2086                         newepq->estate = NULL;
2087                         newepq->planstate = NULL;
2088                 }
2089                 else
2090                 {
2091                         /* recycle previously used PlanQual */
2092                         Assert(newepq->estate == NULL);
2093                         epq->free = NULL;
2094                 }
2095                 /* push current PQ to the stack */
2096                 newepq->next = epq;
2097                 epq = newepq;
2098                 estate->es_evalPlanQual = epq;
2099                 epq->rti = rti;
2100                 endNode = false;
2101         }
2102
2103         Assert(epq->rti == rti);
2104
2105         /*
2106          * OK, we're being asked about the same RTE.  Unfortunately we still have to
2107          * end and restart execution of the plan, because ExecReScan wouldn't
2108          * ensure that upper plan nodes would reset themselves.  We could make
2109          * that work if insertion of the target tuple were integrated with the
2110          * Param mechanism somehow, so that the upper plan nodes know that their
2111          * children's outputs have changed.
2112          *
2113          * Note that the stack of free evalPlanQual nodes is quite useless at the
2114          * moment, since it only saves us from pallocing/releasing the
2115          * evalPlanQual nodes themselves.  But it will be useful once we implement
2116          * ReScan instead of end/restart for re-using PlanQual nodes.
2117          */
2118         if (endNode)
2119         {
2120                 /* stop execution */
2121                 EvalPlanQualStop(epq);
2122         }
2123
2124         /*
2125          * Initialize new recheck query.
2126          *
2127          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2128          * instead copy down changeable state from the top plan (including
2129          * es_result_relation_info, es_junkFilter) and reset locally changeable
2130          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2131          */
2132         EvalPlanQualStart(epq, estate, epq->next);
2133
2134         /*
2135          * free the old RTE's tuple, if any, and store the target tuple where
2136          * the relation's scan node will see it
2137          */
2138         epqstate = epq->estate;
2139         if (epqstate->es_evTuple[rti - 1] != NULL)
2140                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2141         epqstate->es_evTuple[rti - 1] = copyTuple;
2142
2143         return EvalPlanQualNext(estate);
2144 }
2145
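/*
 * Editor's note: the fetch loop above walks the update chain through
 * t_ctid.  Sketch of the invariant it relies on: each tuple version's
 * t_ctid points to its successor (or to itself if it was never updated),
 * and the successor's xmin equals the predecessor's xmax, which is why
 * the loop carries priorXmax forward at each step:
 *
 *              v1 (xmax = X, t_ctid -> v2)   ==>   v2 (xmin = X, ...)
 */
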
2146 static TupleTableSlot *
2147 EvalPlanQualNext(EState *estate)
2148 {
2149         evalPlanQual *epq = estate->es_evalPlanQual;
2150         MemoryContext oldcontext;
2151         TupleTableSlot *slot;
2152
2153         Assert(epq->rti != 0);
2154
2155 lpqnext:;
2156         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2157         slot = ExecProcNode(epq->planstate);
2158         MemoryContextSwitchTo(oldcontext);
2159
2160         /*
2161          * No more tuples for this PQ; continue with the previous one.
2162          */
2163         if (TupIsNull(slot))
2164         {
2165                 evalPlanQual *oldepq;
2166
2167                 /* stop execution */
2168                 EvalPlanQualStop(epq);
2169                 /* pop old PQ from the stack */
2170                 oldepq = epq->next;
2171                 if (oldepq == NULL)
2172                 {
2173                         /* this is the first (oldest) PQ - mark as free */
2174                         epq->rti = 0;
2175                         estate->es_useEvalPlan = false;
2176                         /* and continue Query execution */
2177                         return NULL;
2178                 }
2179                 Assert(oldepq->rti != 0);
2180                 /* push current PQ to freePQ stack */
2181                 oldepq->free = epq;
2182                 epq = oldepq;
2183                 estate->es_evalPlanQual = epq;
2184                 goto lpqnext;
2185         }
2186
2187         return slot;
2188 }
2189
2190 static void
2191 EndEvalPlanQual(EState *estate)
2192 {
2193         evalPlanQual *epq = estate->es_evalPlanQual;
2194
2195         if (epq->rti == 0)                      /* plans already shut down */
2196         {
2197                 Assert(epq->next == NULL);
2198                 return;
2199         }
2200
2201         for (;;)
2202         {
2203                 evalPlanQual *oldepq;
2204
2205                 /* stop execution */
2206                 EvalPlanQualStop(epq);
2207                 /* pop old PQ from the stack */
2208                 oldepq = epq->next;
2209                 if (oldepq == NULL)
2210                 {
2211                         /* this is the first (oldest) PQ - mark as free */
2212                         epq->rti = 0;
2213                         estate->es_useEvalPlan = false;
2214                         break;
2215                 }
2216                 Assert(oldepq->rti != 0);
2217                 /* push current PQ to freePQ stack */
2218                 oldepq->free = epq;
2219                 epq = oldepq;
2220                 estate->es_evalPlanQual = epq;
2221         }
2222 }
2223
2224 /*
2225  * Start execution of one level of PlanQual.
2226  *
2227  * This is a cut-down version of ExecutorStart(): we copy some state from
2228  * the top-level estate rather than initializing it fresh.
2229  */
2230 static void
2231 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2232 {
2233         EState     *epqstate;
2234         int                     rtsize;
2235         MemoryContext oldcontext;
2236
2237         rtsize = list_length(estate->es_range_table);
2238
2239         /*
2240          * It's tempting to think about using CreateSubExecutorState here, but
2241          * at present we can't because of memory leakage concerns ...
2242          */
2243         epq->estate = epqstate = CreateExecutorState();
2244
2245         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2246
2247         /*
2248          * The epqstates share the top query's copy of unchanging state such as
2249          * the snapshot, rangetable, result-rel info, and external Param info.
2250          * They need their own copies of local state, including a tuple table,
2251          * es_param_exec_vals, etc.
2252          */
2253         epqstate->es_direction = ForwardScanDirection;
2254         epqstate->es_snapshot = estate->es_snapshot;
2255         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2256         epqstate->es_range_table = estate->es_range_table;
2257         epqstate->es_result_relations = estate->es_result_relations;
2258         epqstate->es_num_result_relations = estate->es_num_result_relations;
2259         epqstate->es_result_relation_info = estate->es_result_relation_info;
2260         epqstate->es_junkFilter = estate->es_junkFilter;
2261         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2262         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2263         epqstate->es_param_list_info = estate->es_param_list_info;
2264         if (estate->es_plannedstmt->nParamExec > 0)
2265                 epqstate->es_param_exec_vals = (ParamExecData *)
2266                         palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2267         epqstate->es_rowMarks = estate->es_rowMarks;
2268         epqstate->es_instrument = estate->es_instrument;
2269         epqstate->es_select_into = estate->es_select_into;
2270         epqstate->es_into_oids = estate->es_into_oids;
2271         epqstate->es_plannedstmt = estate->es_plannedstmt;
2272
2273         /*
2274          * Each epqstate must have its own es_evTupleNull state, but all the stack
2275          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2276          * the value being examined by an outer recheck.
2277          */
2278         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2279         if (priorepq == NULL)
2280                 /* first PQ stack entry */
2281                 epqstate->es_evTuple = (HeapTuple *)
2282                         palloc0(rtsize * sizeof(HeapTuple));
2283         else
2284                 /* later stack entries share the same storage */
2285                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2286
2287         epqstate->es_tupleTable =
2288                 ExecCreateTupleTable(estate->es_tupleTable->size);
2289
2290         epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2291
2292         MemoryContextSwitchTo(oldcontext);
2293 }
2294
2295 /*
2296  * End execution of one level of PlanQual.
2297  *
2298  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2299  * of the normal cleanup, but *not* close result relations (which we are
2300  * just sharing from the outer query).
2301  */
2302 static void
2303 EvalPlanQualStop(evalPlanQual *epq)
2304 {
2305         EState     *epqstate = epq->estate;
2306         MemoryContext oldcontext;
2307
2308         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2309
2310         ExecEndNode(epq->planstate);
2311
2312         ExecDropTupleTable(epqstate->es_tupleTable, true);
2313         epqstate->es_tupleTable = NULL;
2314
2315         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2316         {
2317                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2318                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2319         }
2320
2321         MemoryContextSwitchTo(oldcontext);
2322
2323         FreeExecutorState(epqstate);
2324
2325         epq->estate = NULL;
2326         epq->planstate = NULL;
2327 }
2328
2329
2330 /*
2331  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2332  *
2333  * We implement SELECT INTO by diverting SELECT's normal output with
2334  * a specialized DestReceiver type.
2335  *
2336  * TODO: remove some of the INTO-specific cruft from EState, and keep
2337  * it in the DestReceiver instead.
2338  */
2339
2340 typedef struct
2341 {
2342         DestReceiver pub;                       /* publicly-known function pointers */
2343         EState     *estate;                     /* EState we are working with */
2344 } DR_intorel;
2345
2346 /*
2347  * OpenIntoRel --- actually create the SELECT INTO target relation
2348  *
2349  * This also replaces QueryDesc->dest with the special DestReceiver for
2350  * SELECT INTO.  We assume that the correct result tuple type has already
2351  * been placed in queryDesc->tupDesc.
2352  */
2353 static void
2354 OpenIntoRel(QueryDesc *queryDesc)
2355 {
2356         IntoClause *into = queryDesc->plannedstmt->into;
2357         EState     *estate = queryDesc->estate;
2358         Relation        intoRelationDesc;
2359         char       *intoName;
2360         Oid                     namespaceId;
2361         Oid                     tablespaceId;
2362         Datum           reloptions;
2363         AclResult       aclresult;
2364         Oid                     intoRelationId;
2365         TupleDesc       tupdesc;
2366         DR_intorel *myState;
2367
2368         Assert(into);
2369
2370         /*
2371          * Check consistency of arguments
2372          */
2373         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2374                 ereport(ERROR,
2375                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2376                                  errmsg("ON COMMIT can only be used on temporary tables")));
2377
2378         /*
2379          * Find namespace to create in, check its permissions
2380          */
2381         intoName = into->rel->relname;
2382         namespaceId = RangeVarGetCreationNamespace(into->rel);
2383
2384         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2385                                                                           ACL_CREATE);
2386         if (aclresult != ACLCHECK_OK)
2387                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2388                                            get_namespace_name(namespaceId));
2389
2390         /*
2391          * Select tablespace to use.  If not specified, use default_tablespace
2392          * (which may in turn default to database's default).
2393          */
2394         if (into->tableSpaceName)
2395         {
2396                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2397                 if (!OidIsValid(tablespaceId))
2398                         ereport(ERROR,
2399                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2400                                          errmsg("tablespace \"%s\" does not exist",
2401                                                         into->tableSpaceName)));
2402         }
2403         else if (into->rel->istemp)
2404         {
2405                 tablespaceId = GetTempTablespace();
2406         }
2407         else
2408         {
2409                 tablespaceId = GetDefaultTablespace();
2410                 /* note InvalidOid is OK in this case */
2411         }
2412
2413         /* Check permissions except when using the database's default space */
2414         if (OidIsValid(tablespaceId))
2415         {
2416                 AclResult       aclresult;
2417
2418                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2419                                                                                    ACL_CREATE);
2420
2421                 if (aclresult != ACLCHECK_OK)
2422                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2423                                                    get_tablespace_name(tablespaceId));
2424         }
2425
2426         /* Parse and validate any reloptions */
2427         reloptions = transformRelOptions((Datum) 0,
2428                                                                          into->options,
2429                                                                          true,
2430                                                                          false);
2431         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2432
2433         /* have to copy the actual tupdesc to get rid of any constraints */
2434         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2435
2436         /* Now we can actually create the new relation */
2437         intoRelationId = heap_create_with_catalog(intoName,
2438                                                                                           namespaceId,
2439                                                                                           tablespaceId,
2440                                                                                           InvalidOid,
2441                                                                                           GetUserId(),
2442                                                                                           tupdesc,
2443                                                                                           RELKIND_RELATION,
2444                                                                                           false,
2445                                                                                           true,
2446                                                                                           0,
2447                                                                                           into->onCommit,
2448                                                                                           reloptions,
2449                                                                                           allowSystemTableMods);
2450
2451         FreeTupleDesc(tupdesc);
2452
2453         /*
2454          * Advance command counter so that the newly-created relation's catalog
2455          * tuples will be visible to heap_open.
2456          */
2457         CommandCounterIncrement();
2458
2459         /*
2460          * If necessary, create a TOAST table for the INTO relation. Note that
2461          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2462          * the TOAST table will be visible for insertion.
2463          */
2464         AlterTableCreateToastTable(intoRelationId);
2465
2466         /*
2467          * And open the constructed table for writing.
2468          */
2469         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2470
2471         /* use_wal off requires rd_targblock be initially invalid */
2472         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2473
2474         /*
2475          * We can skip WAL-logging the insertions, unless PITR is in use.
2476          *
2477          * Note that for a non-temp INTO table, this is safe only because we know
2478          * that the catalog changes above will have been WAL-logged, and so
2479          * RecordTransactionCommit will think it needs to WAL-log the eventual
2480          * transaction commit.  Else the commit might be lost, even though all the
2481          * data is safely fsync'd ...
2482          */
2483         estate->es_into_relation_use_wal = XLogArchivingActive();
2484         estate->es_into_relation_descriptor = intoRelationDesc;
2485
2486         /*
2487          * Now replace the query's DestReceiver with one for SELECT INTO
2488          */
2489         queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
2490         myState = (DR_intorel *) queryDesc->dest;
2491         Assert(myState->pub.mydest == DestIntoRel);
2492         myState->estate = estate;
2493 }
2494
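/*
 * Editor's note (hypothetical names): this path serves both spellings of
 * the same operation,
 *
 *              SELECT a, b INTO newtab FROM src WHERE b > 0;
 *              CREATE TABLE newtab AS SELECT a, b FROM src WHERE b > 0;
 *
 * with into->rel, into->tableSpaceName, into->options, and into->onCommit
 * carrying the INTO-side clauses down to heap_create_with_catalog above.
 */
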
2495 /*
2496  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2497  */
2498 static void
2499 CloseIntoRel(QueryDesc *queryDesc)
2500 {
2501         EState     *estate = queryDesc->estate;
2502
2503         /* OpenIntoRel might never have gotten called */
2504         if (estate->es_into_relation_descriptor)
2505         {
2506                 /*
2507                  * If we skipped using WAL, and it's not a temp relation, we must
2508                  * force the relation down to disk before it's safe to commit the
2509                  * transaction.  This requires forcing out any dirty buffers and then
2510                  * doing a forced fsync.
2511                  */
2512                 if (!estate->es_into_relation_use_wal &&
2513                         !estate->es_into_relation_descriptor->rd_istemp)
2514                         heap_sync(estate->es_into_relation_descriptor);
2515
2516                 /* close rel, but keep lock until commit */
2517                 heap_close(estate->es_into_relation_descriptor, NoLock);
2518
2519                 estate->es_into_relation_descriptor = NULL;
2520         }
2521 }
2522
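/*
 * Editor's note: the WAL-skip optimization trades log volume for a forced
 * sync at commit.  When es_into_relation_use_wal is false, no data pages
 * of the new table are described in WAL, so heap_sync must force every
 * dirty page of the relation to disk before the transaction commit can be
 * made permanent; otherwise a crash could leave a committed table whose
 * data never reached disk.
 */
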
2523 /*
2524  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2525  *
2526  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2527  * we just leave the private fields empty here.  OpenIntoRel will
2528  * fill them in.
2529  */
2530 DestReceiver *
2531 CreateIntoRelDestReceiver(void)
2532 {
2533         DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2534
2535         self->pub.receiveSlot = intorel_receive;
2536         self->pub.rStartup = intorel_startup;
2537         self->pub.rShutdown = intorel_shutdown;
2538         self->pub.rDestroy = intorel_destroy;
2539         self->pub.mydest = DestIntoRel;
2540
2541         self->estate = NULL;
2542
2543         return (DestReceiver *) self;
2544 }
2545
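/*
 * Editor's note: OpenIntoRel reaches this constructor indirectly, via
 * CreateDestReceiver(DestIntoRel, NULL).  A hedged sketch of the full
 * round trip, using only calls visible in this file:
 *
 *              DestReceiver *dest = CreateDestReceiver(DestIntoRel, NULL);
 *              ((DR_intorel *) dest)->estate = estate;     (as OpenIntoRel does)
 *              (*dest->receiveSlot) (slot, dest);          (once per tuple)
 *              (*dest->rDestroy) (dest);                   (at executor end)
 */
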
2546 /*
2547  * intorel_startup --- executor startup
2548  */
2549 static void
2550 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2551 {
2552         /* no-op */
2553 }
2554
2555 /*
2556  * intorel_receive --- receive one tuple
2557  */
2558 static void
2559 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2560 {
2561         DR_intorel *myState = (DR_intorel *) self;
2562         EState     *estate = myState->estate;
2563         HeapTuple       tuple;
2564
2565         tuple = ExecCopySlotTuple(slot);
2566
2567         heap_insert(estate->es_into_relation_descriptor,
2568                                 tuple,
2569                                 estate->es_snapshot->curcid,
2570                                 estate->es_into_relation_use_wal,
2571                                 false);                 /* never any point in using FSM */
2572
2573         /* We know this is a newly created relation, so there are no indexes */
2574
2575         heap_freetuple(tuple);
2576
2577         IncrAppended();
2578 }
2579
2580 /*
2581  * intorel_shutdown --- executor end
2582  */
2583 static void
2584 intorel_shutdown(DestReceiver *self)
2585 {
2586         /* no-op */
2587 }
2588
2589 /*
2590  * intorel_destroy --- release DestReceiver object
2591  */
2592 static void
2593 intorel_destroy(DestReceiver *self)
2594 {
2595         pfree(self);
2596 }