/*-------------------------------------------------------------------------
 *
 * execMain.c
 *        top level executor interface routines
 *
 * INTERFACE ROUTINES
 *      ExecutorStart()
 *      ExecutorRun()
 *      ExecutorEnd()
 *
 *      The old ExecutorMain() has been replaced by ExecutorStart(),
 *      ExecutorRun() and ExecutorEnd()
 *
 *      These three procedures are the external interfaces to the executor.
 *      In each case, the query descriptor is required as an argument.
 *
 *      ExecutorStart() must be called at the beginning of execution of any
 *      query plan and ExecutorEnd() should always be called at the end of
 *      execution of a plan.
 *
 *      ExecutorRun accepts direction and count arguments that specify whether
 *      the plan is to be executed forwards or backwards, and for how many
 *      tuples.
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.288 2007/02/22 22:00:22 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"


typedef struct evalPlanQual
{
        Index           rti;            /* rangetable index of relation being rechecked */
        EState     *estate;             /* subsidiary EState for the recheck */
        PlanState  *planstate;          /* plan tree re-initialized for the recheck */
        struct evalPlanQual *next;      /* stack of active PlanQual plans */
        struct evalPlanQual *free;      /* list of free PlanQual plans */
} evalPlanQual;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
                                  Index resultRelationIndex,
                                  List *rangeTable,
                                  CmdType operation,
                                  bool doInstrument);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
                   DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
                   TupleTableSlot *planSlot,
                   DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
                   TupleTableSlot *planSlot,
                   DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
                   TupleTableSlot *planSlot,
                   DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
                                         TupleTableSlot *tupleSlot,
                                         TupleTableSlot *planSlot,
                                         DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
                                  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *              ExecutorStart
 *
 *              This routine must be called at the beginning of execution of
 *              any query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks: queryDesc must not be started already */
        Assert(queryDesc != NULL);
        Assert(queryDesc->estate == NULL);

        /*
         * If the transaction is read-only, we need to check if any writes are
         * planned to non-temporary tables.  EXPLAIN is considered read-only.
         */
        if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
                ExecCheckXactReadOnly(queryDesc->plannedstmt);

        /*
         * Build EState, switch into per-query memory context for startup.
         */
        estate = CreateExecutorState();
        queryDesc->estate = estate;

        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * Fill in parameters, if any, from queryDesc
         */
        estate->es_param_list_info = queryDesc->params;

        if (queryDesc->plannedstmt->nParamExec > 0)
                estate->es_param_exec_vals = (ParamExecData *)
                        palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

        /*
         * Copy other important information into the EState
         */
        estate->es_snapshot = queryDesc->snapshot;
        estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
        estate->es_instrument = queryDesc->doInstrument;

        /*
         * Initialize the plan state tree
         */
        InitPlan(queryDesc, eflags);

        MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *              ExecutorRun
 *
 *              This is the main routine of the executor module. It accepts
 *              the query descriptor from the traffic cop and executes the
 *              query plan.
 *
 *              ExecutorStart must have been called already.
 *
 *              If direction is NoMovementScanDirection then nothing is done
 *              except to start up/shut down the destination.  Otherwise,
 *              we retrieve up to 'count' tuples in the specified direction.
 *
 *              Note: count = 0 is interpreted as no portal limit, i.e., run to
 *              completion.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
        bool            sendTuples;
        TupleTableSlot *result;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * extract information from the query descriptor
         */
        operation = queryDesc->operation;
        dest = queryDesc->dest;

        /*
         * startup tuple receiver, if we will be emitting tuples
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;

        sendTuples = (operation == CMD_SELECT ||
                                  queryDesc->plannedstmt->returningLists);

        if (sendTuples)
                (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

        /*
         * run plan
         */
        if (ScanDirectionIsNoMovement(direction))
                result = NULL;
        else
                result = ExecutePlan(estate,
                                                         queryDesc->planstate,
                                                         operation,
                                                         count,
                                                         direction,
                                                         dest);

        /*
         * shutdown tuple receiver, if we started it
         */
        if (sendTuples)
                (*dest->rShutdown) (dest);

        MemoryContextSwitchTo(oldcontext);

        return result;
}
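
#ifdef NOT_USED
/*
 * Minimal usage sketch (hypothetical helper, not part of this file's API):
 * fetch a started SELECT's results in batches of 100 tuples, FETCH-style.
 * Each batch's tuples go to queryDesc->dest; ExecutorRun returns NULL once
 * the plan is exhausted, which ends the loop.
 */
static void
ExampleFetchInBatches(QueryDesc *queryDesc)
{
        while (ExecutorRun(queryDesc, ForwardScanDirection, 100L) != NULL)
                ;                               /* tuples were sent to the DestReceiver */
}
#endif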

/* ----------------------------------------------------------------
 *              ExecutorEnd
 *
 *              This routine must be called at the end of execution of any
 *              query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        ExecEndPlan(queryDesc->planstate, estate);

        /*
         * Close the SELECT INTO relation if any
         */
        if (estate->es_select_into)
                CloseIntoRel(queryDesc);

        /*
         * Must switch out of context before destroying it
         */
        MemoryContextSwitchTo(oldcontext);

        /*
         * Release EState and per-query memory context.  This should release
         * everything the executor has allocated.
         */
        FreeExecutorState(estate);

        /* Reset queryDesc fields that no longer point to anything */
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
}
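
#ifdef NOT_USED
/*
 * End-to-end sketch of the Start/Run/End protocol described above
 * (hypothetical helper; a real caller such as the portal code also manages
 * snapshots, resource owners, and error cleanup).  count = 0 means run the
 * plan to completion.
 */
static void
ExampleRunToCompletion(QueryDesc *queryDesc)
{
        ExecutorStart(queryDesc, 0);
        (void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
        ExecutorEnd(queryDesc);
}
#endif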

/* ----------------------------------------------------------------
 *              ExecutorRewind
 *
 *              This routine may be called on an open queryDesc to rewind it
 *              to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /* It's probably not sensible to rescan updating queries */
        Assert(queryDesc->operation == CMD_SELECT);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * rescan plan
         */
        ExecReScan(queryDesc->planstate, NULL);

        MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *              Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
        ListCell   *l;

        foreach(l, rangeTable)
        {
                ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
        }
}

/*
 * ExecCheckRTEPerms
 *              Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
        AclMode         requiredPerms;
        Oid                     relOid;
        Oid                     userid;

        /*
         * Only plain-relation RTEs need to be checked here.  Function RTEs are
         * checked by init_fcache when the function is prepared for execution.
         * Join, subquery, and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
                return;

        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
                return;

        relOid = rte->relid;

        /*
         * userid to check as: current user unless we have a setuid indication.
         *
         * Note: GetUserId() is presently fast enough that there's no harm in
         * calling it separately for each RTE.  If that stops being true, we could
         * call it once in ExecCheckRTPerms and pass the userid down from there.
         * But for now, no need for the extra clutter.
         */
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

        /*
         * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
         */
        if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
                != requiredPerms)
                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                           get_rel_name(relOid));
}
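
#ifdef NOT_USED
/*
 * Illustration of the ACLMASK_ALL test above (hypothetical helper): with
 * requiredPerms = ACL_SELECT | ACL_UPDATE, a role holding only SELECT makes
 * pg_class_aclmask() return just ACL_SELECT, so the equality test fails and
 * the caller would report a permission error.
 */
static bool
ExampleHasAllPerms(Oid relOid, Oid userid, AclMode requiredPerms)
{
        return pg_class_aclmask(relOid, userid, requiredPerms,
                                                        ACLMASK_ALL) == requiredPerms;
}
#endif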

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
        ListCell   *l;

        /*
         * CREATE TABLE AS or SELECT INTO?
         *
         * XXX should we allow this if the destination is temp?
         */
        if (plannedstmt->into != NULL)
                goto fail;

        /* Fail if write permissions are requested on any non-temp table */
        foreach(l, plannedstmt->rtable)
        {
                RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

                if (rte->rtekind != RTE_RELATION)
                        continue;

                if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
                        continue;

                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;

                goto fail;
        }

        return;

fail:
        ereport(ERROR,
                        (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
                         errmsg("transaction is read-only")));
}


/* ----------------------------------------------------------------
 *              InitPlan
 *
 *              Initializes the query plan: open files, allocate storage
 *              and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
        CmdType         operation = queryDesc->operation;
        PlannedStmt *plannedstmt = queryDesc->plannedstmt;
        Plan       *plan = plannedstmt->planTree;
        List       *rangeTable = plannedstmt->rtable;
        EState     *estate = queryDesc->estate;
        PlanState  *planstate;
        TupleDesc       tupType;
        ListCell   *l;

        /*
         * Do permissions checks
         */
        ExecCheckRTPerms(rangeTable);

        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;

        /*
         * initialize result relation stuff
         */
        if (plannedstmt->resultRelations)
        {
                List       *resultRelations = plannedstmt->resultRelations;
                int                     numResultRelations = list_length(resultRelations);
                ResultRelInfo *resultRelInfos;
                ResultRelInfo *resultRelInfo;

                resultRelInfos = (ResultRelInfo *)
                        palloc(numResultRelations * sizeof(ResultRelInfo));
                resultRelInfo = resultRelInfos;
                foreach(l, resultRelations)
                {
                        initResultRelInfo(resultRelInfo,
                                                          lfirst_int(l),
                                                          rangeTable,
                                                          operation,
                                                          estate->es_instrument);
                        resultRelInfo++;
                }
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
                /* Initialize to first or only result rel */
                estate->es_result_relation_info = resultRelInfos;
        }
        else
        {
                /*
                 * if no result relation, then set state appropriately
                 */
                estate->es_result_relations = NULL;
                estate->es_num_result_relations = 0;
                estate->es_result_relation_info = NULL;
        }

        /*
         * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
         * flag appropriately so that the plan tree will be initialized with the
         * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
         */
        estate->es_select_into = false;
        if (operation == CMD_SELECT && plannedstmt->into != NULL)
        {
                estate->es_select_into = true;
                estate->es_into_oids = interpretOidsOption(plannedstmt->into->options);
        }

        /*
         * Have to lock relations selected FOR UPDATE/FOR SHARE before we
         * initialize the plan tree, else we'd be doing a lock upgrade.
         * While we are at it, build the ExecRowMark list.
         */
        estate->es_rowMarks = NIL;
        foreach(l, plannedstmt->rowMarks)
        {
                RowMarkClause *rc = (RowMarkClause *) lfirst(l);
                Oid                     relid = getrelid(rc->rti, rangeTable);
                Relation        relation;
                ExecRowMark *erm;

                relation = heap_open(relid, RowShareLock);
                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->rti = rc->rti;
                erm->forUpdate = rc->forUpdate;
                erm->noWait = rc->noWait;
                /* We'll set up ctidAttNo below */
                erm->ctidAttNo = InvalidAttrNumber;
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }

        /*
         * initialize the executor "tuple" table.  We need slots for all the plan
         * nodes, plus possibly output slots for the junkfilter(s). At this point
         * we aren't sure if we need junkfilters, so just add slots for them
         * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
         * trigger output tuples.
         */
        {
                int                     nSlots = ExecCountSlotsNode(plan);

                if (plannedstmt->resultRelations != NIL)
                        nSlots += list_length(plannedstmt->resultRelations);
                else
                        nSlots += 1;
                if (operation != CMD_SELECT)
                        nSlots++;                       /* for es_trig_tuple_slot */
                if (plannedstmt->returningLists)
                        nSlots++;                       /* for RETURNING projection */

                estate->es_tupleTable = ExecCreateTupleTable(nSlots);

                if (operation != CMD_SELECT)
                        estate->es_trig_tuple_slot =
                                ExecAllocTableSlot(estate->es_tupleTable);
        }

        /* mark EvalPlanQual not active */
        estate->es_plannedstmt = plannedstmt;
        estate->es_evalPlanQual = NULL;
        estate->es_evTupleNull = NULL;
        estate->es_evTuple = NULL;
        estate->es_useEvalPlan = false;

        /*
         * initialize the private state information for all the nodes in the query
         * tree.  This opens files, allocates storage and leaves us ready to start
         * processing tuples.
         */
        planstate = ExecInitNode(plan, estate, eflags);

        /*
         * Get the tuple descriptor describing the type of tuples to return. (this
         * is especially important if we are creating a relation with "SELECT
         * INTO")
         */
        tupType = ExecGetResultType(planstate);

        /*
         * Initialize the junk filter if needed.  SELECT and INSERT queries need a
         * filter if there are any junk attrs in the tlist.  INSERT and SELECT
         * INTO also need a filter if the plan may return raw disk tuples (else
         * heap_insert will be scribbling on the source relation!). UPDATE and
         * DELETE always need a filter, since there's always a junk 'ctid'
         * attribute present --- no need to look first.
         */
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;

                switch (operation)
                {
                        case CMD_SELECT:
                        case CMD_INSERT:
                                foreach(tlist, plan->targetlist)
                                {
                                        TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                                        if (tle->resjunk)
                                        {
                                                junk_filter_needed = true;
                                                break;
                                        }
                                }
                                if (!junk_filter_needed &&
                                        (operation == CMD_INSERT || estate->es_select_into) &&
                                        ExecMayReturnRawTuples(planstate))
                                        junk_filter_needed = true;
                                break;
                        case CMD_UPDATE:
                        case CMD_DELETE:
                                junk_filter_needed = true;
                                break;
                        default:
                                break;
                }

                if (junk_filter_needed)
                {
                        /*
                         * If there are multiple result relations, each one needs its own
                         * junk filter.  Note this is only possible for UPDATE/DELETE, so
                         * we can't be fooled by some needing a filter and some not.
                         */
                        if (list_length(plannedstmt->resultRelations) > 1)
                        {
                                PlanState **appendplans;
                                int                     as_nplans;
                                ResultRelInfo *resultRelInfo;
                                int                     i;

                                /* Top plan had better be an Append here. */
                                Assert(IsA(plan, Append));
                                Assert(((Append *) plan)->isTarget);
                                Assert(IsA(planstate, AppendState));
                                appendplans = ((AppendState *) planstate)->appendplans;
                                as_nplans = ((AppendState *) planstate)->as_nplans;
                                Assert(as_nplans == estate->es_num_result_relations);
                                resultRelInfo = estate->es_result_relations;
                                for (i = 0; i < as_nplans; i++)
                                {
                                        PlanState  *subplan = appendplans[i];
                                        JunkFilter *j;

                                        j = ExecInitJunkFilter(subplan->plan->targetlist,
                                                        resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
                                                                  ExecAllocTableSlot(estate->es_tupleTable));
                                        /*
                                         * Since it must be UPDATE/DELETE, there had better be
                                         * a "ctid" junk attribute in the tlist ... but ctid could
                                         * be at a different resno for each result relation.
                                         * We look up the ctid resnos now and save them in the
                                         * junkfilters.
                                         */
                                        j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                                        if (!AttributeNumberIsValid(j->jf_junkAttNo))
                                                elog(ERROR, "could not find junk ctid column");
                                        resultRelInfo->ri_junkFilter = j;
                                        resultRelInfo++;
                                }

                                /*
                                 * Set active junkfilter too; at this point ExecInitAppend has
                                 * already selected an active result relation...
                                 */
                                estate->es_junkFilter =
                                        estate->es_result_relation_info->ri_junkFilter;
                        }
                        else
                        {
                                /* Normal case with just one JunkFilter */
                                JunkFilter *j;

                                j = ExecInitJunkFilter(planstate->plan->targetlist,
                                                                           tupType->tdhasoid,
                                                                  ExecAllocTableSlot(estate->es_tupleTable));
                                estate->es_junkFilter = j;
                                if (estate->es_result_relation_info)
                                        estate->es_result_relation_info->ri_junkFilter = j;

                                if (operation == CMD_SELECT)
                                {
                                        /* For SELECT, want to return the cleaned tuple type */
                                        tupType = j->jf_cleanTupType;
                                        /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
                                        foreach(l, estate->es_rowMarks)
                                        {
                                                ExecRowMark *erm = (ExecRowMark *) lfirst(l);
                                                char            resname[32];

                                                snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
                                                erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
                                                if (!AttributeNumberIsValid(erm->ctidAttNo))
                                                        elog(ERROR, "could not find junk \"%s\" column",
                                                                 resname);
                                        }
                                }
                                else if (operation == CMD_UPDATE || operation == CMD_DELETE)
                                {
                                        /* For UPDATE/DELETE, find the ctid junk attr now */
                                        j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                                        if (!AttributeNumberIsValid(j->jf_junkAttNo))
                                                elog(ERROR, "could not find junk ctid column");
                                }
                        }
                }
                else
                        estate->es_junkFilter = NULL;
        }

        /*
         * Initialize RETURNING projections if needed.
         */
        if (plannedstmt->returningLists)
        {
                TupleTableSlot *slot;
                ExprContext *econtext;
                ResultRelInfo *resultRelInfo;

                /*
                 * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
                 * We assume all the sublists will generate the same output tupdesc.
                 */
                tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
                                                                 false);

                /* Set up a slot for the output of the RETURNING projection(s) */
                slot = ExecAllocTableSlot(estate->es_tupleTable);
                ExecSetSlotDescriptor(slot, tupType);
                /* Need an econtext too */
                econtext = CreateExprContext(estate);

                /*
                 * Build a projection for each result rel.  Note that any SubPlans in
                 * the RETURNING lists get attached to the topmost plan node.
                 */
                Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
                resultRelInfo = estate->es_result_relations;
                foreach(l, plannedstmt->returningLists)
                {
                        List       *rlist = (List *) lfirst(l);
                        List       *rliststate;

                        rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
                        resultRelInfo->ri_projectReturning =
                                ExecBuildProjectionInfo(rliststate, econtext, slot,
                                                                           resultRelInfo->ri_RelationDesc->rd_att);
                        resultRelInfo++;
                }

                /*
                 * Because we already ran ExecInitNode() for the top plan node, any
                 * subplans we just attached to it won't have been initialized; so we
                 * have to do it here.  (Ugly, but the alternatives seem worse.)
                 */
                foreach(l, planstate->subPlan)
                {
                        SubPlanState *sstate = (SubPlanState *) lfirst(l);

                        Assert(IsA(sstate, SubPlanState));
                        if (sstate->planstate == NULL)          /* not inited yet? */
                                ExecInitSubPlan(sstate, estate, eflags);
                }
        }

        queryDesc->tupDesc = tupType;
        queryDesc->planstate = planstate;

        /*
         * If doing SELECT INTO, initialize the "into" relation.  We must wait
         * till now so we have the "clean" result tuple type to create the new
         * table from.
         *
         * If EXPLAIN, skip creating the "into" relation.
         */
        if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
                OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
static void
initResultRelInfo(ResultRelInfo *resultRelInfo,
                                  Index resultRelationIndex,
                                  List *rangeTable,
                                  CmdType operation,
                                  bool doInstrument)
{
        Oid                     resultRelationOid;
        Relation        resultRelationDesc;

        resultRelationOid = getrelid(resultRelationIndex, rangeTable);
        resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);

        switch (resultRelationDesc->rd_rel->relkind)
        {
                case RELKIND_SEQUENCE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change sequence \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_TOASTVALUE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change TOAST relation \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_VIEW:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change view \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
        }

        MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
        resultRelInfo->type = T_ResultRelInfo;
        resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
        resultRelInfo->ri_RelationDesc = resultRelationDesc;
        resultRelInfo->ri_NumIndices = 0;
        resultRelInfo->ri_IndexRelationDescs = NULL;
        resultRelInfo->ri_IndexRelationInfo = NULL;
        /* make a copy so as not to depend on relcache info not changing... */
        resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
        if (resultRelInfo->ri_TrigDesc)
        {
                int                     n = resultRelInfo->ri_TrigDesc->numtriggers;

                resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
                        palloc0(n * sizeof(FmgrInfo));
                if (doInstrument)
                        resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
                else
                        resultRelInfo->ri_TrigInstrument = NULL;
        }
        else
        {
                resultRelInfo->ri_TrigFunctions = NULL;
                resultRelInfo->ri_TrigInstrument = NULL;
        }
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;
        resultRelInfo->ri_projectReturning = NULL;

        /*
         * If there are indices on the result relation, open them and save
         * descriptors in the result relation info, so that we can add new index
         * entries for the tuples we add/update.  We need not do this for a
         * DELETE, however, since deletion doesn't affect indexes.
         */
        if (resultRelationDesc->rd_rel->relhasindex &&
                operation != CMD_DELETE)
                ExecOpenIndices(resultRelInfo);
}

/*
 *              ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
        if (planstate->state->es_select_into)
        {
                *hasoids = planstate->state->es_into_oids;
                return true;
        }
        else
        {
                ResultRelInfo *ri = planstate->state->es_result_relation_info;

                if (ri != NULL)
                {
                        Relation        rel = ri->ri_RelationDesc;

                        if (rel != NULL)
                        {
                                *hasoids = rel->rd_rel->relhasoids;
                                return true;
                        }
                }
        }

        return false;
}
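
#ifdef NOT_USED
/*
 * Sketch of how a node's result-type setup might consult the routine above
 * (hypothetical helper; the real callers are the ExecAssign*ResultType
 * paths).  If the context forces the choice, obey it; otherwise default to
 * tuples without OID space.
 */
static bool
ExampleChooseHasOids(PlanState *planstate)
{
        bool            hasoids;

        if (ExecContextForcesOids(planstate, &hasoids))
                return hasoids;
        return false;                           /* free choice; prefer no OID space */
}
#endif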

/* ----------------------------------------------------------------
 *              ExecEndPlan
 *
 *              Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
void
ExecEndPlan(PlanState *planstate, EState *estate)
{
        ResultRelInfo *resultRelInfo;
        int                     i;
        ListCell   *l;

        /*
         * shut down any PlanQual processing we were doing
         */
        if (estate->es_evalPlanQual != NULL)
                EndEvalPlanQual(estate);

        /*
         * shut down the node-type-specific query processing
         */
        ExecEndNode(planstate);

        /*
         * destroy the executor "tuple" table.
         */
        ExecDropTupleTable(estate->es_tupleTable, true);
        estate->es_tupleTable = NULL;

        /*
         * close the result relation(s) if any, but hold locks until xact commit.
         */
        resultRelInfo = estate->es_result_relations;
        for (i = estate->es_num_result_relations; i > 0; i--)
        {
                /* Close indices and then the relation itself */
                ExecCloseIndices(resultRelInfo);
                heap_close(resultRelInfo->ri_RelationDesc, NoLock);
                resultRelInfo++;
        }

        /*
         * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
         */
        foreach(l, estate->es_rowMarks)
        {
                ExecRowMark *erm = lfirst(l);

                heap_close(erm->relation, NoLock);
        }
}

/* ----------------------------------------------------------------
 *              ExecutePlan
 *
 *              processes the query plan to retrieve 'numberTuples' tuples in the
 *              direction specified.
 *
 *              Retrieves all tuples if numberTuples is 0
 *
 *              result is either a slot containing the last tuple in the case
 *              of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
                        PlanState *planstate,
                        CmdType operation,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest)
{
        JunkFilter *junkfilter;
        TupleTableSlot *planSlot;
        TupleTableSlot *slot;
        ItemPointer tupleid = NULL;
        ItemPointerData tuple_ctid;
        long            current_tuple_count;
        TupleTableSlot *result;

        /*
         * initialize local variables
         */
        current_tuple_count = 0;
        result = NULL;

        /*
         * Set the direction.
         */
        estate->es_direction = direction;

        /*
         * Process BEFORE EACH STATEMENT triggers
         */
        switch (operation)
        {
                case CMD_UPDATE:
                        ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_DELETE:
                        ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_INSERT:
                        ExecBSInsertTriggers(estate, estate->es_result_relation_info);
                        break;
                default:
                        /* do nothing */
                        break;
        }

        /*
         * Loop until we've processed the proper number of tuples from the plan.
         */
        for (;;)
        {
                /* Reset the per-output-tuple exprcontext */
                ResetPerTupleExprContext(estate);

                /*
                 * Execute the plan and obtain a tuple
                 */
lnext:  ;
                if (estate->es_useEvalPlan)
                {
                        planSlot = EvalPlanQualNext(estate);
                        if (TupIsNull(planSlot))
                                planSlot = ExecProcNode(planstate);
                }
                else
                        planSlot = ExecProcNode(planstate);

                /*
                 * if the tuple is null, then we assume there is nothing more to
                 * process so we just return null...
                 */
                if (TupIsNull(planSlot))
                {
                        result = NULL;
                        break;
                }
                slot = planSlot;

                /*
                 * if we have a junk filter, then project a new tuple with the junk
                 * removed.
                 *
                 * Store this new "clean" tuple in the junkfilter's resultSlot.
                 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
                 * because that tuple slot has the wrong descriptor.)
                 *
                 * Also, extract all the junk information we need.
                 */
                if ((junkfilter = estate->es_junkFilter) != NULL)
                {
                        Datum           datum;
                        bool            isNull;

                        /*
                         * extract the 'ctid' junk attribute.
                         */
                        if (operation == CMD_UPDATE || operation == CMD_DELETE)
                        {
                                datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
                                                                                         &isNull);
                                /* shouldn't ever get a null result... */
                                if (isNull)
                                        elog(ERROR, "ctid is NULL");

                                tupleid = (ItemPointer) DatumGetPointer(datum);
                                tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
                                tupleid = &tuple_ctid;
                        }

                        /*
                         * Process any FOR UPDATE or FOR SHARE locking requested.
                         */
                        else if (estate->es_rowMarks != NIL)
                        {
                                ListCell   *l;

                lmark:  ;
                                foreach(l, estate->es_rowMarks)
                                {
                                        ExecRowMark *erm = lfirst(l);
                                        HeapTupleData tuple;
                                        Buffer          buffer;
                                        ItemPointerData update_ctid;
                                        TransactionId update_xmax;
                                        TupleTableSlot *newSlot;
                                        LockTupleMode lockmode;
                                        HTSU_Result test;

                                        datum = ExecGetJunkAttribute(slot,
                                                                                                 erm->ctidAttNo,
                                                                                                 &isNull);
                                        /* shouldn't ever get a null result... */
                                        if (isNull)
                                                elog(ERROR, "ctid is NULL");

                                        tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

                                        if (erm->forUpdate)
                                                lockmode = LockTupleExclusive;
                                        else
                                                lockmode = LockTupleShared;

                                        test = heap_lock_tuple(erm->relation, &tuple, &buffer,
                                                                                   &update_ctid, &update_xmax,
                                                                                   estate->es_snapshot->curcid,
                                                                                   lockmode, erm->noWait);
                                        ReleaseBuffer(buffer);
                                        switch (test)
                                        {
                                                case HeapTupleSelfUpdated:
                                                        /* treat it as deleted; do not process */
                                                        goto lnext;

                                                case HeapTupleMayBeUpdated:
                                                        break;

                                                case HeapTupleUpdated:
                                                        if (IsXactIsoLevelSerializable)
                                                                ereport(ERROR,
                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                                                  errmsg("could not serialize access due to concurrent update")));
                                                        if (!ItemPointerEquals(&update_ctid,
                                                                                                   &tuple.t_self))
                                                        {
                                                                /* updated, so look at updated version */
                                                                newSlot = EvalPlanQual(estate,
                                                                                                           erm->rti,
                                                                                                           &update_ctid,
                                                                                                           update_xmax,
                                                                                                estate->es_snapshot->curcid);
                                                                if (!TupIsNull(newSlot))
                                                                {
                                                                        slot = planSlot = newSlot;
                                                                        estate->es_useEvalPlan = true;
                                                                        goto lmark;
                                                                }
                                                        }

                                                        /*
                                                         * if tuple was deleted or PlanQual failed for
                                                         * updated tuple - we must not return this tuple!
                                                         */
                                                        goto lnext;

                                                default:
                                                        elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                                                                 test);
                                                        return NULL;
                                        }
                                }
                        }

                        /*
                         * Create a new "clean" tuple with all junk attributes removed. We
                         * don't need to do this for DELETE, however (there will in fact
                         * be no non-junk attributes in a DELETE!)
                         */
                        if (operation != CMD_DELETE)
                                slot = ExecFilterJunk(junkfilter, slot);
                }

                /*
                 * now that we have a tuple, do the appropriate thing with it.. either
                 * return it to the user, add it to a relation someplace, delete it
                 * from a relation, or modify some of its attributes.
                 */
                switch (operation)
                {
                        case CMD_SELECT:
                                ExecSelect(slot, dest, estate);
                                result = slot;
                                break;

                        case CMD_INSERT:
                                ExecInsert(slot, tupleid, planSlot, dest, estate);
                                result = NULL;
                                break;

                        case CMD_DELETE:
                                ExecDelete(tupleid, planSlot, dest, estate);
                                result = NULL;
                                break;

                        case CMD_UPDATE:
                                ExecUpdate(slot, tupleid, planSlot, dest, estate);
                                result = NULL;
                                break;

                        default:
                                elog(ERROR, "unrecognized operation code: %d",
                                         (int) operation);
                                result = NULL;
                                break;
                }

                /*
                 * check our tuple count.. if we've processed the proper number then
                 * quit, else loop again and process more tuples.  Zero numberTuples
                 * means no limit.
                 */
                current_tuple_count++;
                if (numberTuples && numberTuples == current_tuple_count)
                        break;
        }

        /*
         * Process AFTER EACH STATEMENT triggers
         */
        switch (operation)
        {
                case CMD_UPDATE:
                        ExecASUpdateTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_DELETE:
                        ExecASDeleteTriggers(estate, estate->es_result_relation_info);
                        break;
                case CMD_INSERT:
                        ExecASInsertTriggers(estate, estate->es_result_relation_info);
                        break;
                default:
                        /* do nothing */
                        break;
        }

        /*
         * here, result is either a slot containing a tuple in the case of a
         * SELECT or NULL otherwise.
         */
        return result;
}
1281
1282 /* ----------------------------------------------------------------
1283  *              ExecSelect
1284  *
1285  *              SELECTs are easy: we just pass the tuple to the appropriate
1286  *              output function.
1287  * ----------------------------------------------------------------
1288  */
1289 static void
1290 ExecSelect(TupleTableSlot *slot,
1291                    DestReceiver *dest,
1292                    EState *estate)
1293 {
1294         (*dest->receiveSlot) (slot, dest);
1295         IncrRetrieved();
1296         (estate->es_processed)++;
1297 }
1298
1299 /* ----------------------------------------------------------------
1300  *              ExecInsert
1301  *
1302  *              INSERTs are trickier: we have to insert the tuple into
1303  *              the base relation and insert appropriate tuples into the
1304  *              index relations.
1305  * ----------------------------------------------------------------
1306  */
1307 static void
1308 ExecInsert(TupleTableSlot *slot,
1309                    ItemPointer tupleid,
1310                    TupleTableSlot *planSlot,
1311                    DestReceiver *dest,
1312                    EState *estate)
1313 {
1314         HeapTuple       tuple;
1315         ResultRelInfo *resultRelInfo;
1316         Relation        resultRelationDesc;
1317         Oid                     newId;
1318
1319         /*
1320          * get the heap tuple out of the tuple table slot, making sure we have a
1321          * writable copy
1322          */
1323         tuple = ExecMaterializeSlot(slot);
1324
1325         /*
1326          * get information on the (current) result relation
1327          */
1328         resultRelInfo = estate->es_result_relation_info;
1329         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1330
1331         /* BEFORE ROW INSERT Triggers */
1332         if (resultRelInfo->ri_TrigDesc &&
1333                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1334         {
1335                 HeapTuple       newtuple;
1336
1337                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1338
1339                 if (newtuple == NULL)   /* "do nothing" */
1340                         return;
1341
1342                 if (newtuple != tuple)  /* modified by Trigger(s) */
1343                 {
1344                         /*
1345                          * Put the modified tuple into a slot for convenience of routines
1346                          * below.  We assume the tuple was allocated in per-tuple memory
1347                          * context, and therefore will go away by itself. The tuple table
1348                          * slot should not try to clear it.
1349                          */
1350                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1351
1352                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1353                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1354                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1355                         slot = newslot;
1356                         tuple = newtuple;
1357                 }
1358         }
1359
1360         /*
1361          * Check the constraints of the tuple
1362          */
1363         if (resultRelationDesc->rd_att->constr)
1364                 ExecConstraints(resultRelInfo, slot, estate);
1365
1366         /*
1367          * insert the tuple
1368          *
1369          * Note: heap_insert returns the tid (location) of the new tuple in the
1370          * t_self field.
1371          */
1372         newId = heap_insert(resultRelationDesc, tuple,
1373                                                 estate->es_snapshot->curcid,
1374                                                 true, true);
1375
1376         IncrAppended();
1377         (estate->es_processed)++;
1378         estate->es_lastoid = newId;
1379         setLastTid(&(tuple->t_self));
1380
1381         /*
1382          * insert index entries for tuple
1383          */
1384         if (resultRelInfo->ri_NumIndices > 0)
1385                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1386
1387         /* AFTER ROW INSERT Triggers */
1388         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1389
1390         /* Process RETURNING if present */
1391         if (resultRelInfo->ri_projectReturning)
1392                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1393                                                          slot, planSlot, dest);
1394 }
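/*
 * A minimal sketch of the BEFORE ROW trigger contract relied on above:
 * the trigger may return NULL ("do nothing"), the original tuple
 * (proceed unchanged), or a replacement tuple the caller must adopt.
 * The ToyRow type and toy_* names are illustrative stand-ins, not
 * backend APIs, and the block is excluded from compilation.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct ToyRow
{
	int			id;
	char		payload[32];
} ToyRow;

/* Hypothetical BEFORE ROW INSERT hook */
static ToyRow *
toy_before_insert(ToyRow *row)
{
	if (row->id < 0)
		return NULL;			/* suppress the insert entirely */
	if (strcmp(row->payload, "fixme") == 0)
	{
		ToyRow	   *newrow = (ToyRow *) malloc(sizeof(ToyRow));

		*newrow = *row;
		strcpy(newrow->payload, "fixed");
		return newrow;			/* substitute a modified row */
	}
	return row;					/* proceed with the original row */
}

static void
toy_insert(ToyRow *row)
{
	ToyRow	   *newrow = toy_before_insert(row);

	if (newrow == NULL)
		return;					/* trigger said "do nothing" */
	if (newrow != row)
		row = newrow;			/* trigger substituted a new row */
	printf("inserting %d/%s\n", row->id, row->payload);
}
#endif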
1395
1396 /* ----------------------------------------------------------------
1397  *              ExecDelete
1398  *
1399  *              DELETE is like UPDATE, except that we delete the tuple and no
1400  *              index modifications are needed
1401  * ----------------------------------------------------------------
1402  */
1403 static void
1404 ExecDelete(ItemPointer tupleid,
1405                    TupleTableSlot *planSlot,
1406                    DestReceiver *dest,
1407                    EState *estate)
1408 {
1409         ResultRelInfo *resultRelInfo;
1410         Relation        resultRelationDesc;
1411         HTSU_Result result;
1412         ItemPointerData update_ctid;
1413         TransactionId update_xmax;
1414
1415         /*
1416          * get information on the (current) result relation
1417          */
1418         resultRelInfo = estate->es_result_relation_info;
1419         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1420
1421         /* BEFORE ROW DELETE Triggers */
1422         if (resultRelInfo->ri_TrigDesc &&
1423                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1424         {
1425                 bool            dodelete;
1426
1427                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1428                                                                                 estate->es_snapshot->curcid);
1429
1430                 if (!dodelete)                  /* "do nothing" */
1431                         return;
1432         }
1433
1434         /*
1435          * delete the tuple
1436          *
1437          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1438          * the row to be deleted is visible to that snapshot, and throw a can't-
1439          * serialize error if not.      This is a special-case behavior needed for
1440          * referential integrity updates in serializable transactions.
1441          */
1442 ldelete:;
1443         result = heap_delete(resultRelationDesc, tupleid,
1444                                                  &update_ctid, &update_xmax,
1445                                                  estate->es_snapshot->curcid,
1446                                                  estate->es_crosscheck_snapshot,
1447                                                  true /* wait for commit */ );
1448         switch (result)
1449         {
1450                 case HeapTupleSelfUpdated:
1451                         /* already deleted by self; nothing to do */
1452                         return;
1453
1454                 case HeapTupleMayBeUpdated:
1455                         break;
1456
1457                 case HeapTupleUpdated:
1458                         if (IsXactIsoLevelSerializable)
1459                                 ereport(ERROR,
1460                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1461                                                  errmsg("could not serialize access due to concurrent update")));
1462                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1463                         {
1464                                 TupleTableSlot *epqslot;
1465
1466                                 epqslot = EvalPlanQual(estate,
1467                                                                            resultRelInfo->ri_RangeTableIndex,
1468                                                                            &update_ctid,
1469                                                                            update_xmax,
1470                                                                            estate->es_snapshot->curcid);
1471                                 if (!TupIsNull(epqslot))
1472                                 {
1473                                         *tupleid = update_ctid;
1474                                         goto ldelete;
1475                                 }
1476                         }
1477                         /* tuple already deleted; nothing to do */
1478                         return;
1479
1480                 default:
1481                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1482                         return;
1483         }
1484
1485         IncrDeleted();
1486         (estate->es_processed)++;
1487
1488         /*
1489          * Note: Normally one would think that we have to delete index tuples
1490          * associated with the heap tuple now...
1491          *
1492          * ... but in POSTGRES, we have no need to do this because VACUUM will
1493          * take care of it later.  We can't delete index tuples immediately
1494          * anyway, since the tuple is still visible to other transactions.
1495          */
1496
1497         /* AFTER ROW DELETE Triggers */
1498         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1499
1500         /* Process RETURNING if present */
1501         if (resultRelInfo->ri_projectReturning)
1502         {
1503                 /*
1504                  * We have to put the target tuple into a slot, which means we first
1505                  * have to fetch it.  We can use the trigger tuple slot.
1506                  */
1507                 TupleTableSlot *slot = estate->es_trig_tuple_slot;
1508                 HeapTupleData deltuple;
1509                 Buffer          delbuffer;
1510
1511                 deltuple.t_self = *tupleid;
1512                 if (!heap_fetch(resultRelationDesc, SnapshotAny,
1513                                                 &deltuple, &delbuffer, false, NULL))
1514                         elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1515
1516                 if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
1517                         ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
1518                 ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
1519
1520                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1521                                                          slot, planSlot, dest);
1522
1523                 ExecClearTuple(slot);
1524                 ReleaseBuffer(delbuffer);
1525         }
1526 }
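/*
 * A compact sketch of the concurrent-update retry pattern above: attempt
 * the delete, and if another transaction updated the row first (in READ
 * COMMITTED mode), re-test the successor version and retry at its new
 * location.  The toy_* declarations are illustrative assumptions, not
 * backend APIs; the block is excluded from compilation.
 */
#if 0
#include <stdbool.h>

typedef enum
{
	TOY_DELETED,				/* we deleted it */
	TOY_SELF_MODIFIED,			/* already modified by this query */
	TOY_CONCURRENTLY_UPDATED	/* someone else updated it first */
} ToyResult;

/* Toy storage layer: on TOY_CONCURRENTLY_UPDATED, *loc is advanced to
 * the successor version's location (like update_ctid above). */
extern ToyResult toy_heap_delete(int *loc);

/* Re-evaluate the query's quals against the successor version,
 * analogous to EvalPlanQual returning a non-null slot. */
extern bool toy_recheck_quals(int loc);

static void
toy_delete(int loc)
{
retry:
	switch (toy_heap_delete(&loc))
	{
		case TOY_DELETED:
			break;				/* success; fall through to bookkeeping */
		case TOY_SELF_MODIFIED:
			return;				/* nothing more to do */
		case TOY_CONCURRENTLY_UPDATED:
			if (toy_recheck_quals(loc))
				goto retry;		/* new version still satisfies quals */
			return;				/* new version no longer qualifies */
	}
	/* ... count the row, fire AFTER triggers, process RETURNING ... */
}
#endif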
1527
1528 /* ----------------------------------------------------------------
1529  *              ExecUpdate
1530  *
1531  *              note: we can't run UPDATE queries with transactions
1532  *              off, because UPDATEs are actually INSERTs and our
1533  *              scan would mistakenly loop forever, updating the tuple
1534  *              it just inserted.  This should be fixed, but until it
1535  *              is we don't want to get stuck in an infinite loop
1536  *              that corrupts your database.
1537  * ----------------------------------------------------------------
1538  */
1539 static void
1540 ExecUpdate(TupleTableSlot *slot,
1541                    ItemPointer tupleid,
1542                    TupleTableSlot *planSlot,
1543                    DestReceiver *dest,
1544                    EState *estate)
1545 {
1546         HeapTuple       tuple;
1547         ResultRelInfo *resultRelInfo;
1548         Relation        resultRelationDesc;
1549         HTSU_Result result;
1550         ItemPointerData update_ctid;
1551         TransactionId update_xmax;
1552
1553         /*
1554          * abort the operation if not running transactions
1555          */
1556         if (IsBootstrapProcessingMode())
1557                 elog(ERROR, "cannot UPDATE during bootstrap");
1558
1559         /*
1560          * get the heap tuple out of the tuple table slot, making sure we have a
1561          * writable copy
1562          */
1563         tuple = ExecMaterializeSlot(slot);
1564
1565         /*
1566          * get information on the (current) result relation
1567          */
1568         resultRelInfo = estate->es_result_relation_info;
1569         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1570
1571         /* BEFORE ROW UPDATE Triggers */
1572         if (resultRelInfo->ri_TrigDesc &&
1573                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1574         {
1575                 HeapTuple       newtuple;
1576
1577                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1578                                                                                 tupleid, tuple,
1579                                                                                 estate->es_snapshot->curcid);
1580
1581                 if (newtuple == NULL)   /* "do nothing" */
1582                         return;
1583
1584                 if (newtuple != tuple)  /* modified by Trigger(s) */
1585                 {
1586                         /*
1587                          * Put the modified tuple into a slot for convenience of routines
1588                          * below.  We assume the tuple was allocated in per-tuple memory
1589                          * context, and therefore will go away by itself. The tuple table
1590                          * slot should not try to clear it.
1591                          */
1592                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1593
1594                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1595                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1596                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1597                         slot = newslot;
1598                         tuple = newtuple;
1599                 }
1600         }
1601
1602         /*
1603          * Check the constraints of the tuple
1604          *
1605          * If we generate a new candidate tuple after EvalPlanQual testing, we
1606          * must loop back here and recheck constraints.  (We don't need to redo
1607          * triggers, however.  If there are any BEFORE triggers then trigger.c
1608          * will have done heap_lock_tuple to lock the correct tuple, so there's no
1609          * need to do them again.)
1610          */
1611 lreplace:;
1612         if (resultRelationDesc->rd_att->constr)
1613                 ExecConstraints(resultRelInfo, slot, estate);
1614
1615         /*
1616          * replace the heap tuple
1617          *
1618          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
1619          * the row to be updated is visible to that snapshot, and throw a can't-
1620          * serialize error if not.      This is a special-case behavior needed for
1621          * referential integrity updates in serializable transactions.
1622          */
1623         result = heap_update(resultRelationDesc, tupleid, tuple,
1624                                                  &update_ctid, &update_xmax,
1625                                                  estate->es_snapshot->curcid,
1626                                                  estate->es_crosscheck_snapshot,
1627                                                  true /* wait for commit */ );
1628         switch (result)
1629         {
1630                 case HeapTupleSelfUpdated:
1631                         /* already deleted by self; nothing to do */
1632                         return;
1633
1634                 case HeapTupleMayBeUpdated:
1635                         break;
1636
1637                 case HeapTupleUpdated:
1638                         if (IsXactIsoLevelSerializable)
1639                                 ereport(ERROR,
1640                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1641                                                  errmsg("could not serialize access due to concurrent update")));
1642                         else if (!ItemPointerEquals(tupleid, &update_ctid))
1643                         {
1644                                 TupleTableSlot *epqslot;
1645
1646                                 epqslot = EvalPlanQual(estate,
1647                                                                            resultRelInfo->ri_RangeTableIndex,
1648                                                                            &update_ctid,
1649                                                                            update_xmax,
1650                                                                            estate->es_snapshot->curcid);
1651                                 if (!TupIsNull(epqslot))
1652                                 {
1653                                         *tupleid = update_ctid;
1654                                         slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
1655                                         tuple = ExecMaterializeSlot(slot);
1656                                         goto lreplace;
1657                                 }
1658                         }
1659                         /* tuple already deleted; nothing to do */
1660                         return;
1661
1662                 default:
1663                         elog(ERROR, "unrecognized heap_update status: %u", result);
1664                         return;
1665         }
1666
1667         IncrReplaced();
1668         (estate->es_processed)++;
1669
1670         /*
1671          * Note: instead of having to update the old index tuples associated with
1672          * the heap tuple, all we do is form and insert new index tuples. This is
1673          * because UPDATEs are actually DELETEs and INSERTs, and index tuple
1674          * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
1675          * here is insert new index tuples.  -cim 9/27/89
1676          */
1677
1678         /*
1679          * insert index entries for tuple
1680          *
1681          * Note: heap_update returns the tid (location) of the new tuple in the
1682          * t_self field.
1683          */
1684         if (resultRelInfo->ri_NumIndices > 0)
1685                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1686
1687         /* AFTER ROW UPDATE Triggers */
1688         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1689
1690         /* Process RETURNING if present */
1691         if (resultRelInfo->ri_projectReturning)
1692                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1693                                                          slot, planSlot, dest);
1694 }
1695
1696 /*
1697  * ExecRelCheck --- check that tuple meets constraints for result relation
1698  */
1699 static const char *
1700 ExecRelCheck(ResultRelInfo *resultRelInfo,
1701                          TupleTableSlot *slot, EState *estate)
1702 {
1703         Relation        rel = resultRelInfo->ri_RelationDesc;
1704         int                     ncheck = rel->rd_att->constr->num_check;
1705         ConstrCheck *check = rel->rd_att->constr->check;
1706         ExprContext *econtext;
1707         MemoryContext oldContext;
1708         List       *qual;
1709         int                     i;
1710
1711         /*
1712          * If first time through for this result relation, build expression
1713          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1714          * memory context so they'll survive throughout the query.
1715          */
1716         if (resultRelInfo->ri_ConstraintExprs == NULL)
1717         {
1718                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1719                 resultRelInfo->ri_ConstraintExprs =
1720                         (List **) palloc(ncheck * sizeof(List *));
1721                 for (i = 0; i < ncheck; i++)
1722                 {
1723                         /* ExecQual wants implicit-AND form */
1724                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1725                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1726                                 ExecPrepareExpr((Expr *) qual, estate);
1727                 }
1728                 MemoryContextSwitchTo(oldContext);
1729         }
1730
1731         /*
1732          * We will use the EState's per-tuple context for evaluating constraint
1733          * expressions (creating it if it's not already there).
1734          */
1735         econtext = GetPerTupleExprContext(estate);
1736
1737         /* Arrange for econtext's scan tuple to be the tuple under test */
1738         econtext->ecxt_scantuple = slot;
1739
1740         /* And evaluate the constraints */
1741         for (i = 0; i < ncheck; i++)
1742         {
1743                 qual = resultRelInfo->ri_ConstraintExprs[i];
1744
1745                 /*
1746                  * NOTE: SQL92 specifies that a NULL result from a constraint
1747                  * expression is not to be treated as a failure.  Therefore, tell
1748                  * ExecQual to return TRUE for NULL.
1749                  */
1750                 if (!ExecQual(qual, econtext, true))
1751                         return check[i].ccname;
1752         }
1753
1754         /* NULL result means no error */
1755         return NULL;
1756 }
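/*
 * A self-contained sketch of the SQL92 rule applied above: a CHECK
 * constraint is violated only when its expression yields FALSE, so a
 * NULL result counts as a pass.  ToyTriBool and ToyCheck are
 * illustrative toys, not backend types; the block is excluded from
 * compilation.
 */
#if 0
#include <stddef.h>

typedef enum
{
	TOY_FALSE,
	TOY_TRUE,
	TOY_NULL					/* "unknown" in SQL terms */
} ToyTriBool;

typedef struct ToyCheck
{
	const char *name;			/* constraint name for the error message */
	ToyTriBool	(*eval) (const void *row);	/* three-valued predicate */
} ToyCheck;

/* Return the name of the first violated constraint, or NULL if the row
 * passes; note that TOY_NULL is deliberately not a violation. */
static const char *
toy_rel_check(const ToyCheck *checks, int ncheck, const void *row)
{
	int			i;

	for (i = 0; i < ncheck; i++)
	{
		if (checks[i].eval(row) == TOY_FALSE)
			return checks[i].name;
	}
	return NULL;
}
#endif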
1757
1758 void
1759 ExecConstraints(ResultRelInfo *resultRelInfo,
1760                                 TupleTableSlot *slot, EState *estate)
1761 {
1762         Relation        rel = resultRelInfo->ri_RelationDesc;
1763         TupleConstr *constr = rel->rd_att->constr;
1764
1765         Assert(constr);
1766
1767         if (constr->has_not_null)
1768         {
1769                 int                     natts = rel->rd_att->natts;
1770                 int                     attrChk;
1771
1772                 for (attrChk = 1; attrChk <= natts; attrChk++)
1773                 {
1774                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1775                                 slot_attisnull(slot, attrChk))
1776                                 ereport(ERROR,
1777                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1778                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1779                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1780                 }
1781         }
1782
1783         if (constr->num_check > 0)
1784         {
1785                 const char *failed;
1786
1787                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1788                         ereport(ERROR,
1789                                         (errcode(ERRCODE_CHECK_VIOLATION),
1790                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1791                                                         RelationGetRelationName(rel), failed)));
1792         }
1793 }
1794
1795 /*
1796  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
1797  *
1798  * projectReturning: RETURNING projection info for current result rel
1799  * tupleSlot: slot holding tuple actually inserted/updated/deleted
1800  * planSlot: slot holding tuple returned by top plan node
1801  * dest: where to send the output
1802  */
1803 static void
1804 ExecProcessReturning(ProjectionInfo *projectReturning,
1805                                          TupleTableSlot *tupleSlot,
1806                                          TupleTableSlot *planSlot,
1807                                          DestReceiver *dest)
1808 {
1809         ExprContext *econtext = projectReturning->pi_exprContext;
1810         TupleTableSlot *retSlot;
1811
1812         /*
1813          * Reset per-tuple memory context to free any expression evaluation
1814          * storage allocated in the previous cycle.
1815          */
1816         ResetExprContext(econtext);
1817
1818         /* Make tuple and any needed join variables available to ExecProject */
1819         econtext->ecxt_scantuple = tupleSlot;
1820         econtext->ecxt_outertuple = planSlot;
1821
1822         /* Compute the RETURNING expressions */
1823         retSlot = ExecProject(projectReturning, NULL);
1824
1825         /* Send to dest */
1826         (*dest->receiveSlot) (retSlot, dest);
1827
1828         ExecClearTuple(retSlot);
1829 }
1830
1831 /*
1832  * Check a modified tuple to see if we want to process its updated version
1833  * under READ COMMITTED rules.
1834  *
1835  * See backend/executor/README for some info about how this works.
1836  *
1837  *      estate - executor state data
1838  *      rti - rangetable index of table containing tuple
1839  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1840  *      priorXmax - t_xmax from the outdated tuple
1841  *      curCid - command ID of current command of my transaction
1842  *
1843  * *tid is also an output parameter: it's modified to hold the TID of the
1844  * latest version of the tuple (note this may be changed even on failure)
1845  *
1846  * Returns a slot containing the new candidate update/delete tuple, or
1847  * NULL if we determine we shouldn't process the row.
1848  */
1849 TupleTableSlot *
1850 EvalPlanQual(EState *estate, Index rti,
1851                          ItemPointer tid, TransactionId priorXmax, CommandId curCid)
1852 {
1853         evalPlanQual *epq;
1854         EState     *epqstate;
1855         Relation        relation;
1856         HeapTupleData tuple;
1857         HeapTuple       copyTuple = NULL;
1858         bool            endNode;
1859
1860         Assert(rti != 0);
1861
1862         /*
1863          * find relation containing target tuple
1864          */
1865         if (estate->es_result_relation_info != NULL &&
1866                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1867                 relation = estate->es_result_relation_info->ri_RelationDesc;
1868         else
1869         {
1870                 ListCell   *l;
1871
1872                 relation = NULL;
1873                 foreach(l, estate->es_rowMarks)
1874                 {
1875                         if (((ExecRowMark *) lfirst(l))->rti == rti)
1876                         {
1877                                 relation = ((ExecRowMark *) lfirst(l))->relation;
1878                                 break;
1879                         }
1880                 }
1881                 if (relation == NULL)
1882                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1883         }
1884
1885         /*
1886          * fetch tid tuple
1887          *
1888          * Loop here to deal with updated or busy tuples
1889          */
1890         tuple.t_self = *tid;
1891         for (;;)
1892         {
1893                 Buffer          buffer;
1894
1895                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
1896                 {
1897                         /*
1898                          * If xmin isn't what we're expecting, the slot must have been
1899                          * recycled and reused for an unrelated tuple.  This implies that
1900                          * the latest version of the row was deleted, so we need do
1901                          * nothing.  (Should be safe to examine xmin without getting
1902                          * buffer's content lock, since xmin never changes in an existing
1903                          * tuple.)
1904                          */
1905                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1906                                                                          priorXmax))
1907                         {
1908                                 ReleaseBuffer(buffer);
1909                                 return NULL;
1910                         }
1911
1912                         /* otherwise xmin should not be dirty... */
1913                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1914                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1915
1916                         /*
1917                          * If tuple is being updated by other transaction then we have to
1918                          * wait for its commit/abort.
1919                          */
1920                         if (TransactionIdIsValid(SnapshotDirty->xmax))
1921                         {
1922                                 ReleaseBuffer(buffer);
1923                                 XactLockTableWait(SnapshotDirty->xmax);
1924                                 continue;               /* loop back to repeat heap_fetch */
1925                         }
1926
1927                         /*
1928                          * If tuple was inserted by our own transaction, we have to check
1929                          * cmin against curCid: cmin >= curCid means our command cannot
1930                          * see the tuple, so we should ignore it.  Without this we are
1931                          * open to the "Halloween problem" of indefinitely re-updating the
1932                          * same tuple.  (We need not check cmax because
1933                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1934                          * transaction dead, regardless of cmax.)  We just checked that
1935                          * priorXmax == xmin, so we can test that variable instead of
1936                          * doing HeapTupleHeaderGetXmin again.
1937                          */
1938                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1939                                 HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
1940                         {
1941                                 ReleaseBuffer(buffer);
1942                                 return NULL;
1943                         }
1944
1945                         /*
1946                          * We got tuple - now copy it for use by recheck query.
1947                          */
1948                         copyTuple = heap_copytuple(&tuple);
1949                         ReleaseBuffer(buffer);
1950                         break;
1951                 }
1952
1953                 /*
1954                  * If the referenced slot was actually empty, the latest version of
1955                  * the row must have been deleted, so we need do nothing.
1956                  */
1957                 if (tuple.t_data == NULL)
1958                 {
1959                         ReleaseBuffer(buffer);
1960                         return NULL;
1961                 }
1962
1963                 /*
1964                  * As above, if xmin isn't what we're expecting, do nothing.
1965                  */
1966                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1967                                                                  priorXmax))
1968                 {
1969                         ReleaseBuffer(buffer);
1970                         return NULL;
1971                 }
1972
1973                 /*
1974                  * If we get here, the tuple was found but failed SnapshotDirty.
1975                  * Assuming the xmin is either a committed xact or our own xact (as it
1976                  * certainly should be if we're trying to modify the tuple), this must
1977                  * mean that the row was updated or deleted by either a committed xact
1978                  * or our own xact.  If it was deleted, we can ignore it; if it was
1979                  * updated then chain up to the next version and repeat the whole
1980                  * test.
1981                  *
1982                  * As above, it should be safe to examine xmax and t_ctid without the
1983                  * buffer content lock, because they can't be changing.
1984                  */
1985                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1986                 {
1987                         /* deleted, so forget about it */
1988                         ReleaseBuffer(buffer);
1989                         return NULL;
1990                 }
1991
1992                 /* updated, so look at the updated row */
1993                 tuple.t_self = tuple.t_data->t_ctid;
1994                 /* updated row should have xmin matching this xmax */
1995                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1996                 ReleaseBuffer(buffer);
1997                 /* loop back to fetch next in chain */
1998         }
1999
2000         /*
2001          * For UPDATE/DELETE we have to return tid of actual row we're executing
2002          * PQ for.
2003          */
2004         *tid = tuple.t_self;
2005
2006         /*
2007          * Need to run a recheck subquery.      Find or create a PQ stack entry.
2008          */
2009         epq = estate->es_evalPlanQual;
2010         endNode = true;
2011
2012         if (epq != NULL && epq->rti == 0)
2013         {
2014                 /* Top PQ stack entry is idle, so re-use it */
2015                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
2016                 epq->rti = rti;
2017                 endNode = false;
2018         }
2019
2020         /*
2021          * If this is a request for a different RTE, call it Ra, we have to check
2022          * whether PlanQual was already requested for Ra.  If so, Ra's row was
2023          * updated again, and we must restart the old execution for Ra, discarding
2024          * everything done since Ra was suspended.
2025          */
2026         if (epq != NULL && epq->rti != rti &&
2027                 epq->estate->es_evTuple[rti - 1] != NULL)
2028         {
2029                 do
2030                 {
2031                         evalPlanQual *oldepq;
2032
2033                         /* stop execution */
2034                         EvalPlanQualStop(epq);
2035                         /* pop previous PlanQual from the stack */
2036                         oldepq = epq->next;
2037                         Assert(oldepq && oldepq->rti != 0);
2038                         /* push current PQ to freePQ stack */
2039                         oldepq->free = epq;
2040                         epq = oldepq;
2041                         estate->es_evalPlanQual = epq;
2042                 } while (epq->rti != rti);
2043         }
2044
2045         /*
2046          * If the request is for another RTE, we have to suspend execution of the
2047          * current PlanQual and start execution of a new one.
2048          */
2049         if (epq == NULL || epq->rti != rti)
2050         {
2051                 /* try to reuse plan used previously */
2052                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2053
2054                 if (newepq == NULL)             /* first call or freePQ stack is empty */
2055                 {
2056                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2057                         newepq->free = NULL;
2058                         newepq->estate = NULL;
2059                         newepq->planstate = NULL;
2060                 }
2061                 else
2062                 {
2063                         /* recycle previously used PlanQual */
2064                         Assert(newepq->estate == NULL);
2065                         epq->free = NULL;
2066                 }
2067                 /* push current PQ to the stack */
2068                 newepq->next = epq;
2069                 epq = newepq;
2070                 estate->es_evalPlanQual = epq;
2071                 epq->rti = rti;
2072                 endNode = false;
2073         }
2074
2075         Assert(epq->rti == rti);
2076
2077         /*
2078          * OK, this is a request for the same RTE.  Unfortunately we still have to
2079          * end and restart execution of the plan, because ExecReScan wouldn't
2080          * ensure that upper plan nodes would reset themselves.  We could make
2081          * that work if insertion of the target tuple were integrated with the
2082          * Param mechanism somehow, so that the upper plan nodes know that their
2083          * children's outputs have changed.
2084          *
2085          * Note that the stack of free evalPlanQual nodes is quite useless at the
2086          * moment, since it only saves us from pallocing/releasing the
2087          * evalPlanQual nodes themselves.  But it will be useful once we implement
2088          * ReScan instead of end/restart for re-using PlanQual nodes.
2089          */
2090         if (endNode)
2091         {
2092                 /* stop execution */
2093                 EvalPlanQualStop(epq);
2094         }
2095
2096         /*
2097          * Initialize new recheck query.
2098          *
2099          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2100          * instead copy down changeable state from the top plan (including
2101          * es_result_relation_info, es_junkFilter) and reset locally changeable
2102          * state in the epq (including es_param_exec_vals, es_evTupleNull).
2103          */
2104         EvalPlanQualStart(epq, estate, epq->next);
2105
2106         /*
2107          * free the old RTE's tuple, if any, and store the target tuple where the
2108          * relation's scan node will see it
2109          */
2110         epqstate = epq->estate;
2111         if (epqstate->es_evTuple[rti - 1] != NULL)
2112                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2113         epqstate->es_evTuple[rti - 1] = copyTuple;
2114
2115         return EvalPlanQualNext(estate);
2116 }
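/*
 * A toy model of the t_ctid chain walk performed above: each superseded
 * row version points at its successor, and the walk is trusted only
 * while the successor's xmin matches the previous version's xmax
 * (otherwise the slot was recycled for an unrelated tuple).  ToyVersion
 * and toy_follow_chain are illustrative stand-ins for heap tuple
 * headers and heap_fetch; the block is excluded from compilation.
 */
#if 0
typedef struct ToyVersion
{
	unsigned	xmin;			/* inserting transaction */
	unsigned	xmax;			/* updating/deleting xact, 0 if live */
	int			next;			/* t_ctid analogue: successor index, -1 = none */
} ToyVersion;

/* Follow the update chain from 'start'; return the index of the latest
 * live version, or -1 if the chain is broken or the row was deleted. */
static int
toy_follow_chain(const ToyVersion *heap, int start, unsigned priorXmax)
{
	int			cur = start;

	for (;;)
	{
		if (heap[cur].xmin != priorXmax)
			return -1;			/* slot recycled; latest version is gone */
		if (heap[cur].xmax == 0)
			return cur;			/* live tuple: this is the version to use */
		if (heap[cur].next == -1)
			return -1;			/* deleted outright; nothing to recheck */
		priorXmax = heap[cur].xmax;	/* successor's xmin must match this */
		cur = heap[cur].next;
	}
}
#endif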
2117
2118 static TupleTableSlot *
2119 EvalPlanQualNext(EState *estate)
2120 {
2121         evalPlanQual *epq = estate->es_evalPlanQual;
2122         MemoryContext oldcontext;
2123         TupleTableSlot *slot;
2124
2125         Assert(epq->rti != 0);
2126
2127 lpqnext:;
2128         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2129         slot = ExecProcNode(epq->planstate);
2130         MemoryContextSwitchTo(oldcontext);
2131
2132         /*
2133          * No more tuples for this PQ. Continue previous one.
2134          */
2135         if (TupIsNull(slot))
2136         {
2137                 evalPlanQual *oldepq;
2138
2139                 /* stop execution */
2140                 EvalPlanQualStop(epq);
2141                 /* pop old PQ from the stack */
2142                 oldepq = epq->next;
2143                 if (oldepq == NULL)
2144                 {
2145                         /* this is the first (oldest) PQ - mark as free */
2146                         epq->rti = 0;
2147                         estate->es_useEvalPlan = false;
2148                         /* and continue Query execution */
2149                         return NULL;
2150                 }
2151                 Assert(oldepq->rti != 0);
2152                 /* push current PQ to freePQ stack */
2153                 oldepq->free = epq;
2154                 epq = oldepq;
2155                 estate->es_evalPlanQual = epq;
2156                 goto lpqnext;
2157         }
2158
2159         return slot;
2160 }
2161
2162 static void
2163 EndEvalPlanQual(EState *estate)
2164 {
2165         evalPlanQual *epq = estate->es_evalPlanQual;
2166
2167         if (epq->rti == 0)                      /* plans already shut down */
2168         {
2169                 Assert(epq->next == NULL);
2170                 return;
2171         }
2172
2173         for (;;)
2174         {
2175                 evalPlanQual *oldepq;
2176
2177                 /* stop execution */
2178                 EvalPlanQualStop(epq);
2179                 /* pop old PQ from the stack */
2180                 oldepq = epq->next;
2181                 if (oldepq == NULL)
2182                 {
2183                         /* this is the first (oldest) PQ - mark as free */
2184                         epq->rti = 0;
2185                         estate->es_useEvalPlan = false;
2186                         break;
2187                 }
2188                 Assert(oldepq->rti != 0);
2189                 /* push current PQ to freePQ stack */
2190                 oldepq->free = epq;
2191                 epq = oldepq;
2192                 estate->es_evalPlanQual = epq;
2193         }
2194 }
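/*
 * A sketch of the two intrusive lists threaded through evalPlanQual
 * above: 'next' forms the stack of suspended rechecks, while 'free'
 * parks popped entries for reuse.  ToyPQ and the toy_pq_* functions are
 * illustrative, not backend APIs; the block is excluded from
 * compilation.
 */
#if 0
#include <stdlib.h>

typedef struct ToyPQ
{
	int			rti;			/* 0 means idle */
	struct ToyPQ *next;			/* stack of active entries */
	struct ToyPQ *free;			/* recycled entries */
} ToyPQ;

/* Push a new active entry for 'rti', reusing a recycled node when one
 * is available (mirrors the freePQ handling above). */
static ToyPQ *
toy_pq_push(ToyPQ *top, int rti)
{
	ToyPQ	   *newpq = (top != NULL) ? top->free : NULL;

	if (newpq == NULL)
		newpq = (ToyPQ *) calloc(1, sizeof(ToyPQ));
	else
		top->free = NULL;
	newpq->next = top;
	newpq->rti = rti;
	return newpq;
}

/* Pop the top entry, parking it on the new top's free list. */
static ToyPQ *
toy_pq_pop(ToyPQ *top)
{
	ToyPQ	   *oldtop = top->next;

	top->rti = 0;
	if (oldtop != NULL)
		oldtop->free = top;
	return oldtop;
}
#endif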
2195
2196 /*
2197  * Start execution of one level of PlanQual.
2198  *
2199  * This is a cut-down version of ExecutorStart(): we copy some state from
2200  * the top-level estate rather than initializing it fresh.
2201  */
2202 static void
2203 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2204 {
2205         EState     *epqstate;
2206         int                     rtsize;
2207         MemoryContext oldcontext;
2208
2209         rtsize = list_length(estate->es_range_table);
2210
2211         /*
2212          * It's tempting to think about using CreateSubExecutorState here, but
2213          * at present we can't because of memory leakage concerns ...
2214          */
2215         epq->estate = epqstate = CreateExecutorState();
2216
2217         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2218
2219         /*
2220          * The epqstates share the top query's copy of unchanging state such as
2221          * the snapshot, rangetable, result-rel info, and external Param info.
2222          * They need their own copies of local state, including a tuple table,
2223          * es_param_exec_vals, etc.
2224          */
2225         epqstate->es_direction = ForwardScanDirection;
2226         epqstate->es_snapshot = estate->es_snapshot;
2227         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2228         epqstate->es_range_table = estate->es_range_table;
2229         epqstate->es_result_relations = estate->es_result_relations;
2230         epqstate->es_num_result_relations = estate->es_num_result_relations;
2231         epqstate->es_result_relation_info = estate->es_result_relation_info;
2232         epqstate->es_junkFilter = estate->es_junkFilter;
2233         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2234         epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2235         epqstate->es_param_list_info = estate->es_param_list_info;
2236         if (estate->es_plannedstmt->nParamExec > 0)
2237                 epqstate->es_param_exec_vals = (ParamExecData *)
2238                         palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2239         epqstate->es_rowMarks = estate->es_rowMarks;
2240         epqstate->es_instrument = estate->es_instrument;
2241         epqstate->es_select_into = estate->es_select_into;
2242         epqstate->es_into_oids = estate->es_into_oids;
2243         epqstate->es_plannedstmt = estate->es_plannedstmt;
2244
2245         /*
2246          * Each epqstate must have its own es_evTupleNull state, but all the stack
2247          * entries share es_evTuple state.      This allows sub-rechecks to inherit
2248          * the value being examined by an outer recheck.
2249          */
2250         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2251         if (priorepq == NULL)
2252                 /* first PQ stack entry */
2253                 epqstate->es_evTuple = (HeapTuple *)
2254                         palloc0(rtsize * sizeof(HeapTuple));
2255         else
2256                 /* later stack entries share the same storage */
2257                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2258
2259         epqstate->es_tupleTable =
2260                 ExecCreateTupleTable(estate->es_tupleTable->size);
2261
2262         epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2263
2264         MemoryContextSwitchTo(oldcontext);
2265 }
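/*
 * A minimal sketch of the shared-versus-private split set up above: a
 * child state borrows pointers to immutable query-wide data but
 * allocates its own per-execution arrays.  ToyState and toy_child_state
 * are illustrative assumptions, not backend APIs; the block is excluded
 * from compilation.
 */
#if 0
#include <stdbool.h>
#include <stdlib.h>

typedef struct ToyState
{
	const int  *range_table;	/* shared, read-only */
	int			rtsize;
	bool	   *ev_null;		/* private to each state */
} ToyState;

static ToyState *
toy_child_state(const ToyState *parent)
{
	ToyState   *child = (ToyState *) calloc(1, sizeof(ToyState));

	child->range_table = parent->range_table;	/* share the pointer */
	child->rtsize = parent->rtsize;
	child->ev_null = (bool *) calloc(parent->rtsize, sizeof(bool));
	return child;				/* caller frees child and ev_null */
}
#endif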
2266
2267 /*
2268  * End execution of one level of PlanQual.
2269  *
2270  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2271  * of the normal cleanup, but *not* close result relations (which we are
2272  * just sharing from the outer query).
2273  */
2274 static void
2275 EvalPlanQualStop(evalPlanQual *epq)
2276 {
2277         EState     *epqstate = epq->estate;
2278         MemoryContext oldcontext;
2279
2280         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2281
2282         ExecEndNode(epq->planstate);
2283
2284         ExecDropTupleTable(epqstate->es_tupleTable, true);
2285         epqstate->es_tupleTable = NULL;
2286
2287         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2288         {
2289                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2290                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2291         }
2292
2293         MemoryContextSwitchTo(oldcontext);
2294
2295         FreeExecutorState(epqstate);
2296
2297         epq->estate = NULL;
2298         epq->planstate = NULL;
2299 }
2300
2301
2302 /*
2303  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2304  *
2305  * We implement SELECT INTO by diverting SELECT's normal output with
2306  * a specialized DestReceiver type.
2307  *
2308  * TODO: remove some of the INTO-specific cruft from EState, and keep
2309  * it in the DestReceiver instead.
2310  */
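/*
 * A self-contained sketch of the function-pointer receiver pattern used
 * below: a private struct embeds the public callback table as its first
 * member, so the generic pointer handed to each callback can be cast
 * back to the private type.  ToyDest and ToyReceiver are illustrative
 * names, not backend APIs; the block is excluded from compilation.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

typedef struct ToyDest
{
	void		(*receiveRow) (int row, struct ToyDest *self);
	void		(*destroy) (struct ToyDest *self);
} ToyDest;

typedef struct ToyReceiver
{
	ToyDest		pub;			/* public part MUST be first */
	long		rows_seen;		/* private state */
} ToyReceiver;

static void
toy_receive(int row, ToyDest *self)
{
	ToyReceiver *my = (ToyReceiver *) self;		/* safe: pub is first */

	my->rows_seen++;
	printf("diverted row %d\n", row);
}

static void
toy_destroy(ToyDest *self)
{
	free(self);
}

static ToyDest *
toy_create_receiver(void)
{
	ToyReceiver *self = (ToyReceiver *) calloc(1, sizeof(ToyReceiver));

	self->pub.receiveRow = toy_receive;
	self->pub.destroy = toy_destroy;
	return &self->pub;
}
#endif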
2311
2312 typedef struct
2313 {
2314         DestReceiver pub;                       /* publicly-known function pointers */
2315         EState     *estate;                     /* EState we are working with */
2316 } DR_intorel;
2317
2318 /*
2319  * OpenIntoRel --- actually create the SELECT INTO target relation
2320  *
2321  * This also replaces QueryDesc->dest with the special DestReceiver for
2322  * SELECT INTO.  We assume that the correct result tuple type has already
2323  * been placed in queryDesc->tupDesc.
2324  */
2325 static void
2326 OpenIntoRel(QueryDesc *queryDesc)
2327 {
2328         IntoClause *into = queryDesc->plannedstmt->into;
2329         EState     *estate = queryDesc->estate;
2330         Relation        intoRelationDesc;
2331         char       *intoName;
2332         Oid                     namespaceId;
2333         Oid                     tablespaceId;
2334         Datum           reloptions;
2335         AclResult       aclresult;
2336         Oid                     intoRelationId;
2337         TupleDesc       tupdesc;
2338         DR_intorel *myState;
2339
2340         Assert(into);
2341
2342         /*
2343          * Check consistency of arguments
2344          */
2345         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2346                 ereport(ERROR,
2347                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2348                                  errmsg("ON COMMIT can only be used on temporary tables")));
2349
2350         /*
2351          * Find namespace to create in, check its permissions
2352          */
2353         intoName = into->rel->relname;
2354         namespaceId = RangeVarGetCreationNamespace(into->rel);
2355
2356         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2357                                                                           ACL_CREATE);
2358         if (aclresult != ACLCHECK_OK)
2359                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2360                                            get_namespace_name(namespaceId));
2361
2362         /*
2363          * Select tablespace to use.  If not specified, use default_tablespace
2364          * (which may in turn default to database's default).
2365          */
2366         if (into->tableSpaceName)
2367         {
2368                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2369                 if (!OidIsValid(tablespaceId))
2370                         ereport(ERROR,
2371                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2372                                          errmsg("tablespace \"%s\" does not exist",
2373                                                         into->tableSpaceName)));
2374         }
2375         else if (into->rel->istemp)
2376         {
2377                 tablespaceId = GetTempTablespace();
2378         }
2379         else
2380         {
2381                 tablespaceId = GetDefaultTablespace();
2382                 /* note InvalidOid is OK in this case */
2383         }
2384
2385         /* Check permissions except when using the database's default space */
2386         if (OidIsValid(tablespaceId))
2387         {
2388                 AclResult       aclresult;
2389
2390                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2391                                                                                    ACL_CREATE);
2392
2393                 if (aclresult != ACLCHECK_OK)
2394                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2395                                                    get_tablespace_name(tablespaceId));
2396         }
2397
2398         /* Parse and validate any reloptions */
2399         reloptions = transformRelOptions((Datum) 0,
2400                                                                          into->options,
2401                                                                          true,
2402                                                                          false);
2403         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2404
2405         /* have to copy the actual tupdesc to get rid of any constraints */
2406         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2407
2408         /* Now we can actually create the new relation */
2409         intoRelationId = heap_create_with_catalog(intoName,
2410                                                                                           namespaceId,
2411                                                                                           tablespaceId,
2412                                                                                           InvalidOid,
2413                                                                                           GetUserId(),
2414                                                                                           tupdesc,
2415                                                                                           RELKIND_RELATION,
2416                                                                                           false,
2417                                                                                           true,
2418                                                                                           0,
2419                                                                                           into->onCommit,
2420                                                                                           reloptions,
2421                                                                                           allowSystemTableMods);
2422
2423         FreeTupleDesc(tupdesc);
2424
2425         /*
2426          * Advance command counter so that the newly-created relation's catalog
2427          * tuples will be visible to heap_open.
2428          */
2429         CommandCounterIncrement();
2430
2431         /*
2432          * If necessary, create a TOAST table for the INTO relation. Note that
2433          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2434          * the TOAST table will be visible for insertion.
2435          */
2436         AlterTableCreateToastTable(intoRelationId);
2437
2438         /*
2439          * And open the constructed table for writing.
2440          */
2441         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2442
2443         /* use_wal off requires rd_targblock be initially invalid */
2444         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2445
2446         /*
2447          * We can skip WAL-logging the insertions, unless PITR is in use.
2448          *
2449          * Note that for a non-temp INTO table, this is safe only because we know
2450          * that the catalog changes above will have been WAL-logged, and so
2451          * RecordTransactionCommit will think it needs to WAL-log the eventual
2452          * transaction commit.  Else the commit might be lost, even though all the
2453          * data is safely fsync'd ...
2454          */
2455         estate->es_into_relation_use_wal = XLogArchivingActive();
2456         estate->es_into_relation_descriptor = intoRelationDesc;
2457
2458         /*
2459          * Now replace the query's DestReceiver with one for SELECT INTO
2460          */
2461         queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
2462         myState = (DR_intorel *) queryDesc->dest;
2463         Assert(myState->pub.mydest == DestIntoRel);
2464         myState->estate = estate;
2465 }
2466
2467 /*
2468  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2469  */
2470 static void
2471 CloseIntoRel(QueryDesc *queryDesc)
2472 {
2473         EState     *estate = queryDesc->estate;
2474
2475         /* OpenIntoRel might never have gotten called */
2476         if (estate->es_into_relation_descriptor)
2477         {
2478                 /*
2479                  * If we skipped using WAL, and it's not a temp relation, we must
2480                  * force the relation down to disk before it's safe to commit the
2481                  * transaction.  This requires forcing out any dirty buffers and then
2482                  * doing a forced fsync.
2483                  */
2484                 if (!estate->es_into_relation_use_wal &&
2485                         !estate->es_into_relation_descriptor->rd_istemp)
2486                         heap_sync(estate->es_into_relation_descriptor);
2487
2488                 /* close rel, but keep lock until commit */
2489                 heap_close(estate->es_into_relation_descriptor, NoLock);
2490
2491                 estate->es_into_relation_descriptor = NULL;
2492         }
2493 }
2494
2495 /*
2496  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2497  *
2498  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2499  * we just leave the private fields empty here.  OpenIntoRel will
2500  * fill them in.
2501  */
2502 DestReceiver *
2503 CreateIntoRelDestReceiver(void)
2504 {
2505         DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
2506
2507         self->pub.receiveSlot = intorel_receive;
2508         self->pub.rStartup = intorel_startup;
2509         self->pub.rShutdown = intorel_shutdown;
2510         self->pub.rDestroy = intorel_destroy;
2511         self->pub.mydest = DestIntoRel;
2512
2513         self->estate = NULL;
2514
2515         return (DestReceiver *) self;
2516 }
2517
2518 /*
2519  * intorel_startup --- executor startup
2520  */
2521 static void
2522 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2523 {
2524         /* no-op */
2525 }
2526
2527 /*
2528  * intorel_receive --- receive one tuple
2529  */
2530 static void
2531 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2532 {
2533         DR_intorel *myState = (DR_intorel *) self;
2534         EState     *estate = myState->estate;
2535         HeapTuple       tuple;
2536
2537         tuple = ExecCopySlotTuple(slot);
2538
2539         heap_insert(estate->es_into_relation_descriptor,
2540                                 tuple,
2541                                 estate->es_snapshot->curcid,
2542                                 estate->es_into_relation_use_wal,
2543                                 false);                 /* never any point in using FSM */
2544
2545         /* We know this is a newly created relation, so there are no indexes */
2546
2547         heap_freetuple(tuple);
2548
2549         IncrAppended();
2550 }
2551
2552 /*
2553  * intorel_shutdown --- executor end
2554  */
2555 static void
2556 intorel_shutdown(DestReceiver *self)
2557 {
2558         /* no-op */
2559 }
2560
2561 /*
2562  * intorel_destroy --- release DestReceiver object
2563  */
2564 static void
2565 intorel_destroy(DestReceiver *self)
2566 {
2567         pfree(self);
2568 }