1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorFinish()
10  *      ExecutorEnd()
11  *
12  *      These four procedures are the external interface to the executor.
13  *      In each case, the query descriptor is required as an argument.
14  *
15  *      ExecutorStart must be called at the beginning of execution of any
16  *      query plan and ExecutorEnd must always be called at the end of
17  *      execution of a plan (unless it is aborted due to error).
18  *
19  *      ExecutorRun accepts direction and count arguments that specify whether
20  *      the plan is to be executed forwards or backwards, and for how many tuples.
21  *      In some cases ExecutorRun may be called multiple times to process all
22  *      the tuples for a plan.  It is also acceptable to stop short of executing
23  *      the whole plan (but only if it is a SELECT).
24  *
25  *      ExecutorFinish must be called after the final ExecutorRun call and
26  *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *      which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *        src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
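/*
 * A minimal sketch of the calling sequence described above, assuming a
 * QueryDesc already built with CreateQueryDesc and no special executor
 * flags; count = 0 asks for execution to completion, and FreeQueryDesc
 * releases the descriptor once the executor has been shut down:
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorFinish(queryDesc);
 *		ExecutorEnd(queryDesc);
 *		FreeQueryDesc(queryDesc);
 */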
38 #include "postgres.h"
39
40 #include "access/reloptions.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/heap.h"
45 #include "catalog/namespace.h"
46 #include "catalog/toasting.h"
47 #include "commands/tablespace.h"
48 #include "commands/trigger.h"
49 #include "executor/execdebug.h"
50 #include "mb/pg_wchar.h"
51 #include "miscadmin.h"
52 #include "optimizer/clauses.h"
53 #include "parser/parse_clause.h"
54 #include "parser/parsetree.h"
55 #include "storage/bufmgr.h"
56 #include "storage/lmgr.h"
57 #include "storage/smgr.h"
58 #include "tcop/utility.h"
59 #include "utils/acl.h"
60 #include "utils/builtins.h"
61 #include "utils/lsyscache.h"
62 #include "utils/memutils.h"
63 #include "utils/snapmgr.h"
64 #include "utils/tqual.h"
65
66
67 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
68 ExecutorStart_hook_type ExecutorStart_hook = NULL;
69 ExecutorRun_hook_type ExecutorRun_hook = NULL;
70 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
71 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
72
73 /* Hook for plugin to get control in ExecCheckRTPerms() */
74 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
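/*
 * A hedged sketch of how a loadable module might install one of these hooks;
 * the module-side names (prev_ExecutorRun, my_ExecutorRun) are hypothetical,
 * but the save-and-chain pattern, falling back to the standard_ routine, is
 * the usual convention:
 *
 *		static ExecutorRun_hook_type prev_ExecutorRun = NULL;
 *
 *		static void
 *		my_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
 *		{
 *			if (prev_ExecutorRun)
 *				prev_ExecutorRun(queryDesc, direction, count);
 *			else
 *				standard_ExecutorRun(queryDesc, direction, count);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorRun = ExecutorRun_hook;
 *			ExecutorRun_hook = my_ExecutorRun;
 *		}
 */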
75
76 /* decls for local routines only used within this module */
77 static void InitPlan(QueryDesc *queryDesc, int eflags);
78 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
79 static void ExecPostprocessPlan(EState *estate);
80 static void ExecEndPlan(PlanState *planstate, EState *estate);
81 static void ExecutePlan(EState *estate, PlanState *planstate,
82                         CmdType operation,
83                         bool sendTuples,
84                         long numberTuples,
85                         ScanDirection direction,
86                         DestReceiver *dest);
87 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
88 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
89 static char *ExecBuildSlotValueDescription(TupleTableSlot *slot,
90                                                                                    int maxfieldlen);
91 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
92                                   Plan *planTree);
93 static void OpenIntoRel(QueryDesc *queryDesc);
94 static void CloseIntoRel(QueryDesc *queryDesc);
95 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
96 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
97 static void intorel_shutdown(DestReceiver *self);
98 static void intorel_destroy(DestReceiver *self);
99
100 /* end of local decls */
101
102
103 /* ----------------------------------------------------------------
104  *              ExecutorStart
105  *
106  *              This routine must be called at the beginning of any execution of any
107  *              query plan
108  *
109  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
110  * only because some places use QueryDescs for utility commands).  The tupDesc
111  * field of the QueryDesc is filled in to describe the tuples that will be
112  * returned, and the internal fields (estate and planstate) are set up.
113  *
114  * eflags contains flag bits as described in executor.h.
115  *
116  * NB: the CurrentMemoryContext when this is called will become the parent
117  * of the per-query context used for this Executor invocation.
118  *
119  * We provide a function hook variable that lets loadable plugins
120  * get control when ExecutorStart is called.  Such a plugin would
121  * normally call standard_ExecutorStart().
122  *
123  * ----------------------------------------------------------------
124  */
125 void
126 ExecutorStart(QueryDesc *queryDesc, int eflags)
127 {
128         if (ExecutorStart_hook)
129                 (*ExecutorStart_hook) (queryDesc, eflags);
130         else
131                 standard_ExecutorStart(queryDesc, eflags);
132 }
133
134 void
135 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
136 {
137         EState     *estate;
138         MemoryContext oldcontext;
139
140         /* sanity checks: queryDesc must not be started already */
141         Assert(queryDesc != NULL);
142         Assert(queryDesc->estate == NULL);
143
144         /*
145          * If the transaction is read-only, we need to check if any writes are
146          * planned to non-temporary tables.  EXPLAIN is considered read-only.
147          */
148         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
149                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
150
151         /*
152          * Build EState, switch into per-query memory context for startup.
153          */
154         estate = CreateExecutorState();
155         queryDesc->estate = estate;
156
157         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
158
159         /*
160          * Fill in external parameters, if any, from queryDesc; and allocate
161          * workspace for internal parameters
162          */
163         estate->es_param_list_info = queryDesc->params;
164
165         if (queryDesc->plannedstmt->nParamExec > 0)
166                 estate->es_param_exec_vals = (ParamExecData *)
167                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
168
169         /*
170          * If non-read-only query, set the command ID to mark output tuples with
171          */
172         switch (queryDesc->operation)
173         {
174                 case CMD_SELECT:
175
176                         /*
177                          * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
178                          * mark tuples
179                          */
180                         if (queryDesc->plannedstmt->intoClause != NULL ||
181                                 queryDesc->plannedstmt->rowMarks != NIL ||
182                                 queryDesc->plannedstmt->hasModifyingCTE)
183                                 estate->es_output_cid = GetCurrentCommandId(true);
184
185                         /*
186                          * A SELECT without modifying CTEs can't possibly queue triggers,
187                          * so force skip-triggers mode. This is just a marginal efficiency
188                          * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
189                          * all that expensive, but we might as well do it.
190                          */
191                         if (!queryDesc->plannedstmt->hasModifyingCTE)
192                                 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
193                         break;
194
195                 case CMD_INSERT:
196                 case CMD_DELETE:
197                 case CMD_UPDATE:
198                         estate->es_output_cid = GetCurrentCommandId(true);
199                         break;
200
201                 default:
202                         elog(ERROR, "unrecognized operation code: %d",
203                                  (int) queryDesc->operation);
204                         break;
205         }
206
207         /*
208          * Copy other important information into the EState
209          */
210         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
211         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
212         estate->es_top_eflags = eflags;
213         estate->es_instrument = queryDesc->instrument_options;
214
215         /*
216          * Initialize the plan state tree
217          */
218         InitPlan(queryDesc, eflags);
219
220         /*
221          * Set up an AFTER-trigger statement context, unless told not to, or
222          * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
223          */
224         if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
225                 AfterTriggerBeginQuery();
226
227         MemoryContextSwitchTo(oldcontext);
228 }
229
230 /* ----------------------------------------------------------------
231  *              ExecutorRun
232  *
233  *              This is the main routine of the executor module. It accepts
234  *              the query descriptor from the traffic cop and executes the
235  *              query plan.
236  *
237  *              ExecutorStart must have been called already.
238  *
239  *              If direction is NoMovementScanDirection then nothing is done
240  *              except to start up/shut down the destination.  Otherwise,
241  *              we retrieve up to 'count' tuples in the specified direction.
242  *
243  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
244  *              completion.
245  *
246  *              There is no return value, but output tuples (if any) are sent to
247  *              the destination receiver specified in the QueryDesc; and the number
248  *              of tuples processed at the top level can be found in
249  *              estate->es_processed.
250  *
251  *              We provide a function hook variable that lets loadable plugins
252  *              get control when ExecutorRun is called.  Such a plugin would
253  *              normally call standard_ExecutorRun().
254  *
255  * ----------------------------------------------------------------
256  */
257 void
258 ExecutorRun(QueryDesc *queryDesc,
259                         ScanDirection direction, long count)
260 {
261         if (ExecutorRun_hook)
262                 (*ExecutorRun_hook) (queryDesc, direction, count);
263         else
264                 standard_ExecutorRun(queryDesc, direction, count);
265 }
266
267 void
268 standard_ExecutorRun(QueryDesc *queryDesc,
269                                          ScanDirection direction, long count)
270 {
271         EState     *estate;
272         CmdType         operation;
273         DestReceiver *dest;
274         bool            sendTuples;
275         MemoryContext oldcontext;
276
277         /* sanity checks */
278         Assert(queryDesc != NULL);
279
280         estate = queryDesc->estate;
281
282         Assert(estate != NULL);
283         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
284
285         /*
286          * Switch into per-query memory context
287          */
288         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
289
290         /* Allow instrumentation of Executor overall runtime */
291         if (queryDesc->totaltime)
292                 InstrStartNode(queryDesc->totaltime);
293
294         /*
295          * extract information from the query descriptor.
296          */
297         operation = queryDesc->operation;
298         dest = queryDesc->dest;
299
300         /*
301          * startup tuple receiver, if we will be emitting tuples
302          */
303         estate->es_processed = 0;
304         estate->es_lastoid = InvalidOid;
305
306         sendTuples = (operation == CMD_SELECT ||
307                                   queryDesc->plannedstmt->hasReturning);
308
309         if (sendTuples)
310                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
311
312         /*
313          * if it's CREATE TABLE AS ... WITH NO DATA, skip plan execution
314          */
315         if (estate->es_select_into &&
316                 queryDesc->plannedstmt->intoClause->skipData)
317                 direction = NoMovementScanDirection;
318
319         /*
320          * run plan
321          */
322         if (!ScanDirectionIsNoMovement(direction))
323                 ExecutePlan(estate,
324                                         queryDesc->planstate,
325                                         operation,
326                                         sendTuples,
327                                         count,
328                                         direction,
329                                         dest);
330
331         /*
332          * shutdown tuple receiver, if we started it
333          */
334         if (sendTuples)
335                 (*dest->rShutdown) (dest);
336
337         if (queryDesc->totaltime)
338                 InstrStopNode(queryDesc->totaltime, estate->es_processed);
339
340         MemoryContextSwitchTo(oldcontext);
341 }
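/*
 * A simple sketch of the repeated-call usage mentioned in the header comment,
 * for a SELECT fetched in (hypothetical) batches of 100 tuples; because
 * standard_ExecutorRun resets es_processed on entry, a short batch indicates
 * that the plan has been exhausted:
 *
 *		for (;;)
 *		{
 *			ExecutorRun(queryDesc, ForwardScanDirection, 100L);
 *			if (queryDesc->estate->es_processed < 100)
 *				break;
 *		}
 */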
342
343 /* ----------------------------------------------------------------
344  *              ExecutorFinish
345  *
346  *              This routine must be called after the last ExecutorRun call.
347  *              It performs cleanup such as firing AFTER triggers.  It is
348  *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
349  *              include these actions in the total runtime.
350  *
351  *              We provide a function hook variable that lets loadable plugins
352  *              get control when ExecutorFinish is called.  Such a plugin would
353  *              normally call standard_ExecutorFinish().
354  *
355  * ----------------------------------------------------------------
356  */
357 void
358 ExecutorFinish(QueryDesc *queryDesc)
359 {
360         if (ExecutorFinish_hook)
361                 (*ExecutorFinish_hook) (queryDesc);
362         else
363                 standard_ExecutorFinish(queryDesc);
364 }
365
366 void
367 standard_ExecutorFinish(QueryDesc *queryDesc)
368 {
369         EState     *estate;
370         MemoryContext oldcontext;
371
372         /* sanity checks */
373         Assert(queryDesc != NULL);
374
375         estate = queryDesc->estate;
376
377         Assert(estate != NULL);
378         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
379
380         /* This should be run once and only once per Executor instance */
381         Assert(!estate->es_finished);
382
383         /* Switch into per-query memory context */
384         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
385
386         /* Allow instrumentation of Executor overall runtime */
387         if (queryDesc->totaltime)
388                 InstrStartNode(queryDesc->totaltime);
389
390         /* Run ModifyTable nodes to completion */
391         ExecPostprocessPlan(estate);
392
393         /* Execute queued AFTER triggers, unless told not to */
394         if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
395                 AfterTriggerEndQuery(estate);
396
397         if (queryDesc->totaltime)
398                 InstrStopNode(queryDesc->totaltime, 0);
399
400         MemoryContextSwitchTo(oldcontext);
401
402         estate->es_finished = true;
403 }
404
405 /* ----------------------------------------------------------------
406  *              ExecutorEnd
407  *
408  *              This routine must be called at the end of execution of any
409  *              query plan
410  *
411  *              We provide a function hook variable that lets loadable plugins
412  *              get control when ExecutorEnd is called.  Such a plugin would
413  *              normally call standard_ExecutorEnd().
414  *
415  * ----------------------------------------------------------------
416  */
417 void
418 ExecutorEnd(QueryDesc *queryDesc)
419 {
420         if (ExecutorEnd_hook)
421                 (*ExecutorEnd_hook) (queryDesc);
422         else
423                 standard_ExecutorEnd(queryDesc);
424 }
425
426 void
427 standard_ExecutorEnd(QueryDesc *queryDesc)
428 {
429         EState     *estate;
430         MemoryContext oldcontext;
431
432         /* sanity checks */
433         Assert(queryDesc != NULL);
434
435         estate = queryDesc->estate;
436
437         Assert(estate != NULL);
438
439         /*
440          * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
441          * Assert is needed because ExecutorFinish is new as of 9.1, and callers
442          * might forget to call it.
443          */
444         Assert(estate->es_finished ||
445                    (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
446
447         /*
448          * Switch into per-query memory context to run ExecEndPlan
449          */
450         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
451
452         ExecEndPlan(queryDesc->planstate, estate);
453
454         /*
455          * Close the SELECT INTO relation if any
456          */
457         if (estate->es_select_into)
458                 CloseIntoRel(queryDesc);
459
460         /* do away with our snapshots */
461         UnregisterSnapshot(estate->es_snapshot);
462         UnregisterSnapshot(estate->es_crosscheck_snapshot);
463
464         /*
465          * Must switch out of context before destroying it
466          */
467         MemoryContextSwitchTo(oldcontext);
468
469         /*
470          * Release EState and per-query memory context.  This should release
471          * everything the executor has allocated.
472          */
473         FreeExecutorState(estate);
474
475         /* Reset queryDesc fields that no longer point to anything */
476         queryDesc->tupDesc = NULL;
477         queryDesc->estate = NULL;
478         queryDesc->planstate = NULL;
479         queryDesc->totaltime = NULL;
480 }
481
482 /* ----------------------------------------------------------------
483  *              ExecutorRewind
484  *
485  *              This routine may be called on an open queryDesc to rewind it
486  *              to the start.
487  * ----------------------------------------------------------------
488  */
489 void
490 ExecutorRewind(QueryDesc *queryDesc)
491 {
492         EState     *estate;
493         MemoryContext oldcontext;
494
495         /* sanity checks */
496         Assert(queryDesc != NULL);
497
498         estate = queryDesc->estate;
499
500         Assert(estate != NULL);
501
502         /* It's probably not sensible to rescan updating queries */
503         Assert(queryDesc->operation == CMD_SELECT);
504
505         /*
506          * Switch into per-query memory context
507          */
508         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
509
510         /*
511          * rescan plan
512          */
513         ExecReScan(queryDesc->planstate);
514
515         MemoryContextSwitchTo(oldcontext);
516 }
517
518
519 /*
520  * ExecCheckRTPerms
521  *              Check access permissions for all relations listed in a range table.
522  *
523  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
524  *              error if ereport_on_violation is true, or simply returns false.
525  */
526 bool
527 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
528 {
529         ListCell   *l;
530         bool            result = true;
531
532         foreach(l, rangeTable)
533         {
534                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
535
536                 result = ExecCheckRTEPerms(rte);
537                 if (!result)
538                 {
539                         Assert(rte->rtekind == RTE_RELATION);
540                         if (ereport_on_violation)
541                                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
542                                                            get_rel_name(rte->relid));
543                         return false;
544                 }
545         }
546
547         if (ExecutorCheckPerms_hook)
548                 result = (*ExecutorCheckPerms_hook) (rangeTable,
549                                                                                          ereport_on_violation);
550         return result;
551 }
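/*
 * A sketch of what an ExecutorCheckPerms_hook implementation might look like
 * in a hypothetical extension; my_rte_is_allowed stands in for whatever
 * policy the extension enforces on each relation RTE:
 *
 *		static bool
 *		my_ExecutorCheckPerms(List *rangeTable, bool ereport_on_violation)
 *		{
 *			ListCell   *l;
 *
 *			foreach(l, rangeTable)
 *			{
 *				RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 *
 *				if (rte->rtekind != RTE_RELATION)
 *					continue;
 *				if (!my_rte_is_allowed(rte))
 *				{
 *					if (ereport_on_violation)
 *						ereport(ERROR,
 *								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 *								 errmsg("permission denied for relation \"%s\"",
 *										get_rel_name(rte->relid))));
 *					return false;
 *				}
 *			}
 *			return true;
 *		}
 */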
552
553 /*
554  * ExecCheckRTEPerms
555  *              Check access permissions for a single RTE.
556  */
557 static bool
558 ExecCheckRTEPerms(RangeTblEntry *rte)
559 {
560         AclMode         requiredPerms;
561         AclMode         relPerms;
562         AclMode         remainingPerms;
563         Oid                     relOid;
564         Oid                     userid;
565         Bitmapset  *tmpset;
566         int                     col;
567
568         /*
569          * Only plain-relation RTEs need to be checked here.  Function RTEs are
570          * checked by init_fcache when the function is prepared for execution.
571          * Join, subquery, and special RTEs need no checks.
572          */
573         if (rte->rtekind != RTE_RELATION)
574                 return true;
575
576         /*
577          * No work if requiredPerms is empty.
578          */
579         requiredPerms = rte->requiredPerms;
580         if (requiredPerms == 0)
581                 return true;
582
583         relOid = rte->relid;
584
585         /*
586          * userid to check as: current user unless we have a setuid indication.
587          *
588          * Note: GetUserId() is presently fast enough that there's no harm in
589          * calling it separately for each RTE.  If that stops being true, we could
590          * call it once in ExecCheckRTPerms and pass the userid down from there.
591          * But for now, no need for the extra clutter.
592          */
593         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
594
595         /*
596          * We must have *all* the requiredPerms bits, but some of the bits can be
597          * satisfied from column-level rather than relation-level permissions.
598          * First, remove any bits that are satisfied by relation permissions.
599          */
600         relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
601         remainingPerms = requiredPerms & ~relPerms;
602         if (remainingPerms != 0)
603         {
604                 /*
605                  * If we lack any permissions that exist only as relation permissions,
606                  * we can fail straight away.
607                  */
608                 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
609                         return false;
610
611                 /*
612                  * Check to see if we have the needed privileges at column level.
613                  *
614                  * Note: failures just report a table-level error; it would be nicer
615                  * to report a column-level error if we have some but not all of the
616                  * column privileges.
617                  */
618                 if (remainingPerms & ACL_SELECT)
619                 {
620                         /*
621                          * When the query doesn't explicitly reference any columns (for
622                          * example, SELECT COUNT(*) FROM table), allow the query if we
623                          * have SELECT on any column of the rel, as per SQL spec.
624                          */
625                         if (bms_is_empty(rte->selectedCols))
626                         {
627                                 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
628                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
629                                         return false;
630                         }
631
632                         tmpset = bms_copy(rte->selectedCols);
633                         while ((col = bms_first_member(tmpset)) >= 0)
634                         {
635                                 /* remove the column number offset */
636                                 col += FirstLowInvalidHeapAttributeNumber;
637                                 if (col == InvalidAttrNumber)
638                                 {
639                                         /* Whole-row reference, must have priv on all cols */
640                                         if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
641                                                                                                   ACLMASK_ALL) != ACLCHECK_OK)
642                                                 return false;
643                                 }
644                                 else
645                                 {
646                                         if (pg_attribute_aclcheck(relOid, col, userid,
647                                                                                           ACL_SELECT) != ACLCHECK_OK)
648                                                 return false;
649                                 }
650                         }
651                         bms_free(tmpset);
652                 }
653
654                 /*
655                  * Basically the same for the mod columns, with either INSERT or
656                  * UPDATE privilege as specified by remainingPerms.
657                  */
658                 remainingPerms &= ~ACL_SELECT;
659                 if (remainingPerms != 0)
660                 {
661                         /*
662                          * When the query doesn't explicitly change any columns, allow the
663                          * query if we have permission on any column of the rel.  This is
664                          * to handle SELECT FOR UPDATE as well as possible corner cases in
665                          * INSERT and UPDATE.
666                          */
667                         if (bms_is_empty(rte->modifiedCols))
668                         {
669                                 if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
670                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
671                                         return false;
672                         }
673
674                         tmpset = bms_copy(rte->modifiedCols);
675                         while ((col = bms_first_member(tmpset)) >= 0)
676                         {
677                                 /* remove the column number offset */
678                                 col += FirstLowInvalidHeapAttributeNumber;
679                                 if (col == InvalidAttrNumber)
680                                 {
681                                         /* whole-row reference can't happen here */
682                                         elog(ERROR, "whole-row update is not implemented");
683                                 }
684                                 else
685                                 {
686                                         if (pg_attribute_aclcheck(relOid, col, userid,
687                                                                                           remainingPerms) != ACLCHECK_OK)
688                                                 return false;
689                                 }
690                         }
691                         bms_free(tmpset);
692                 }
693         }
694         return true;
695 }
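/*
 * Worked example of the column-number offset used above: selectedCols and
 * modifiedCols store (attnum - FirstLowInvalidHeapAttributeNumber), which in
 * this version amounts to (attnum + 8).  So an ordinary column with attnum 3
 * appears in the bitmapset as 11, while a whole-row reference (attnum 0,
 * i.e. InvalidAttrNumber) appears as 8; adding
 * FirstLowInvalidHeapAttributeNumber back recovers the original attnum,
 * which is how the loops above tell the two cases apart.
 */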
696
697 /*
698  * Check that the query does not imply any writes to non-temp tables.
699  *
700  * Note: in a Hot Standby slave this would need to reject writes to temp
701  * tables as well; but an HS slave can't have created any temp tables
702  * in the first place, so no need to check that.
703  */
704 static void
705 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
706 {
707         ListCell   *l;
708
709         /*
710          * CREATE TABLE AS or SELECT INTO?
711          *
712          * XXX should we allow this if the destination is temp?  Considering that
713          * it would still require catalog changes, probably not.
714          */
715         if (plannedstmt->intoClause != NULL)
716                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
717
718         /* Fail if write permissions are requested on any non-temp table */
719         foreach(l, plannedstmt->rtable)
720         {
721                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
722
723                 if (rte->rtekind != RTE_RELATION)
724                         continue;
725
726                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
727                         continue;
728
729                 if (isTempNamespace(get_rel_namespace(rte->relid)))
730                         continue;
731
732                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
733         }
734 }
735
736
737 /* ----------------------------------------------------------------
738  *              InitPlan
739  *
740  *              Initializes the query plan: open files, allocate storage
741  *              and start up the rule manager
742  * ----------------------------------------------------------------
743  */
744 static void
745 InitPlan(QueryDesc *queryDesc, int eflags)
746 {
747         CmdType         operation = queryDesc->operation;
748         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
749         Plan       *plan = plannedstmt->planTree;
750         List       *rangeTable = plannedstmt->rtable;
751         EState     *estate = queryDesc->estate;
752         PlanState  *planstate;
753         TupleDesc       tupType;
754         ListCell   *l;
755         int                     i;
756
757         /*
758          * Do permissions checks
759          */
760         ExecCheckRTPerms(rangeTable, true);
761
762         /*
763          * initialize the node's execution state
764          */
765         estate->es_range_table = rangeTable;
766         estate->es_plannedstmt = plannedstmt;
767
768         /*
769          * initialize result relation stuff, and open/lock the result rels.
770          *
771          * We must do this before initializing the plan tree, else we might try to
772          * do a lock upgrade if a result rel is also a source rel.
773          */
774         if (plannedstmt->resultRelations)
775         {
776                 List       *resultRelations = plannedstmt->resultRelations;
777                 int                     numResultRelations = list_length(resultRelations);
778                 ResultRelInfo *resultRelInfos;
779                 ResultRelInfo *resultRelInfo;
780
781                 resultRelInfos = (ResultRelInfo *)
782                         palloc(numResultRelations * sizeof(ResultRelInfo));
783                 resultRelInfo = resultRelInfos;
784                 foreach(l, resultRelations)
785                 {
786                         Index           resultRelationIndex = lfirst_int(l);
787                         Oid                     resultRelationOid;
788                         Relation        resultRelation;
789
790                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
791                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
792                         InitResultRelInfo(resultRelInfo,
793                                                           resultRelation,
794                                                           resultRelationIndex,
795                                                           estate->es_instrument);
796                         resultRelInfo++;
797                 }
798                 estate->es_result_relations = resultRelInfos;
799                 estate->es_num_result_relations = numResultRelations;
800                 /* es_result_relation_info is NULL except when within ModifyTable */
801                 estate->es_result_relation_info = NULL;
802         }
803         else
804         {
805                 /*
806                  * if no result relation, then set state appropriately
807                  */
808                 estate->es_result_relations = NULL;
809                 estate->es_num_result_relations = 0;
810                 estate->es_result_relation_info = NULL;
811         }
812
813         /*
814          * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
815          * before we initialize the plan tree, else we'd be risking lock upgrades.
816          * While we are at it, build the ExecRowMark list.
817          */
818         estate->es_rowMarks = NIL;
819         foreach(l, plannedstmt->rowMarks)
820         {
821                 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
822                 Oid                     relid;
823                 Relation        relation;
824                 ExecRowMark *erm;
825
826                 /* ignore "parent" rowmarks; they are irrelevant at runtime */
827                 if (rc->isParent)
828                         continue;
829
830                 switch (rc->markType)
831                 {
832                         case ROW_MARK_EXCLUSIVE:
833                         case ROW_MARK_SHARE:
834                                 relid = getrelid(rc->rti, rangeTable);
835                                 relation = heap_open(relid, RowShareLock);
836                                 break;
837                         case ROW_MARK_REFERENCE:
838                                 relid = getrelid(rc->rti, rangeTable);
839                                 relation = heap_open(relid, AccessShareLock);
840                                 break;
841                         case ROW_MARK_COPY:
842                                 /* there's no real table here ... */
843                                 relation = NULL;
844                                 break;
845                         default:
846                                 elog(ERROR, "unrecognized markType: %d", rc->markType);
847                                 relation = NULL;        /* keep compiler quiet */
848                                 break;
849                 }
850
851                 /* Check that relation is a legal target for marking */
852                 if (relation)
853                         CheckValidRowMarkRel(relation, rc->markType);
854
855                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
856                 erm->relation = relation;
857                 erm->rti = rc->rti;
858                 erm->prti = rc->prti;
859                 erm->rowmarkId = rc->rowmarkId;
860                 erm->markType = rc->markType;
861                 erm->noWait = rc->noWait;
862                 ItemPointerSetInvalid(&(erm->curCtid));
863                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
864         }
865
866         /*
867          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
868          * flag appropriately so that the plan tree will be initialized with the
869          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
870          */
871         estate->es_select_into = false;
872         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
873         {
874                 estate->es_select_into = true;
875                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
876         }
877
878         /*
879          * Initialize the executor's tuple table to empty.
880          */
881         estate->es_tupleTable = NIL;
882         estate->es_trig_tuple_slot = NULL;
883         estate->es_trig_oldtup_slot = NULL;
884         estate->es_trig_newtup_slot = NULL;
885
886         /* mark EvalPlanQual not active */
887         estate->es_epqTuple = NULL;
888         estate->es_epqTupleSet = NULL;
889         estate->es_epqScanDone = NULL;
890
891         /*
892          * Initialize private state information for each SubPlan.  We must do this
893          * before running ExecInitNode on the main query tree, since
894          * ExecInitSubPlan expects to be able to find these entries.
895          */
896         Assert(estate->es_subplanstates == NIL);
897         i = 1;                                          /* subplan indices count from 1 */
898         foreach(l, plannedstmt->subplans)
899         {
900                 Plan       *subplan = (Plan *) lfirst(l);
901                 PlanState  *subplanstate;
902                 int                     sp_eflags;
903
904                 /*
905                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
906                  * it is a parameterless subplan (not initplan), we suggest that it be
907                  * prepared to handle REWIND efficiently; otherwise there is no need.
908                  */
909                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
910                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
911                         sp_eflags |= EXEC_FLAG_REWIND;
912
913                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
914
915                 estate->es_subplanstates = lappend(estate->es_subplanstates,
916                                                                                    subplanstate);
917
918                 i++;
919         }
920
921         /*
922          * Initialize the private state information for all the nodes in the query
923          * tree.  This opens files, allocates storage and leaves us ready to start
924          * processing tuples.
925          */
926         planstate = ExecInitNode(plan, estate, eflags);
927
928         /*
929          * Get the tuple descriptor describing the type of tuples to return. (this
930          * is especially important if we are creating a relation with "SELECT
931          * INTO")
932          */
933         tupType = ExecGetResultType(planstate);
934
935         /*
936          * Initialize the junk filter if needed.  SELECT queries need a filter if
937          * there are any junk attrs in the top-level tlist.
938          */
939         if (operation == CMD_SELECT)
940         {
941                 bool            junk_filter_needed = false;
942                 ListCell   *tlist;
943
944                 foreach(tlist, plan->targetlist)
945                 {
946                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
947
948                         if (tle->resjunk)
949                         {
950                                 junk_filter_needed = true;
951                                 break;
952                         }
953                 }
954
955                 if (junk_filter_needed)
956                 {
957                         JunkFilter *j;
958
959                         j = ExecInitJunkFilter(planstate->plan->targetlist,
960                                                                    tupType->tdhasoid,
961                                                                    ExecInitExtraTupleSlot(estate));
962                         estate->es_junkFilter = j;
963
964                         /* Want to return the cleaned tuple type */
965                         tupType = j->jf_cleanTupType;
966                 }
967         }
968
969         queryDesc->tupDesc = tupType;
970         queryDesc->planstate = planstate;
971
972         /*
973          * If doing SELECT INTO, initialize the "into" relation.  We must wait
974          * till now so we have the "clean" result tuple type to create the new
975          * table from.
976          *
977          * If EXPLAIN, skip creating the "into" relation.
978          */
979         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
980                 OpenIntoRel(queryDesc);
981 }
982
983 /*
984  * Check that a proposed result relation is a legal target for the operation
985  *
986  * In most cases parser and/or planner should have noticed this already, but
987  * let's make sure.  In the view case we do need a test here, because if the
988  * view wasn't rewritten by a rule, it had better have an INSTEAD trigger.
989  *
990  * Note: when changing this function, you probably also need to look at
991  * CheckValidRowMarkRel.
992  */
993 void
994 CheckValidResultRel(Relation resultRel, CmdType operation)
995 {
996         TriggerDesc *trigDesc = resultRel->trigdesc;
997
998         switch (resultRel->rd_rel->relkind)
999         {
1000                 case RELKIND_RELATION:
1001                         /* OK */
1002                         break;
1003                 case RELKIND_SEQUENCE:
1004                         ereport(ERROR,
1005                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1006                                          errmsg("cannot change sequence \"%s\"",
1007                                                         RelationGetRelationName(resultRel))));
1008                         break;
1009                 case RELKIND_TOASTVALUE:
1010                         ereport(ERROR,
1011                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1012                                          errmsg("cannot change TOAST relation \"%s\"",
1013                                                         RelationGetRelationName(resultRel))));
1014                         break;
1015                 case RELKIND_VIEW:
1016                         switch (operation)
1017                         {
1018                                 case CMD_INSERT:
1019                                         if (!trigDesc || !trigDesc->trig_insert_instead_row)
1020                                                 ereport(ERROR,
1021                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1022                                                    errmsg("cannot insert into view \"%s\"",
1023                                                                   RelationGetRelationName(resultRel)),
1024                                                    errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
1025                                         break;
1026                                 case CMD_UPDATE:
1027                                         if (!trigDesc || !trigDesc->trig_update_instead_row)
1028                                                 ereport(ERROR,
1029                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1030                                                    errmsg("cannot update view \"%s\"",
1031                                                                   RelationGetRelationName(resultRel)),
1032                                                    errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
1033                                         break;
1034                                 case CMD_DELETE:
1035                                         if (!trigDesc || !trigDesc->trig_delete_instead_row)
1036                                                 ereport(ERROR,
1037                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1038                                                    errmsg("cannot delete from view \"%s\"",
1039                                                                   RelationGetRelationName(resultRel)),
1040                                                    errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
1041                                         break;
1042                                 default:
1043                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1044                                         break;
1045                         }
1046                         break;
1047                 case RELKIND_FOREIGN_TABLE:
1048                         ereport(ERROR,
1049                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1050                                          errmsg("cannot change foreign table \"%s\"",
1051                                                         RelationGetRelationName(resultRel))));
1052                         break;
1053                 default:
1054                         ereport(ERROR,
1055                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1056                                          errmsg("cannot change relation \"%s\"",
1057                                                         RelationGetRelationName(resultRel))));
1058                         break;
1059         }
1060 }
1061
1062 /*
1063  * Check that a proposed rowmark target relation is a legal target
1064  *
1065  * In most cases parser and/or planner should have noticed this already, but
1066  * they don't cover all cases.
1067  */
1068 static void
1069 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1070 {
1071         switch (rel->rd_rel->relkind)
1072         {
1073                 case RELKIND_RELATION:
1074                         /* OK */
1075                         break;
1076                 case RELKIND_SEQUENCE:
1077                         /* Must disallow this because we don't vacuum sequences */
1078                         ereport(ERROR,
1079                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1080                                          errmsg("cannot lock rows in sequence \"%s\"",
1081                                                         RelationGetRelationName(rel))));
1082                         break;
1083                 case RELKIND_TOASTVALUE:
1084                         /* We could allow this, but there seems no good reason to */
1085                         ereport(ERROR,
1086                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1087                                          errmsg("cannot lock rows in TOAST relation \"%s\"",
1088                                                         RelationGetRelationName(rel))));
1089                         break;
1090                 case RELKIND_VIEW:
1091                         /* Should not get here */
1092                         ereport(ERROR,
1093                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1094                                          errmsg("cannot lock rows in view \"%s\"",
1095                                                         RelationGetRelationName(rel))));
1096                         break;
1097                 case RELKIND_FOREIGN_TABLE:
1098                         /* Perhaps we can support this someday, but not today */
1099                         ereport(ERROR,
1100                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1101                                          errmsg("cannot lock rows in foreign table \"%s\"",
1102                                                         RelationGetRelationName(rel))));
1103                         break;
1104                 default:
1105                         ereport(ERROR,
1106                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1107                                          errmsg("cannot lock rows in relation \"%s\"",
1108                                                         RelationGetRelationName(rel))));
1109                         break;
1110         }
1111 }
1112
1113 /*
1114  * Initialize ResultRelInfo data for one result relation
1115  *
1116  * Caution: before Postgres 9.1, this function included the relkind checking
1117  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1118  * appropriate.  Be sure callers cover those needs.
1119  */
1120 void
1121 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1122                                   Relation resultRelationDesc,
1123                                   Index resultRelationIndex,
1124                                   int instrument_options)
1125 {
1126         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1127         resultRelInfo->type = T_ResultRelInfo;
1128         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1129         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1130         resultRelInfo->ri_NumIndices = 0;
1131         resultRelInfo->ri_IndexRelationDescs = NULL;
1132         resultRelInfo->ri_IndexRelationInfo = NULL;
1133         /* make a copy so as not to depend on relcache info not changing... */
1134         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1135         if (resultRelInfo->ri_TrigDesc)
1136         {
1137                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1138
1139                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1140                         palloc0(n * sizeof(FmgrInfo));
1141                 resultRelInfo->ri_TrigWhenExprs = (List **)
1142                         palloc0(n * sizeof(List *));
1143                 if (instrument_options)
1144                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1145         }
1146         else
1147         {
1148                 resultRelInfo->ri_TrigFunctions = NULL;
1149                 resultRelInfo->ri_TrigWhenExprs = NULL;
1150                 resultRelInfo->ri_TrigInstrument = NULL;
1151         }
1152         resultRelInfo->ri_ConstraintExprs = NULL;
1153         resultRelInfo->ri_junkFilter = NULL;
1154         resultRelInfo->ri_projectReturning = NULL;
1155 }
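/*
 * A minimal sketch of the caller-side steps implied by the caution above
 * (variable names are illustrative; compare InitPlan, which opens and locks
 * each result relation, and the ModifyTable initialization code, which does
 * the relkind check and opens indexes where needed):
 *
 *		rel = heap_open(relid, RowExclusiveLock);
 *		InitResultRelInfo(resultRelInfo, rel, rti, estate->es_instrument);
 *		CheckValidResultRel(rel, operation);
 *		if (rel->rd_rel->relhasindex && operation != CMD_DELETE)
 *			ExecOpenIndices(resultRelInfo);
 */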
1156
1157 /*
1158  *              ExecGetTriggerResultRel
1159  *
1160  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1161  * triggers are fired on one of the result relations of the query, and so
1162  * we can just return a member of the es_result_relations array.  (Note: in
1163  * self-join situations there might be multiple members with the same OID;
1164  * if so it doesn't matter which one we pick.)  However, it is sometimes
1165  * necessary to fire triggers on other relations; this happens mainly when an
1166  * RI update trigger queues additional triggers on other relations, which will
1167  * be processed in the context of the outer query.  For efficiency's sake,
1168  * we want to have a ResultRelInfo for those triggers too; that can avoid
1169  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
1170  * ANALYZE to report the runtimes of such triggers.)  So we make additional
1171  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1172  */
1173 ResultRelInfo *
1174 ExecGetTriggerResultRel(EState *estate, Oid relid)
1175 {
1176         ResultRelInfo *rInfo;
1177         int                     nr;
1178         ListCell   *l;
1179         Relation        rel;
1180         MemoryContext oldcontext;
1181
1182         /* First, search through the query result relations */
1183         rInfo = estate->es_result_relations;
1184         nr = estate->es_num_result_relations;
1185         while (nr > 0)
1186         {
1187                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1188                         return rInfo;
1189                 rInfo++;
1190                 nr--;
1191         }
1192         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1193         foreach(l, estate->es_trig_target_relations)
1194         {
1195                 rInfo = (ResultRelInfo *) lfirst(l);
1196                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1197                         return rInfo;
1198         }
1199         /* Nope, so we need a new one */
1200
1201         /*
1202          * Open the target relation's relcache entry.  We assume that an
1203          * appropriate lock is still held by the backend from whenever the trigger
1204          * event got queued, so we need take no new lock here.  Also, we need not
1205          * recheck the relkind, so no need for CheckValidResultRel.
1206          */
1207         rel = heap_open(relid, NoLock);
1208
1209         /*
1210          * Make the new entry in the right context.
1211          */
1212         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1213         rInfo = makeNode(ResultRelInfo);
1214         InitResultRelInfo(rInfo,
1215                                           rel,
1216                                           0,            /* dummy rangetable index */
1217                                           estate->es_instrument);
1218         estate->es_trig_target_relations =
1219                 lappend(estate->es_trig_target_relations, rInfo);
1220         MemoryContextSwitchTo(oldcontext);
1221
1222         /*
1223          * Currently, we don't need any index information in ResultRelInfos used
1224          * only for triggers, so no need to call ExecOpenIndices.
1225          */
1226
1227         return rInfo;
1228 }
1229
1230 /*
1231  *              ExecContextForcesOids
1232  *
1233  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1234  * we need to ensure that result tuples have space for an OID iff they are
1235  * going to be stored into a relation that has OIDs.  In other contexts
1236  * we are free to choose whether to leave space for OIDs in result tuples
1237  * (we generally don't want to, but we do if a physical-tlist optimization
1238  * is possible).  This routine checks the plan context and returns TRUE if the
1239  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1240  * *hasoids is set to the required value.
1241  *
1242  * One reason this is ugly is that all plan nodes in the plan tree will emit
1243  * tuples with space for an OID, though we really only need the topmost node
1244  * to do so.  However, node types like Sort don't project new tuples but just
1245  * return their inputs, and in those cases the requirement propagates down
1246  * to the input node.  Eventually we might make this code smart enough to
1247  * recognize how far down the requirement really goes, but for now we just
1248  * make all plan nodes do the same thing if the top level forces the choice.
1249  *
1250  * We assume that if we are generating tuples for INSERT or UPDATE,
1251  * estate->es_result_relation_info is already set up to describe the target
1252  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1253  * the target relations may have OIDs and some not.  We have to make the
1254  * decisions on a per-relation basis as we initialize each of the subplans of
1255  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1256  * while initializing each subplan.
1257  *
1258  * SELECT INTO is even uglier, because we don't have the INTO relation's
1259  * descriptor available when this code runs; we have to look aside at a
1260  * flag set by InitPlan().
1261  */
1262 bool
1263 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1264 {
1265         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1266
1267         if (ri != NULL)
1268         {
1269                 Relation        rel = ri->ri_RelationDesc;
1270
1271                 if (rel != NULL)
1272                 {
1273                         *hasoids = rel->rd_rel->relhasoids;
1274                         return true;
1275                 }
1276         }
1277
1278         if (planstate->state->es_select_into)
1279         {
1280                 *hasoids = planstate->state->es_into_oids;
1281                 return true;
1282         }
1283
1284         return false;
1285 }
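/*
 * A sketch of the expected usage pattern when a plan node builds its result
 * tuple descriptor: if the context does not force the choice, the node is
 * free to omit the OID column (compare ExecAssignResultTypeFromTL in
 * execUtils.c; the exact code there may differ slightly):
 *
 *		bool		hasoid;
 *		TupleDesc	tupDesc;
 *
 *		if (!ExecContextForcesOids(planstate, &hasoid))
 *			hasoid = false;
 *		tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
 */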
1286
1287 /* ----------------------------------------------------------------
1288  *              ExecPostprocessPlan
1289  *
1290  *              Give plan nodes a final chance to execute before shutdown
1291  * ----------------------------------------------------------------
1292  */
1293 static void
1294 ExecPostprocessPlan(EState *estate)
1295 {
1296         ListCell   *lc;
1297
1298         /*
1299          * Make sure nodes run forward.
1300          */
1301         estate->es_direction = ForwardScanDirection;
1302
1303         /*
1304          * Run any secondary ModifyTable nodes to completion, in case the main
1305          * query did not fetch all rows from them.      (We do this to ensure that
1306          * such nodes have predictable results.)
1307          */
1308         foreach(lc, estate->es_auxmodifytables)
1309         {
1310                 PlanState  *ps = (PlanState *) lfirst(lc);
1311
1312                 for (;;)
1313                 {
1314                         TupleTableSlot *slot;
1315
1316                         /* Reset the per-output-tuple exprcontext each time */
1317                         ResetPerTupleExprContext(estate);
1318
1319                         slot = ExecProcNode(ps);
1320
1321                         if (TupIsNull(slot))
1322                                 break;
1323                 }
1324         }
1325 }
1326
1327 /* ----------------------------------------------------------------
1328  *              ExecEndPlan
1329  *
1330  *              Cleans up the query plan -- closes files and frees up storage
1331  *
1332  * NOTE: we are no longer very worried about freeing storage per se
1333  * in this code; FreeExecutorState should be guaranteed to release all
1334  * memory that needs to be released.  What we are worried about doing
1335  * is closing relations and dropping buffer pins.  Thus, for example,
1336  * tuple tables must be cleared or dropped to ensure pins are released.
1337  * ----------------------------------------------------------------
1338  */
1339 static void
1340 ExecEndPlan(PlanState *planstate, EState *estate)
1341 {
1342         ResultRelInfo *resultRelInfo;
1343         int                     i;
1344         ListCell   *l;
1345
1346         /*
1347          * shut down the node-type-specific query processing
1348          */
1349         ExecEndNode(planstate);
1350
1351         /*
1352          * for subplans too
1353          */
1354         foreach(l, estate->es_subplanstates)
1355         {
1356                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1357
1358                 ExecEndNode(subplanstate);
1359         }
1360
1361         /*
1362          * destroy the executor's tuple table.  Actually we only care about
1363          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1364          * the TupleTableSlots, since the containing memory context is about to go
1365          * away anyway.
1366          */
1367         ExecResetTupleTable(estate->es_tupleTable, false);
1368
1369         /*
1370          * close the result relation(s) if any, but hold locks until xact commit.
1371          */
1372         resultRelInfo = estate->es_result_relations;
1373         for (i = estate->es_num_result_relations; i > 0; i--)
1374         {
1375                 /* Close indices and then the relation itself */
1376                 ExecCloseIndices(resultRelInfo);
1377                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1378                 resultRelInfo++;
1379         }
1380
1381         /*
1382          * likewise close any trigger target relations
1383          */
1384         foreach(l, estate->es_trig_target_relations)
1385         {
1386                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1387                 /* Close indices and then the relation itself */
1388                 ExecCloseIndices(resultRelInfo);
1389                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1390         }
1391
1392         /*
1393          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1394          */
1395         foreach(l, estate->es_rowMarks)
1396         {
1397                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1398
1399                 if (erm->relation)
1400                         heap_close(erm->relation, NoLock);
1401         }
1402 }
1403
1404 /* ----------------------------------------------------------------
1405  *              ExecutePlan
1406  *
1407  *              Processes the query plan until we have processed 'numberTuples' tuples,
1408  *              moving in the specified direction.
1409  *
1410  *              Runs to completion if numberTuples is 0
1411  *
1412  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1413  * user can see it
1414  * ----------------------------------------------------------------
1415  */
1416 static void
1417 ExecutePlan(EState *estate,
1418                         PlanState *planstate,
1419                         CmdType operation,
1420                         bool sendTuples,
1421                         long numberTuples,
1422                         ScanDirection direction,
1423                         DestReceiver *dest)
1424 {
1425         TupleTableSlot *slot;
1426         long            current_tuple_count;
1427
1428         /*
1429          * initialize local variables
1430          */
1431         current_tuple_count = 0;
1432
1433         /*
1434          * Set the direction.
1435          */
1436         estate->es_direction = direction;
1437
1438         /*
1439          * Loop until we've processed the proper number of tuples from the plan.
1440          */
1441         for (;;)
1442         {
1443                 /* Reset the per-output-tuple exprcontext */
1444                 ResetPerTupleExprContext(estate);
1445
1446                 /*
1447                  * Execute the plan and obtain a tuple
1448                  */
1449                 slot = ExecProcNode(planstate);
1450
1451                 /*
1452                  * if the tuple is null, then we assume there is nothing more to
1453                  * process so we just end the loop...
1454                  */
1455                 if (TupIsNull(slot))
1456                         break;
1457
1458                 /*
1459                  * If we have a junk filter, then project a new tuple with the junk
1460                  * removed.
1461                  *
1462                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1463                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1464                  * because that tuple slot has the wrong descriptor.)
1465                  */
1466                 if (estate->es_junkFilter != NULL)
1467                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1468
1469                 /*
1470                  * If we are supposed to send the tuple somewhere, do so. (In
1471                  * practice, this is probably always the case at this point.)
1472                  */
1473                 if (sendTuples)
1474                         (*dest->receiveSlot) (slot, dest);
1475
1476                 /*
1477                  * Count tuples processed, if this is a SELECT.  (For other operation
1478                  * types, the ModifyTable plan node must count the appropriate
1479                  * events.)
1480                  */
1481                 if (operation == CMD_SELECT)
1482                         (estate->es_processed)++;
1483
1484                 /*
1485                  * check our tuple count.  If we've processed the proper number then
1486                  * quit, else loop again and process more tuples.  Zero numberTuples
1487                  * means no limit.
1488                  */
1489                 current_tuple_count++;
1490                 if (numberTuples && numberTuples == current_tuple_count)
1491                         break;
1492         }
1493 }
1494
1495
1496 /*
1497  * ExecRelCheck --- check that tuple meets constraints for result relation
1498  */
1499 static const char *
1500 ExecRelCheck(ResultRelInfo *resultRelInfo,
1501                          TupleTableSlot *slot, EState *estate)
1502 {
1503         Relation        rel = resultRelInfo->ri_RelationDesc;
1504         int                     ncheck = rel->rd_att->constr->num_check;
1505         ConstrCheck *check = rel->rd_att->constr->check;
1506         ExprContext *econtext;
1507         MemoryContext oldContext;
1508         List       *qual;
1509         int                     i;
1510
1511         /*
1512          * If first time through for this result relation, build expression
1513          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1514          * memory context so they'll survive throughout the query.
1515          */
1516         if (resultRelInfo->ri_ConstraintExprs == NULL)
1517         {
1518                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1519                 resultRelInfo->ri_ConstraintExprs =
1520                         (List **) palloc(ncheck * sizeof(List *));
1521                 for (i = 0; i < ncheck; i++)
1522                 {
1523                         /* ExecQual wants implicit-AND form */
1524                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1525                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1526                                 ExecPrepareExpr((Expr *) qual, estate);
1527                 }
1528                 MemoryContextSwitchTo(oldContext);
1529         }
1530
1531         /*
1532          * We will use the EState's per-tuple context for evaluating constraint
1533          * expressions (creating it if it's not already there).
1534          */
1535         econtext = GetPerTupleExprContext(estate);
1536
1537         /* Arrange for econtext's scan tuple to be the tuple under test */
1538         econtext->ecxt_scantuple = slot;
1539
1540         /* And evaluate the constraints */
1541         for (i = 0; i < ncheck; i++)
1542         {
1543                 qual = resultRelInfo->ri_ConstraintExprs[i];
1544
1545                 /*
1546                  * NOTE: SQL92 specifies that a NULL result from a constraint
1547                  * expression is not to be treated as a failure.  Therefore, tell
1548                  * ExecQual to return TRUE for NULL.
1549                  */
1550                 if (!ExecQual(qual, econtext, true))
1551                         return check[i].ccname;
1552         }
1553
1554         /* NULL result means no error */
1555         return NULL;
1556 }
1557
1558 void
1559 ExecConstraints(ResultRelInfo *resultRelInfo,
1560                                 TupleTableSlot *slot, EState *estate)
1561 {
1562         Relation        rel = resultRelInfo->ri_RelationDesc;
1563         TupleConstr *constr = rel->rd_att->constr;
1564
1565         Assert(constr);
1566
1567         if (constr->has_not_null)
1568         {
1569                 int                     natts = rel->rd_att->natts;
1570                 int                     attrChk;
1571
1572                 for (attrChk = 1; attrChk <= natts; attrChk++)
1573                 {
1574                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1575                                 slot_attisnull(slot, attrChk))
1576                                 ereport(ERROR,
1577                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1578                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1579                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
1580                                                  errdetail("Failing row contains %s.",
1581                                                                    ExecBuildSlotValueDescription(slot, 64))));
1582                 }
1583         }
1584
1585         if (constr->num_check > 0)
1586         {
1587                 const char *failed;
1588
1589                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1590                         ereport(ERROR,
1591                                         (errcode(ERRCODE_CHECK_VIOLATION),
1592                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1593                                                         RelationGetRelationName(rel), failed),
1594                                          errdetail("Failing row contains %s.",
1595                                                            ExecBuildSlotValueDescription(slot, 64))));
1596         }
1597 }
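/*
 * An illustrative sketch of the usual caller pattern for ExecConstraints():
 * a ModifyTable-style routine checks the candidate tuple's slot against the
 * relation's constraints before physically storing it.  The function name is
 * hypothetical and the insertion step itself is elided.
 */
#ifdef NOT_USED
static void
example_check_before_insert(ResultRelInfo *resultRelInfo,
							TupleTableSlot *slot, EState *estate)
{
	Relation	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* only bother if the relation has NOT NULL or CHECK constraints */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/* ... then heap_insert the tuple, insert index entries, etc. ... */
}
#endif   /* NOT_USED */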
1598
1599 /*
1600  * ExecBuildSlotValueDescription -- construct a string representing a tuple
1601  *
1602  * This is intentionally very similar to BuildIndexValueDescription, but
1603  * unlike that function, we truncate long field values.  That seems necessary
1604  * here since heap field values could be very long, whereas index entries
1605  * typically aren't so wide.
1606  */
1607 static char *
1608 ExecBuildSlotValueDescription(TupleTableSlot *slot, int maxfieldlen)
1609 {
1610         StringInfoData buf;
1611         TupleDesc       tupdesc = slot->tts_tupleDescriptor;
1612         int                     i;
1613
1614         /* Make sure the tuple is fully deconstructed */
1615         slot_getallattrs(slot);
1616
1617         initStringInfo(&buf);
1618
1619         appendStringInfoChar(&buf, '(');
1620
1621         for (i = 0; i < tupdesc->natts; i++)
1622         {
1623                 char       *val;
1624                 int                     vallen;
1625
1626                 if (slot->tts_isnull[i])
1627                         val = "null";
1628                 else
1629                 {
1630                         Oid                     foutoid;
1631                         bool            typisvarlena;
1632
1633                         getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
1634                                                           &foutoid, &typisvarlena);
1635                         val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
1636                 }
1637
1638                 if (i > 0)
1639                         appendStringInfoString(&buf, ", ");
1640
1641                 /* truncate if needed */
1642                 vallen = strlen(val);
1643                 if (vallen <= maxfieldlen)
1644                         appendStringInfoString(&buf, val);
1645                 else
1646                 {
1647                         vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1648                         appendBinaryStringInfo(&buf, val, vallen);
1649                         appendStringInfoString(&buf, "...");
1650                 }
1651         }
1652
1653         appendStringInfoChar(&buf, ')');
1654
1655         return buf.data;
1656 }
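/*
 * For illustration, with maxfieldlen = 64 (the value the callers above use),
 * a three-column row containing 42, a NULL, and a 200-character string is
 * rendered roughly as
 *
 *		(42, null, <first 64 characters of the string>...)
 *
 * i.e. NULLs print as "null", and over-length values are clipped at a
 * multibyte-safe boundary and terminated with "...".
 */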
1657
1658
1659 /*
1660  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
1661  */
1662 ExecRowMark *
1663 ExecFindRowMark(EState *estate, Index rti)
1664 {
1665         ListCell   *lc;
1666
1667         foreach(lc, estate->es_rowMarks)
1668         {
1669                 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
1670
1671                 if (erm->rti == rti)
1672                         return erm;
1673         }
1674         elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
1675         return NULL;                            /* keep compiler quiet */
1676 }
1677
1678 /*
1679  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
1680  *
1681  * Inputs are the underlying ExecRowMark struct and the targetlist of the
1682  * input plan node (not planstate node!).  We need the latter to find out
1683  * the column numbers of the resjunk columns.
1684  */
1685 ExecAuxRowMark *
1686 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
1687 {
1688         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
1689         char            resname[32];
1690
1691         aerm->rowmark = erm;
1692
1693         /* Look up the resjunk columns associated with this rowmark */
1694         if (erm->relation)
1695         {
1696                 Assert(erm->markType != ROW_MARK_COPY);
1697
1698                 /* if child rel, need tableoid */
1699                 if (erm->rti != erm->prti)
1700                 {
1701                         snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
1702                         aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1703                                                                                                                    resname);
1704                         if (!AttributeNumberIsValid(aerm->toidAttNo))
1705                                 elog(ERROR, "could not find junk %s column", resname);
1706                 }
1707
1708                 /* always need ctid for real relations */
1709                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
1710                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1711                                                                                                            resname);
1712                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
1713                         elog(ERROR, "could not find junk %s column", resname);
1714         }
1715         else
1716         {
1717                 Assert(erm->markType == ROW_MARK_COPY);
1718
1719                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
1720                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
1721                                                                                                                 resname);
1722                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
1723                         elog(ERROR, "could not find junk %s column", resname);
1724         }
1725
1726         return aerm;
1727 }
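/*
 * An illustrative sketch of how a node that must handle row marks (in the
 * style of LockRows or ModifyTable initialization) could collect
 * ExecAuxRowMarks for its subplan.  The function and list names are
 * hypothetical; PlanRowMark is the planner's rowmark representation.
 */
#ifdef NOT_USED
static List *
example_collect_aux_rowmarks(EState *estate, List *planRowMarks, Plan *subplan)
{
	List	   *aux = NIL;
	ListCell   *lc;

	foreach(lc, planRowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are not executable */
		if (rc->isParent)
			continue;

		erm = ExecFindRowMark(estate, rc->rti);
		aux = lappend(aux, ExecBuildAuxRowMark(erm, subplan->targetlist));
	}

	return aux;
}
#endif   /* NOT_USED */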
1728
1729
1730 /*
1731  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1732  * process the updated version under READ COMMITTED rules.
1733  *
1734  * See backend/executor/README for some info about how this works.
1735  */
1736
1737
1738 /*
1739  * Check a modified tuple to see if we want to process its updated version
1740  * under READ COMMITTED rules.
1741  *
1742  *      estate - outer executor state data
1743  *      epqstate - state for EvalPlanQual rechecking
1744  *      relation - table containing tuple
1745  *      rti - rangetable index of table containing tuple
1746  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1747  *      priorXmax - t_xmax from the outdated tuple
1748  *
1749  * *tid is also an output parameter: it's modified to hold the TID of the
1750  * latest version of the tuple (note this may be changed even on failure)
1751  *
1752  * Returns a slot containing the new candidate update/delete tuple, or
1753  * NULL if we determine we shouldn't process the row.
1754  */
1755 TupleTableSlot *
1756 EvalPlanQual(EState *estate, EPQState *epqstate,
1757                          Relation relation, Index rti,
1758                          ItemPointer tid, TransactionId priorXmax)
1759 {
1760         TupleTableSlot *slot;
1761         HeapTuple       copyTuple;
1762
1763         Assert(rti > 0);
1764
1765         /*
1766          * Get and lock the updated version of the row; if fail, return NULL.
1767          */
1768         copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
1769                                                                   tid, priorXmax);
1770
1771         if (copyTuple == NULL)
1772                 return NULL;
1773
1774         /*
1775          * For UPDATE/DELETE we have to return the TID of the actual row we're
1776          * executing EPQ for.
1777          */
1778         *tid = copyTuple->t_self;
1779
1780         /*
1781          * Need to run a recheck subquery.      Initialize or reinitialize EPQ state.
1782          */
1783         EvalPlanQualBegin(epqstate, estate);
1784
1785         /*
1786          * Free old test tuple, if any, and store new tuple where relation's scan
1787          * node will see it
1788          */
1789         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1790
1791         /*
1792          * Fetch any non-locked source rows
1793          */
1794         EvalPlanQualFetchRowMarks(epqstate);
1795
1796         /*
1797          * Run the EPQ query.  We assume it will return at most one tuple.
1798          */
1799         slot = EvalPlanQualNext(epqstate);
1800
1801         /*
1802          * If we got a tuple, force the slot to materialize the tuple so that it
1803          * is not dependent on any local state in the EPQ query (in particular,
1804          * it's highly likely that the slot contains references to any pass-by-ref
1805          * datums that may be present in copyTuple).  As with the next step, this
1806          * is to guard against early re-use of the EPQ query.
1807          */
1808         if (!TupIsNull(slot))
1809                 (void) ExecMaterializeSlot(slot);
1810
1811         /*
1812          * Clear out the test tuple.  This is needed in case the EPQ query is
1813          * re-used to test a tuple for a different relation.  (Not clear that can
1814          * really happen, but let's be safe.)
1815          */
1816         EvalPlanQualSetTuple(epqstate, rti, NULL);
1817
1818         return slot;
1819 }
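/*
 * An illustrative sketch of the READ COMMITTED recheck pattern around
 * EvalPlanQual(), in the style of an UPDATE/DELETE executor routine.  The
 * function name is hypothetical; update_ctid and update_xmax are assumed to
 * have been returned by the failed heap update, and the actual retry of the
 * modification is elided.
 */
#ifdef NOT_USED
static bool
example_recheck_after_concurrent_update(EState *estate, EPQState *epqstate,
										ResultRelInfo *resultRelInfo,
										ItemPointer tupleid,
										ItemPointerData update_ctid,
										TransactionId update_xmax)
{
	TupleTableSlot *epqslot;

	if (ItemPointerEquals(tupleid, &update_ctid))
		return false;			/* row was deleted, so skip it */

	epqslot = EvalPlanQual(estate, epqstate,
						   resultRelInfo->ri_RelationDesc,
						   resultRelInfo->ri_RangeTableIndex,
						   &update_ctid, update_xmax);
	if (TupIsNull(epqslot))
		return false;			/* updated row no longer passes the quals */

	/* retry the modification against the latest version of the row */
	*tupleid = update_ctid;
	/* ... recompute the new tuple from epqslot and loop back ... */
	return true;
}
#endif   /* NOT_USED */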
1820
1821 /*
1822  * Fetch a copy of the newest version of an outdated tuple
1823  *
1824  *      estate - executor state data
1825  *      relation - table containing tuple
1826  *      lockmode - requested tuple lock mode
1827  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1828  *      priorXmax - t_xmax from the outdated tuple
1829  *
1830  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1831  * that there is no newest version (ie, the row was deleted not updated).
1832  * If successful, we have locked the newest tuple version, so caller does not
1833  * need to worry about it changing anymore.
1834  *
1835  * Note: properly, lockmode should be declared as enum LockTupleMode,
1836  * but we use "int" to avoid having to include heapam.h in executor.h.
1837  */
1838 HeapTuple
1839 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1840                                   ItemPointer tid, TransactionId priorXmax)
1841 {
1842         HeapTuple       copyTuple = NULL;
1843         HeapTupleData tuple;
1844         SnapshotData SnapshotDirty;
1845
1846         /*
1847          * fetch target tuple
1848          *
1849          * Loop here to deal with updated or busy tuples
1850          */
1851         InitDirtySnapshot(SnapshotDirty);
1852         tuple.t_self = *tid;
1853         for (;;)
1854         {
1855                 Buffer          buffer;
1856
1857                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1858                 {
1859                         HTSU_Result test;
1860                         ItemPointerData update_ctid;
1861                         TransactionId update_xmax;
1862
1863                         /*
1864                          * If xmin isn't what we're expecting, the slot must have been
1865                          * recycled and reused for an unrelated tuple.  This implies that
1866                          * the latest version of the row was deleted, so we need do
1867                          * nothing.  (Should be safe to examine xmin without getting
1868                          * buffer's content lock, since xmin never changes in an existing
1869                          * tuple.)
1870                          */
1871                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1872                                                                          priorXmax))
1873                         {
1874                                 ReleaseBuffer(buffer);
1875                                 return NULL;
1876                         }
1877
1878                         /* otherwise xmin should not be dirty... */
1879                         if (TransactionIdIsValid(SnapshotDirty.xmin))
1880                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1881
1882                         /*
1883                          * If the tuple is being updated by another transaction, we have to
1884                          * wait for it to commit or abort.
1885                          */
1886                         if (TransactionIdIsValid(SnapshotDirty.xmax))
1887                         {
1888                                 ReleaseBuffer(buffer);
1889                                 XactLockTableWait(SnapshotDirty.xmax);
1890                                 continue;               /* loop back to repeat heap_fetch */
1891                         }
1892
1893                         /*
1894                          * If tuple was inserted by our own transaction, we have to check
1895                          * cmin against es_output_cid: cmin >= current CID means our
1896                          * command cannot see the tuple, so we should ignore it.  Without
1897                          * this we are open to the "Halloween problem" of indefinitely
1898                          * re-updating the same tuple. (We need not check cmax because
1899                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1900                          * transaction dead, regardless of cmax.)  We just checked that
1901                          * priorXmax == xmin, so we can test that variable instead of
1902                          * doing HeapTupleHeaderGetXmin again.
1903                          */
1904                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1905                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
1906                         {
1907                                 ReleaseBuffer(buffer);
1908                                 return NULL;
1909                         }
1910
1911                         /*
1912                          * This is a live tuple, so now try to lock it.
1913                          */
1914                         test = heap_lock_tuple(relation, &tuple, &buffer,
1915                                                                    &update_ctid, &update_xmax,
1916                                                                    estate->es_output_cid,
1917                                                                    lockmode, false);
1918                         /* We now have two pins on the buffer, get rid of one */
1919                         ReleaseBuffer(buffer);
1920
1921                         switch (test)
1922                         {
1923                                 case HeapTupleSelfUpdated:
1924                                         /* treat it as deleted; do not process */
1925                                         ReleaseBuffer(buffer);
1926                                         return NULL;
1927
1928                                 case HeapTupleMayBeUpdated:
1929                                         /* successfully locked */
1930                                         break;
1931
1932                                 case HeapTupleUpdated:
1933                                         ReleaseBuffer(buffer);
1934                                         if (IsolationUsesXactSnapshot())
1935                                                 ereport(ERROR,
1936                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1937                                                                  errmsg("could not serialize access due to concurrent update")));
1938                                         if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
1939                                         {
1940                                                 /* it was updated, so look at the updated version */
1941                                                 tuple.t_self = update_ctid;
1942                                                 /* updated row should have xmin matching this xmax */
1943                                                 priorXmax = update_xmax;
1944                                                 continue;
1945                                         }
1946                                         /* tuple was deleted, so give up */
1947                                         return NULL;
1948
1949                                 default:
1950                                         ReleaseBuffer(buffer);
1951                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1952                                                  test);
1953                                         return NULL;    /* keep compiler quiet */
1954                         }
1955
1956                         /*
1957                          * We got the tuple; now copy it for use by the recheck query.
1958                          */
1959                         copyTuple = heap_copytuple(&tuple);
1960                         ReleaseBuffer(buffer);
1961                         break;
1962                 }
1963
1964                 /*
1965                  * If the referenced slot was actually empty, the latest version of
1966                  * the row must have been deleted, so we need do nothing.
1967                  */
1968                 if (tuple.t_data == NULL)
1969                 {
1970                         ReleaseBuffer(buffer);
1971                         return NULL;
1972                 }
1973
1974                 /*
1975                  * As above, if xmin isn't what we're expecting, do nothing.
1976                  */
1977                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1978                                                                  priorXmax))
1979                 {
1980                         ReleaseBuffer(buffer);
1981                         return NULL;
1982                 }
1983
1984                 /*
1985                  * If we get here, the tuple was found but failed SnapshotDirty.
1986                  * Assuming the xmin is either a committed xact or our own xact (as it
1987                  * certainly should be if we're trying to modify the tuple), this must
1988                  * mean that the row was updated or deleted by either a committed xact
1989                  * or our own xact.  If it was deleted, we can ignore it; if it was
1990                  * updated then chain up to the next version and repeat the whole
1991                  * process.
1992                  *
1993                  * As above, it should be safe to examine xmax and t_ctid without the
1994                  * buffer content lock, because they can't be changing.
1995                  */
1996                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1997                 {
1998                         /* deleted, so forget about it */
1999                         ReleaseBuffer(buffer);
2000                         return NULL;
2001                 }
2002
2003                 /* updated, so look at the updated row */
2004                 tuple.t_self = tuple.t_data->t_ctid;
2005                 /* updated row should have xmin matching this xmax */
2006                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
2007                 ReleaseBuffer(buffer);
2008                 /* loop back to fetch next in chain */
2009         }
2010
2011         /*
2012          * Return the copied tuple
2013          */
2014         return copyTuple;
2015 }
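/*
 * An illustrative sketch of a row-locking caller (in the style of a FOR
 * UPDATE/FOR SHARE node) reacting to a concurrent update: distinguish
 * "deleted" from "updated", chase the update chain with EvalPlanQualFetch,
 * and install the copy as an EPQ test tuple.  The function name is
 * hypothetical; update_ctid, update_xmax and lockmode are assumed to come
 * from the failed heap_lock_tuple() call.
 */
#ifdef NOT_USED
static HeapTuple
example_chase_updated_version(EState *estate, EPQState *epqstate,
							  ExecRowMark *erm, ItemPointer tuple_self,
							  ItemPointerData update_ctid,
							  TransactionId update_xmax, int lockmode)
{
	HeapTuple	copyTuple;

	if (ItemPointerEquals(&update_ctid, tuple_self))
		return NULL;			/* tuple was deleted; nothing to lock */

	/* fetch and lock the newest version of the row */
	copyTuple = EvalPlanQualFetch(estate, erm->relation, lockmode,
								  &update_ctid, update_xmax);
	if (copyTuple == NULL)
		return NULL;			/* newest version was deleted after all */

	/* initialize EPQ state if needed, then store the test tuple */
	EvalPlanQualBegin(epqstate, estate);
	EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);

	return copyTuple;
}
#endif   /* NOT_USED */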
2016
2017 /*
2018  * EvalPlanQualInit -- initialize during creation of a plan state node
2019  * that might need to invoke EPQ processing.
2020  *
2021  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2022  * with EvalPlanQualSetPlan.
2023  */
2024 void
2025 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2026                                  Plan *subplan, List *auxrowmarks, int epqParam)
2027 {
2028         /* Mark the EPQ state inactive */
2029         epqstate->estate = NULL;
2030         epqstate->planstate = NULL;
2031         epqstate->origslot = NULL;
2032         /* ... and remember data that EvalPlanQualBegin will need */
2033         epqstate->plan = subplan;
2034         epqstate->arowMarks = auxrowmarks;
2035         epqstate->epqParam = epqParam;
2036 }
2037
2038 /*
2039  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2040  *
2041  * We need this so that ModifyTable can deal with multiple subplans.
2042  */
2043 void
2044 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2045 {
2046         /* If we have a live EPQ query, shut it down */
2047         EvalPlanQualEnd(epqstate);
2048         /* And set/change the plan pointer */
2049         epqstate->plan = subplan;
2050         /* The rowmarks depend on the plan, too */
2051         epqstate->arowMarks = auxrowmarks;
2052 }
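/*
 * An illustrative sketch of the EPQState lifecycle for a node that switches
 * between several subplans (the situation the comment above refers to): one
 * EvalPlanQualInit at node startup, EvalPlanQualSetPlan whenever the active
 * subplan changes, and EvalPlanQualEnd at node shutdown.  The function and
 * parameter names are hypothetical; epqParam would come from the node's Plan.
 */
#ifdef NOT_USED
static void
example_epq_lifecycle(EPQState *epqstate, EState *estate,
					  Plan *firstSubplan, List *firstAuxRowMarks,
					  Plan *nextSubplan, List *nextAuxRowMarks,
					  int epqParam)
{
	/* node initialization: set up EPQ state for the first subplan */
	EvalPlanQualInit(epqstate, estate, firstSubplan, firstAuxRowMarks,
					 epqParam);

	/* ... run the first subplan, calling EvalPlanQual() as needed ... */

	/* switching subplans: point the EPQ state at the new one */
	EvalPlanQualSetPlan(epqstate, nextSubplan, nextAuxRowMarks);

	/* ... run the next subplan ... */

	/* node shutdown: release whatever EPQ resources are still held */
	EvalPlanQualEnd(epqstate);
}
#endif   /* NOT_USED */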
2053
2054 /*
2055  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2056  *
2057  * NB: passed tuple must be palloc'd; it may get freed later
2058  */
2059 void
2060 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2061 {
2062         EState     *estate = epqstate->estate;
2063
2064         Assert(rti > 0);
2065
2066         /*
2067          * free old test tuple, if any, and store new tuple where relation's scan
2068          * node will see it
2069          */
2070         if (estate->es_epqTuple[rti - 1] != NULL)
2071                 heap_freetuple(estate->es_epqTuple[rti - 1]);
2072         estate->es_epqTuple[rti - 1] = tuple;
2073         estate->es_epqTupleSet[rti - 1] = true;
2074 }
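/*
 * An illustrative sketch of how a scan node sees the test tuple installed by
 * EvalPlanQualSetTuple() while an EPQ recheck is running: instead of scanning
 * the relation, it returns the stored tuple exactly once, using
 * es_epqScanDone to remember that it has already done so.  The function name
 * is hypothetical; scanrelid and slot stand for the scan's rangetable index
 * and its scan slot.
 */
#ifdef NOT_USED
static TupleTableSlot *
example_epq_scan_fetch(EState *estate, Index scanrelid, TupleTableSlot *slot)
{
	Assert(estate->es_epqTupleSet[scanrelid - 1]);

	/* Return an empty slot if we already returned the test tuple */
	if (estate->es_epqScanDone[scanrelid - 1])
		return ExecClearTuple(slot);

	/* Else mark it returned and hand back the stored test tuple */
	estate->es_epqScanDone[scanrelid - 1] = true;

	if (estate->es_epqTuple[scanrelid - 1] == NULL)
		return ExecClearTuple(slot);	/* no tuple was stored for this rel */

	ExecStoreTuple(estate->es_epqTuple[scanrelid - 1], slot,
				   InvalidBuffer, false);
	return slot;
}
#endif   /* NOT_USED */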
2075
2076 /*
2077  * Fetch back the current test tuple (if any) for the specified RTI
2078  */
2079 HeapTuple
2080 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2081 {
2082         EState     *estate = epqstate->estate;
2083
2084         Assert(rti > 0);
2085
2086         return estate->es_epqTuple[rti - 1];
2087 }
2088
2089 /*
2090  * Fetch the current row values for any non-locked relations that need
2091  * to be scanned by an EvalPlanQual operation.  origslot must have been set
2092  * to contain the current result row (top-level row) that we need to recheck.
2093  */
2094 void
2095 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2096 {
2097         ListCell   *l;
2098
2099         Assert(epqstate->origslot != NULL);
2100
2101         foreach(l, epqstate->arowMarks)
2102         {
2103                 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2104                 ExecRowMark *erm = aerm->rowmark;
2105                 Datum           datum;
2106                 bool            isNull;
2107                 HeapTupleData tuple;
2108
2109                 if (RowMarkRequiresRowShareLock(erm->markType))
2110                         elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2111
2112                 /* clear any leftover test tuple for this rel */
2113                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2114
2115                 if (erm->relation)
2116                 {
2117                         Buffer          buffer;
2118
2119                         Assert(erm->markType == ROW_MARK_REFERENCE);
2120
2121                         /* if child rel, must check whether it produced this row */
2122                         if (erm->rti != erm->prti)
2123                         {
2124                                 Oid                     tableoid;
2125
2126                                 datum = ExecGetJunkAttribute(epqstate->origslot,
2127                                                                                          aerm->toidAttNo,
2128                                                                                          &isNull);
2129                                 /* non-locked rels could be on the inside of outer joins */
2130                                 if (isNull)
2131                                         continue;
2132                                 tableoid = DatumGetObjectId(datum);
2133
2134                                 if (tableoid != RelationGetRelid(erm->relation))
2135                                 {
2136                                         /* this child is inactive right now */
2137                                         continue;
2138                                 }
2139                         }
2140
2141                         /* fetch the tuple's ctid */
2142                         datum = ExecGetJunkAttribute(epqstate->origslot,
2143                                                                                  aerm->ctidAttNo,
2144                                                                                  &isNull);
2145                         /* non-locked rels could be on the inside of outer joins */
2146                         if (isNull)
2147                                 continue;
2148                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2149
2150                         /* okay, fetch the tuple */
2151                         if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2152                                                         false, NULL))
2153                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2154
2155                         /* successful, copy and store tuple */
2156                         EvalPlanQualSetTuple(epqstate, erm->rti,
2157                                                                  heap_copytuple(&tuple));
2158                         ReleaseBuffer(buffer);
2159                 }
2160                 else
2161                 {
2162                         HeapTupleHeader td;
2163
2164                         Assert(erm->markType == ROW_MARK_COPY);
2165
2166                         /* fetch the whole-row Var for the relation */
2167                         datum = ExecGetJunkAttribute(epqstate->origslot,
2168                                                                                  aerm->wholeAttNo,
2169                                                                                  &isNull);
2170                         /* non-locked rels could be on the inside of outer joins */
2171                         if (isNull)
2172                                 continue;
2173                         td = DatumGetHeapTupleHeader(datum);
2174
2175                         /* build a temporary HeapTuple control structure */
2176                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2177                         ItemPointerSetInvalid(&(tuple.t_self));
2178                         tuple.t_tableOid = InvalidOid;
2179                         tuple.t_data = td;
2180
2181                         /* copy and store tuple */
2182                         EvalPlanQualSetTuple(epqstate, erm->rti,
2183                                                                  heap_copytuple(&tuple));
2184                 }
2185         }
2186 }
2187
2188 /*
2189  * Fetch the next row (if any) from EvalPlanQual testing
2190  *
2191  * (In practice, there should never be more than one row...)
2192  */
2193 TupleTableSlot *
2194 EvalPlanQualNext(EPQState *epqstate)
2195 {
2196         MemoryContext oldcontext;
2197         TupleTableSlot *slot;
2198
2199         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2200         slot = ExecProcNode(epqstate->planstate);
2201         MemoryContextSwitchTo(oldcontext);
2202
2203         return slot;
2204 }
2205
2206 /*
2207  * Initialize or reset an EvalPlanQual state tree
2208  */
2209 void
2210 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2211 {
2212         EState     *estate = epqstate->estate;
2213
2214         if (estate == NULL)
2215         {
2216                 /* First time through, so create a child EState */
2217                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2218         }
2219         else
2220         {
2221                 /*
2222                  * We already have a suitable child EPQ tree, so just reset it.
2223                  */
2224                 int                     rtsize = list_length(parentestate->es_range_table);
2225                 PlanState  *planstate = epqstate->planstate;
2226
2227                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2228
2229                 /* Recopy current values of parent parameters */
2230                 if (parentestate->es_plannedstmt->nParamExec > 0)
2231                 {
2232                         int                     i = parentestate->es_plannedstmt->nParamExec;
2233
2234                         while (--i >= 0)
2235                         {
2236                                 /* copy value if any, but not execPlan link */
2237                                 estate->es_param_exec_vals[i].value =
2238                                         parentestate->es_param_exec_vals[i].value;
2239                                 estate->es_param_exec_vals[i].isnull =
2240                                         parentestate->es_param_exec_vals[i].isnull;
2241                         }
2242                 }
2243
2244                 /*
2245                  * Mark child plan tree as needing rescan at all scan nodes.  The
2246                  * first ExecProcNode will take care of actually doing the rescan.
2247                  */
2248                 planstate->chgParam = bms_add_member(planstate->chgParam,
2249                                                                                          epqstate->epqParam);
2250         }
2251 }
2252
2253 /*
2254  * Start execution of an EvalPlanQual plan tree.
2255  *
2256  * This is a cut-down version of ExecutorStart(): we copy some state from
2257  * the top-level estate rather than initializing it fresh.
2258  */
2259 static void
2260 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2261 {
2262         EState     *estate;
2263         int                     rtsize;
2264         MemoryContext oldcontext;
2265         ListCell   *l;
2266
2267         rtsize = list_length(parentestate->es_range_table);
2268
2269         epqstate->estate = estate = CreateExecutorState();
2270
2271         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2272
2273         /*
2274          * Child EPQ EStates share the parent's copy of unchanging state such as
2275          * the snapshot, rangetable, result-rel info, and external Param info.
2276          * They need their own copies of local state, including a tuple table,
2277          * es_param_exec_vals, etc.
2278          */
2279         estate->es_direction = ForwardScanDirection;
2280         estate->es_snapshot = parentestate->es_snapshot;
2281         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2282         estate->es_range_table = parentestate->es_range_table;
2283         estate->es_plannedstmt = parentestate->es_plannedstmt;
2284         estate->es_junkFilter = parentestate->es_junkFilter;
2285         estate->es_output_cid = parentestate->es_output_cid;
2286         estate->es_result_relations = parentestate->es_result_relations;
2287         estate->es_num_result_relations = parentestate->es_num_result_relations;
2288         estate->es_result_relation_info = parentestate->es_result_relation_info;
2289         /* es_trig_target_relations must NOT be copied */
2290         estate->es_rowMarks = parentestate->es_rowMarks;
2291         estate->es_top_eflags = parentestate->es_top_eflags;
2292         estate->es_instrument = parentestate->es_instrument;
2293         estate->es_select_into = parentestate->es_select_into;
2294         estate->es_into_oids = parentestate->es_into_oids;
2295         /* es_auxmodifytables must NOT be copied */
2296
2297         /*
2298          * The external param list is simply shared from parent.  The internal
2299          * param workspace has to be local state, but we copy the initial values
2300          * from the parent, so as to have access to any param values that were
2301          * already set from other parts of the parent's plan tree.
2302          */
2303         estate->es_param_list_info = parentestate->es_param_list_info;
2304         if (parentestate->es_plannedstmt->nParamExec > 0)
2305         {
2306                 int                     i = parentestate->es_plannedstmt->nParamExec;
2307
2308                 estate->es_param_exec_vals = (ParamExecData *)
2309                         palloc0(i * sizeof(ParamExecData));
2310                 while (--i >= 0)
2311                 {
2312                         /* copy value if any, but not execPlan link */
2313                         estate->es_param_exec_vals[i].value =
2314                                 parentestate->es_param_exec_vals[i].value;
2315                         estate->es_param_exec_vals[i].isnull =
2316                                 parentestate->es_param_exec_vals[i].isnull;
2317                 }
2318         }
2319
2320         /*
2321          * Each EState must have its own es_epqScanDone state, but if we have
2322          * nested EPQ checks they should share es_epqTuple arrays.      This allows
2323          * sub-rechecks to inherit the values being examined by an outer recheck.
2324          */
2325         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2326         if (parentestate->es_epqTuple != NULL)
2327         {
2328                 estate->es_epqTuple = parentestate->es_epqTuple;
2329                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
2330         }
2331         else
2332         {
2333                 estate->es_epqTuple = (HeapTuple *)
2334                         palloc0(rtsize * sizeof(HeapTuple));
2335                 estate->es_epqTupleSet = (bool *)
2336                         palloc0(rtsize * sizeof(bool));
2337         }
2338
2339         /*
2340          * Each estate also has its own tuple table.
2341          */
2342         estate->es_tupleTable = NIL;
2343
2344         /*
2345          * Initialize private state information for each SubPlan.  We must do this
2346          * before running ExecInitNode on the main query tree, since
2347          * ExecInitSubPlan expects to be able to find these entries. Some of the
2348          * SubPlans might not be used in the part of the plan tree we intend to
2349          * run, but since it's not easy to tell which, we just initialize them
2350          * all.  (However, if the subplan is headed by a ModifyTable node, then it
2351          * must be a data-modifying CTE, which we will certainly not need to
2352          * re-run, so we can skip initializing it.      This is just an efficiency
2353          * hack; it won't skip data-modifying CTEs for which the ModifyTable node
2354          * is not at the top.)
2355          */
2356         Assert(estate->es_subplanstates == NIL);
2357         foreach(l, parentestate->es_plannedstmt->subplans)
2358         {
2359                 Plan       *subplan = (Plan *) lfirst(l);
2360                 PlanState  *subplanstate;
2361
2362                 /* Don't initialize ModifyTable subplans, per comment above */
2363                 if (IsA(subplan, ModifyTable))
2364                         subplanstate = NULL;
2365                 else
2366                         subplanstate = ExecInitNode(subplan, estate, 0);
2367
2368                 estate->es_subplanstates = lappend(estate->es_subplanstates,
2369                                                                                    subplanstate);
2370         }
2371
2372         /*
2373          * Initialize the private state information for all the nodes in the part
2374          * of the plan tree we need to run.  This opens files, allocates storage
2375          * and leaves us ready to start processing tuples.
2376          */
2377         epqstate->planstate = ExecInitNode(planTree, estate, 0);
2378
2379         MemoryContextSwitchTo(oldcontext);
2380 }
2381
2382 /*
2383  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2384  * or if we are done with the current EPQ child.
2385  *
2386  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2387  * of the normal cleanup, but *not* close result relations (which we are
2388  * just sharing from the outer query).  We do, however, have to close any
2389  * trigger target relations that got opened, since those are not shared.
2390  * (There probably shouldn't be any of the latter, but just in case...)
2391  */
2392 void
2393 EvalPlanQualEnd(EPQState *epqstate)
2394 {
2395         EState     *estate = epqstate->estate;
2396         MemoryContext oldcontext;
2397         ListCell   *l;
2398
2399         if (estate == NULL)
2400                 return;                                 /* idle, so nothing to do */
2401
2402         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2403
2404         ExecEndNode(epqstate->planstate);
2405
2406         foreach(l, estate->es_subplanstates)
2407         {
2408                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2409
2410                 ExecEndNode(subplanstate);
2411         }
2412
2413         /* throw away the per-estate tuple table */
2414         ExecResetTupleTable(estate->es_tupleTable, false);
2415
2416         /* close any trigger target relations attached to this EState */
2417         foreach(l, estate->es_trig_target_relations)
2418         {
2419                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2420
2421                 /* Close indices and then the relation itself */
2422                 ExecCloseIndices(resultRelInfo);
2423                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2424         }
2425
2426         MemoryContextSwitchTo(oldcontext);
2427
2428         FreeExecutorState(estate);
2429
2430         /* Mark EPQState idle */
2431         epqstate->estate = NULL;
2432         epqstate->planstate = NULL;
2433         epqstate->origslot = NULL;
2434 }
2435
2436
2437 /*
2438  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2439  *
2440  * We implement SELECT INTO by diverting SELECT's normal output with
2441  * a specialized DestReceiver type.
2442  */
2443
2444 typedef struct
2445 {
2446         DestReceiver pub;                       /* publicly-known function pointers */
2447         EState     *estate;                     /* EState we are working with */
2448         Relation        rel;                    /* Relation to write to */
2449         int                     hi_options;             /* heap_insert performance options */
2450         BulkInsertState bistate;        /* bulk insert state */
2451 } DR_intorel;
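/*
 * An illustrative sketch of how a specialized DestReceiver of this kind is
 * typically wired up: allocate the private struct, point the public callback
 * fields at local routines, and hand it back as a plain DestReceiver.  The
 * example_* names are placeholders, and the rStartup/rShutdown/rDestroy
 * callback signatures are assumed to be the usual ones from tcop/dest.h.
 */
#ifdef NOT_USED
static void
example_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* e.g. remember the target relation and choose heap_insert options */
}

static void
example_receive(TupleTableSlot *slot, DestReceiver *self)
{
	/* e.g. materialize the slot and insert the tuple into the rel field */
}

static void
example_shutdown(DestReceiver *self)
{
	/* e.g. flush bulk-insert state and close the target relation */
}

static void
example_destroy(DestReceiver *self)
{
	pfree(self);
}

static DestReceiver *
example_create_receiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

	self->pub.receiveSlot = example_receive;
	self->pub.rStartup = example_startup;
	self->pub.rShutdown = example_shutdown;
	self->pub.rDestroy = example_destroy;
	/* the private fields (estate, rel, hi_options, bistate) are set later */

	return (DestReceiver *) self;
}
#endif   /* NOT_USED */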
2452
2453 /*
2454  * OpenIntoRel --- actually create the SELECT INTO target relation
2455  *
2456  * This also replaces QueryDesc->dest with the special DestReceiver for
2457  * SELECT INTO.  We assume that the correct result tuple type has already
2458  * been placed in queryDesc->tupDesc.
2459  */
2460 static void
2461 OpenIntoRel(QueryDesc *queryDesc)
2462 {
2463         IntoClause *into = queryDesc->plannedstmt->intoClause;
2464         EState     *estate = queryDesc->estate;
2465         TupleDesc       intoTupDesc = queryDesc->tupDesc;
2466         Relation        intoRelationDesc;
2467         char       *intoName;
2468         Oid                     namespaceId;
2469         Oid                     tablespaceId;
2470         Datum           reloptions;
2471         Oid                     intoRelationId;
2472         DR_intorel *myState;
2473         RangeTblEntry  *rte;
2474         AttrNumber              attnum;
2475         static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2476
2477         Assert(into);
2478
2479         /*
2480          * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
2481          * should try to use that function instead.
2482          */
2483
2484         /*
2485          * Check consistency of arguments
2486          */
2487         if (into->onCommit != ONCOMMIT_NOOP
2488                 && into->rel->relpersistence != RELPERSISTENCE_TEMP)
2489                 ereport(ERROR,
2490                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2491                                  errmsg("ON COMMIT can only be used on temporary tables")));
2492
2493         {
2494                 AclResult aclresult;
2495                 int i;
2496
2497                 for (i = 0; i < intoTupDesc->natts; i++)
2498                 {
2499                         Oid atttypid = intoTupDesc->attrs[i]->atttypid;
2500
2501                         aclresult = pg_type_aclcheck(atttypid, GetUserId(), ACL_USAGE);
2502                         if (aclresult != ACLCHECK_OK)
2503                                 aclcheck_error(aclresult, ACL_KIND_TYPE,
2504                                                            format_type_be(atttypid));
2505                 }
2506         }
2507
2508         /*
2509          * If a column name list was specified in CREATE TABLE AS, override the
2510          * column names derived from the query.  (Too few column names are OK, too
2511          * many are not.)  It would probably be all right to scribble directly on
2512          * the query's result tupdesc, but let's be safe and make a copy.
2513          */
2514         if (into->colNames)
2515         {
2516                 ListCell   *lc;
2517
2518                 intoTupDesc = CreateTupleDescCopy(intoTupDesc);
2519                 attnum = 1;
2520                 foreach(lc, into->colNames)
2521                 {
2522                         char       *colname = strVal(lfirst(lc));
2523
2524                         if (attnum > intoTupDesc->natts)
2525                                 ereport(ERROR,
2526                                                 (errcode(ERRCODE_SYNTAX_ERROR),
2527                                                  errmsg("CREATE TABLE AS specifies too many column names")));
2528                         namestrcpy(&(intoTupDesc->attrs[attnum - 1]->attname), colname);
2529                         attnum++;
2530                 }
2531         }
2532
2533         /*
2534          * Find namespace to create in, check its permissions
2535          */
2536         intoName = into->rel->relname;
2537         namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel);
2538         RangeVarAdjustRelationPersistence(into->rel, namespaceId);
2539
2540         /*
2541          * Security check: disallow creating temp tables from security-restricted
2542          * code.  This is needed because calling code might not expect untrusted
2543          * tables to appear in pg_temp at the front of its search path.
2544          */
2545         if (into->rel->relpersistence == RELPERSISTENCE_TEMP
2546                 && InSecurityRestrictedOperation())
2547                 ereport(ERROR,
2548                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2549                                  errmsg("cannot create temporary table within security-restricted operation")));
2550
2551         /*
2552          * Select tablespace to use.  If not specified, use default tablespace
2553          * (which may in turn default to database's default).
2554          */
2555         if (into->tableSpaceName)
2556         {
2557                 tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
2558         }
2559         else
2560         {
2561                 tablespaceId = GetDefaultTablespace(into->rel->relpersistence);
2562                 /* note InvalidOid is OK in this case */
2563         }
2564
2565         /* Check permissions except when using the database's default space */
2566         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2567         {
2568                 AclResult       aclresult;
2569
2570                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2571                                                                                    ACL_CREATE);
2572
2573                 if (aclresult != ACLCHECK_OK)
2574                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2575                                                    get_tablespace_name(tablespaceId));
2576         }
2577
2578         /* Parse and validate any reloptions */
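             /*
              * Storage parameters given as, say, WITH (fillfactor = 70) arrive in
              * into->options; passing true for the validate argument below makes
              * heap_reloptions reject anything that is not a valid heap option.
              */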
2579         reloptions = transformRelOptions((Datum) 0,
2580                                                                          into->options,
2581                                                                          NULL,
2582                                                                          validnsps,
2583                                                                          true,
2584                                                                          false);
2585         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2586
2587         /* Now we can actually create the new relation */
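             /*
              * The three InvalidOid arguments request a system-assigned relation
              * OID and row-type OID and mark the table as not being "of" a
              * composite type; see heap_create_with_catalog() for the meaning of
              * the remaining flags.
              */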
2588         intoRelationId = heap_create_with_catalog(intoName,
2589                                                                                           namespaceId,
2590                                                                                           tablespaceId,
2591                                                                                           InvalidOid,
2592                                                                                           InvalidOid,
2593                                                                                           InvalidOid,
2594                                                                                           GetUserId(),
2595                                                                                           intoTupDesc,
2596                                                                                           NIL,
2597                                                                                           RELKIND_RELATION,
2598                                                                                           into->rel->relpersistence,
2599                                                                                           false,
2600                                                                                           false,
2601                                                                                           true,
2602                                                                                           0,
2603                                                                                           into->onCommit,
2604                                                                                           reloptions,
2605                                                                                           true,
2606                                                                                           allowSystemTableMods);
2607         Assert(intoRelationId != InvalidOid);
2608
2609         /*
2610          * Advance command counter so that the newly-created relation's catalog
2611          * tuples will be visible to heap_open.
2612          */
2613         CommandCounterIncrement();
2614
2615         /*
2616          * If necessary, create a TOAST table for the INTO relation. Note that
2617          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2618          * the TOAST table will be visible for insertion.
2619          */
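             /*
              * Options prefixed with "toast." (toast.autovacuum_enabled, for
              * example) are extracted by the "toast" namespace argument below and
              * applied to the TOAST table rather than to the heap itself.
              */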
2620         reloptions = transformRelOptions((Datum) 0,
2621                                                                          into->options,
2622                                                                          "toast",
2623                                                                          validnsps,
2624                                                                          true,
2625                                                                          false);
2626
2627         (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2628
2629         AlterTableCreateToastTable(intoRelationId, reloptions);
2630
2631         /*
2632          * And open the constructed table for writing.
2633          */
2634         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2635
2636         /*
2637          * Check INSERT permission on the constructed table.
2638          */
2639         rte = makeNode(RangeTblEntry);
2640         rte->rtekind = RTE_RELATION;
2641         rte->relid = intoRelationId;
2642         rte->relkind = RELKIND_RELATION;
2643         rte->requiredPerms = ACL_INSERT;
2644
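             /*
              * Record every column as modified; attribute numbers are offset by
              * FirstLowInvalidHeapAttributeNumber, the usual convention that lets
              * system columns be represented in the bitmapset too.
              */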
2645         for (attnum = 1; attnum <= intoTupDesc->natts; attnum++)
2646                 rte->modifiedCols = bms_add_member(rte->modifiedCols,
2647                                 attnum - FirstLowInvalidHeapAttributeNumber);
2648
2649         ExecCheckRTPerms(list_make1(rte), true);
2650
2651         /*
2652          * Now replace the query's DestReceiver with one for SELECT INTO
2653          */
2654         queryDesc->dest = CreateDestReceiver(DestIntoRel);
2655         myState = (DR_intorel *) queryDesc->dest;
2656         Assert(myState->pub.mydest == DestIntoRel);
2657         myState->estate = estate;
2658         myState->rel = intoRelationDesc;
2659
2660         /*
2661          * We can skip WAL-logging the insertions, unless PITR or streaming
2662          * replication is in use. We can skip the FSM in any case.
2663          */
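             /*
              * Skipping WAL is safe here because the relation is brand new in
              * this transaction, so no other backend can see it, and CloseIntoRel
              * will heap_sync it before commit if WAL was indeed skipped.
              */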
2664         myState->hi_options = HEAP_INSERT_SKIP_FSM |
2665                 (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
2666         myState->bistate = GetBulkInsertState();
2667
2668         /* Not using WAL requires smgr_targblock be initially invalid */
2669         Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
2670 }
2671
2672 /*
2673  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2674  */
2675 static void
2676 CloseIntoRel(QueryDesc *queryDesc)
2677 {
2678         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2679
2680         /* OpenIntoRel might never have gotten called */
2681         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2682         {
2683                 FreeBulkInsertState(myState->bistate);
2684
2685                 /* If we skipped using WAL, must heap_sync before commit */
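                     /*
                      * (Without WAL, crash recovery would have nothing to replay for
                      * these pages, so they must be forced to disk before commit.)
                      */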
2686                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2687                         heap_sync(myState->rel);
2688
2689                 /* close rel, but keep lock until commit */
2690                 heap_close(myState->rel, NoLock);
2691
2692                 myState->rel = NULL;
2693         }
2694 }
2695
2696 /*
2697  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2698  */
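     /*
      * Callers normally obtain this receiver via CreateDestReceiver(DestIntoRel),
      * as OpenIntoRel does above; the private fields remain unset until
      * OpenIntoRel fills them in.
      */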
2699 DestReceiver *
2700 CreateIntoRelDestReceiver(void)
2701 {
2702         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2703
2704         self->pub.receiveSlot = intorel_receive;
2705         self->pub.rStartup = intorel_startup;
2706         self->pub.rShutdown = intorel_shutdown;
2707         self->pub.rDestroy = intorel_destroy;
2708         self->pub.mydest = DestIntoRel;
2709
2710         /* private fields will be set by OpenIntoRel */
2711
2712         return (DestReceiver *) self;
2713 }
2714
2715 /*
2716  * intorel_startup --- executor startup
2717  */
2718 static void
2719 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2720 {
2721         /* no-op */
2722 }
2723
2724 /*
2725  * intorel_receive --- receive one tuple
2726  */
2727 static void
2728 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2729 {
2730         DR_intorel *myState = (DR_intorel *) self;
2731         HeapTuple       tuple;
2732
2733         /*
2734          * get the heap tuple out of the tuple table slot, making sure we have a
2735          * writable copy
2736          */
2737         tuple = ExecMaterializeSlot(slot);
2738
2739         /*
2740          * force assignment of new OID (see comments in ExecInsert)
2741          */
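             /*
              * (A tuple coming from a table WITH OIDS could carry a stale OID;
              * clearing it here makes heap_insert assign a fresh one.)
              */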
2742         if (myState->rel->rd_rel->relhasoids)
2743                 HeapTupleSetOid(tuple, InvalidOid);
2744
2745         heap_insert(myState->rel,
2746                                 tuple,
2747                                 myState->estate->es_output_cid,
2748                                 myState->hi_options,
2749                                 myState->bistate);
2750
2751         /* We know this is a newly created relation, so there are no indexes */
2752 }
2753
2754 /*
2755  * intorel_shutdown --- executor end
2756  */
2757 static void
2758 intorel_shutdown(DestReceiver *self)
2759 {
2760         /* no-op */
2761 }
2762
2763 /*
2764  * intorel_destroy --- release DestReceiver object
2765  */
2766 static void
2767 intorel_destroy(DestReceiver *self)
2768 {
2769         pfree(self);
2770 }