1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.334 2009/10/26 02:26:29 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/reloptions.h"
36 #include "access/sysattr.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "miscadmin.h"
47 #include "optimizer/clauses.h"
48 #include "parser/parse_clause.h"
49 #include "parser/parsetree.h"
50 #include "storage/bufmgr.h"
51 #include "storage/lmgr.h"
52 #include "utils/acl.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/snapmgr.h"
56 #include "utils/tqual.h"
57
58
59 /* Hooks for plugins to get control in ExecutorStart/Run/End() */
60 ExecutorStart_hook_type ExecutorStart_hook = NULL;
61 ExecutorRun_hook_type ExecutorRun_hook = NULL;
62 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
63
64 /* decls for local routines only used within this module */
65 static void InitPlan(QueryDesc *queryDesc, int eflags);
66 static void ExecEndPlan(PlanState *planstate, EState *estate);
67 static void ExecutePlan(EState *estate, PlanState *planstate,
68                         CmdType operation,
69                         bool sendTuples,
70                         long numberTuples,
71                         ScanDirection direction,
72                         DestReceiver *dest);
73 static void ExecCheckRTPerms(List *rangeTable);
74 static void ExecCheckRTEPerms(RangeTblEntry *rte);
75 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
76 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
77                                                           Plan *planTree);
78 static void OpenIntoRel(QueryDesc *queryDesc);
79 static void CloseIntoRel(QueryDesc *queryDesc);
80 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
81 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
82 static void intorel_shutdown(DestReceiver *self);
83 static void intorel_destroy(DestReceiver *self);
84
85 /* end of local decls */
86
87
88 /* ----------------------------------------------------------------
89  *              ExecutorStart
90  *
91  *              This routine must be called at the beginning of any execution of any
92  *              query plan
93  *
94  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
95  * clear why the two functions are separate, but...).  The tupDesc
96  * field of the QueryDesc is filled in to describe the tuples that will be
97  * returned, and the internal fields (estate and planstate) are set up.
98  *
99  * eflags contains flag bits as described in executor.h.
100  *
101  * NB: the CurrentMemoryContext when this is called will become the parent
102  * of the per-query context used for this Executor invocation.
103  *
104  * We provide a function hook variable that lets loadable plugins
105  * get control when ExecutorStart is called.  Such a plugin would
106  * normally call standard_ExecutorStart().
107  *
108  * ----------------------------------------------------------------
109  */
110 void
111 ExecutorStart(QueryDesc *queryDesc, int eflags)
112 {
113         if (ExecutorStart_hook)
114                 (*ExecutorStart_hook) (queryDesc, eflags);
115         else
116                 standard_ExecutorStart(queryDesc, eflags);
117 }
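
/*
 * Illustrative sketch (not part of this file): a loadable plugin, such as
 * contrib/auto_explain, typically chains itself into these hooks from its
 * _PG_init() function.  The names prev_ExecutorStart and my_ExecutorStart
 * are hypothetical:
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			... plugin-specific setup here ...
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 */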
118
119 void
120 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
121 {
122         EState     *estate;
123         MemoryContext oldcontext;
124
125         /* sanity checks: queryDesc must not be started already */
126         Assert(queryDesc != NULL);
127         Assert(queryDesc->estate == NULL);
128
129         /*
130          * If the transaction is read-only, we need to check if any writes are
131          * planned to non-temporary tables.  EXPLAIN is considered read-only.
132          */
133         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
134                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
135
136         /*
137          * Build EState, switch into per-query memory context for startup.
138          */
139         estate = CreateExecutorState();
140         queryDesc->estate = estate;
141
142         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
143
144         /*
145          * Fill in external parameters, if any, from queryDesc; and allocate
146          * workspace for internal parameters
147          */
148         estate->es_param_list_info = queryDesc->params;
149
150         if (queryDesc->plannedstmt->nParamExec > 0)
151                 estate->es_param_exec_vals = (ParamExecData *)
152                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
153
154         /*
155          * If non-read-only query, set the command ID to mark output tuples with
156          */
157         switch (queryDesc->operation)
158         {
159                 case CMD_SELECT:
160                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
161                         if (queryDesc->plannedstmt->intoClause != NULL ||
162                                 queryDesc->plannedstmt->rowMarks != NIL)
163                                 estate->es_output_cid = GetCurrentCommandId(true);
164                         break;
165
166                 case CMD_INSERT:
167                 case CMD_DELETE:
168                 case CMD_UPDATE:
169                         estate->es_output_cid = GetCurrentCommandId(true);
170                         break;
171
172                 default:
173                         elog(ERROR, "unrecognized operation code: %d",
174                                  (int) queryDesc->operation);
175                         break;
176         }
177
178         /*
179          * Copy other important information into the EState
180          */
181         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
182         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
183         estate->es_instrument = queryDesc->doInstrument;
184
185         /*
186          * Initialize the plan state tree
187          */
188         InitPlan(queryDesc, eflags);
189
190         MemoryContextSwitchTo(oldcontext);
191 }
192
193 /* ----------------------------------------------------------------
194  *              ExecutorRun
195  *
196  *              This is the main routine of the executor module. It accepts
197  *              the query descriptor from the traffic cop and executes the
198  *              query plan.
199  *
200  *              ExecutorStart must have been called already.
201  *
202  *              If direction is NoMovementScanDirection then nothing is done
203  *              except to start up/shut down the destination.  Otherwise,
204  *              we retrieve up to 'count' tuples in the specified direction.
205  *
206  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
207  *              completion.
208  *
209  *              There is no return value, but output tuples (if any) are sent to
210  *              the destination receiver specified in the QueryDesc; and the number
211  *              of tuples processed at the top level can be found in
212  *              estate->es_processed.
213  *
214  *              We provide a function hook variable that lets loadable plugins
215  *              get control when ExecutorRun is called.  Such a plugin would
216  *              normally call standard_ExecutorRun().
217  *
218  * ----------------------------------------------------------------
219  */
220 void
221 ExecutorRun(QueryDesc *queryDesc,
222                         ScanDirection direction, long count)
223 {
224         if (ExecutorRun_hook)
225                 (*ExecutorRun_hook) (queryDesc, direction, count);
226         else
227                 standard_ExecutorRun(queryDesc, direction, count);
228 }
229
230 void
231 standard_ExecutorRun(QueryDesc *queryDesc,
232                                          ScanDirection direction, long count)
233 {
234         EState     *estate;
235         CmdType         operation;
236         DestReceiver *dest;
237         bool            sendTuples;
238         MemoryContext oldcontext;
239
240         /* sanity checks */
241         Assert(queryDesc != NULL);
242
243         estate = queryDesc->estate;
244
245         Assert(estate != NULL);
246
247         /*
248          * Switch into per-query memory context
249          */
250         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
251
252         /* Allow instrumentation of ExecutorRun overall runtime */
253         if (queryDesc->totaltime)
254                 InstrStartNode(queryDesc->totaltime);
255
256         /*
257          * Extract information from the query descriptor.
258          */
259         operation = queryDesc->operation;
260         dest = queryDesc->dest;
261
262         /*
263          * startup tuple receiver, if we will be emitting tuples
264          */
265         estate->es_processed = 0;
266         estate->es_lastoid = InvalidOid;
267
268         sendTuples = (operation == CMD_SELECT ||
269                                   queryDesc->plannedstmt->hasReturning);
270
271         if (sendTuples)
272                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
273
274         /*
275          * run plan
276          */
277         if (!ScanDirectionIsNoMovement(direction))
278                 ExecutePlan(estate,
279                                         queryDesc->planstate,
280                                         operation,
281                                         sendTuples,
282                                         count,
283                                         direction,
284                                         dest);
285
286         /*
287          * shutdown tuple receiver, if we started it
288          */
289         if (sendTuples)
290                 (*dest->rShutdown) (dest);
291
292         if (queryDesc->totaltime)
293                 InstrStopNode(queryDesc->totaltime, estate->es_processed);
294
295         MemoryContextSwitchTo(oldcontext);
296 }
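
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * canonical call sequence, as performed for instance by the portal code
 * in pquery.c:
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *
 * Here count = 0L requests run-to-completion; a cursor-style caller would
 * instead pass a positive count and call ExecutorRun repeatedly, fetching
 * one batch of tuples per call.
 */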
297
298 /* ----------------------------------------------------------------
299  *              ExecutorEnd
300  *
301  *              This routine must be called at the end of execution of any
302  *              query plan
303  *
304  *              We provide a function hook variable that lets loadable plugins
305  *              get control when ExecutorEnd is called.  Such a plugin would
306  *              normally call standard_ExecutorEnd().
307  *
308  * ----------------------------------------------------------------
309  */
310 void
311 ExecutorEnd(QueryDesc *queryDesc)
312 {
313         if (ExecutorEnd_hook)
314                 (*ExecutorEnd_hook) (queryDesc);
315         else
316                 standard_ExecutorEnd(queryDesc);
317 }
318
319 void
320 standard_ExecutorEnd(QueryDesc *queryDesc)
321 {
322         EState     *estate;
323         MemoryContext oldcontext;
324
325         /* sanity checks */
326         Assert(queryDesc != NULL);
327
328         estate = queryDesc->estate;
329
330         Assert(estate != NULL);
331
332         /*
333          * Switch into per-query memory context to run ExecEndPlan
334          */
335         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
336
337         ExecEndPlan(queryDesc->planstate, estate);
338
339         /*
340          * Close the SELECT INTO relation if any
341          */
342         if (estate->es_select_into)
343                 CloseIntoRel(queryDesc);
344
345         /* do away with our snapshots */
346         UnregisterSnapshot(estate->es_snapshot);
347         UnregisterSnapshot(estate->es_crosscheck_snapshot);
348
349         /*
350          * Must switch out of context before destroying it
351          */
352         MemoryContextSwitchTo(oldcontext);
353
354         /*
355          * Release EState and per-query memory context.  This should release
356          * everything the executor has allocated.
357          */
358         FreeExecutorState(estate);
359
360         /* Reset queryDesc fields that no longer point to anything */
361         queryDesc->tupDesc = NULL;
362         queryDesc->estate = NULL;
363         queryDesc->planstate = NULL;
364         queryDesc->totaltime = NULL;
365 }
366
367 /* ----------------------------------------------------------------
368  *              ExecutorRewind
369  *
370  *              This routine may be called on an open queryDesc to rewind it
371  *              to the start.
372  * ----------------------------------------------------------------
373  */
374 void
375 ExecutorRewind(QueryDesc *queryDesc)
376 {
377         EState     *estate;
378         MemoryContext oldcontext;
379
380         /* sanity checks */
381         Assert(queryDesc != NULL);
382
383         estate = queryDesc->estate;
384
385         Assert(estate != NULL);
386
387         /* It's probably not sensible to rescan updating queries */
388         Assert(queryDesc->operation == CMD_SELECT);
389
390         /*
391          * Switch into per-query memory context
392          */
393         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
394
395         /*
396          * rescan plan
397          */
398         ExecReScan(queryDesc->planstate, NULL);
399
400         MemoryContextSwitchTo(oldcontext);
401 }
402
403
404 /*
405  * ExecCheckRTPerms
406  *              Check access permissions for all relations listed in a range table.
407  */
408 static void
409 ExecCheckRTPerms(List *rangeTable)
410 {
411         ListCell   *l;
412
413         foreach(l, rangeTable)
414         {
415                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
416         }
417 }
418
419 /*
420  * ExecCheckRTEPerms
421  *              Check access permissions for a single RTE.
422  */
423 static void
424 ExecCheckRTEPerms(RangeTblEntry *rte)
425 {
426         AclMode         requiredPerms;
427         AclMode         relPerms;
428         AclMode         remainingPerms;
429         Oid                     relOid;
430         Oid                     userid;
431         Bitmapset  *tmpset;
432         int                     col;
433
434         /*
435          * Only plain-relation RTEs need to be checked here.  Function RTEs are
436          * checked by init_fcache when the function is prepared for execution.
437          * Join, subquery, and special RTEs need no checks.
438          */
439         if (rte->rtekind != RTE_RELATION)
440                 return;
441
442         /*
443          * No work if requiredPerms is empty.
444          */
445         requiredPerms = rte->requiredPerms;
446         if (requiredPerms == 0)
447                 return;
448
449         relOid = rte->relid;
450
451         /*
452          * userid to check as: current user unless we have a setuid indication.
453          *
454          * Note: GetUserId() is presently fast enough that there's no harm in
455          * calling it separately for each RTE.  If that stops being true, we could
456          * call it once in ExecCheckRTPerms and pass the userid down from there.
457          * But for now, no need for the extra clutter.
458          */
459         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
460
461         /*
462          * We must have *all* the requiredPerms bits, but some of the bits can be
463          * satisfied from column-level rather than relation-level permissions.
464          * First, remove any bits that are satisfied by relation permissions.
465          */
466         relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
467         remainingPerms = requiredPerms & ~relPerms;
468         if (remainingPerms != 0)
469         {
470                 /*
471                  * If we lack any permissions that exist only as relation permissions,
472                  * we can fail straight away.
473                  */
474                 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
475                         aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
476                                                    get_rel_name(relOid));
477
478                 /*
479                  * Check to see if we have the needed privileges at column level.
480                  *
481                  * Note: failures just report a table-level error; it would be nicer
482                  * to report a column-level error if we have some but not all of the
483                  * column privileges.
484                  */
485                 if (remainingPerms & ACL_SELECT)
486                 {
487                         /*
488                          * When the query doesn't explicitly reference any columns (for
489                          * example, SELECT COUNT(*) FROM table), allow the query if we
490                          * have SELECT on any column of the rel, as per SQL spec.
491                          */
492                         if (bms_is_empty(rte->selectedCols))
493                         {
494                                 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
495                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
496                                         aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
497                                                                    get_rel_name(relOid));
498                         }
499
500                         tmpset = bms_copy(rte->selectedCols);
501                         while ((col = bms_first_member(tmpset)) >= 0)
502                         {
503                                 /* remove the column number offset */
504                                 col += FirstLowInvalidHeapAttributeNumber;
505                                 if (col == InvalidAttrNumber)
506                                 {
507                                         /* Whole-row reference, must have priv on all cols */
508                                         if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
509                                                                                                   ACLMASK_ALL) != ACLCHECK_OK)
510                                                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
511                                                                            get_rel_name(relOid));
512                                 }
513                                 else
514                                 {
515                                         if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
516                                                 != ACLCHECK_OK)
517                                                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
518                                                                            get_rel_name(relOid));
519                                 }
520                         }
521                         bms_free(tmpset);
522                 }
523
524                 /*
525                  * Basically the same for the mod columns, with either INSERT or
526                  * UPDATE privilege as specified by remainingPerms.
527                  */
528                 remainingPerms &= ~ACL_SELECT;
529                 if (remainingPerms != 0)
530                 {
531                         /*
532                          * When the query doesn't explicitly change any columns, allow the
533                          * query if we have permission on any column of the rel.  This is
534                          * to handle SELECT FOR UPDATE as well as possible corner cases in
535                          * INSERT and UPDATE.
536                          */
537                         if (bms_is_empty(rte->modifiedCols))
538                         {
539                                 if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
540                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
541                                         aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
542                                                                    get_rel_name(relOid));
543                         }
544
545                         tmpset = bms_copy(rte->modifiedCols);
546                         while ((col = bms_first_member(tmpset)) >= 0)
547                         {
548                                 /* remove the column number offset */
549                                 col += FirstLowInvalidHeapAttributeNumber;
550                                 if (col == InvalidAttrNumber)
551                                 {
552                                         /* whole-row reference can't happen here */
553                                         elog(ERROR, "whole-row update is not implemented");
554                                 }
555                                 else
556                                 {
557                                         if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
558                                                 != ACLCHECK_OK)
559                                                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
560                                                                            get_rel_name(relOid));
561                                 }
562                         }
563                         bms_free(tmpset);
564                 }
565         }
566 }
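
/*
 * Worked example of the attribute-number offset used above: parse analysis
 * stores (attnum - FirstLowInvalidHeapAttributeNumber) in selectedCols and
 * modifiedCols, so that system attributes (negative attnums) and whole-row
 * references (attnum 0) become nonnegative bitmapset members.  Decoding
 * simply reverses the shift:
 *
 *		col = bms_first_member(tmpset) + FirstLowInvalidHeapAttributeNumber;
 *		if (col == InvalidAttrNumber)
 *			... attnum 0: a whole-row reference ...
 */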
567
568 /*
569  * Check that the query does not imply any writes to non-temp tables.
570  */
571 static void
572 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
573 {
574         ListCell   *l;
575
576         /*
577          * CREATE TABLE AS or SELECT INTO?
578          *
579          * XXX should we allow this if the destination is temp?
580          */
581         if (plannedstmt->intoClause != NULL)
582                 goto fail;
583
584         /* Fail if write permissions are requested on any non-temp table */
585         foreach(l, plannedstmt->rtable)
586         {
587                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
588
589                 if (rte->rtekind != RTE_RELATION)
590                         continue;
591
592                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
593                         continue;
594
595                 if (isTempNamespace(get_rel_namespace(rte->relid)))
596                         continue;
597
598                 goto fail;
599         }
600
601         return;
602
603 fail:
604         ereport(ERROR,
605                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
606                          errmsg("transaction is read-only")));
607 }
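
/*
 * For example, after SET TRANSACTION READ ONLY, an UPDATE on an ordinary
 * table is rejected here with ERRCODE_READ_ONLY_SQL_TRANSACTION, while the
 * same UPDATE on a temporary table passes the isTempNamespace test above
 * and is allowed to proceed.
 */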
608
609
610 /* ----------------------------------------------------------------
611  *              InitPlan
612  *
613  *              Initializes the query plan: open files, allocate storage
614  *              and start up the rule manager
615  * ----------------------------------------------------------------
616  */
617 static void
618 InitPlan(QueryDesc *queryDesc, int eflags)
619 {
620         CmdType         operation = queryDesc->operation;
621         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
622         Plan       *plan = plannedstmt->planTree;
623         List       *rangeTable = plannedstmt->rtable;
624         EState     *estate = queryDesc->estate;
625         PlanState  *planstate;
626         TupleDesc       tupType;
627         ListCell   *l;
628         int                     i;
629
630         /*
631          * Do permissions checks
632          */
633         ExecCheckRTPerms(rangeTable);
634
635         /*
636          * initialize the node's execution state
637          */
638         estate->es_range_table = rangeTable;
639         estate->es_plannedstmt = plannedstmt;
640
641         /*
642          * initialize result relation stuff, and open/lock the result rels.
643          *
644          * We must do this before initializing the plan tree, else we might
645          * try to do a lock upgrade if a result rel is also a source rel.
646          */
647         if (plannedstmt->resultRelations)
648         {
649                 List       *resultRelations = plannedstmt->resultRelations;
650                 int                     numResultRelations = list_length(resultRelations);
651                 ResultRelInfo *resultRelInfos;
652                 ResultRelInfo *resultRelInfo;
653
654                 resultRelInfos = (ResultRelInfo *)
655                         palloc(numResultRelations * sizeof(ResultRelInfo));
656                 resultRelInfo = resultRelInfos;
657                 foreach(l, resultRelations)
658                 {
659                         Index           resultRelationIndex = lfirst_int(l);
660                         Oid                     resultRelationOid;
661                         Relation        resultRelation;
662
663                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
664                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
665                         InitResultRelInfo(resultRelInfo,
666                                                           resultRelation,
667                                                           resultRelationIndex,
668                                                           operation,
669                                                           estate->es_instrument);
670                         resultRelInfo++;
671                 }
672                 estate->es_result_relations = resultRelInfos;
673                 estate->es_num_result_relations = numResultRelations;
674                 /* es_result_relation_info is NULL except when within ModifyTable */
675                 estate->es_result_relation_info = NULL;
676         }
677         else
678         {
679                 /*
680                  * if no result relation, then set state appropriately
681                  */
682                 estate->es_result_relations = NULL;
683                 estate->es_num_result_relations = 0;
684                 estate->es_result_relation_info = NULL;
685         }
686
687         /*
688          * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
689          * before we initialize the plan tree, else we'd be risking lock
690          * upgrades.  While we are at it, build the ExecRowMark list.
691          */
692         estate->es_rowMarks = NIL;
693         foreach(l, plannedstmt->rowMarks)
694         {
695                 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
696                 Oid                     relid;
697                 Relation        relation;
698                 ExecRowMark *erm;
699
700                 /* ignore "parent" rowmarks; they are irrelevant at runtime */
701                 if (rc->isParent)
702                         continue;
703
704                 switch (rc->markType)
705                 {
706                         case ROW_MARK_EXCLUSIVE:
707                         case ROW_MARK_SHARE:
708                                 relid = getrelid(rc->rti, rangeTable);
709                                 relation = heap_open(relid, RowShareLock);
710                                 break;
711                         case ROW_MARK_REFERENCE:
712                                 relid = getrelid(rc->rti, rangeTable);
713                                 relation = heap_open(relid, AccessShareLock);
714                                 break;
715                         case ROW_MARK_COPY:
716                                 /* there's no real table here ... */
717                                 relation = NULL;
718                                 break;
719                         default:
720                                 elog(ERROR, "unrecognized markType: %d", rc->markType);
721                                 relation = NULL;        /* keep compiler quiet */
722                                 break;
723                 }
724
725                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
726                 erm->relation = relation;
727                 erm->rti = rc->rti;
728                 erm->prti = rc->prti;
729                 erm->markType = rc->markType;
730                 erm->noWait = rc->noWait;
731                 erm->ctidAttNo = rc->ctidAttNo;
732                 erm->toidAttNo = rc->toidAttNo;
733                 erm->wholeAttNo = rc->wholeAttNo;
734                 ItemPointerSetInvalid(&(erm->curCtid));
735                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
736         }
737
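	/*
	 * At this point each result relation is locked with RowExclusiveLock and
	 * each FOR UPDATE/SHARE target with RowShareLock, both stronger than the
	 * AccessShareLock the plan-tree scans will request below.  For instance,
	 * in "UPDATE t SET ... FROM t s", t is opened here first, so ExecInitNode
	 * never has to perform a deadlock-prone lock upgrade on it.
	 */
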
738         /*
739          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
740          * flag appropriately so that the plan tree will be initialized with the
741          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
742          */
743         estate->es_select_into = false;
744         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
745         {
746                 estate->es_select_into = true;
747                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
748         }
749
750         /*
751          * Initialize the executor's tuple table to empty.
752          */
753         estate->es_tupleTable = NIL;
754         estate->es_trig_tuple_slot = NULL;
755
756         /* mark EvalPlanQual not active */
757         estate->es_epqTuple = NULL;
758         estate->es_epqTupleSet = NULL;
759         estate->es_epqScanDone = NULL;
760
761         /*
762          * Initialize private state information for each SubPlan.  We must do this
763          * before running ExecInitNode on the main query tree, since
764          * ExecInitSubPlan expects to be able to find these entries.
765          */
766         Assert(estate->es_subplanstates == NIL);
767         i = 1;                                          /* subplan indices count from 1 */
768         foreach(l, plannedstmt->subplans)
769         {
770                 Plan       *subplan = (Plan *) lfirst(l);
771                 PlanState  *subplanstate;
772                 int                     sp_eflags;
773
774                 /*
775                  * A subplan will never need to do BACKWARD scan or MARK/RESTORE. If
776                  * it is a parameterless subplan (not initplan), we suggest that it be
777                  * prepared to handle REWIND efficiently; otherwise there is no need.
778                  */
779                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
780                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
781                         sp_eflags |= EXEC_FLAG_REWIND;
782
783                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
784
785                 estate->es_subplanstates = lappend(estate->es_subplanstates,
786                                                                                    subplanstate);
787
788                 i++;
789         }
790
791         /*
792          * Initialize the private state information for all the nodes in the query
793          * tree.  This opens files, allocates storage and leaves us ready to start
794          * processing tuples.
795          */
796         planstate = ExecInitNode(plan, estate, eflags);
797
798         /*
799          * Get the tuple descriptor describing the type of tuples to return. (this
800          * is especially important if we are creating a relation with "SELECT
801          * INTO")
802          */
803         tupType = ExecGetResultType(planstate);
804
805         /*
806          * Initialize the junk filter if needed.  SELECT queries need a
807          * filter if there are any junk attrs in the top-level tlist.
808          */
809         if (operation == CMD_SELECT)
810         {
811                 bool            junk_filter_needed = false;
812                 ListCell   *tlist;
813
814                 foreach(tlist, plan->targetlist)
815                 {
816                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
817
818                         if (tle->resjunk)
819                         {
820                                 junk_filter_needed = true;
821                                 break;
822                         }
823                 }
824
825                 if (junk_filter_needed)
826                 {
827                         JunkFilter *j;
828
829                         j = ExecInitJunkFilter(planstate->plan->targetlist,
830                                                                    tupType->tdhasoid,
831                                                                    ExecInitExtraTupleSlot(estate));
832                         estate->es_junkFilter = j;
833
834                         /* Want to return the cleaned tuple type */
835                         tupType = j->jf_cleanTupType;
836                 }
837         }
838
839         queryDesc->tupDesc = tupType;
840         queryDesc->planstate = planstate;
841
842         /*
843          * If doing SELECT INTO, initialize the "into" relation.  We must wait
844          * till now so we have the "clean" result tuple type to create the new
845          * table from.
846          *
847          * If EXPLAIN, skip creating the "into" relation.
848          */
849         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
850                 OpenIntoRel(queryDesc);
851 }
852
853 /*
854  * Initialize ResultRelInfo data for one result relation
855  */
856 void
857 InitResultRelInfo(ResultRelInfo *resultRelInfo,
858                                   Relation resultRelationDesc,
859                                   Index resultRelationIndex,
860                                   CmdType operation,
861                                   bool doInstrument)
862 {
863         /*
864          * Check valid relkind ... parser and/or planner should have noticed this
865          * already, but let's make sure.
866          */
867         switch (resultRelationDesc->rd_rel->relkind)
868         {
869                 case RELKIND_RELATION:
870                         /* OK */
871                         break;
872                 case RELKIND_SEQUENCE:
873                         ereport(ERROR,
874                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
875                                          errmsg("cannot change sequence \"%s\"",
876                                                         RelationGetRelationName(resultRelationDesc))));
877                         break;
878                 case RELKIND_TOASTVALUE:
879                         ereport(ERROR,
880                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
881                                          errmsg("cannot change TOAST relation \"%s\"",
882                                                         RelationGetRelationName(resultRelationDesc))));
883                         break;
884                 case RELKIND_VIEW:
885                         ereport(ERROR,
886                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
887                                          errmsg("cannot change view \"%s\"",
888                                                         RelationGetRelationName(resultRelationDesc))));
889                         break;
890                 default:
891                         ereport(ERROR,
892                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
893                                          errmsg("cannot change relation \"%s\"",
894                                                         RelationGetRelationName(resultRelationDesc))));
895                         break;
896         }
897
898         /* OK, fill in the node */
899         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
900         resultRelInfo->type = T_ResultRelInfo;
901         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
902         resultRelInfo->ri_RelationDesc = resultRelationDesc;
903         resultRelInfo->ri_NumIndices = 0;
904         resultRelInfo->ri_IndexRelationDescs = NULL;
905         resultRelInfo->ri_IndexRelationInfo = NULL;
906         /* make a copy so as not to depend on relcache info not changing... */
907         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
908         if (resultRelInfo->ri_TrigDesc)
909         {
910                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
911
912                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
913                         palloc0(n * sizeof(FmgrInfo));
914                 if (doInstrument)
915                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
916                 else
917                         resultRelInfo->ri_TrigInstrument = NULL;
918         }
919         else
920         {
921                 resultRelInfo->ri_TrigFunctions = NULL;
922                 resultRelInfo->ri_TrigInstrument = NULL;
923         }
924         resultRelInfo->ri_ConstraintExprs = NULL;
925         resultRelInfo->ri_junkFilter = NULL;
926         resultRelInfo->ri_projectReturning = NULL;
927
928         /*
929          * If there are indices on the result relation, open them and save
930          * descriptors in the result relation info, so that we can add new index
931          * entries for the tuples we add/update.  We need not do this for a
932          * DELETE, however, since deletion doesn't affect indexes.
933          */
934         if (resultRelationDesc->rd_rel->relhasindex &&
935                 operation != CMD_DELETE)
936                 ExecOpenIndices(resultRelInfo);
937 }
938
939 /*
940  *              ExecGetTriggerResultRel
941  *
942  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
943  * triggers are fired on one of the result relations of the query, and so
944  * we can just return a member of the es_result_relations array.  (Note: in
945  * self-join situations there might be multiple members with the same OID;
946  * if so it doesn't matter which one we pick.)  However, it is sometimes
947  * necessary to fire triggers on other relations; this happens mainly when an
948  * RI update trigger queues additional triggers on other relations, which will
949  * be processed in the context of the outer query.      For efficiency's sake,
950  * we want to have a ResultRelInfo for those triggers too; that can avoid
951  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
952  * be processed in the context of the outer query.  For efficiency's sake,
953  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
954  */
955 ResultRelInfo *
956 ExecGetTriggerResultRel(EState *estate, Oid relid)
957 {
958         ResultRelInfo *rInfo;
959         int                     nr;
960         ListCell   *l;
961         Relation        rel;
962         MemoryContext oldcontext;
963
964         /* First, search through the query result relations */
965         rInfo = estate->es_result_relations;
966         nr = estate->es_num_result_relations;
967         while (nr > 0)
968         {
969                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
970                         return rInfo;
971                 rInfo++;
972                 nr--;
973         }
974         /* Nope, but maybe we already made an extra ResultRelInfo for it */
975         foreach(l, estate->es_trig_target_relations)
976         {
977                 rInfo = (ResultRelInfo *) lfirst(l);
978                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
979                         return rInfo;
980         }
981         /* Nope, so we need a new one */
982
983         /*
984          * Open the target relation's relcache entry.  We assume that an
985          * appropriate lock is still held by the backend from whenever the trigger
986          * event got queued, so we need take no new lock here.
987          */
988         rel = heap_open(relid, NoLock);
989
990         /*
991          * Make the new entry in the right context.  Currently, we don't need any
992          * index information in ResultRelInfos used only for triggers, so tell
993          * InitResultRelInfo it's a DELETE.
994          */
995         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
996         rInfo = makeNode(ResultRelInfo);
997         InitResultRelInfo(rInfo,
998                                           rel,
999                                           0,            /* dummy rangetable index */
1000                                           CMD_DELETE,
1001                                           estate->es_instrument);
1002         estate->es_trig_target_relations =
1003                 lappend(estate->es_trig_target_relations, rInfo);
1004         MemoryContextSwitchTo(oldcontext);
1005
1006         return rInfo;
1007 }
1008
1009 /*
1010  *              ExecContextForcesOids
1011  *
1012  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1013  * we need to ensure that result tuples have space for an OID iff they are
1014  * going to be stored into a relation that has OIDs.  In other contexts
1015  * we are free to choose whether to leave space for OIDs in result tuples
1016  * (we generally don't want to, but we do if a physical-tlist optimization
1017  * is possible).  This routine checks the plan context and returns TRUE if the
1018  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1019  * *hasoids is set to the required value.
1020  *
1021  * One reason this is ugly is that all plan nodes in the plan tree will emit
1022  * tuples with space for an OID, though we really only need the topmost node
1023  * to do so.  However, node types like Sort don't project new tuples but just
1024  * return their inputs, and in those cases the requirement propagates down
1025  * to the input node.  Eventually we might make this code smart enough to
1026  * recognize how far down the requirement really goes, but for now we just
1027  * make all plan nodes do the same thing if the top level forces the choice.
1028  *
1029  * We assume that if we are generating tuples for INSERT or UPDATE,
1030  * estate->es_result_relation_info is already set up to describe the target
1031  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1032  * the target relations may have OIDs and some not.  We have to make the
1033  * decisions on a per-relation basis as we initialize each of the subplans of
1034  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1035  * while initializing each subplan.
1036  *
1037  * SELECT INTO is even uglier, because we don't have the INTO relation's
1038  * descriptor available when this code runs; we have to look aside at a
1039  * flag set by InitPlan().
1040  */
1041 bool
1042 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1043 {
1044         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1045
1046         if (ri != NULL)
1047         {
1048                 Relation        rel = ri->ri_RelationDesc;
1049
1050                 if (rel != NULL)
1051                 {
1052                         *hasoids = rel->rd_rel->relhasoids;
1053                         return true;
1054                 }
1055         }
1056
1057         if (planstate->state->es_select_into)
1058         {
1059                 *hasoids = planstate->state->es_into_oids;
1060                 return true;
1061         }
1062
1063         return false;
1064 }
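
/*
 * Typical use (cf. ExecAssignResultTypeFromTL in execUtils.c): a plan node
 * asks whether its context dictates the choice, and otherwise omits OID
 * space from its result tuples:
 *
 *		if (!ExecContextForcesOids(planstate, &hasoid))
 *			hasoid = false;		... given free choice, leave out OIDs ...
 */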
1065
1066 /* ----------------------------------------------------------------
1067  *              ExecEndPlan
1068  *
1069  *              Cleans up the query plan -- closes files and frees up storage
1070  *
1071  * NOTE: we are no longer very worried about freeing storage per se
1072  * in this code; FreeExecutorState should be guaranteed to release all
1073  * memory that needs to be released.  What we are worried about doing
1074  * is closing relations and dropping buffer pins.  Thus, for example,
1075  * tuple tables must be cleared or dropped to ensure pins are released.
1076  * ----------------------------------------------------------------
1077  */
1078 static void
1079 ExecEndPlan(PlanState *planstate, EState *estate)
1080 {
1081         ResultRelInfo *resultRelInfo;
1082         int                     i;
1083         ListCell   *l;
1084
1085         /*
1086          * shut down the node-type-specific query processing
1087          */
1088         ExecEndNode(planstate);
1089
1090         /*
1091          * for subplans too
1092          */
1093         foreach(l, estate->es_subplanstates)
1094         {
1095                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1096
1097                 ExecEndNode(subplanstate);
1098         }
1099
1100         /*
1101          * destroy the executor's tuple table.  Actually we only care about
1102          * releasing buffer pins and tupdesc refcounts; there's no need to
1103          * pfree the TupleTableSlots, since the containing memory context
1104          * is about to go away anyway.
1105          */
1106         ExecResetTupleTable(estate->es_tupleTable, false);
1107
1108         /*
1109          * close the result relation(s) if any, but hold locks until xact commit.
1110          */
1111         resultRelInfo = estate->es_result_relations;
1112         for (i = estate->es_num_result_relations; i > 0; i--)
1113         {
1114                 /* Close indices and then the relation itself */
1115                 ExecCloseIndices(resultRelInfo);
1116                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1117                 resultRelInfo++;
1118         }
1119
1120         /*
1121          * likewise close any trigger target relations
1122          */
1123         foreach(l, estate->es_trig_target_relations)
1124         {
1125                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1126                 /* Close indices and then the relation itself */
1127                 ExecCloseIndices(resultRelInfo);
1128                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1129         }
1130
1131         /*
1132          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1133          */
1134         foreach(l, estate->es_rowMarks)
1135         {
1136                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1137
1138                 if (erm->relation)
1139                         heap_close(erm->relation, NoLock);
1140         }
1141 }
1142
1143 /* ----------------------------------------------------------------
1144  *              ExecutePlan
1145  *
1146  *              Processes the query plan until we have processed 'numberTuples' tuples,
1147  *              moving in the specified direction.
1148  *
1149  *              Runs to completion if numberTuples is 0
1150  *
1151  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1152  * user can see it
1153  * ----------------------------------------------------------------
1154  */
1155 static void
1156 ExecutePlan(EState *estate,
1157                         PlanState *planstate,
1158                         CmdType operation,
1159                         bool sendTuples,
1160                         long numberTuples,
1161                         ScanDirection direction,
1162                         DestReceiver *dest)
1163 {
1164         TupleTableSlot *slot;
1165         long            current_tuple_count;
1166
1167         /*
1168          * initialize local variables
1169          */
1170         current_tuple_count = 0;
1171
1172         /*
1173          * Set the direction.
1174          */
1175         estate->es_direction = direction;
1176
1177         /*
1178          * Loop until we've processed the proper number of tuples from the plan.
1179          */
1180         for (;;)
1181         {
1182                 /* Reset the per-output-tuple exprcontext */
1183                 ResetPerTupleExprContext(estate);
1184
1185                 /*
1186                  * Execute the plan and obtain a tuple
1187                  */
1188                 slot = ExecProcNode(planstate);
1189
1190                 /*
1191                  * if the tuple is null, then we assume there is nothing more to
1192                  * process, so we just end the loop...
1193                  */
1194                 if (TupIsNull(slot))
1195                         break;
1196
1197                 /*
1198                  * If we have a junk filter, then project a new tuple with the junk
1199                  * removed.
1200                  *
1201                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1202                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1203                  * because that tuple slot has the wrong descriptor.)
1204                  */
1205                 if (estate->es_junkFilter != NULL)
1206                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1207
1208                 /*
1209                  * If we are supposed to send the tuple somewhere, do so.
1210                  * (In practice, this is probably always the case at this point.)
1211                  */
1212                 if (sendTuples)
1213                         (*dest->receiveSlot) (slot, dest);
1214
1215                 /*
1216                  * Count tuples processed, if this is a SELECT.  (For other operation
1217                  * types, the ModifyTable plan node must count the appropriate
1218                  * events.)
1219                  */
1220                 if (operation == CMD_SELECT)
1221                         (estate->es_processed)++;
1222
1223                 /*
1224                  * Check our tuple count.  If we've processed the proper number then
1225                  * quit; else loop again and process more tuples.  Zero numberTuples
1226                  * means no limit.
1227                  */
1228                 current_tuple_count++;
1229                 if (numberTuples && numberTuples == current_tuple_count)
1230                         break;
1231         }
1232 }
1233
1234
1235 /*
1236  * ExecRelCheck --- check that tuple meets constraints for result relation
1237  */
1238 static const char *
1239 ExecRelCheck(ResultRelInfo *resultRelInfo,
1240                          TupleTableSlot *slot, EState *estate)
1241 {
1242         Relation        rel = resultRelInfo->ri_RelationDesc;
1243         int                     ncheck = rel->rd_att->constr->num_check;
1244         ConstrCheck *check = rel->rd_att->constr->check;
1245         ExprContext *econtext;
1246         MemoryContext oldContext;
1247         List       *qual;
1248         int                     i;
1249
1250         /*
1251          * If first time through for this result relation, build expression
1252          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1253          * memory context so they'll survive throughout the query.
1254          */
1255         if (resultRelInfo->ri_ConstraintExprs == NULL)
1256         {
1257                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1258                 resultRelInfo->ri_ConstraintExprs =
1259                         (List **) palloc(ncheck * sizeof(List *));
1260                 for (i = 0; i < ncheck; i++)
1261                 {
1262                         /* ExecQual wants implicit-AND form */
1263                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1264                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1265                                 ExecPrepareExpr((Expr *) qual, estate);
1266                 }
1267                 MemoryContextSwitchTo(oldContext);
1268         }
1269
1270         /*
1271          * We will use the EState's per-tuple context for evaluating constraint
1272          * expressions (creating it if it's not already there).
1273          */
1274         econtext = GetPerTupleExprContext(estate);
1275
1276         /* Arrange for econtext's scan tuple to be the tuple under test */
1277         econtext->ecxt_scantuple = slot;
1278
1279         /* And evaluate the constraints */
1280         for (i = 0; i < ncheck; i++)
1281         {
1282                 qual = resultRelInfo->ri_ConstraintExprs[i];
1283
1284                 /*
1285                  * NOTE: SQL92 specifies that a NULL result from a constraint
1286                  * expression is not to be treated as a failure.  Therefore, tell
1287                  * ExecQual to return TRUE for NULL.
1288                  */
1289                 if (!ExecQual(qual, econtext, true))
1290                         return check[i].ccname;
1291         }
1292
1293         /* NULL result means no error */
1294         return NULL;
1295 }
1296
1297 void
1298 ExecConstraints(ResultRelInfo *resultRelInfo,
1299                                 TupleTableSlot *slot, EState *estate)
1300 {
1301         Relation        rel = resultRelInfo->ri_RelationDesc;
1302         TupleConstr *constr = rel->rd_att->constr;
1303
1304         Assert(constr);
1305
1306         if (constr->has_not_null)
1307         {
1308                 int                     natts = rel->rd_att->natts;
1309                 int                     attrChk;
1310
1311                 for (attrChk = 1; attrChk <= natts; attrChk++)
1312                 {
1313                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1314                                 slot_attisnull(slot, attrChk))
1315                                 ereport(ERROR,
1316                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1317                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1318                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1319                 }
1320         }
1321
1322         if (constr->num_check > 0)
1323         {
1324                 const char *failed;
1325
1326                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1327                         ereport(ERROR,
1328                                         (errcode(ERRCODE_CHECK_VIOLATION),
1329                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1330                                                         RelationGetRelationName(rel), failed)));
1331         }
1332 }
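
/*
 * Sketch of the expected call pattern (cf. the INSERT/UPDATE paths in
 * nodeModifyTable.c): the caller verifies that the relation has any
 * constraints at all before calling, which is why the Assert above can
 * insist on a non-NULL constr:
 *
 *		if (resultRelationDesc->rd_att->constr)
 *			ExecConstraints(resultRelInfo, slot, estate);
 */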


/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */


/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 *	estate - outer executor state data
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
			 Relation relation, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	TupleTableSlot *slot;
	HeapTuple	copyTuple;

	Assert(rti > 0);

	/*
	 * Get and lock the updated version of the row; if that fails, return
	 * NULL.
	 */
	copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
								  tid, priorXmax);

	if (copyTuple == NULL)
		return NULL;

	/*
	 * For UPDATE/DELETE we have to return the TID of the actual row we're
	 * executing EPQ for.
	 */
	*tid = copyTuple->t_self;

	/*
	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
	 */
	EvalPlanQualBegin(epqstate, estate);

	/*
	 * Free old test tuple, if any, and store new tuple where relation's
	 * scan node will see it
	 */
	EvalPlanQualSetTuple(epqstate, rti, copyTuple);

	/*
	 * Fetch any non-locked source rows
	 */
	EvalPlanQualFetchRowMarks(epqstate);

	/*
	 * Run the EPQ query.  We assume it will return at most one tuple.
	 */
	slot = EvalPlanQualNext(epqstate);

	/*
	 * Clear out the test tuple.  This is needed in case the EPQ query
	 * is re-used to test a tuple for a different relation.  (Not clear
	 * that can really happen, but let's be safe.)
	 */
	EvalPlanQualSetTuple(epqstate, rti, NULL);

	return slot;
}
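
#ifdef NOT_USED
/*
 * Hedged sketch of how an UPDATE/DELETE node (cf. ExecUpdate in
 * nodeModifyTable.c) might react when heap_update/heap_delete reports
 * HeapTupleUpdated under READ COMMITTED: recheck the newest row version
 * via EvalPlanQual, and retry the operation at the new TID if a row
 * comes back.  The function name and the bool protocol are illustrative
 * only, not the actual nodeModifyTable.c coding.
 */
static bool
example_epq_retry(EState *estate, EPQState *epqstate,
				  ResultRelInfo *resultRelInfo,
				  ItemPointer tupleid,
				  ItemPointerData update_ctid, TransactionId update_xmax)
{
	if (!ItemPointerEquals(tupleid, &update_ctid))
	{
		TupleTableSlot *epqslot;

		epqslot = EvalPlanQual(estate, epqstate,
							   resultRelInfo->ri_RelationDesc,
							   resultRelInfo->ri_RangeTableIndex,
							   &update_ctid, update_xmax);
		if (!TupIsNull(epqslot))
		{
			/* row still satisfies the quals: redo the operation here */
			*tupleid = update_ctid;
			return true;
		}
	}
	/* row was deleted, or no longer passes the quals: skip it */
	return false;
}
#endif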

/*
 * Fetch a copy of the newest version of an outdated tuple
 *
 *	estate - executor state data
 *	relation - table containing tuple
 *	lockmode - requested tuple lock mode
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
 * that there is no newest version (ie, the row was deleted, not updated).
 * If successful, we have locked the newest tuple version, so caller does not
 * need to worry about it changing anymore.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
HeapTuple
EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
				  ItemPointer tid, TransactionId priorXmax)
{
	HeapTuple	copyTuple = NULL;
	HeapTupleData tuple;
	SnapshotData SnapshotDirty;

	/*
	 * fetch target tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			HTSU_Result test;
			ItemPointerData update_ctid;
			TransactionId update_xmax;

			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by another transaction then we have
			 * to wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple. (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * This is a live tuple, so now try to lock it.
			 */
			test = heap_lock_tuple(relation, &tuple, &buffer,
								   &update_ctid, &update_xmax,
								   estate->es_output_cid,
								   lockmode, false);
			/* We now have two pins on the buffer, get rid of one */
			ReleaseBuffer(buffer);

			switch (test)
			{
				case HeapTupleSelfUpdated:
					/* treat it as deleted; do not process */
					ReleaseBuffer(buffer);
					return NULL;

				case HeapTupleMayBeUpdated:
					/* successfully locked */
					break;

				case HeapTupleUpdated:
					ReleaseBuffer(buffer);
					if (IsXactIsoLevelSerializable)
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to concurrent update")));
					if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
					{
						/* it was updated, so look at the updated version */
						tuple.t_self = update_ctid;
						/* updated row should have xmin matching this xmax */
						priorXmax = update_xmax;
						continue;
					}
					/* tuple was deleted, so give up */
					return NULL;

				default:
					ReleaseBuffer(buffer);
					elog(ERROR, "unrecognized heap_lock_tuple status: %u",
						 test);
					return NULL;	/* keep compiler quiet */
			}

			/*
			 * We got the tuple - now copy it for use by the recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * process.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * Return the copied tuple
	 */
	return copyTuple;
}
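
#ifdef NOT_USED
/*
 * Hedged sketch: how a FOR UPDATE/FOR SHARE node (cf. nodeLockRows.c)
 * might choose the lockmode argument for EvalPlanQualFetch.  Per the
 * note above, the LockTupleMode enum value is passed as a plain int.
 * The function name is hypothetical.
 */
static HeapTuple
example_fetch_latest(EState *estate, ExecRowMark *erm,
					 ItemPointer tid, TransactionId priorXmax)
{
	int			lockmode;

	/* FOR UPDATE takes an exclusive tuple lock, FOR SHARE a shared one */
	lockmode = (erm->markType == ROW_MARK_EXCLUSIVE) ?
		LockTupleExclusive : LockTupleShared;

	return EvalPlanQualFetch(estate, erm->relation, lockmode,
							 tid, priorXmax);
}
#endif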

/*
 * EvalPlanQualInit -- initialize during creation of a plan state node
 * that might need to invoke EPQ processing.
 * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
 */
void
EvalPlanQualInit(EPQState *epqstate, EState *estate,
				 Plan *subplan, int epqParam)
{
	/* Mark the EPQ state inactive */
	epqstate->estate = NULL;
	epqstate->planstate = NULL;
	epqstate->origslot = NULL;
	/* ... and remember data that EvalPlanQualBegin will need */
	epqstate->plan = subplan;
	epqstate->rowMarks = NIL;
	epqstate->epqParam = epqParam;
}

/*
 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
 *
 * We need this so that ModifyTable can deal with multiple subplans.
 */
void
EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
{
	/* If we have a live EPQ query, shut it down */
	EvalPlanQualEnd(epqstate);
	/* And set/change the plan pointer */
	epqstate->plan = subplan;
}
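
#ifdef NOT_USED
/*
 * Hedged sketch of the expected EPQState lifecycle in a plan node that
 * switches among several subplans (cf. ModifyTable): init once with no
 * subplan, point the state at the current subplan before use, and shut
 * it down from the node's End routine.  "epqParam" is assumed to come
 * from the planner; the function name is hypothetical.
 */
static void
example_epq_lifecycle(EPQState *epqstate, EState *estate,
					  Plan *subplan, int epqParam)
{
	/* at node initialization */
	EvalPlanQualInit(epqstate, estate, NULL, epqParam);

	/* whenever the active subplan changes */
	EvalPlanQualSetPlan(epqstate, subplan);

	/* at node shutdown (also returns the state to idle) */
	EvalPlanQualEnd(epqstate);
}
#endif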

/*
 * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
 *
 * Currently, only non-locking RowMarks are supported.
 */
void
EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
{
	if (RowMarkRequiresRowShareLock(erm->markType))
		elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
	epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
}

/*
 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
 *
 * NB: passed tuple must be palloc'd; it may get freed later
 */
void
EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
{
	EState	   *estate = epqstate->estate;

	Assert(rti > 0);

	/*
	 * free old test tuple, if any, and store new tuple where relation's
	 * scan node will see it
	 */
	if (estate->es_epqTuple[rti - 1] != NULL)
		heap_freetuple(estate->es_epqTuple[rti - 1]);
	estate->es_epqTuple[rti - 1] = tuple;
	estate->es_epqTupleSet[rti - 1] = true;
}
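
#ifdef NOT_USED
/*
 * Hedged illustration of the "must be palloc'd" rule above: a tuple
 * taken from a shared buffer has to be copied with heap_copytuple()
 * before being handed to EvalPlanQualSetTuple, since EPQ may
 * heap_freetuple() it later.  (EvalPlanQualFetchRowMarks below shows
 * the real instance of this pattern.)  The function name is
 * hypothetical.
 */
static void
example_install_test_tuple(EPQState *epqstate, Index rti, HeapTuple tuple)
{
	EvalPlanQualSetTuple(epqstate, rti, heap_copytuple(tuple));
}
#endif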

/*
 * Fetch back the current test tuple (if any) for the specified RTI
 */
HeapTuple
EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
{
	EState	   *estate = epqstate->estate;

	Assert(rti > 0);

	return estate->es_epqTuple[rti - 1];
}

/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
	ListCell   *l;

	Assert(epqstate->origslot != NULL);

	foreach(l, epqstate->rowMarks)
	{
		ExecRowMark *erm = (ExecRowMark *) lfirst(l);
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;

		/* clear any leftover test tuple for this rel */
		EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

		if (erm->relation)
		{
			Buffer		buffer;

			Assert(erm->markType == ROW_MARK_REFERENCE);

			/* if child rel, must check whether it produced this row */
			if (erm->rti != erm->prti)
			{
				Oid			tableoid;

				datum = ExecGetJunkAttribute(epqstate->origslot,
											 erm->toidAttNo,
											 &isNull);
				/* non-locked rels could be on the inside of outer joins */
				if (isNull)
					continue;
				tableoid = DatumGetObjectId(datum);

				if (tableoid != RelationGetRelid(erm->relation))
				{
					/* this child is inactive right now */
					continue;
				}
			}

			/* fetch the tuple's ctid */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 erm->ctidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

			/* okay, fetch the tuple */
			if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
							false, NULL))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

			/* successful, copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
			ReleaseBuffer(buffer);
		}
		else
		{
			HeapTupleHeader td;

			Assert(erm->markType == ROW_MARK_COPY);

			/* fetch the whole-row Var for the relation */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 erm->wholeAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			td = DatumGetHeapTupleHeader(datum);

			/* build a temporary HeapTuple control structure */
			tuple.t_len = HeapTupleHeaderGetDatumLength(td);
			ItemPointerSetInvalid(&(tuple.t_self));
			tuple.t_tableOid = InvalidOid;
			tuple.t_data = td;

			/* copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
		}
	}
}

/*
 * Fetch the next row (if any) from EvalPlanQual testing
 *
 * (In practice, there should never be more than one row...)
 */
TupleTableSlot *
EvalPlanQualNext(EPQState *epqstate)
{
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
	slot = ExecProcNode(epqstate->planstate);
	MemoryContextSwitchTo(oldcontext);

	return slot;
}

/*
 * Initialize or reset an EvalPlanQual state tree
 */
void
EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
	EState	   *estate = epqstate->estate;

	if (estate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		int			rtsize = list_length(parentestate->es_range_table);
		PlanState  *planstate = epqstate->planstate;

		MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->nParamExec > 0)
		{
			int			i = parentestate->es_plannedstmt->nParamExec;

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				estate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				estate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		planstate->chgParam = bms_add_member(planstate->chgParam,
											 epqstate->epqParam);
	}
}
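
#ifdef NOT_USED
/*
 * Hedged sketch of a complete recheck cycle as a locking node (cf.
 * nodeLockRows.c) might run it when it finds updated rows, matching the
 * sequence EvalPlanQual itself uses above.  "copyTuple" is assumed to be
 * an already-locked, palloc'd tuple collected by the caller; origslot
 * must point at the current outer result row.  The function name is
 * hypothetical.
 */
static TupleTableSlot *
example_epq_cycle(EPQState *epqstate, EState *estate,
				  TupleTableSlot *origslot,
				  Index rti, HeapTuple copyTuple)
{
	/* create or reset the child EPQ executor state */
	EvalPlanQualBegin(epqstate, estate);

	/* install the already-locked test tuple for its relation */
	EvalPlanQualSetTuple(epqstate, rti, copyTuple);

	/* the recheck query pulls rowmark values from the outer row */
	epqstate->origslot = origslot;
	EvalPlanQualFetchRowMarks(epqstate);

	/* returns NULL if the row no longer passes the quals */
	return EvalPlanQualNext(epqstate);
}
#endif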

/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
	EState	   *estate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(parentestate->es_range_table);

	epqstate->estate = estate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	estate->es_direction = ForwardScanDirection;
	estate->es_snapshot = parentestate->es_snapshot;
	estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	estate->es_range_table = parentestate->es_range_table;
	estate->es_plannedstmt = parentestate->es_plannedstmt;
	estate->es_junkFilter = parentestate->es_junkFilter;
	estate->es_output_cid = parentestate->es_output_cid;
	estate->es_result_relations = parentestate->es_result_relations;
	estate->es_num_result_relations = parentestate->es_num_result_relations;
	estate->es_result_relation_info = parentestate->es_result_relation_info;
	/* es_trig_target_relations must NOT be copied */
	estate->es_rowMarks = parentestate->es_rowMarks;
	estate->es_instrument = parentestate->es_instrument;
	estate->es_select_into = parentestate->es_select_into;
	estate->es_into_oids = parentestate->es_into_oids;

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	estate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->nParamExec > 0)
	{
		int			i = parentestate->es_plannedstmt->nParamExec;

		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			estate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			estate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Each EState must have its own es_epqScanDone state, but if we have
	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
	 * sub-rechecks to inherit the values being examined by an outer recheck.
	 */
	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
	if (parentestate->es_epqTuple != NULL)
	{
		estate->es_epqTuple = parentestate->es_epqTuple;
		estate->es_epqTupleSet = parentestate->es_epqTupleSet;
	}
	else
	{
		estate->es_epqTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
		estate->es_epqTupleSet = (bool *)
			palloc0(rtsize * sizeof(bool));
	}

	/*
	 * Each estate also has its own tuple table.
	 */
	estate->es_tupleTable = NIL;

	/*
	 * Initialize private state information for each SubPlan.  We must do
	 * this before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.  Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(estate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, estate, 0);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the
	 * part of the plan tree we need to run.  This opens files, allocates
	 * storage and leaves us ready to start processing tuples.
	 */
	epqstate->planstate = ExecInitNode(planTree, estate, 0);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 * (There probably shouldn't be any of the latter, but just in case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	if (estate == NULL)
		return;					/* idle, so nothing to do */

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndNode(epqstate->planstate);

	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* close any trigger target relations attached to this EState */
	foreach(l, estate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->estate = NULL;
	epqstate->planstate = NULL;
	epqstate->origslot = NULL;
}


/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 */
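
/*
 * For reference, the two statement forms that are executed this way are
 * equivalent:
 *
 *		SELECT ... INTO [TEMP] tablename FROM ...
 *		CREATE [TEMP] TABLE tablename AS SELECT ...
 */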

typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
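
#ifdef NOT_USED
/*
 * Hedged sketch of the generic DestReceiver calling protocol that the
 * intorel_* callbacks below plug into: startup once, one receiveSlot
 * call per tuple, then shutdown; rDestroy releases the receiver itself.
 * The driver function here is illustrative only.
 */
static void
example_drive_receiver(DestReceiver *dest, int operation,
					   TupleDesc tupdesc, TupleTableSlot *slot)
{
	(*dest->rStartup) (dest, operation, tupdesc);
	(*dest->receiveSlot) (slot, dest);	/* repeated for each tuple */
	(*dest->rShutdown) (dest);
	(*dest->rDestroy) (dest);
}
#endif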

/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;
	static char *validnsps[] = HEAP_RELOPT_NAMESPACES;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use the default
	 * tablespace (which may in turn default to the database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 NULL,
									 validnsps,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  true,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 "toast",
									 validnsps,
									 true,
									 false);

	(void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);

	AlterTableCreateToastTable(intoRelationId, InvalidOid, reloptions, false);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.  We can
	 * skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}

/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	DR_intorel *myState = (DR_intorel *) queryDesc->dest;

	/* OpenIntoRel might never have gotten called */
	if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
	{
		FreeBulkInsertState(myState->bistate);

		/* If we skipped using WAL, must heap_sync before commit */
		if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
			heap_sync(myState->rel);

		/* close rel, but keep lock until commit */
		heap_close(myState->rel, NoLock);

		myState->rel = NULL;
	}
}

/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	/* private fields will be set by OpenIntoRel */

	return (DestReceiver *) self;
}

/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	HeapTuple	tuple;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * force assignment of new OID (see comments in ExecInsert)
	 */
	if (myState->rel->rd_rel->relhasoids)
		HeapTupleSetOid(tuple, InvalidOid);

	heap_insert(myState->rel,
				tuple,
				myState->estate->es_output_cid,
				myState->hi_options,
				myState->bistate);

	/* We know this is a newly created relation, so there are no indexes */
}

/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}