1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorFinish()
10  *      ExecutorEnd()
11  *
12  *      These four procedures are the external interface to the executor.
13  *      In each case, the query descriptor is required as an argument.
14  *
15  *      ExecutorStart must be called at the beginning of execution of any
16  *      query plan and ExecutorEnd must always be called at the end of
17  *      execution of a plan (unless it is aborted due to error).
18  *
19  *      ExecutorRun accepts direction and count arguments that specify whether
20  *      the plan is to be executed forwards, backwards, and for how many tuples.
21  *      In some cases ExecutorRun may be called multiple times to process all
22  *      the tuples for a plan.  It is also acceptable to stop short of executing
23  *      the whole plan (but only if it is a SELECT).
24  *
25  *      ExecutorFinish must be called after the final ExecutorRun call and
26  *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *      which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *        src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
38 #include "postgres.h"
39
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "catalog/pg_publication.h"
46 #include "commands/matview.h"
47 #include "commands/trigger.h"
48 #include "executor/execdebug.h"
49 #include "foreign/fdwapi.h"
50 #include "jit/jit.h"
51 #include "mb/pg_wchar.h"
52 #include "miscadmin.h"
53 #include "optimizer/clauses.h"
54 #include "parser/parsetree.h"
55 #include "rewrite/rewriteManip.h"
56 #include "storage/bufmgr.h"
57 #include "storage/lmgr.h"
58 #include "tcop/utility.h"
59 #include "utils/acl.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/partcache.h"
63 #include "utils/rls.h"
64 #include "utils/ruleutils.h"
65 #include "utils/snapmgr.h"
66 #include "utils/tqual.h"
67
68
/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
						bool use_parallel_mode,
						CmdType operation,
						bool sendTuples,
						uint64 numberTuples,
						ScanDirection direction,
						DestReceiver *dest,
						bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
									  Bitmapset *modifiedCols,
									  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
										   TupleTableSlot *slot,
										   TupleDesc tupdesc,
										   Bitmapset *modifiedCols,
										   int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
							  Plan *planTree);

/*
 * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 *
 * Both macros fetch the per-RTE column bitmapset (inserted/updated columns)
 * for the result relation identified by ri_RangeTableIndex.
 */
#define GetInsertedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)

/* end of local decls */
116
117
118 /* ----------------------------------------------------------------
119  *              ExecutorStart
120  *
121  *              This routine must be called at the beginning of any execution of any
122  *              query plan
123  *
124  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
125  * only because some places use QueryDescs for utility commands).  The tupDesc
126  * field of the QueryDesc is filled in to describe the tuples that will be
127  * returned, and the internal fields (estate and planstate) are set up.
128  *
129  * eflags contains flag bits as described in executor.h.
130  *
131  * NB: the CurrentMemoryContext when this is called will become the parent
132  * of the per-query context used for this Executor invocation.
133  *
134  * We provide a function hook variable that lets loadable plugins
135  * get control when ExecutorStart is called.  Such a plugin would
136  * normally call standard_ExecutorStart().
137  *
138  * ----------------------------------------------------------------
139  */
140 void
141 ExecutorStart(QueryDesc *queryDesc, int eflags)
142 {
143         if (ExecutorStart_hook)
144                 (*ExecutorStart_hook) (queryDesc, eflags);
145         else
146                 standard_ExecutorStart(queryDesc, eflags);
147 }
148
/*
 * standard_ExecutorStart
 *		Default ExecutorStart implementation: enforce read-only/parallel-mode
 *		restrictions, build the per-query EState, capture parameters and
 *		snapshots, assign a command ID when output tuples must be marked,
 *		set up AFTER-trigger state, and initialize the plan state tree.
 *
 * queryDesc must not have been started already (estate == NULL on entry).
 */
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 *
	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
	 * would require (a) storing the combocid hash in shared memory, rather
	 * than synchronizing it just once at the start of parallelism, and (b) an
	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
	 * INSERT may have no such troubles, but we forbid it to simplify the
	 * checks.
	 *
	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
	 * against performing unsafe operations in parallel mode, but this gives a
	 * more user-friendly error message.
	 */
	if ((XactReadOnly || IsInParallelMode()) &&
		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->paramExecTypes != NIL)
	{
		int			nParamExec;

		/* one zero-initialized slot per internal (exec) parameter */
		nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(nParamExec * sizeof(ParamExecData));
	}

	estate->es_sourceText = queryDesc->sourceText;

	/*
	 * Fill in the query environment, if any, from queryDesc.
	 */
	estate->es_queryEnv = queryDesc->queryEnv;

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:

			/*
			 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
			 * tuples
			 */
			if (queryDesc->plannedstmt->rowMarks != NIL ||
				queryDesc->plannedstmt->hasModifyingCTE)
				estate->es_output_cid = GetCurrentCommandId(true);

			/*
			 * A SELECT without modifying CTEs can't possibly queue triggers,
			 * so force skip-triggers mode. This is just a marginal efficiency
			 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
			 * all that expensive, but we might as well do it.
			 */
			if (!queryDesc->plannedstmt->hasModifyingCTE)
				eflags |= EXEC_FLAG_SKIP_TRIGGERS;
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;
	estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
268
269 /* ----------------------------------------------------------------
270  *              ExecutorRun
271  *
272  *              This is the main routine of the executor module. It accepts
273  *              the query descriptor from the traffic cop and executes the
274  *              query plan.
275  *
276  *              ExecutorStart must have been called already.
277  *
278  *              If direction is NoMovementScanDirection then nothing is done
279  *              except to start up/shut down the destination.  Otherwise,
280  *              we retrieve up to 'count' tuples in the specified direction.
281  *
282  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
283  *              completion.  Also note that the count limit is only applied to
284  *              retrieved tuples, not for instance to those inserted/updated/deleted
285  *              by a ModifyTable plan node.
286  *
287  *              There is no return value, but output tuples (if any) are sent to
288  *              the destination receiver specified in the QueryDesc; and the number
289  *              of tuples processed at the top level can be found in
290  *              estate->es_processed.
291  *
292  *              We provide a function hook variable that lets loadable plugins
293  *              get control when ExecutorRun is called.  Such a plugin would
294  *              normally call standard_ExecutorRun().
295  *
296  * ----------------------------------------------------------------
297  */
298 void
299 ExecutorRun(QueryDesc *queryDesc,
300                         ScanDirection direction, uint64 count,
301                         bool execute_once)
302 {
303         if (ExecutorRun_hook)
304                 (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
305         else
306                 standard_ExecutorRun(queryDesc, direction, count, execute_once);
307 }
308
/*
 * standard_ExecutorRun
 *		Default ExecutorRun implementation: start the destination receiver
 *		(if tuples will be emitted), run the plan in the given direction for
 *		up to 'count' tuples (0 = no limit), then shut the receiver down.
 *		Tuple counts accumulate into estate->es_processed.
 */
void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, uint64 count, bool execute_once)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor needed below
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	/* tuples are sent for SELECT, or for DML with a RETURNING clause */
	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		dest->rStartup(dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
	{
		/* a single-execution query must not be run twice */
		if (execute_once && queryDesc->already_executed)
			elog(ERROR, "can't re-execute query flagged for single execution");
		queryDesc->already_executed = true;

		ExecutePlan(estate,
					queryDesc->planstate,
					queryDesc->plannedstmt->parallelModeNeeded,
					operation,
					sendTuples,
					count,
					direction,
					dest,
					execute_once);
	}

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		dest->rShutdown(dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}
385
386 /* ----------------------------------------------------------------
387  *              ExecutorFinish
388  *
389  *              This routine must be called after the last ExecutorRun call.
390  *              It performs cleanup such as firing AFTER triggers.  It is
391  *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
392  *              include these actions in the total runtime.
393  *
394  *              We provide a function hook variable that lets loadable plugins
395  *              get control when ExecutorFinish is called.  Such a plugin would
396  *              normally call standard_ExecutorFinish().
397  *
398  * ----------------------------------------------------------------
399  */
400 void
401 ExecutorFinish(QueryDesc *queryDesc)
402 {
403         if (ExecutorFinish_hook)
404                 (*ExecutorFinish_hook) (queryDesc);
405         else
406                 standard_ExecutorFinish(queryDesc);
407 }
408
/*
 * standard_ExecutorFinish
 *		Default ExecutorFinish implementation: run ModifyTable nodes to
 *		completion and fire queued AFTER triggers (unless skip-triggers mode
 *		was selected at start).  Must run exactly once per Executor instance,
 *		after the last ExecutorRun and before ExecutorEnd.
 */
void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	/* mark done so ExecutorEnd can assert the call sequence was honored */
	estate->es_finished = true;
}
447
448 /* ----------------------------------------------------------------
449  *              ExecutorEnd
450  *
451  *              This routine must be called at the end of execution of any
452  *              query plan
453  *
454  *              We provide a function hook variable that lets loadable plugins
455  *              get control when ExecutorEnd is called.  Such a plugin would
456  *              normally call standard_ExecutorEnd().
457  *
458  * ----------------------------------------------------------------
459  */
460 void
461 ExecutorEnd(QueryDesc *queryDesc)
462 {
463         if (ExecutorEnd_hook)
464                 (*ExecutorEnd_hook) (queryDesc);
465         else
466                 standard_ExecutorEnd(queryDesc);
467 }
468
/*
 * standard_ExecutorEnd
 *		Default ExecutorEnd implementation: shut down the plan state tree,
 *		release snapshots and the JIT context, free the EState and its
 *		per-query memory context, and clear the now-dangling queryDesc
 *		pointers.
 */
void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/* release JIT context, if allocated */
	if (estate->es_jit)
		jit_release_context(estate->es_jit);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}
522
523 /* ----------------------------------------------------------------
524  *              ExecutorRewind
525  *
526  *              This routine may be called on an open queryDesc to rewind it
527  *              to the start.
528  * ----------------------------------------------------------------
529  */
530 void
531 ExecutorRewind(QueryDesc *queryDesc)
532 {
533         EState     *estate;
534         MemoryContext oldcontext;
535
536         /* sanity checks */
537         Assert(queryDesc != NULL);
538
539         estate = queryDesc->estate;
540
541         Assert(estate != NULL);
542
543         /* It's probably not sensible to rescan updating queries */
544         Assert(queryDesc->operation == CMD_SELECT);
545
546         /*
547          * Switch into per-query memory context
548          */
549         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
550
551         /*
552          * rescan plan
553          */
554         ExecReScan(queryDesc->planstate);
555
556         MemoryContextSwitchTo(oldcontext);
557 }
558
559
560 /*
561  * ExecCheckRTPerms
562  *              Check access permissions for all relations listed in a range table.
563  *
564  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
565  * error if ereport_on_violation is true, or simply returns false otherwise.
566  *
567  * Note that this does NOT address row level security policies (aka: RLS).  If
568  * rows will be returned to the user as a result of this permission check
569  * passing, then RLS also needs to be consulted (and check_enable_rls()).
570  *
571  * See rewrite/rowsecurity.c.
572  */
573 bool
574 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
575 {
576         ListCell   *l;
577         bool            result = true;
578
579         foreach(l, rangeTable)
580         {
581                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
582
583                 result = ExecCheckRTEPerms(rte);
584                 if (!result)
585                 {
586                         Assert(rte->rtekind == RTE_RELATION);
587                         if (ereport_on_violation)
588                                 aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
589                                                            get_rel_name(rte->relid));
590                         return false;
591                 }
592         }
593
594         if (ExecutorCheckPerms_hook)
595                 result = (*ExecutorCheckPerms_hook) (rangeTable,
596                                                                                          ereport_on_violation);
597         return result;
598 }
599
600 /*
601  * ExecCheckRTEPerms
602  *              Check access permissions for a single RTE.
603  */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;	/* all privilege bits the query needs */
	AclMode		relPerms;		/* bits granted at relation level */
	AclMode		remainingPerms; /* bits still unsatisfied after relPerms */
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked when the function is prepared for execution.  Join, subquery,
	 * and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		int			col = -1;	/* bitmapset iterator state */

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
			{
				/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
				AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

				if (attno == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, attno, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
		}

		/*
		 * Basically the same for the mod columns, for both INSERT and UPDATE
		 * privilege as specified by remainingPerms.
		 */
		if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->insertedCols,
																	  ACL_INSERT))
			return false;

		if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->updatedCols,
																	  ACL_UPDATE))
			return false;
	}
	return true;
}
718
719 /*
720  * ExecCheckRTEPermsModified
721  *              Check INSERT or UPDATE access permissions for a single RTE (these
722  *              are processed uniformly).
723  */
724 static bool
725 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
726                                                   AclMode requiredPerms)
727 {
728         int                     col = -1;
729
730         /*
731          * When the query doesn't explicitly update any columns, allow the query
732          * if we have permission on any column of the rel.  This is to handle
733          * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
734          */
735         if (bms_is_empty(modifiedCols))
736         {
737                 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
738                                                                           ACLMASK_ANY) != ACLCHECK_OK)
739                         return false;
740         }
741
742         while ((col = bms_next_member(modifiedCols, col)) >= 0)
743         {
744                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
745                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
746
747                 if (attno == InvalidAttrNumber)
748                 {
749                         /* whole-row reference can't happen here */
750                         elog(ERROR, "whole-row update is not implemented");
751                 }
752                 else
753                 {
754                         if (pg_attribute_aclcheck(relOid, attno, userid,
755                                                                           requiredPerms) != ACLCHECK_OK)
756                                 return false;
757                 }
758         }
759         return true;
760 }
761
762 /*
763  * Check that the query does not imply any writes to non-temp tables;
764  * unless we're in parallel mode, in which case don't even allow writes
765  * to temp tables.
766  *
767  * Note: in a Hot Standby this would need to reject writes to temp
768  * tables just as we do in parallel mode; but an HS standby can't have created
769  * any temp tables in the first place, so no need to check that.
770  */
771 static void
772 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
773 {
774         ListCell   *l;
775
776         /*
777          * Fail if write permissions are requested in parallel mode for table
778          * (temp or non-temp), otherwise fail for any non-temp table.
779          */
780         foreach(l, plannedstmt->rtable)
781         {
782                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
783
784                 if (rte->rtekind != RTE_RELATION)
785                         continue;
786
787                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
788                         continue;
789
790                 if (isTempNamespace(get_rel_namespace(rte->relid)))
791                         continue;
792
793                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
794         }
795
796         if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
797                 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
798 }
799
800
/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 *
 *		queryDesc: the query descriptor whose estate/plannedstmt we
 *		initialize; on exit, queryDesc->tupDesc and queryDesc->planstate
 *		are filled in.
 *		eflags: EXEC_FLAG_XXX bits passed down to plan node init.
 *
 *		Ordering within this function is significant: result relations
 *		and FOR [KEY] UPDATE/SHARE relations must be locked before the
 *		plan tree is initialized, to avoid lock upgrades.
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable, true);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		/* One ResultRelInfo per result relation, allocated contiguously. */
		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			/* Open and lock each target rel with RowExclusiveLock. */
			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);

			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  NULL,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;

		/*
		 * In the partitioned result relation case, lock the non-leaf result
		 * relations too.  A subset of these are the roots of respective
		 * partitioned tables, for which we also allocate ResultRelInfos.
		 */
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
		if (plannedstmt->nonleafResultRelations)
		{
			int			num_roots = list_length(plannedstmt->rootResultRelations);

			/*
			 * Firstly, build ResultRelInfos for all the partitioned table
			 * roots, because we will need them to fire the statement-level
			 * triggers, if any.
			 */
			resultRelInfos = (ResultRelInfo *)
				palloc(num_roots * sizeof(ResultRelInfo));
			resultRelInfo = resultRelInfos;
			foreach(l, plannedstmt->rootResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);
				Oid			resultRelOid;
				Relation	resultRelDesc;

				resultRelOid = getrelid(resultRelIndex, rangeTable);
				resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
				InitResultRelInfo(resultRelInfo,
								  resultRelDesc,
								  lfirst_int(l),
								  NULL,
								  estate->es_instrument);
				resultRelInfo++;
			}

			estate->es_root_result_relations = resultRelInfos;
			estate->es_num_root_result_relations = num_roots;

			/* Simply lock the rest of them. */
			foreach(l, plannedstmt->nonleafResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);

				/* We locked the roots above. */
				if (!list_member_int(plannedstmt->rootResultRelations,
									 resultRelIndex))
					LockRelationOid(getrelid(resultRelIndex, rangeTable),
									RowExclusiveLock);
			}
		}
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
	}

	/*
	 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.  Any partitioned child
	 * tables are ignored here (because isParent=true) and will be locked by
	 * the first Append or MergeAppend node that references them.  (Note that
	 * the RowMarks corresponding to partitioned child tables are present in
	 * the same list as the rest, i.e., plannedstmt->rowMarks.)
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* get relation's OID (will produce InvalidOid if subquery) */
		relid = getrelid(rc->rti, rangeTable);

		/*
		 * If you change the conditions under which rel locks are acquired
		 * here, be sure to adjust ExecOpenScanRelation to match.
		 */
		switch (rc->markType)
		{
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_NOKEYEXCLUSIVE:
			case ROW_MARK_SHARE:
			case ROW_MARK_KEYSHARE:
				/* true locking clauses need RowShareLock */
				relation = heap_open(relid, RowShareLock);
				break;
			case ROW_MARK_REFERENCE:
				/* plain reference: read lock suffices */
				relation = heap_open(relid, AccessShareLock);
				break;
			case ROW_MARK_COPY:
				/* no physical table access is required */
				relation = NULL;
				break;
			default:
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */
				break;
		}

		/* Check that relation is a legal target for marking */
		if (relation)
			CheckValidRowMarkRel(relation, rc->markType);

		/* Build the runtime ExecRowMark from the planner's PlanRowMark. */
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->relid = relid;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->rowmarkId = rc->rowmarkId;
		erm->markType = rc->markType;
		erm->strength = rc->strength;
		erm->waitPolicy = rc->waitPolicy;
		erm->ermActive = false;
		ItemPointerSetInvalid(&(erm->curCtid));
		erm->ermExtra = NULL;
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;							/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		/* Scan the top-level targetlist for resjunk columns. */
		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   tupType->tdhasoid,
								   ExecInitExtraTupleSlot(estate, NULL));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
1093
/*
 * Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * resultRelInfo: result relation to validate (its ri_RelationDesc and, for
 * foreign tables, ri_FdwRoutine are consulted).
 * operation: CMD_INSERT, CMD_UPDATE, or CMD_DELETE.
 *
 * Raises ereport(ERROR) if the relation cannot accept the operation;
 * returns normally otherwise.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* ordinary tables are fine, given a usable replica identity */
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView, except
			 * that we omit errdetail because we haven't got the information
			 * handy (and given that we really shouldn't get here anyway, it's
			 * not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			/* writable only while performing matview maintenance */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:
					/* FDW must implement the callback ... */
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					/* ... and, if it reports updatability, allow this command */
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1232
/*
 * Check that a proposed rowmark target relation is a legal target
 *
 * In most cases parser and/or planner should have noticed this already, but
 * they don't cover all cases.
 *
 * rel: the relation the rowmark would be applied to.
 * markType: the ROW_MARK_XXX kind requested for it.
 *
 * Raises ereport(ERROR) for illegal targets; returns normally otherwise.
 */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}
1296
1297 /*
1298  * Initialize ResultRelInfo data for one result relation
1299  *
1300  * Caution: before Postgres 9.1, this function included the relkind checking
1301  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1302  * appropriate.  Be sure callers cover those needs.
1303  */
1304 void
1305 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1306                                   Relation resultRelationDesc,
1307                                   Index resultRelationIndex,
1308                                   Relation partition_root,
1309                                   int instrument_options)
1310 {
1311         List       *partition_check = NIL;
1312
1313         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1314         resultRelInfo->type = T_ResultRelInfo;
1315         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1316         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1317         resultRelInfo->ri_NumIndices = 0;
1318         resultRelInfo->ri_IndexRelationDescs = NULL;
1319         resultRelInfo->ri_IndexRelationInfo = NULL;
1320         /* make a copy so as not to depend on relcache info not changing... */
1321         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1322         if (resultRelInfo->ri_TrigDesc)
1323         {
1324                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1325
1326                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1327                         palloc0(n * sizeof(FmgrInfo));
1328                 resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1329                         palloc0(n * sizeof(ExprState *));
1330                 if (instrument_options)
1331                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1332         }
1333         else
1334         {
1335                 resultRelInfo->ri_TrigFunctions = NULL;
1336                 resultRelInfo->ri_TrigWhenExprs = NULL;
1337                 resultRelInfo->ri_TrigInstrument = NULL;
1338         }
1339         if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1340                 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1341         else
1342                 resultRelInfo->ri_FdwRoutine = NULL;
1343
1344         /* The following fields are set later if needed */
1345         resultRelInfo->ri_FdwState = NULL;
1346         resultRelInfo->ri_usesFdwDirectModify = false;
1347         resultRelInfo->ri_ConstraintExprs = NULL;
1348         resultRelInfo->ri_junkFilter = NULL;
1349         resultRelInfo->ri_projectReturning = NULL;
1350         resultRelInfo->ri_onConflictArbiterIndexes = NIL;
1351         resultRelInfo->ri_onConflict = NULL;
1352
1353         /*
1354          * Partition constraint, which also includes the partition constraint of
1355          * all the ancestors that are partitions.  Note that it will be checked
1356          * even in the case of tuple-routing where this table is the target leaf
1357          * partition, if there are any BR triggers defined on the table.  Although
1358          * tuple-routing implicitly preserves the partition constraint of the
1359          * target partition for a given row, the BR triggers may change the row
1360          * such that the constraint is no longer satisfied, which we must fail for
1361          * by checking it explicitly.
1362          *
1363          * If this is a partitioned table, the partition constraint (if any) of a
1364          * given row will be checked just before performing tuple-routing.
1365          */
1366         partition_check = RelationGetPartitionQual(resultRelationDesc);
1367
1368         resultRelInfo->ri_PartitionCheck = partition_check;
1369         resultRelInfo->ri_PartitionRoot = partition_root;
1370         resultRelInfo->ri_PartitionReadyForRouting = false;
1371 }
1372
1373 /*
1374  *              ExecGetTriggerResultRel
1375  *
1376  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1377  * triggers are fired on one of the result relations of the query, and so
1378  * we can just return a member of the es_result_relations array, the
1379  * es_root_result_relations array (if any), or the es_leaf_result_relations
1380  * list (if any).  (Note: in self-join situations there might be multiple
1381  * members with the same OID; if so it doesn't matter which one we pick.)
1382  * However, it is sometimes necessary to fire triggers on other relations;
1383  * this happens mainly when an RI update trigger queues additional triggers
1384  * on other relations, which will be processed in the context of the outer
1385  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
1386  * triggers too; that can avoid repeated re-opening of the relation.  (It
1387  * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1388  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
1389  * in es_trig_target_relations.
1390  */
1391 ResultRelInfo *
1392 ExecGetTriggerResultRel(EState *estate, Oid relid)
1393 {
1394         ResultRelInfo *rInfo;
1395         int                     nr;
1396         ListCell   *l;
1397         Relation        rel;
1398         MemoryContext oldcontext;
1399
1400         /* First, search through the query result relations */
1401         rInfo = estate->es_result_relations;
1402         nr = estate->es_num_result_relations;
1403         while (nr > 0)
1404         {
1405                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1406                         return rInfo;
1407                 rInfo++;
1408                 nr--;
1409         }
1410         /* Second, search through the root result relations, if any */
1411         rInfo = estate->es_root_result_relations;
1412         nr = estate->es_num_root_result_relations;
1413         while (nr > 0)
1414         {
1415                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1416                         return rInfo;
1417                 rInfo++;
1418                 nr--;
1419         }
1420
1421         /*
1422          * Third, search through the result relations that were created during
1423          * tuple routing, if any.
1424          */
1425         foreach(l, estate->es_tuple_routing_result_relations)
1426         {
1427                 rInfo = (ResultRelInfo *) lfirst(l);
1428                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1429                         return rInfo;
1430         }
1431         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1432         foreach(l, estate->es_trig_target_relations)
1433         {
1434                 rInfo = (ResultRelInfo *) lfirst(l);
1435                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1436                         return rInfo;
1437         }
1438         /* Nope, so we need a new one */
1439
1440         /*
1441          * Open the target relation's relcache entry.  We assume that an
1442          * appropriate lock is still held by the backend from whenever the trigger
1443          * event got queued, so we need take no new lock here.  Also, we need not
1444          * recheck the relkind, so no need for CheckValidResultRel.
1445          */
1446         rel = heap_open(relid, NoLock);
1447
1448         /*
1449          * Make the new entry in the right context.
1450          */
1451         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1452         rInfo = makeNode(ResultRelInfo);
1453         InitResultRelInfo(rInfo,
1454                                           rel,
1455                                           0,            /* dummy rangetable index */
1456                                           NULL,
1457                                           estate->es_instrument);
1458         estate->es_trig_target_relations =
1459                 lappend(estate->es_trig_target_relations, rInfo);
1460         MemoryContextSwitchTo(oldcontext);
1461
1462         /*
1463          * Currently, we don't need any index information in ResultRelInfos used
1464          * only for triggers, so no need to call ExecOpenIndices.
1465          */
1466
1467         return rInfo;
1468 }
1469
1470 /*
1471  * Close any relations that have been opened by ExecGetTriggerResultRel().
1472  */
1473 void
1474 ExecCleanUpTriggerState(EState *estate)
1475 {
1476         ListCell   *l;
1477
1478         foreach(l, estate->es_trig_target_relations)
1479         {
1480                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1481
1482                 /* Close indices and then the relation itself */
1483                 ExecCloseIndices(resultRelInfo);
1484                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1485         }
1486 }
1487
1488 /*
1489  *              ExecContextForcesOids
1490  *
1491  * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1492  * we need to ensure that result tuples have space for an OID iff they are
1493  * going to be stored into a relation that has OIDs.  In other contexts
1494  * we are free to choose whether to leave space for OIDs in result tuples
1495  * (we generally don't want to, but we do if a physical-tlist optimization
1496  * is possible).  This routine checks the plan context and returns true if the
1497  * choice is forced, false if the choice is not forced.  In the true case,
1498  * *hasoids is set to the required value.
1499  *
1500  * One reason this is ugly is that all plan nodes in the plan tree will emit
1501  * tuples with space for an OID, though we really only need the topmost node
1502  * to do so.  However, node types like Sort don't project new tuples but just
1503  * return their inputs, and in those cases the requirement propagates down
1504  * to the input node.  Eventually we might make this code smart enough to
1505  * recognize how far down the requirement really goes, but for now we just
1506  * make all plan nodes do the same thing if the top level forces the choice.
1507  *
1508  * We assume that if we are generating tuples for INSERT or UPDATE,
1509  * estate->es_result_relation_info is already set up to describe the target
1510  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1511  * the target relations may have OIDs and some not.  We have to make the
1512  * decisions on a per-relation basis as we initialize each of the subplans of
1513  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1514  * while initializing each subplan.
1515  *
1516  * CREATE TABLE AS is even uglier, because we don't have the target relation's
1517  * descriptor available when this code runs; we have to look aside at the
1518  * flags passed to ExecutorStart().
1519  */
1520 bool
1521 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1522 {
1523         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1524
1525         if (ri != NULL)
1526         {
1527                 Relation        rel = ri->ri_RelationDesc;
1528
1529                 if (rel != NULL)
1530                 {
1531                         *hasoids = rel->rd_rel->relhasoids;
1532                         return true;
1533                 }
1534         }
1535
1536         if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1537         {
1538                 *hasoids = true;
1539                 return true;
1540         }
1541         if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1542         {
1543                 *hasoids = false;
1544                 return true;
1545         }
1546
1547         return false;
1548 }
1549
1550 /* ----------------------------------------------------------------
1551  *              ExecPostprocessPlan
1552  *
1553  *              Give plan nodes a final chance to execute before shutdown
1554  * ----------------------------------------------------------------
1555  */
1556 static void
1557 ExecPostprocessPlan(EState *estate)
1558 {
1559         ListCell   *lc;
1560
1561         /*
1562          * Make sure nodes run forward.
1563          */
1564         estate->es_direction = ForwardScanDirection;
1565
1566         /*
1567          * Run any secondary ModifyTable nodes to completion, in case the main
1568          * query did not fetch all rows from them.  (We do this to ensure that
1569          * such nodes have predictable results.)
1570          */
1571         foreach(lc, estate->es_auxmodifytables)
1572         {
1573                 PlanState  *ps = (PlanState *) lfirst(lc);
1574
1575                 for (;;)
1576                 {
1577                         TupleTableSlot *slot;
1578
1579                         /* Reset the per-output-tuple exprcontext each time */
1580                         ResetPerTupleExprContext(estate);
1581
1582                         slot = ExecProcNode(ps);
1583
1584                         if (TupIsNull(slot))
1585                                 break;
1586                 }
1587         }
1588 }
1589
1590 /* ----------------------------------------------------------------
1591  *              ExecEndPlan
1592  *
1593  *              Cleans up the query plan -- closes files and frees up storage
1594  *
1595  * NOTE: we are no longer very worried about freeing storage per se
1596  * in this code; FreeExecutorState should be guaranteed to release all
1597  * memory that needs to be released.  What we are worried about doing
1598  * is closing relations and dropping buffer pins.  Thus, for example,
1599  * tuple tables must be cleared or dropped to ensure pins are released.
1600  * ----------------------------------------------------------------
1601  */
1602 static void
1603 ExecEndPlan(PlanState *planstate, EState *estate)
1604 {
1605         ResultRelInfo *resultRelInfo;
1606         int                     i;
1607         ListCell   *l;
1608
1609         /*
1610          * shut down the node-type-specific query processing
1611          */
1612         ExecEndNode(planstate);
1613
1614         /*
1615          * for subplans too
1616          */
1617         foreach(l, estate->es_subplanstates)
1618         {
1619                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1620
1621                 ExecEndNode(subplanstate);
1622         }
1623
1624         /*
1625          * destroy the executor's tuple table.  Actually we only care about
1626          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1627          * the TupleTableSlots, since the containing memory context is about to go
1628          * away anyway.
1629          */
1630         ExecResetTupleTable(estate->es_tupleTable, false);
1631
1632         /*
1633          * close the result relation(s) if any, but hold locks until xact commit.
1634          */
1635         resultRelInfo = estate->es_result_relations;
1636         for (i = estate->es_num_result_relations; i > 0; i--)
1637         {
1638                 /* Close indices and then the relation itself */
1639                 ExecCloseIndices(resultRelInfo);
1640                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1641                 resultRelInfo++;
1642         }
1643
1644         /* Close the root target relation(s). */
1645         resultRelInfo = estate->es_root_result_relations;
1646         for (i = estate->es_num_root_result_relations; i > 0; i--)
1647         {
1648                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1649                 resultRelInfo++;
1650         }
1651
1652         /* likewise close any trigger target relations */
1653         ExecCleanUpTriggerState(estate);
1654
1655         /*
1656          * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1657          * locks
1658          */
1659         foreach(l, estate->es_rowMarks)
1660         {
1661                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1662
1663                 if (erm->relation)
1664                         heap_close(erm->relation, NoLock);
1665         }
1666 }
1667
1668 /* ----------------------------------------------------------------
1669  *              ExecutePlan
1670  *
1671  *              Processes the query plan until we have retrieved 'numberTuples' tuples,
1672  *              moving in the specified direction.
1673  *
1674  *              Runs to completion if numberTuples is 0
1675  *
1676  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1677  * user can see it
1678  * ----------------------------------------------------------------
1679  */
1680 static void
1681 ExecutePlan(EState *estate,
1682                         PlanState *planstate,
1683                         bool use_parallel_mode,
1684                         CmdType operation,
1685                         bool sendTuples,
1686                         uint64 numberTuples,
1687                         ScanDirection direction,
1688                         DestReceiver *dest,
1689                         bool execute_once)
1690 {
1691         TupleTableSlot *slot;
1692         uint64          current_tuple_count;
1693
1694         /*
1695          * initialize local variables
1696          */
1697         current_tuple_count = 0;
1698
1699         /*
1700          * Set the direction.
1701          */
1702         estate->es_direction = direction;
1703
1704         /*
1705          * If the plan might potentially be executed multiple times, we must force
1706          * it to run without parallelism, because we might exit early.
1707          */
1708         if (!execute_once)
1709                 use_parallel_mode = false;
1710
1711         estate->es_use_parallel_mode = use_parallel_mode;
1712         if (use_parallel_mode)
1713                 EnterParallelMode();
1714
1715         /*
1716          * Loop until we've processed the proper number of tuples from the plan.
1717          */
1718         for (;;)
1719         {
1720                 /* Reset the per-output-tuple exprcontext */
1721                 ResetPerTupleExprContext(estate);
1722
1723                 /*
1724                  * Execute the plan and obtain a tuple
1725                  */
1726                 slot = ExecProcNode(planstate);
1727
1728                 /*
1729                  * if the tuple is null, then we assume there is nothing more to
1730                  * process so we just end the loop...
1731                  */
1732                 if (TupIsNull(slot))
1733                 {
1734                         /* Allow nodes to release or shut down resources. */
1735                         (void) ExecShutdownNode(planstate);
1736                         break;
1737                 }
1738
1739                 /*
1740                  * If we have a junk filter, then project a new tuple with the junk
1741                  * removed.
1742                  *
1743                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1744                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1745                  * because that tuple slot has the wrong descriptor.)
1746                  */
1747                 if (estate->es_junkFilter != NULL)
1748                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1749
1750                 /*
1751                  * If we are supposed to send the tuple somewhere, do so. (In
1752                  * practice, this is probably always the case at this point.)
1753                  */
1754                 if (sendTuples)
1755                 {
1756                         /*
1757                          * If we are not able to send the tuple, we assume the destination
1758                          * has closed and no more tuples can be sent. If that's the case,
1759                          * end the loop.
1760                          */
1761                         if (!dest->receiveSlot(slot, dest))
1762                                 break;
1763                 }
1764
1765                 /*
1766                  * Count tuples processed, if this is a SELECT.  (For other operation
1767                  * types, the ModifyTable plan node must count the appropriate
1768                  * events.)
1769                  */
1770                 if (operation == CMD_SELECT)
1771                         (estate->es_processed)++;
1772
1773                 /*
1774                  * check our tuple count.. if we've processed the proper number then
1775                  * quit, else loop again and process more tuples.  Zero numberTuples
1776                  * means no limit.
1777                  */
1778                 current_tuple_count++;
1779                 if (numberTuples && numberTuples == current_tuple_count)
1780                 {
1781                         /* Allow nodes to release or shut down resources. */
1782                         (void) ExecShutdownNode(planstate);
1783                         break;
1784                 }
1785         }
1786
1787         if (use_parallel_mode)
1788                 ExitParallelMode();
1789 }
1790
1791
1792 /*
1793  * ExecRelCheck --- check that tuple meets constraints for result relation
1794  *
1795  * Returns NULL if OK, else name of failed check constraint
1796  */
1797 static const char *
1798 ExecRelCheck(ResultRelInfo *resultRelInfo,
1799                          TupleTableSlot *slot, EState *estate)
1800 {
1801         Relation        rel = resultRelInfo->ri_RelationDesc;
1802         int                     ncheck = rel->rd_att->constr->num_check;
1803         ConstrCheck *check = rel->rd_att->constr->check;
1804         ExprContext *econtext;
1805         MemoryContext oldContext;
1806         int                     i;
1807
1808         /*
1809          * If first time through for this result relation, build expression
1810          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1811          * memory context so they'll survive throughout the query.
1812          */
1813         if (resultRelInfo->ri_ConstraintExprs == NULL)
1814         {
1815                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1816                 resultRelInfo->ri_ConstraintExprs =
1817                         (ExprState **) palloc(ncheck * sizeof(ExprState *));
1818                 for (i = 0; i < ncheck; i++)
1819                 {
1820                         Expr       *checkconstr;
1821
1822                         checkconstr = stringToNode(check[i].ccbin);
1823                         resultRelInfo->ri_ConstraintExprs[i] =
1824                                 ExecPrepareExpr(checkconstr, estate);
1825                 }
1826                 MemoryContextSwitchTo(oldContext);
1827         }
1828
1829         /*
1830          * We will use the EState's per-tuple context for evaluating constraint
1831          * expressions (creating it if it's not already there).
1832          */
1833         econtext = GetPerTupleExprContext(estate);
1834
1835         /* Arrange for econtext's scan tuple to be the tuple under test */
1836         econtext->ecxt_scantuple = slot;
1837
1838         /* And evaluate the constraints */
1839         for (i = 0; i < ncheck; i++)
1840         {
1841                 ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1842
1843                 /*
1844                  * NOTE: SQL specifies that a NULL result from a constraint expression
1845                  * is not to be treated as a failure.  Therefore, use ExecCheck not
1846                  * ExecQual.
1847                  */
1848                 if (!ExecCheck(checkconstr, econtext))
1849                         return check[i].ccname;
1850         }
1851
1852         /* NULL result means no error */
1853         return NULL;
1854 }
1855
1856 /*
1857  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1858  *
1859  * Exported in executor.h for outside use.
1860  * Returns true if it meets the partition constraint, else returns false.
1861  */
1862 bool
1863 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1864                                    EState *estate)
1865 {
1866         ExprContext *econtext;
1867
1868         /*
1869          * If first time through, build expression state tree for the partition
1870          * check expression.  Keep it in the per-query memory context so they'll
1871          * survive throughout the query.
1872          */
1873         if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1874         {
1875                 List       *qual = resultRelInfo->ri_PartitionCheck;
1876
1877                 resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1878         }
1879
1880         /*
1881          * We will use the EState's per-tuple context for evaluating constraint
1882          * expressions (creating it if it's not already there).
1883          */
1884         econtext = GetPerTupleExprContext(estate);
1885
1886         /* Arrange for econtext's scan tuple to be the tuple under test */
1887         econtext->ecxt_scantuple = slot;
1888
1889         /*
1890          * As in case of the catalogued constraints, we treat a NULL result as
1891          * success here, not a failure.
1892          */
1893         return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1894 }
1895
1896 /*
1897  * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1898  * partition constraint check.
1899  */
1900 void
1901 ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1902                                                         TupleTableSlot *slot,
1903                                                         EState *estate)
1904 {
1905         Relation        rel = resultRelInfo->ri_RelationDesc;
1906         Relation        orig_rel = rel;
1907         TupleDesc       tupdesc = RelationGetDescr(rel);
1908         char       *val_desc;
1909         Bitmapset  *modifiedCols;
1910         Bitmapset  *insertedCols;
1911         Bitmapset  *updatedCols;
1912
1913         /*
1914          * Need to first convert the tuple to the root partitioned table's row
1915          * type. For details, check similar comments in ExecConstraints().
1916          */
1917         if (resultRelInfo->ri_PartitionRoot)
1918         {
1919                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
1920                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
1921                 TupleConversionMap *map;
1922
1923                 rel = resultRelInfo->ri_PartitionRoot;
1924                 tupdesc = RelationGetDescr(rel);
1925                 /* a reverse map */
1926                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
1927                                                                          gettext_noop("could not convert row type"));
1928                 if (map != NULL)
1929                 {
1930                         tuple = do_convert_tuple(tuple, map);
1931                         ExecSetSlotDescriptor(slot, tupdesc);
1932                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1933                 }
1934         }
1935
1936         insertedCols = GetInsertedColumns(resultRelInfo, estate);
1937         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1938         modifiedCols = bms_union(insertedCols, updatedCols);
1939         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1940                                                                                          slot,
1941                                                                                          tupdesc,
1942                                                                                          modifiedCols,
1943                                                                                          64);
1944         ereport(ERROR,
1945                         (errcode(ERRCODE_CHECK_VIOLATION),
1946                          errmsg("new row for relation \"%s\" violates partition constraint",
1947                                         RelationGetRelationName(orig_rel)),
1948                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1949 }
1950
1951 /*
1952  * ExecConstraints - check constraints of the tuple in 'slot'
1953  *
1954  * This checks the traditional NOT NULL and check constraints, and if
1955  * requested, checks the partition constraint.
1956  *
1957  * Note: 'slot' contains the tuple to check the constraints of, which may
1958  * have been converted from the original input tuple after tuple routing.
1959  * 'resultRelInfo' is the original result relation, before tuple routing.
1960  */
1961 void
1962 ExecConstraints(ResultRelInfo *resultRelInfo,
1963                                 TupleTableSlot *slot, EState *estate,
1964                                 bool check_partition_constraint)
1965 {
1966         Relation        rel = resultRelInfo->ri_RelationDesc;
1967         TupleDesc       tupdesc = RelationGetDescr(rel);
1968         TupleConstr *constr = tupdesc->constr;
1969         Bitmapset  *modifiedCols;
1970         Bitmapset  *insertedCols;
1971         Bitmapset  *updatedCols;
1972
1973         Assert(constr || resultRelInfo->ri_PartitionCheck);
1974
1975         if (constr && constr->has_not_null)
1976         {
1977                 int                     natts = tupdesc->natts;
1978                 int                     attrChk;
1979
1980                 for (attrChk = 1; attrChk <= natts; attrChk++)
1981                 {
1982                         Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);
1983
1984                         if (att->attnotnull && slot_attisnull(slot, attrChk))
1985                         {
1986                                 char       *val_desc;
1987                                 Relation        orig_rel = rel;
1988                                 TupleDesc       orig_tupdesc = RelationGetDescr(rel);
1989
1990                                 /*
1991                                  * If the tuple has been routed, it's been converted to the
1992                                  * partition's rowtype, which might differ from the root
1993                                  * table's.  We must convert it back to the root table's
1994                                  * rowtype so that val_desc shown in the error message matches the
1995                                  * input tuple.
1996                                  */
1997                                 if (resultRelInfo->ri_PartitionRoot)
1998                                 {
1999                                         HeapTuple       tuple = ExecFetchSlotTuple(slot);
2000                                         TupleConversionMap *map;
2001
2002                                         rel = resultRelInfo->ri_PartitionRoot;
2003                                         tupdesc = RelationGetDescr(rel);
2004                                         /* a reverse map */
2005                                         map = convert_tuples_by_name(orig_tupdesc, tupdesc,
2006                                                                                                  gettext_noop("could not convert row type"));
2007                                         if (map != NULL)
2008                                         {
2009                                                 tuple = do_convert_tuple(tuple, map);
2010                                                 ExecSetSlotDescriptor(slot, tupdesc);
2011                                                 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2012                                         }
2013                                 }
2014
2015                                 insertedCols = GetInsertedColumns(resultRelInfo, estate);
2016                                 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2017                                 modifiedCols = bms_union(insertedCols, updatedCols);
2018                                 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2019                                                                                                                  slot,
2020                                                                                                                  tupdesc,
2021                                                                                                                  modifiedCols,
2022                                                                                                                  64);
2023
2024                                 ereport(ERROR,
2025                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2026                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2027                                                                 NameStr(att->attname)),
2028                                                  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2029                                                  errtablecol(orig_rel, attrChk)));
2030                         }
2031                 }
2032         }
2033
2034         if (constr && constr->num_check > 0)
2035         {
2036                 const char *failed;
2037
2038                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2039                 {
2040                         char       *val_desc;
2041                         Relation        orig_rel = rel;
2042
2043                         /* See the comment above. */
2044                         if (resultRelInfo->ri_PartitionRoot)
2045                         {
2046                                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
2047                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
2048                                 TupleConversionMap *map;
2049
2050                                 rel = resultRelInfo->ri_PartitionRoot;
2051                                 tupdesc = RelationGetDescr(rel);
2052                                 /* a reverse map */
2053                                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
2054                                                                                          gettext_noop("could not convert row type"));
2055                                 if (map != NULL)
2056                                 {
2057                                         tuple = do_convert_tuple(tuple, map);
2058                                         ExecSetSlotDescriptor(slot, tupdesc);
2059                                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2060                                 }
2061                         }
2062
2063                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2064                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2065                         modifiedCols = bms_union(insertedCols, updatedCols);
2066                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2067                                                                                                          slot,
2068                                                                                                          tupdesc,
2069                                                                                                          modifiedCols,
2070                                                                                                          64);
2071                         ereport(ERROR,
2072                                         (errcode(ERRCODE_CHECK_VIOLATION),
2073                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2074                                                         RelationGetRelationName(orig_rel), failed),
2075                                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2076                                          errtableconstraint(orig_rel, failed)));
2077                 }
2078         }
2079
2080         if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
2081                 !ExecPartitionCheck(resultRelInfo, slot, estate))
2082                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
2083 }
2084
2085
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row level security policies).  See ExecInsert()
 * and ExecUpdate().
 *
 *	kind          - which kind of WCO to enforce on this call; WCOs of any
 *	                other kind attached to the relation are skipped
 *	resultRelInfo - target relation, carrying the precompiled WCO quals
 *	slot          - tuple under test; installed as econtext's scan tuple
 *	estate        - executor state (supplies the per-tuple expr context)
 *
 * On violation this throws an error and does not return.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/*
	 * Check each of the constraints.  The two lists are built in parallel
	 * (one WithCheckOption per compiled ExprState), so walk them together.
	 */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;
			Bitmapset  *insertedCols;
			Bitmapset  *updatedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_PartitionRoot)
					{
						HeapTuple	tuple = ExecFetchSlotTuple(slot);
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						TupleConversionMap *map;

						rel = resultRelInfo->ri_PartitionRoot;
						tupdesc = RelationGetDescr(rel);
						/* a reverse map */
						map = convert_tuples_by_name(old_tupdesc, tupdesc,
													 gettext_noop("could not convert row type"));
						if (map != NULL)
						{
							/*
							 * Re-store the tuple in root-parent column order;
							 * safe to clobber the slot since we error out
							 * just below anyway.
							 */
							tuple = do_convert_tuple(tuple, map);
							ExecSetSlotDescriptor(slot, tupdesc);
							ExecStoreTuple(tuple, slot, InvalidBuffer, false);
						}
					}

					/*
					 * Columns the user supplied data for are always safe to
					 * show; others depend on SELECT privilege (checked inside
					 * ExecBuildSlotValueDescription, which may return NULL).
					 */
					insertedCols = GetInsertedColumns(resultRelInfo, estate);
					updatedCols = GetUpdatedColumns(resultRelInfo, estate);
					modifiedCols = bms_union(insertedCols, updatedCols);
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2222
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * columns involved, that subset will be returned with a key identifying which
 * columns they are.
 *
 *	reloid       - relation whose permissions (and RLS status) are checked
 *	slot         - tuple to render
 *	tupdesc      - the RELATION's descriptor (not the slot's; see above)
 *	modifiedCols - columns the user supplied data for; always shown, since
 *	               the user evidently already knows those values (attnums
 *	               offset by FirstLowInvalidHeapAttributeNumber)
 *	maxfieldlen  - per-field truncation limit in bytes
 *
 * The result (when not NULL) is palloc'd in the current memory context.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;
	StringInfoData collist;
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false;
	bool		any_perm = false;

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				/* convert the datum via the type's output function */
				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed, respecting multibyte character boundaries */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendStringInfoString(&buf, val);
			else
			{
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* partial column access: emit "(col, ...) = (val, ...)" */
		appendStringInfoString(&collist, ") = ");
		appendStringInfoString(&collist, buf.data);

		return collist.data;
	}

	return buf.data;
}
2373
2374
2375 /*
2376  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2377  * given ResultRelInfo
2378  */
2379 LockTupleMode
2380 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2381 {
2382         Bitmapset  *keyCols;
2383         Bitmapset  *updatedCols;
2384
2385         /*
2386          * Compute lock mode to use.  If columns that are part of the key have not
2387          * been modified, then we can use a weaker lock, allowing for better
2388          * concurrency.
2389          */
2390         updatedCols = GetUpdatedColumns(relinfo, estate);
2391         keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2392                                                                                  INDEX_ATTR_BITMAP_KEY);
2393
2394         if (bms_overlap(keyCols, updatedCols))
2395                 return LockTupleExclusive;
2396
2397         return LockTupleNoKeyExclusive;
2398 }
2399
2400 /*
2401  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2402  *
2403  * If no such struct, either return NULL or throw error depending on missing_ok
2404  */
2405 ExecRowMark *
2406 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2407 {
2408         ListCell   *lc;
2409
2410         foreach(lc, estate->es_rowMarks)
2411         {
2412                 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2413
2414                 if (erm->rti == rti)
2415                         return erm;
2416         }
2417         if (!missing_ok)
2418                 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2419         return NULL;
2420 }
2421
2422 /*
2423  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2424  *
2425  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2426  * input plan node (not planstate node!).  We need the latter to find out
2427  * the column numbers of the resjunk columns.
2428  */
2429 ExecAuxRowMark *
2430 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2431 {
2432         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2433         char            resname[32];
2434
2435         aerm->rowmark = erm;
2436
2437         /* Look up the resjunk columns associated with this rowmark */
2438         if (erm->markType != ROW_MARK_COPY)
2439         {
2440                 /* need ctid for all methods other than COPY */
2441                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2442                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2443                                                                                                            resname);
2444                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2445                         elog(ERROR, "could not find junk %s column", resname);
2446         }
2447         else
2448         {
2449                 /* need wholerow if COPY */
2450                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2451                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2452                                                                                                                 resname);
2453                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2454                         elog(ERROR, "could not find junk %s column", resname);
2455         }
2456
2457         /* if child rel, need tableoid */
2458         if (erm->rti != erm->prti)
2459         {
2460                 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2461                 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2462                                                                                                            resname);
2463                 if (!AttributeNumberIsValid(aerm->toidAttNo))
2464                         elog(ERROR, "could not find junk %s column", resname);
2465         }
2466
2467         return aerm;
2468 }
2469
2470
/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */


/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 *	estate - outer executor state data
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	lockmode - requested tuple lock mode
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
			 Relation relation, Index rti, int lockmode,
			 ItemPointer tid, TransactionId priorXmax)
{
	TupleTableSlot *slot;
	HeapTuple	copyTuple;

	Assert(rti > 0);

	/*
	 * Get and lock the updated version of the row; if fail, return NULL.
	 */
	copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
								  tid, priorXmax);

	if (copyTuple == NULL)
		return NULL;

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = copyTuple->t_self;

	/*
	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
	 */
	EvalPlanQualBegin(epqstate, estate);

	/*
	 * Free old test tuple, if any, and store new tuple where relation's scan
	 * node will see it
	 */
	EvalPlanQualSetTuple(epqstate, rti, copyTuple);

	/*
	 * Fetch any non-locked source rows
	 */
	EvalPlanQualFetchRowMarks(epqstate);

	/*
	 * Run the EPQ query.  We assume it will return at most one tuple.
	 */
	slot = EvalPlanQualNext(epqstate);

	/*
	 * If we got a tuple, force the slot to materialize the tuple so that it
	 * is not dependent on any local state in the EPQ query (in particular,
	 * it's highly likely that the slot contains references to any pass-by-ref
	 * datums that may be present in copyTuple).  As with the next step, this
	 * is to guard against early re-use of the EPQ query.
	 */
	if (!TupIsNull(slot))
		(void) ExecMaterializeSlot(slot);

	/*
	 * Clear out the test tuple.  This is needed in case the EPQ query is
	 * re-used to test a tuple for a different relation.  (Not clear that can
	 * really happen, but let's be safe.)
	 */
	EvalPlanQualSetTuple(epqstate, rti, NULL);

	return slot;
}
2565
2566 /*
2567  * Fetch a copy of the newest version of an outdated tuple
2568  *
2569  *      estate - executor state data
2570  *      relation - table containing tuple
2571  *      lockmode - requested tuple lock mode
2572  *      wait_policy - requested lock wait policy
2573  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2574  *      priorXmax - t_xmax from the outdated tuple
2575  *
2576  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2577  * that there is no newest version (ie, the row was deleted not updated).
2578  * We also return NULL if the tuple is locked and the wait policy is to skip
2579  * such tuples.
2580  *
2581  * If successful, we have locked the newest tuple version, so caller does not
2582  * need to worry about it changing anymore.
2583  *
2584  * Note: properly, lockmode should be declared as enum LockTupleMode,
2585  * but we use "int" to avoid having to include heapam.h in executor.h.
2586  */
2587 HeapTuple
2588 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2589                                   LockWaitPolicy wait_policy,
2590                                   ItemPointer tid, TransactionId priorXmax)
2591 {
2592         HeapTuple       copyTuple = NULL;
2593         HeapTupleData tuple;
2594         SnapshotData SnapshotDirty;
2595
2596         /*
2597          * fetch target tuple
2598          *
2599          * Loop here to deal with updated or busy tuples
2600          */
2601         InitDirtySnapshot(SnapshotDirty);
2602         tuple.t_self = *tid;
2603         for (;;)
2604         {
2605                 Buffer          buffer;
2606
2607                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2608                 {
2609                         HTSU_Result test;
2610                         HeapUpdateFailureData hufd;
2611
2612                         /*
2613                          * If xmin isn't what we're expecting, the slot must have been
2614                          * recycled and reused for an unrelated tuple.  This implies that
2615                          * the latest version of the row was deleted, so we need do
2616                          * nothing.  (Should be safe to examine xmin without getting
2617                          * buffer's content lock.  We assume reading a TransactionId to be
2618                          * atomic, and Xmin never changes in an existing tuple, except to
2619                          * invalid or frozen, and neither of those can match priorXmax.)
2620                          */
2621                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2622                                                                          priorXmax))
2623                         {
2624                                 ReleaseBuffer(buffer);
2625                                 return NULL;
2626                         }
2627
2628                         /* otherwise xmin should not be dirty... */
2629                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2630                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2631
2632                         /*
2633                          * If tuple is being updated by other transaction then we have to
2634                          * wait for its commit/abort, or die trying.
2635                          */
2636                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2637                         {
2638                                 ReleaseBuffer(buffer);
2639                                 switch (wait_policy)
2640                                 {
2641                                         case LockWaitBlock:
2642                                                 XactLockTableWait(SnapshotDirty.xmax,
2643                                                                                   relation, &tuple.t_self,
2644                                                                                   XLTW_FetchUpdated);
2645                                                 break;
2646                                         case LockWaitSkip:
2647                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2648                                                         return NULL;    /* skip instead of waiting */
2649                                                 break;
2650                                         case LockWaitError:
2651                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2652                                                         ereport(ERROR,
2653                                                                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2654                                                                          errmsg("could not obtain lock on row in relation \"%s\"",
2655                                                                                         RelationGetRelationName(relation))));
2656                                                 break;
2657                                 }
2658                                 continue;               /* loop back to repeat heap_fetch */
2659                         }
2660
2661                         /*
2662                          * If tuple was inserted by our own transaction, we have to check
2663                          * cmin against es_output_cid: cmin >= current CID means our
2664                          * command cannot see the tuple, so we should ignore it. Otherwise
2665                          * heap_lock_tuple() will throw an error, and so would any later
2666                          * attempt to update or delete the tuple.  (We need not check cmax
2667                          * because HeapTupleSatisfiesDirty will consider a tuple deleted
2668                          * by our transaction dead, regardless of cmax.) We just checked
2669                          * that priorXmax == xmin, so we can test that variable instead of
2670                          * doing HeapTupleHeaderGetXmin again.
2671                          */
2672                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2673                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2674                         {
2675                                 ReleaseBuffer(buffer);
2676                                 return NULL;
2677                         }
2678
2679                         /*
2680                          * This is a live tuple, so now try to lock it.
2681                          */
2682                         test = heap_lock_tuple(relation, &tuple,
2683                                                                    estate->es_output_cid,
2684                                                                    lockmode, wait_policy,
2685                                                                    false, &buffer, &hufd);
2686                         /* We now have two pins on the buffer, get rid of one */
2687                         ReleaseBuffer(buffer);
2688
2689                         switch (test)
2690                         {
2691                                 case HeapTupleSelfUpdated:
2692
2693                                         /*
2694                                          * The target tuple was already updated or deleted by the
2695                                          * current command, or by a later command in the current
2696                                          * transaction.  We *must* ignore the tuple in the former
2697                                          * case, so as to avoid the "Halloween problem" of
2698                                          * repeated update attempts.  In the latter case it might
2699                                          * be sensible to fetch the updated tuple instead, but
2700                                          * doing so would require changing heap_update and
2701                                          * heap_delete to not complain about updating "invisible"
2702                                          * tuples, which seems pretty scary (heap_lock_tuple will
2703                                          * not complain, but few callers expect
2704                                          * HeapTupleInvisible, and we're not one of them).  So for
2705                                          * now, treat the tuple as deleted and do not process.
2706                                          */
2707                                         ReleaseBuffer(buffer);
2708                                         return NULL;
2709
2710                                 case HeapTupleMayBeUpdated:
2711                                         /* successfully locked */
2712                                         break;
2713
2714                                 case HeapTupleUpdated:
2715                                         ReleaseBuffer(buffer);
2716                                         if (IsolationUsesXactSnapshot())
2717                                                 ereport(ERROR,
2718                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2719                                                                  errmsg("could not serialize access due to concurrent update")));
2720                                         if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
2721                                                 ereport(ERROR,
2722                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2723                                                                  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
2724
2725                                         /* Should not encounter speculative tuple on recheck */
2726                                         Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2727                                         if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2728                                         {
2729                                                 /* it was updated, so look at the updated version */
2730                                                 tuple.t_self = hufd.ctid;
2731                                                 /* updated row should have xmin matching this xmax */
2732                                                 priorXmax = hufd.xmax;
2733                                                 continue;
2734                                         }
2735                                         /* tuple was deleted, so give up */
2736                                         return NULL;
2737
2738                                 case HeapTupleWouldBlock:
2739                                         ReleaseBuffer(buffer);
2740                                         return NULL;
2741
2742                                 case HeapTupleInvisible:
2743                                         elog(ERROR, "attempted to lock invisible tuple");
2744                                         break;
2745
2746                                 default:
2747                                         ReleaseBuffer(buffer);
2748                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2749                                                  test);
2750                                         return NULL;    /* keep compiler quiet */
2751                         }
2752
2753                         /*
2754                          * We got tuple - now copy it for use by recheck query.
2755                          */
2756                         copyTuple = heap_copytuple(&tuple);
2757                         ReleaseBuffer(buffer);
2758                         break;
2759                 }
2760
2761                 /*
2762                  * If the referenced slot was actually empty, the latest version of
2763                  * the row must have been deleted, so we need do nothing.
2764                  */
2765                 if (tuple.t_data == NULL)
2766                 {
2767                         ReleaseBuffer(buffer);
2768                         return NULL;
2769                 }
2770
2771                 /*
2772                  * As above, if xmin isn't what we're expecting, do nothing.
2773                  */
2774                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2775                                                                  priorXmax))
2776                 {
2777                         ReleaseBuffer(buffer);
2778                         return NULL;
2779                 }
2780
2781                 /*
2782                  * If we get here, the tuple was found but failed SnapshotDirty.
2783                  * Assuming the xmin is either a committed xact or our own xact (as it
2784                  * certainly should be if we're trying to modify the tuple), this must
2785                  * mean that the row was updated or deleted by either a committed xact
2786                  * or our own xact.  If it was deleted, we can ignore it; if it was
2787                  * updated then chain up to the next version and repeat the whole
2788                  * process.
2789                  *
2790                  * As above, it should be safe to examine xmax and t_ctid without the
2791                  * buffer content lock, because they can't be changing.
2792                  */
2793
2794                 /* check whether next version would be in a different partition */
2795                 if (HeapTupleHeaderIndicatesMovedPartitions(tuple.t_data))
2796                         ereport(ERROR,
2797                                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2798                                          errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
2799
2800                 /* check whether tuple has been deleted */
2801                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2802                 {
2803                         /* deleted, so forget about it */
2804                         ReleaseBuffer(buffer);
2805                         return NULL;
2806                 }
2807
2808                 /* updated, so look at the updated row */
2809                 tuple.t_self = tuple.t_data->t_ctid;
2810                 /* updated row should have xmin matching this xmax */
2811                 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2812                 ReleaseBuffer(buffer);
2813                 /* loop back to fetch next in chain */
2814         }
2815
2816         /*
2817          * Return the copied tuple
2818          */
2819         return copyTuple;
2820 }
2821
2822 /*
2823  * EvalPlanQualInit -- initialize during creation of a plan state node
2824  * that might need to invoke EPQ processing.
2825  *
2826  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2827  * with EvalPlanQualSetPlan.
2828  */
2829 void
2830 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2831                                  Plan *subplan, List *auxrowmarks, int epqParam)
2832 {
2833         /* Mark the EPQ state inactive */
2834         epqstate->estate = NULL;
2835         epqstate->planstate = NULL;
2836         epqstate->origslot = NULL;
2837         /* ... and remember data that EvalPlanQualBegin will need */
2838         epqstate->plan = subplan;
2839         epqstate->arowMarks = auxrowmarks;
2840         epqstate->epqParam = epqParam;
2841 }
2842
2843 /*
2844  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2845  *
2846  * We need this so that ModifyTable can deal with multiple subplans.
2847  */
2848 void
2849 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2850 {
2851         /* If we have a live EPQ query, shut it down */
2852         EvalPlanQualEnd(epqstate);
2853         /* And set/change the plan pointer */
2854         epqstate->plan = subplan;
2855         /* The rowmarks depend on the plan, too */
2856         epqstate->arowMarks = auxrowmarks;
2857 }
2858
2859 /*
2860  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2861  *
2862  * NB: passed tuple must be palloc'd; it may get freed later
2863  */
2864 void
2865 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2866 {
2867         EState     *estate = epqstate->estate;
2868
2869         Assert(rti > 0);
2870
2871         /*
2872          * free old test tuple, if any, and store new tuple where relation's scan
2873          * node will see it
2874          */
2875         if (estate->es_epqTuple[rti - 1] != NULL)
2876                 heap_freetuple(estate->es_epqTuple[rti - 1]);
2877         estate->es_epqTuple[rti - 1] = tuple;
2878         estate->es_epqTupleSet[rti - 1] = true;
2879 }
2880
2881 /*
2882  * Fetch back the current test tuple (if any) for the specified RTI
2883  */
2884 HeapTuple
2885 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2886 {
2887         EState     *estate = epqstate->estate;
2888
2889         Assert(rti > 0);
2890
2891         return estate->es_epqTuple[rti - 1];
2892 }
2893
/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 *
 * For each auxiliary rowmark, we pull the row's identity out of origslot's
 * junk attributes and install a test tuple via EvalPlanQualSetTuple (or
 * clear the slot when no row is available, e.g. on the null side of an
 * outer join).
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
	ListCell   *l;

	Assert(epqstate->origslot != NULL);

	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
		ExecRowMark *erm = aerm->rowmark;
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;

		/* rowmarks that take row-level locks are handled elsewhere, not here */
		if (RowMarkRequiresRowShareLock(erm->markType))
			elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");

		/* clear any leftover test tuple for this rel */
		EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

		/* if child rel, must check whether it produced this row */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->toidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				continue;
			}
		}

		if (erm->markType == ROW_MARK_REFERENCE)
		{
			/* row must be re-fetched from the table using its ctid */
			HeapTuple	copyTuple;

			Assert(erm->relation != NULL);

			/* fetch the tuple's ctid */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->ctidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;

			/* fetch requests on foreign tables must be passed to their FDW */
			if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
			{
				FdwRoutine *fdwroutine;
				bool		updated = false;

				fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
				/* this should have been checked already, but let's be safe */
				if (fdwroutine->RefetchForeignRow == NULL)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cannot lock rows in foreign table \"%s\"",
									RelationGetRelationName(erm->relation))));
				copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
														  erm,
														  datum,
														  &updated);
				if (copyTuple == NULL)
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				/*
				 * Ideally we'd insist on updated == false here, but that
				 * assumes that FDWs can track that exactly, which they might
				 * not be able to.  So just ignore the flag.
				 */
			}
			else
			{
				/* ordinary table, fetch the tuple */
				Buffer		buffer;

				tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
				if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
								false, NULL))
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				/*
				 * If the stored tuple has fewer attributes than the current
				 * descriptor, expand it while copying (presumably the row
				 * predates added columns — confirm against heap_expand_tuple
				 * contract); otherwise a plain copy suffices.
				 */
				if (HeapTupleHeaderGetNatts(tuple.t_data) <
					RelationGetDescr(erm->relation)->natts)
				{
					copyTuple = heap_expand_tuple(&tuple,
												  RelationGetDescr(erm->relation));
				}
				else
				{
					/* successful, copy tuple */
					copyTuple = heap_copytuple(&tuple);
				}
				ReleaseBuffer(buffer);
			}

			/* store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
		}
		else
		{
			/* the row was carried whole in a junk attribute of origslot */
			HeapTupleHeader td;

			Assert(erm->markType == ROW_MARK_COPY);

			/* fetch the whole-row Var for the relation */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->wholeAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			td = DatumGetHeapTupleHeader(datum);

			/* build a temporary HeapTuple control structure */
			tuple.t_len = HeapTupleHeaderGetDatumLength(td);
			tuple.t_data = td;
			/* relation might be a foreign table, if so provide tableoid */
			tuple.t_tableOid = erm->relid;
			/* also copy t_ctid in case there's valid data there */
			tuple.t_self = td->t_ctid;

			/* copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
		}
	}
}
3037
3038 /*
3039  * Fetch the next row (if any) from EvalPlanQual testing
3040  *
3041  * (In practice, there should never be more than one row...)
3042  */
3043 TupleTableSlot *
3044 EvalPlanQualNext(EPQState *epqstate)
3045 {
3046         MemoryContext oldcontext;
3047         TupleTableSlot *slot;
3048
3049         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
3050         slot = ExecProcNode(epqstate->planstate);
3051         MemoryContextSwitchTo(oldcontext);
3052
3053         return slot;
3054 }
3055
3056 /*
3057  * Initialize or reset an EvalPlanQual state tree
3058  */
3059 void
3060 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
3061 {
3062         EState     *estate = epqstate->estate;
3063
3064         if (estate == NULL)
3065         {
3066                 /* First time through, so create a child EState */
3067                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
3068         }
3069         else
3070         {
3071                 /*
3072                  * We already have a suitable child EPQ tree, so just reset it.
3073                  */
3074                 int                     rtsize = list_length(parentestate->es_range_table);
3075                 PlanState  *planstate = epqstate->planstate;
3076
3077                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
3078
3079                 /* Recopy current values of parent parameters */
3080                 if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3081                 {
3082                         int                     i;
3083
3084                         i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3085
3086                         while (--i >= 0)
3087                         {
3088                                 /* copy value if any, but not execPlan link */
3089                                 estate->es_param_exec_vals[i].value =
3090                                         parentestate->es_param_exec_vals[i].value;
3091                                 estate->es_param_exec_vals[i].isnull =
3092                                         parentestate->es_param_exec_vals[i].isnull;
3093                         }
3094                 }
3095
3096                 /*
3097                  * Mark child plan tree as needing rescan at all scan nodes.  The
3098                  * first ExecProcNode will take care of actually doing the rescan.
3099                  */
3100                 planstate->chgParam = bms_add_member(planstate->chgParam,
3101                                                                                          epqstate->epqParam);
3102         }
3103 }
3104
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * On exit, epqstate->estate and epqstate->planstate are set up; all memory
 * allocated here lives in the new child EState's es_query_cxt.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
	EState	   *estate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(parentestate->es_range_table);

	/* create the child EState and do all further work in its context */
	epqstate->estate = estate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 *
	 * The ResultRelInfo array management is trickier than it looks.  We
	 * create a fresh array for the child but copy all the content from the
	 * parent.  This is because it's okay for the child to share any
	 * per-relation state the parent has already created --- but if the child
	 * sets up any ResultRelInfo fields, such as its own junkfilter, that
	 * state must *not* propagate back to the parent.  (For one thing, the
	 * pointed-to data is in a memory context that won't last long enough.)
	 */
	estate->es_direction = ForwardScanDirection;
	estate->es_snapshot = parentestate->es_snapshot;
	estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	estate->es_range_table = parentestate->es_range_table;
	estate->es_plannedstmt = parentestate->es_plannedstmt;
	estate->es_junkFilter = parentestate->es_junkFilter;
	estate->es_output_cid = parentestate->es_output_cid;
	if (parentestate->es_num_result_relations > 0)
	{
		int			numResultRelations = parentestate->es_num_result_relations;
		ResultRelInfo *resultRelInfos;

		/* shallow-copy the parent's ResultRelInfo array (see comment above) */
		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		memcpy(resultRelInfos, parentestate->es_result_relations,
			   numResultRelations * sizeof(ResultRelInfo));
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
	}
	/* es_result_relation_info must NOT be copied */
	/* es_trig_target_relations must NOT be copied */
	estate->es_rowMarks = parentestate->es_rowMarks;
	estate->es_top_eflags = parentestate->es_top_eflags;
	estate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	estate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			estate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			estate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Each EState must have its own es_epqScanDone state, but if we have
	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
	 * sub-rechecks to inherit the values being examined by an outer recheck.
	 */
	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
	if (parentestate->es_epqTuple != NULL)
	{
		/* nested EPQ: share the outer recheck's tuple arrays */
		estate->es_epqTuple = parentestate->es_epqTuple;
		estate->es_epqTupleSet = parentestate->es_epqTupleSet;
	}
	else
	{
		/* outermost EPQ: allocate fresh (zeroed) per-RTI tuple arrays */
		estate->es_epqTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
		estate->es_epqTupleSet = (bool *)
			palloc0(rtsize * sizeof(bool));
	}

	/*
	 * Each estate also has its own tuple table.
	 */
	estate->es_tupleTable = NIL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries. Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(estate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, estate, 0);
		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->planstate = ExecInitNode(planTree, estate, 0);

	MemoryContextSwitchTo(oldcontext);
}
3241
3242 /*
3243  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3244  * or if we are done with the current EPQ child.
3245  *
3246  * This is a cut-down version of ExecutorEnd(); basically we want to do most
3247  * of the normal cleanup, but *not* close result relations (which we are
3248  * just sharing from the outer query).  We do, however, have to close any
3249  * trigger target relations that got opened, since those are not shared.
3250  * (There probably shouldn't be any of the latter, but just in case...)
3251  */
3252 void
3253 EvalPlanQualEnd(EPQState *epqstate)
3254 {
3255         EState     *estate = epqstate->estate;
3256         MemoryContext oldcontext;
3257         ListCell   *l;
3258
3259         if (estate == NULL)
3260                 return;                                 /* idle, so nothing to do */
3261
3262         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3263
3264         ExecEndNode(epqstate->planstate);
3265
3266         foreach(l, estate->es_subplanstates)
3267         {
3268                 PlanState  *subplanstate = (PlanState *) lfirst(l);
3269
3270                 ExecEndNode(subplanstate);
3271         }
3272
3273         /* throw away the per-estate tuple table */
3274         ExecResetTupleTable(estate->es_tupleTable, false);
3275
3276         /* close any trigger target relations attached to this EState */
3277         ExecCleanUpTriggerState(estate);
3278
3279         MemoryContextSwitchTo(oldcontext);
3280
3281         FreeExecutorState(estate);
3282
3283         /* Mark EPQState idle */
3284         epqstate->estate = NULL;
3285         epqstate->planstate = NULL;
3286         epqstate->origslot = NULL;
3287 }