/*
 * NOTE: this is a scraped copy of src/backend/executor/execMain.c from a
 * PostgreSQL git mirror ("Modified files for MERGE" patch set); the web
 * viewer's navigation text has been removed.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorFinish()
10  *      ExecutorEnd()
11  *
12  *      These four procedures are the external interface to the executor.
13  *      In each case, the query descriptor is required as an argument.
14  *
15  *      ExecutorStart must be called at the beginning of execution of any
16  *      query plan and ExecutorEnd must always be called at the end of
17  *      execution of a plan (unless it is aborted due to error).
18  *
19  *      ExecutorRun accepts direction and count arguments that specify whether
20  *      the plan is to be executed forwards, backwards, and for how many tuples.
21  *      In some cases ExecutorRun may be called multiple times to process all
22  *      the tuples for a plan.  It is also acceptable to stop short of executing
23  *      the whole plan (but only if it is a SELECT).
24  *
25  *      ExecutorFinish must be called after the final ExecutorRun call and
26  *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *      which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *        src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
38 #include "postgres.h"
39
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "catalog/partition.h"
46 #include "catalog/pg_publication.h"
47 #include "commands/matview.h"
48 #include "commands/trigger.h"
49 #include "executor/execdebug.h"
50 #include "foreign/fdwapi.h"
51 #include "jit/jit.h"
52 #include "mb/pg_wchar.h"
53 #include "miscadmin.h"
54 #include "optimizer/clauses.h"
55 #include "parser/parsetree.h"
56 #include "rewrite/rewriteManip.h"
57 #include "storage/bufmgr.h"
58 #include "storage/lmgr.h"
59 #include "tcop/utility.h"
60 #include "utils/acl.h"
61 #include "utils/lsyscache.h"
62 #include "utils/memutils.h"
63 #include "utils/rls.h"
64 #include "utils/ruleutils.h"
65 #include "utils/snapmgr.h"
66 #include "utils/tqual.h"
67
68
/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
						  Bitmapset *modifiedCols,
						  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
				  Plan *planTree);

/*
 * Fetch the inserted-columns / updated-columns bitmapsets for a result
 * relation, by looking up its RTE in the range table.
 *
 * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 */
#define GetInsertedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)

/* end of local decls */
117
118 /* ----------------------------------------------------------------
119  *              ExecutorStart
120  *
121  *              This routine must be called at the beginning of any execution of any
122  *              query plan
123  *
124  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
125  * only because some places use QueryDescs for utility commands).  The tupDesc
126  * field of the QueryDesc is filled in to describe the tuples that will be
127  * returned, and the internal fields (estate and planstate) are set up.
128  *
129  * eflags contains flag bits as described in executor.h.
130  *
131  * NB: the CurrentMemoryContext when this is called will become the parent
132  * of the per-query context used for this Executor invocation.
133  *
134  * We provide a function hook variable that lets loadable plugins
135  * get control when ExecutorStart is called.  Such a plugin would
136  * normally call standard_ExecutorStart().
137  *
138  * ----------------------------------------------------------------
139  */
140 void
141 ExecutorStart(QueryDesc *queryDesc, int eflags)
142 {
143         if (ExecutorStart_hook)
144                 (*ExecutorStart_hook) (queryDesc, eflags);
145         else
146                 standard_ExecutorStart(queryDesc, eflags);
147 }
148
/*
 * standard_ExecutorStart
 *		Default ExecutorStart implementation: performs read-only/parallel
 *		safety checks, creates the per-query EState, and initializes the
 *		plan state tree via InitPlan().
 */
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 *
	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
	 * would require (a) storing the combocid hash in shared memory, rather
	 * than synchronizing it just once at the start of parallelism, and (b) an
	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
	 * INSERT may have no such troubles, but we forbid it to simplify the
	 * checks.
	 *
	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
	 * against performing unsafe operations in parallel mode, but this gives a
	 * more user-friendly error message.
	 */
	if ((XactReadOnly || IsInParallelMode()) &&
		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->paramExecTypes != NIL)
	{
		int			nParamExec;

		nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(nParamExec * sizeof(ParamExecData));
	}

	estate->es_sourceText = queryDesc->sourceText;

	/*
	 * Fill in the query environment, if any, from queryDesc.
	 */
	estate->es_queryEnv = queryDesc->queryEnv;

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:

			/*
			 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
			 * tuples
			 */
			if (queryDesc->plannedstmt->rowMarks != NIL ||
				queryDesc->plannedstmt->hasModifyingCTE)
				estate->es_output_cid = GetCurrentCommandId(true);

			/*
			 * A SELECT without modifying CTEs can't possibly queue triggers,
			 * so force skip-triggers mode. This is just a marginal efficiency
			 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
			 * all that expensive, but we might as well do it.
			 */
			if (!queryDesc->plannedstmt->hasModifyingCTE)
				eflags |= EXEC_FLAG_SKIP_TRIGGERS;
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
		case CMD_MERGE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;
	estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
269
270 /* ----------------------------------------------------------------
271  *              ExecutorRun
272  *
273  *              This is the main routine of the executor module. It accepts
274  *              the query descriptor from the traffic cop and executes the
275  *              query plan.
276  *
277  *              ExecutorStart must have been called already.
278  *
279  *              If direction is NoMovementScanDirection then nothing is done
280  *              except to start up/shut down the destination.  Otherwise,
281  *              we retrieve up to 'count' tuples in the specified direction.
282  *
283  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
284  *              completion.  Also note that the count limit is only applied to
285  *              retrieved tuples, not for instance to those inserted/updated/deleted
286  *              by a ModifyTable plan node.
287  *
288  *              There is no return value, but output tuples (if any) are sent to
289  *              the destination receiver specified in the QueryDesc; and the number
290  *              of tuples processed at the top level can be found in
291  *              estate->es_processed.
292  *
293  *              We provide a function hook variable that lets loadable plugins
294  *              get control when ExecutorRun is called.  Such a plugin would
295  *              normally call standard_ExecutorRun().
296  *
297  * ----------------------------------------------------------------
298  */
299 void
300 ExecutorRun(QueryDesc *queryDesc,
301                         ScanDirection direction, uint64 count,
302                         bool execute_once)
303 {
304         if (ExecutorRun_hook)
305                 (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
306         else
307                 standard_ExecutorRun(queryDesc, direction, count, execute_once);
308 }
309
/*
 * standard_ExecutorRun
 *		Default ExecutorRun implementation: starts the destination receiver
 *		if tuples will be emitted, runs the plan (unless direction is
 *		NoMovement), then shuts the receiver down.  Tuple count is
 *		accumulated in estate->es_processed.
 */
void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, uint64 count, bool execute_once)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor and the query feature.
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		dest->rStartup(dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
	{
		/* once-only queries (e.g. protocol Execute to completion) must not rerun */
		if (execute_once && queryDesc->already_executed)
			elog(ERROR, "can't re-execute query flagged for single execution");
		queryDesc->already_executed = true;

		ExecutePlan(estate,
					queryDesc->planstate,
					queryDesc->plannedstmt->parallelModeNeeded,
					operation,
					sendTuples,
					count,
					direction,
					dest,
					execute_once);
	}

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		dest->rShutdown(dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}
386
387 /* ----------------------------------------------------------------
388  *              ExecutorFinish
389  *
390  *              This routine must be called after the last ExecutorRun call.
391  *              It performs cleanup such as firing AFTER triggers.  It is
392  *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
393  *              include these actions in the total runtime.
394  *
395  *              We provide a function hook variable that lets loadable plugins
396  *              get control when ExecutorFinish is called.  Such a plugin would
397  *              normally call standard_ExecutorFinish().
398  *
399  * ----------------------------------------------------------------
400  */
401 void
402 ExecutorFinish(QueryDesc *queryDesc)
403 {
404         if (ExecutorFinish_hook)
405                 (*ExecutorFinish_hook) (queryDesc);
406         else
407                 standard_ExecutorFinish(queryDesc);
408 }
409
/*
 * standard_ExecutorFinish
 *		Default ExecutorFinish implementation: runs ModifyTable nodes to
 *		completion and fires queued AFTER triggers.  Must be called exactly
 *		once per Executor instance, after the last ExecutorRun call.
 */
void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	/* mark done so ExecutorEnd can assert proper call sequence */
	estate->es_finished = true;
}
448
449 /* ----------------------------------------------------------------
450  *              ExecutorEnd
451  *
452  *              This routine must be called at the end of execution of any
453  *              query plan
454  *
455  *              We provide a function hook variable that lets loadable plugins
456  *              get control when ExecutorEnd is called.  Such a plugin would
457  *              normally call standard_ExecutorEnd().
458  *
459  * ----------------------------------------------------------------
460  */
461 void
462 ExecutorEnd(QueryDesc *queryDesc)
463 {
464         if (ExecutorEnd_hook)
465                 (*ExecutorEnd_hook) (queryDesc);
466         else
467                 standard_ExecutorEnd(queryDesc);
468 }
469
/*
 * standard_ExecutorEnd
 *		Default ExecutorEnd implementation: shuts down the plan tree,
 *		releases snapshots and the JIT context, then frees the EState and
 *		its per-query memory context.  Teardown order matters: the plan
 *		must be ended before snapshots are unregistered, and we must leave
 *		the per-query context before destroying it.
 */
void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/* release JIT context, if allocated */
	if (estate->es_jit)
		jit_release_context(estate->es_jit);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}
523
524 /* ----------------------------------------------------------------
525  *              ExecutorRewind
526  *
527  *              This routine may be called on an open queryDesc to rewind it
528  *              to the start.
529  * ----------------------------------------------------------------
530  */
/*
 * ExecutorRewind
 *		Rewind an open queryDesc to the start by rescanning the plan tree.
 *		Only sensible for SELECT queries (asserted below).
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate);

	MemoryContextSwitchTo(oldcontext);
}
559
560
561 /*
562  * ExecCheckRTPerms
563  *              Check access permissions for all relations listed in a range table.
564  *
565  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
566  * error if ereport_on_violation is true, or simply returns false otherwise.
567  *
568  * Note that this does NOT address row level security policies (aka: RLS).  If
569  * rows will be returned to the user as a result of this permission check
570  * passing, then RLS also needs to be consulted (and check_enable_rls()).
571  *
572  * See rewrite/rowsecurity.c.
573  */
574 bool
575 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
576 {
577         ListCell   *l;
578         bool            result = true;
579
580         foreach(l, rangeTable)
581         {
582                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
583
584                 result = ExecCheckRTEPerms(rte);
585                 if (!result)
586                 {
587                         Assert(rte->rtekind == RTE_RELATION);
588                         if (ereport_on_violation)
589                                 aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
590                                                            get_rel_name(rte->relid));
591                         return false;
592                 }
593         }
594
595         if (ExecutorCheckPerms_hook)
596                 result = (*ExecutorCheckPerms_hook) (rangeTable,
597                                                                                          ereport_on_violation);
598         return result;
599 }
600
601 /*
602  * ExecCheckRTEPerms
603  *              Check access permissions for a single RTE.
604  */
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 *
 * Returns true if the current (or check-as) user holds all privileges in
 * rte->requiredPerms, counting column-level grants where applicable
 * (SELECT/INSERT/UPDATE); returns false otherwise.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked when the function is prepared for execution.  Join, subquery,
	 * and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		int			col = -1;

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
			{
				/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
				AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

				if (attno == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, attno, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
		}

		/*
		 * Basically the same for the mod columns, for both INSERT and UPDATE
		 * privilege as specified by remainingPerms.
		 */
		if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->insertedCols,
																	  ACL_INSERT))
			return false;

		if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->updatedCols,
																	  ACL_UPDATE))
			return false;
	}
	return true;
}
719
720 /*
721  * ExecCheckRTEPermsModified
722  *              Check INSERT or UPDATE access permissions for a single RTE (these
723  *              are processed uniformly).
724  */
725 static bool
726 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
727                                                   AclMode requiredPerms)
728 {
729         int                     col = -1;
730
731         /*
732          * When the query doesn't explicitly update any columns, allow the query
733          * if we have permission on any column of the rel.  This is to handle
734          * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
735          */
736         if (bms_is_empty(modifiedCols))
737         {
738                 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
739                                                                           ACLMASK_ANY) != ACLCHECK_OK)
740                         return false;
741         }
742
743         while ((col = bms_next_member(modifiedCols, col)) >= 0)
744         {
745                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
746                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
747
748                 if (attno == InvalidAttrNumber)
749                 {
750                         /* whole-row reference can't happen here */
751                         elog(ERROR, "whole-row update is not implemented");
752                 }
753                 else
754                 {
755                         if (pg_attribute_aclcheck(relOid, attno, userid,
756                                                                           requiredPerms) != ACLCHECK_OK)
757                                 return false;
758                 }
759         }
760         return true;
761 }
762
763 /*
764  * Check that the query does not imply any writes to non-temp tables;
765  * unless we're in parallel mode, in which case don't even allow writes
766  * to temp tables.
767  *
768  * Note: in a Hot Standby this would need to reject writes to temp
769  * tables just as we do in parallel mode; but an HS standby can't have created
770  * any temp tables in the first place, so no need to check that.
771  */
772 static void
773 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
774 {
775         ListCell   *l;
776
777         /*
778          * Fail if write permissions are requested in parallel mode for table
779          * (temp or non-temp), otherwise fail for any non-temp table.
780          */
781         foreach(l, plannedstmt->rtable)
782         {
783                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
784
785                 if (rte->rtekind != RTE_RELATION)
786                         continue;
787
788                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
789                         continue;
790
791                 if (isTempNamespace(get_rel_namespace(rte->relid)))
792                         continue;
793
794                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
795         }
796
797         if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
798                 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
799 }
800
801
802 /* ----------------------------------------------------------------
803  *              InitPlan
804  *
805  *              Initializes the query plan: open files, allocate storage
806  *              and start up the rule manager
807  * ----------------------------------------------------------------
808  */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable, true);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		/* Build one ResultRelInfo per leaf result relation, in list order. */
		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			/* RowExclusiveLock: standard lock level for DML target rels */
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);

			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  NULL,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;

		/*
		 * In the partitioned result relation case, lock the non-leaf result
		 * relations too.  A subset of these are the roots of respective
		 * partitioned tables, for which we also allocate ResultRelInfos.
		 */
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
		if (plannedstmt->nonleafResultRelations)
		{
			int			num_roots = list_length(plannedstmt->rootResultRelations);

			/*
			 * Firstly, build ResultRelInfos for all the partitioned table
			 * roots, because we will need them to fire the statement-level
			 * triggers, if any.
			 */
			resultRelInfos = (ResultRelInfo *)
				palloc(num_roots * sizeof(ResultRelInfo));
			resultRelInfo = resultRelInfos;
			foreach(l, plannedstmt->rootResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);
				Oid			resultRelOid;
				Relation	resultRelDesc;

				resultRelOid = getrelid(resultRelIndex, rangeTable);
				resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
				InitResultRelInfo(resultRelInfo,
								  resultRelDesc,
								  lfirst_int(l),
								  NULL,
								  estate->es_instrument);
				resultRelInfo++;
			}

			estate->es_root_result_relations = resultRelInfos;
			estate->es_num_root_result_relations = num_roots;

			/* Simply lock the rest of them. */
			foreach(l, plannedstmt->nonleafResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);

				/* We locked the roots above. */
				if (!list_member_int(plannedstmt->rootResultRelations,
									 resultRelIndex))
					LockRelationOid(getrelid(resultRelIndex, rangeTable),
									RowExclusiveLock);
			}
		}
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
	}

	/*
	 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.  Any partitioned child
	 * tables are ignored here (because isParent=true) and will be locked by
	 * the first Append or MergeAppend node that references them.  (Note that
	 * the RowMarks corresponding to partitioned child tables are present in
	 * the same list as the rest, i.e., plannedstmt->rowMarks.)
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* get relation's OID (will produce InvalidOid if subquery) */
		relid = getrelid(rc->rti, rangeTable);

		/*
		 * If you change the conditions under which rel locks are acquired
		 * here, be sure to adjust ExecOpenScanRelation to match.
		 */
		switch (rc->markType)
		{
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_NOKEYEXCLUSIVE:
			case ROW_MARK_SHARE:
			case ROW_MARK_KEYSHARE:
				/* true locking clauses need RowShareLock */
				relation = heap_open(relid, RowShareLock);
				break;
			case ROW_MARK_REFERENCE:
				/* non-locking reference: a plain read lock suffices */
				relation = heap_open(relid, AccessShareLock);
				break;
			case ROW_MARK_COPY:
				/* no physical table access is required */
				relation = NULL;
				break;
			default:
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */
				break;
		}

		/* Check that relation is a legal target for marking */
		if (relation)
			CheckValidRowMarkRel(relation, rc->markType);

		/* Build the runtime ExecRowMark, copying fields from the plan node. */
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->relid = relid;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->rowmarkId = rc->rowmarkId;
		erm->markType = rc->markType;
		erm->strength = rc->strength;
		erm->waitPolicy = rc->waitPolicy;
		erm->ermActive = false;
		ItemPointerSetInvalid(&(erm->curCtid));
		erm->ermExtra = NULL;
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;							/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		/* Scan the top-level target list for any resjunk entries. */
		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   tupType->tdhasoid,
								   ExecInitExtraTupleSlot(estate, NULL));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
1094
1095 /*
1096  * Check that a proposed result relation is a legal target for the operation
1097  *
1098  * Generally the parser and/or planner should have noticed any such mistake
1099  * already, but let's make sure.
1100  *
1101  * Note: when changing this function, you probably also need to look at
1102  * CheckValidRowMarkRel.
1103  */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	/* Dispatch on relkind; only some kinds can ever be modified. */
	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* Plain and partitioned tables: only replica identity to check */
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView, except
			 * that we omit errdetail because we haven't got the information
			 * handy (and given that we really shouldn't get here anyway, it's
			 * not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			/* Matviews are writable only during REFRESH-style maintenance */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:

					/*
					 * If foreign partition to do tuple-routing for, skip the
					 * check; it's disallowed elsewhere.
					 */
					if (resultRelInfo->ri_PartitionRoot)
						break;
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					/* FDW may also veto per-relation via IsForeignRelUpdatable */
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1240
1241 /*
1242  * Check that a proposed rowmark target relation is a legal target
1243  *
1244  * In most cases parser and/or planner should have noticed this already, but
1245  * they don't cover all cases.
1246  */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	/* Dispatch on relkind; only some kinds may have rows marked/locked. */
	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		default:
			/* catch any relkind added later without updating this function */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}
1304
1305 /*
1306  * Initialize ResultRelInfo data for one result relation
1307  *
1308  * Caution: before Postgres 9.1, this function included the relkind checking
1309  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1310  * appropriate.  Be sure callers cover those needs.
1311  */
1312 void
1313 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1314                                   Relation resultRelationDesc,
1315                                   Index resultRelationIndex,
1316                                   Relation partition_root,
1317                                   int instrument_options)
1318 {
1319         List       *partition_check = NIL;
1320
1321         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1322         resultRelInfo->type = T_ResultRelInfo;
1323         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1324         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1325         resultRelInfo->ri_NumIndices = 0;
1326         resultRelInfo->ri_IndexRelationDescs = NULL;
1327         resultRelInfo->ri_IndexRelationInfo = NULL;
1328         /* make a copy so as not to depend on relcache info not changing... */
1329         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1330         if (resultRelInfo->ri_TrigDesc)
1331         {
1332                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1333
1334                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1335                         palloc0(n * sizeof(FmgrInfo));
1336                 resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1337                         palloc0(n * sizeof(ExprState *));
1338                 if (instrument_options)
1339                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1340         }
1341         else
1342         {
1343                 resultRelInfo->ri_TrigFunctions = NULL;
1344                 resultRelInfo->ri_TrigWhenExprs = NULL;
1345                 resultRelInfo->ri_TrigInstrument = NULL;
1346         }
1347         if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1348                 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1349         else
1350                 resultRelInfo->ri_FdwRoutine = NULL;
1351
1352         /* The following fields are set later if needed */
1353         resultRelInfo->ri_FdwState = NULL;
1354         resultRelInfo->ri_usesFdwDirectModify = false;
1355         resultRelInfo->ri_ConstraintExprs = NULL;
1356         resultRelInfo->ri_junkFilter = NULL;
1357         resultRelInfo->ri_projectReturning = NULL;
1358         resultRelInfo->ri_onConflictArbiterIndexes = NIL;
1359         resultRelInfo->ri_onConflict = NULL;
1360
1361         resultRelInfo->ri_mergeTargetRTI = 0;
1362         resultRelInfo->ri_mergeState = (MergeState *) palloc0(sizeof (MergeState));
1363
1364         /*
1365          * Partition constraint, which also includes the partition constraint of
1366          * all the ancestors that are partitions.  Note that it will be checked
1367          * even in the case of tuple-routing where this table is the target leaf
1368          * partition, if there any BR triggers defined on the table.  Although
1369          * tuple-routing implicitly preserves the partition constraint of the
1370          * target partition for a given row, the BR triggers may change the row
1371          * such that the constraint is no longer satisfied, which we must fail for
1372          * by checking it explicitly.
1373          *
1374          * If this is a partitioned table, the partition constraint (if any) of a
1375          * given row will be checked just before performing tuple-routing.
1376          */
1377         partition_check = RelationGetPartitionQual(resultRelationDesc);
1378
1379         resultRelInfo->ri_PartitionCheck = partition_check;
1380         resultRelInfo->ri_PartitionRoot = partition_root;
1381 }
1382
1383 /*
1384  *              ExecGetTriggerResultRel
1385  *
1386  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1387  * triggers are fired on one of the result relations of the query, and so
1388  * we can just return a member of the es_result_relations array, the
1389  * es_root_result_relations array (if any), or the es_leaf_result_relations
1390  * list (if any).  (Note: in self-join situations there might be multiple
1391  * members with the same OID; if so it doesn't matter which one we pick.)
1392  * However, it is sometimes necessary to fire triggers on other relations;
1393  * this happens mainly when an RI update trigger queues additional triggers
1394  * on other relations, which will be processed in the context of the outer
1395  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
1396  * triggers too; that can avoid repeated re-opening of the relation.  (It
1397  * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1398  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
1399  * in es_trig_target_relations.
1400  */
1401 ResultRelInfo *
1402 ExecGetTriggerResultRel(EState *estate, Oid relid)
1403 {
1404         ResultRelInfo *rInfo;
1405         int                     nr;
1406         ListCell   *l;
1407         Relation        rel;
1408         MemoryContext oldcontext;
1409
1410         /* First, search through the query result relations */
1411         rInfo = estate->es_result_relations;
1412         nr = estate->es_num_result_relations;
1413         while (nr > 0)
1414         {
1415                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1416                         return rInfo;
1417                 rInfo++;
1418                 nr--;
1419         }
1420         /* Second, search through the root result relations, if any */
1421         rInfo = estate->es_root_result_relations;
1422         nr = estate->es_num_root_result_relations;
1423         while (nr > 0)
1424         {
1425                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1426                         return rInfo;
1427                 rInfo++;
1428                 nr--;
1429         }
1430         /*
1431          * Third, search through the result relations that were created during
1432          * tuple routing, if any.
1433          */
1434         foreach(l, estate->es_tuple_routing_result_relations)
1435         {
1436                 rInfo = (ResultRelInfo *) lfirst(l);
1437                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1438                         return rInfo;
1439         }
1440         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1441         foreach(l, estate->es_trig_target_relations)
1442         {
1443                 rInfo = (ResultRelInfo *) lfirst(l);
1444                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1445                         return rInfo;
1446         }
1447         /* Nope, so we need a new one */
1448
1449         /*
1450          * Open the target relation's relcache entry.  We assume that an
1451          * appropriate lock is still held by the backend from whenever the trigger
1452          * event got queued, so we need take no new lock here.  Also, we need not
1453          * recheck the relkind, so no need for CheckValidResultRel.
1454          */
1455         rel = heap_open(relid, NoLock);
1456
1457         /*
1458          * Make the new entry in the right context.
1459          */
1460         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1461         rInfo = makeNode(ResultRelInfo);
1462         InitResultRelInfo(rInfo,
1463                                           rel,
1464                                           0,            /* dummy rangetable index */
1465                                           NULL,
1466                                           estate->es_instrument);
1467         estate->es_trig_target_relations =
1468                 lappend(estate->es_trig_target_relations, rInfo);
1469         MemoryContextSwitchTo(oldcontext);
1470
1471         /*
1472          * Currently, we don't need any index information in ResultRelInfos used
1473          * only for triggers, so no need to call ExecOpenIndices.
1474          */
1475
1476         return rInfo;
1477 }
1478
1479 /*
1480  * Close any relations that have been opened by ExecGetTriggerResultRel().
1481  */
1482 void
1483 ExecCleanUpTriggerState(EState *estate)
1484 {
1485         ListCell   *l;
1486
1487         foreach(l, estate->es_trig_target_relations)
1488         {
1489                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1490
1491                 /* Close indices and then the relation itself */
1492                 ExecCloseIndices(resultRelInfo);
1493                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1494         }
1495 }
1496
1497 /*
1498  *              ExecContextForcesOids
1499  *
1500  * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1501  * we need to ensure that result tuples have space for an OID iff they are
1502  * going to be stored into a relation that has OIDs.  In other contexts
1503  * we are free to choose whether to leave space for OIDs in result tuples
1504  * (we generally don't want to, but we do if a physical-tlist optimization
1505  * is possible).  This routine checks the plan context and returns true if the
1506  * choice is forced, false if the choice is not forced.  In the true case,
1507  * *hasoids is set to the required value.
1508  *
1509  * One reason this is ugly is that all plan nodes in the plan tree will emit
1510  * tuples with space for an OID, though we really only need the topmost node
1511  * to do so.  However, node types like Sort don't project new tuples but just
1512  * return their inputs, and in those cases the requirement propagates down
1513  * to the input node.  Eventually we might make this code smart enough to
1514  * recognize how far down the requirement really goes, but for now we just
1515  * make all plan nodes do the same thing if the top level forces the choice.
1516  *
1517  * We assume that if we are generating tuples for INSERT or UPDATE,
1518  * estate->es_result_relation_info is already set up to describe the target
1519  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1520  * the target relations may have OIDs and some not.  We have to make the
1521  * decisions on a per-relation basis as we initialize each of the subplans of
1522  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1523  * while initializing each subplan.
1524  *
1525  * CREATE TABLE AS is even uglier, because we don't have the target relation's
1526  * descriptor available when this code runs; we have to look aside at the
1527  * flags passed to ExecutorStart().
1528  */
1529 bool
1530 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1531 {
1532         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1533
1534         if (ri != NULL)
1535         {
1536                 Relation        rel = ri->ri_RelationDesc;
1537
1538                 if (rel != NULL)
1539                 {
1540                         *hasoids = rel->rd_rel->relhasoids;
1541                         return true;
1542                 }
1543         }
1544
1545         if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1546         {
1547                 *hasoids = true;
1548                 return true;
1549         }
1550         if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1551         {
1552                 *hasoids = false;
1553                 return true;
1554         }
1555
1556         return false;
1557 }
1558
1559 /* ----------------------------------------------------------------
1560  *              ExecPostprocessPlan
1561  *
1562  *              Give plan nodes a final chance to execute before shutdown
1563  * ----------------------------------------------------------------
1564  */
1565 static void
1566 ExecPostprocessPlan(EState *estate)
1567 {
1568         ListCell   *lc;
1569
1570         /*
1571          * Make sure nodes run forward.
1572          */
1573         estate->es_direction = ForwardScanDirection;
1574
1575         /*
1576          * Run any secondary ModifyTable nodes to completion, in case the main
1577          * query did not fetch all rows from them.  (We do this to ensure that
1578          * such nodes have predictable results.)
1579          */
1580         foreach(lc, estate->es_auxmodifytables)
1581         {
1582                 PlanState  *ps = (PlanState *) lfirst(lc);
1583
1584                 for (;;)
1585                 {
1586                         TupleTableSlot *slot;
1587
1588                         /* Reset the per-output-tuple exprcontext each time */
1589                         ResetPerTupleExprContext(estate);
1590
1591                         slot = ExecProcNode(ps);
1592
1593                         if (TupIsNull(slot))
1594                                 break;
1595                 }
1596         }
1597 }
1598
1599 /* ----------------------------------------------------------------
1600  *              ExecEndPlan
1601  *
1602  *              Cleans up the query plan -- closes files and frees up storage
1603  *
1604  * NOTE: we are no longer very worried about freeing storage per se
1605  * in this code; FreeExecutorState should be guaranteed to release all
1606  * memory that needs to be released.  What we are worried about doing
1607  * is closing relations and dropping buffer pins.  Thus, for example,
1608  * tuple tables must be cleared or dropped to ensure pins are released.
1609  * ----------------------------------------------------------------
1610  */
1611 static void
1612 ExecEndPlan(PlanState *planstate, EState *estate)
1613 {
1614         ResultRelInfo *resultRelInfo;
1615         int                     i;
1616         ListCell   *l;
1617
1618         /*
1619          * shut down the node-type-specific query processing
1620          */
1621         ExecEndNode(planstate);
1622
1623         /*
1624          * for subplans too
1625          */
1626         foreach(l, estate->es_subplanstates)
1627         {
1628                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1629
1630                 ExecEndNode(subplanstate);
1631         }
1632
1633         /*
1634          * destroy the executor's tuple table.  Actually we only care about
1635          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1636          * the TupleTableSlots, since the containing memory context is about to go
1637          * away anyway.
1638          */
1639         ExecResetTupleTable(estate->es_tupleTable, false);
1640
1641         /*
1642          * close the result relation(s) if any, but hold locks until xact commit.
1643          */
1644         resultRelInfo = estate->es_result_relations;
1645         for (i = estate->es_num_result_relations; i > 0; i--)
1646         {
1647                 /* Close indices and then the relation itself */
1648                 ExecCloseIndices(resultRelInfo);
1649                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1650                 resultRelInfo++;
1651         }
1652
1653         /* Close the root target relation(s). */
1654         resultRelInfo = estate->es_root_result_relations;
1655         for (i = estate->es_num_root_result_relations; i > 0; i--)
1656         {
1657                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1658                 resultRelInfo++;
1659         }
1660
1661         /* likewise close any trigger target relations */
1662         ExecCleanUpTriggerState(estate);
1663
1664         /*
1665          * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1666          * locks
1667          */
1668         foreach(l, estate->es_rowMarks)
1669         {
1670                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1671
1672                 if (erm->relation)
1673                         heap_close(erm->relation, NoLock);
1674         }
1675 }
1676
1677 /* ----------------------------------------------------------------
1678  *              ExecutePlan
1679  *
1680  *              Processes the query plan until we have retrieved 'numberTuples' tuples,
1681  *              moving in the specified direction.
1682  *
1683  *              Runs to completion if numberTuples is 0
1684  *
1685  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1686  * user can see it
1687  * ----------------------------------------------------------------
1688  */
1689 static void
1690 ExecutePlan(EState *estate,
1691                         PlanState *planstate,
1692                         bool use_parallel_mode,
1693                         CmdType operation,
1694                         bool sendTuples,
1695                         uint64 numberTuples,
1696                         ScanDirection direction,
1697                         DestReceiver *dest,
1698                         bool execute_once)
1699 {
1700         TupleTableSlot *slot;
1701         uint64          current_tuple_count;
1702
1703         /*
1704          * initialize local variables
1705          */
1706         current_tuple_count = 0;
1707
1708         /*
1709          * Set the direction.
1710          */
1711         estate->es_direction = direction;
1712
1713         /*
1714          * If the plan might potentially be executed multiple times, we must force
1715          * it to run without parallelism, because we might exit early.
1716          */
1717         if (!execute_once)
1718                 use_parallel_mode = false;
1719
1720         estate->es_use_parallel_mode = use_parallel_mode;
1721         if (use_parallel_mode)
1722                 EnterParallelMode();
1723
1724         /*
1725          * Loop until we've processed the proper number of tuples from the plan.
1726          */
1727         for (;;)
1728         {
1729                 /* Reset the per-output-tuple exprcontext */
1730                 ResetPerTupleExprContext(estate);
1731
1732                 /*
1733                  * Execute the plan and obtain a tuple
1734                  */
1735                 slot = ExecProcNode(planstate);
1736
1737                 /*
1738                  * if the tuple is null, then we assume there is nothing more to
1739                  * process so we just end the loop...
1740                  */
1741                 if (TupIsNull(slot))
1742                 {
1743                         /* Allow nodes to release or shut down resources. */
1744                         (void) ExecShutdownNode(planstate);
1745                         break;
1746                 }
1747
1748                 /*
1749                  * If we have a junk filter, then project a new tuple with the junk
1750                  * removed.
1751                  *
1752                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1753                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1754                  * because that tuple slot has the wrong descriptor.)
1755                  */
1756                 if (estate->es_junkFilter != NULL)
1757                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1758
1759                 /*
1760                  * If we are supposed to send the tuple somewhere, do so. (In
1761                  * practice, this is probably always the case at this point.)
1762                  */
1763                 if (sendTuples)
1764                 {
1765                         /*
1766                          * If we are not able to send the tuple, we assume the destination
1767                          * has closed and no more tuples can be sent. If that's the case,
1768                          * end the loop.
1769                          */
1770                         if (!dest->receiveSlot(slot, dest))
1771                                 break;
1772                 }
1773
1774                 /*
1775                  * Count tuples processed, if this is a SELECT.  (For other operation
1776                  * types, the ModifyTable plan node must count the appropriate
1777                  * events.)
1778                  */
1779                 if (operation == CMD_SELECT)
1780                         (estate->es_processed)++;
1781
1782                 /*
1783                  * check our tuple count.. if we've processed the proper number then
1784                  * quit, else loop again and process more tuples.  Zero numberTuples
1785                  * means no limit.
1786                  */
1787                 current_tuple_count++;
1788                 if (numberTuples && numberTuples == current_tuple_count)
1789                 {
1790                         /* Allow nodes to release or shut down resources. */
1791                         (void) ExecShutdownNode(planstate);
1792                         break;
1793                 }
1794         }
1795
1796         if (use_parallel_mode)
1797                 ExitParallelMode();
1798 }
1799
1800
1801 /*
1802  * ExecRelCheck --- check that tuple meets constraints for result relation
1803  *
1804  * Returns NULL if OK, else name of failed check constraint
1805  */
1806 static const char *
1807 ExecRelCheck(ResultRelInfo *resultRelInfo,
1808                          TupleTableSlot *slot, EState *estate)
1809 {
1810         Relation        rel = resultRelInfo->ri_RelationDesc;
1811         int                     ncheck = rel->rd_att->constr->num_check;
1812         ConstrCheck *check = rel->rd_att->constr->check;
1813         ExprContext *econtext;
1814         MemoryContext oldContext;
1815         int                     i;
1816
1817         /*
1818          * If first time through for this result relation, build expression
1819          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1820          * memory context so they'll survive throughout the query.
1821          */
1822         if (resultRelInfo->ri_ConstraintExprs == NULL)
1823         {
1824                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1825                 resultRelInfo->ri_ConstraintExprs =
1826                         (ExprState **) palloc(ncheck * sizeof(ExprState *));
1827                 for (i = 0; i < ncheck; i++)
1828                 {
1829                         Expr       *checkconstr;
1830
1831                         checkconstr = stringToNode(check[i].ccbin);
1832                         resultRelInfo->ri_ConstraintExprs[i] =
1833                                 ExecPrepareExpr(checkconstr, estate);
1834                 }
1835                 MemoryContextSwitchTo(oldContext);
1836         }
1837
1838         /*
1839          * We will use the EState's per-tuple context for evaluating constraint
1840          * expressions (creating it if it's not already there).
1841          */
1842         econtext = GetPerTupleExprContext(estate);
1843
1844         /* Arrange for econtext's scan tuple to be the tuple under test */
1845         econtext->ecxt_scantuple = slot;
1846
1847         /* And evaluate the constraints */
1848         for (i = 0; i < ncheck; i++)
1849         {
1850                 ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1851
1852                 /*
1853                  * NOTE: SQL specifies that a NULL result from a constraint expression
1854                  * is not to be treated as a failure.  Therefore, use ExecCheck not
1855                  * ExecQual.
1856                  */
1857                 if (!ExecCheck(checkconstr, econtext))
1858                         return check[i].ccname;
1859         }
1860
1861         /* NULL result means no error */
1862         return NULL;
1863 }
1864
1865 /*
1866  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1867  *
1868  * Exported in executor.h for outside use.
1869  * Returns true if it meets the partition constraint, else returns false.
1870  */
1871 bool
1872 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1873                                    EState *estate)
1874 {
1875         ExprContext *econtext;
1876
1877         /*
1878          * If first time through, build expression state tree for the partition
1879          * check expression.  Keep it in the per-query memory context so they'll
1880          * survive throughout the query.
1881          */
1882         if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1883         {
1884                 List       *qual = resultRelInfo->ri_PartitionCheck;
1885
1886                 resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1887         }
1888
1889         /*
1890          * We will use the EState's per-tuple context for evaluating constraint
1891          * expressions (creating it if it's not already there).
1892          */
1893         econtext = GetPerTupleExprContext(estate);
1894
1895         /* Arrange for econtext's scan tuple to be the tuple under test */
1896         econtext->ecxt_scantuple = slot;
1897
1898         /*
1899          * As in case of the catalogued constraints, we treat a NULL result as
1900          * success here, not a failure.
1901          */
1902         return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1903 }
1904
1905 /*
1906  * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1907  * partition constraint check.
1908  */
1909 void
1910 ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1911                                                         TupleTableSlot *slot,
1912                                                         EState *estate)
1913 {
1914         Relation        rel = resultRelInfo->ri_RelationDesc;
1915         Relation        orig_rel = rel;
1916         TupleDesc       tupdesc = RelationGetDescr(rel);
1917         char       *val_desc;
1918         Bitmapset  *modifiedCols;
1919         Bitmapset  *insertedCols;
1920         Bitmapset  *updatedCols;
1921
1922         /*
1923          * Need to first convert the tuple to the root partitioned table's row
1924          * type. For details, check similar comments in ExecConstraints().
1925          */
1926         if (resultRelInfo->ri_PartitionRoot)
1927         {
1928                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
1929                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
1930                 TupleConversionMap *map;
1931
1932                 rel = resultRelInfo->ri_PartitionRoot;
1933                 tupdesc = RelationGetDescr(rel);
1934                 /* a reverse map */
1935                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
1936                                                                          gettext_noop("could not convert row type"));
1937                 if (map != NULL)
1938                 {
1939                         tuple = do_convert_tuple(tuple, map);
1940                         ExecSetSlotDescriptor(slot, tupdesc);
1941                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1942                 }
1943         }
1944
1945         insertedCols = GetInsertedColumns(resultRelInfo, estate);
1946         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1947         modifiedCols = bms_union(insertedCols, updatedCols);
1948         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1949                                                                                          slot,
1950                                                                                          tupdesc,
1951                                                                                          modifiedCols,
1952                                                                                          64);
1953         ereport(ERROR,
1954                         (errcode(ERRCODE_CHECK_VIOLATION),
1955                          errmsg("new row for relation \"%s\" violates partition constraint",
1956                                         RelationGetRelationName(orig_rel)),
1957                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1958 }
1959
1960 /*
1961  * ExecConstraints - check constraints of the tuple in 'slot'
1962  *
1963  * This checks the traditional NOT NULL and check constraints, and if
1964  * requested, checks the partition constraint.
1965  *
1966  * Note: 'slot' contains the tuple to check the constraints of, which may
1967  * have been converted from the original input tuple after tuple routing.
1968  * 'resultRelInfo' is the original result relation, before tuple routing.
1969  */
1970 void
1971 ExecConstraints(ResultRelInfo *resultRelInfo,
1972                                 TupleTableSlot *slot, EState *estate,
1973                                 bool check_partition_constraint)
1974 {
1975         Relation        rel = resultRelInfo->ri_RelationDesc;
1976         TupleDesc       tupdesc = RelationGetDescr(rel);
1977         TupleConstr *constr = tupdesc->constr;
1978         Bitmapset  *modifiedCols;
1979         Bitmapset  *insertedCols;
1980         Bitmapset  *updatedCols;
1981
1982         Assert(constr || resultRelInfo->ri_PartitionCheck);
1983
1984         if (constr && constr->has_not_null)
1985         {
1986                 int                     natts = tupdesc->natts;
1987                 int                     attrChk;
1988
1989                 for (attrChk = 1; attrChk <= natts; attrChk++)
1990                 {
1991                         Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);
1992
1993                         if (att->attnotnull && slot_attisnull(slot, attrChk))
1994                         {
1995                                 char       *val_desc;
1996                                 Relation        orig_rel = rel;
1997                                 TupleDesc       orig_tupdesc = RelationGetDescr(rel);
1998
1999                                 /*
2000                                  * If the tuple has been routed, it's been converted to the
2001                                  * partition's rowtype, which might differ from the root
2002                                  * table's.  We must convert it back to the root table's
2003                                  * rowtype so that val_desc shown error message matches the
2004                                  * input tuple.
2005                                  */
2006                                 if (resultRelInfo->ri_PartitionRoot)
2007                                 {
2008                                         HeapTuple       tuple = ExecFetchSlotTuple(slot);
2009                                         TupleConversionMap *map;
2010
2011                                         rel = resultRelInfo->ri_PartitionRoot;
2012                                         tupdesc = RelationGetDescr(rel);
2013                                         /* a reverse map */
2014                                         map = convert_tuples_by_name(orig_tupdesc, tupdesc,
2015                                                                                                  gettext_noop("could not convert row type"));
2016                                         if (map != NULL)
2017                                         {
2018                                                 tuple = do_convert_tuple(tuple, map);
2019                                                 ExecSetSlotDescriptor(slot, tupdesc);
2020                                                 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2021                                         }
2022                                 }
2023
2024                                 insertedCols = GetInsertedColumns(resultRelInfo, estate);
2025                                 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2026                                 modifiedCols = bms_union(insertedCols, updatedCols);
2027                                 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2028                                                                                                                  slot,
2029                                                                                                                  tupdesc,
2030                                                                                                                  modifiedCols,
2031                                                                                                                  64);
2032
2033                                 ereport(ERROR,
2034                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2035                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2036                                                                 NameStr(att->attname)),
2037                                                  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2038                                                  errtablecol(orig_rel, attrChk)));
2039                         }
2040                 }
2041         }
2042
2043         if (constr && constr->num_check > 0)
2044         {
2045                 const char *failed;
2046
2047                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2048                 {
2049                         char       *val_desc;
2050                         Relation        orig_rel = rel;
2051
2052                         /* See the comment above. */
2053                         if (resultRelInfo->ri_PartitionRoot)
2054                         {
2055                                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
2056                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
2057                                 TupleConversionMap *map;
2058
2059                                 rel = resultRelInfo->ri_PartitionRoot;
2060                                 tupdesc = RelationGetDescr(rel);
2061                                 /* a reverse map */
2062                                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
2063                                                                                          gettext_noop("could not convert row type"));
2064                                 if (map != NULL)
2065                                 {
2066                                         tuple = do_convert_tuple(tuple, map);
2067                                         ExecSetSlotDescriptor(slot, tupdesc);
2068                                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2069                                 }
2070                         }
2071
2072                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2073                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2074                         modifiedCols = bms_union(insertedCols, updatedCols);
2075                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2076                                                                                                          slot,
2077                                                                                                          tupdesc,
2078                                                                                                          modifiedCols,
2079                                                                                                          64);
2080                         ereport(ERROR,
2081                                         (errcode(ERRCODE_CHECK_VIOLATION),
2082                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2083                                                         RelationGetRelationName(orig_rel), failed),
2084                                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2085                                          errtableconstraint(orig_rel, failed)));
2086                 }
2087         }
2088
2089         if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
2090                 !ExecPartitionCheck(resultRelInfo, slot, estate))
2091                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
2092 }
2093
2094
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row level security policies).  See ExecInsert()
 * and ExecUpdate().
 *
 * Does not return on failure: every violation is reported with ereport(ERROR).
 * WCOs whose kind differs from the requested one are ignored on this pass.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/*
	 * Check each of the constraints.  The two lists are built in parallel:
	 * ri_WithCheckOptions holds the WithCheckOption nodes, and
	 * ri_WithCheckOptionExprs the corresponding compiled expressions.
	 */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;
			Bitmapset  *insertedCols;
			Bitmapset  *updatedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_PartitionRoot)
					{
						HeapTuple	tuple = ExecFetchSlotTuple(slot);
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						TupleConversionMap *map;

						/*
						 * Report the failing row in terms of the root
						 * partitioned table's row type, not the leaf
						 * partition's, so convert the tuple back using a
						 * reverse map.
						 */
						rel = resultRelInfo->ri_PartitionRoot;
						tupdesc = RelationGetDescr(rel);
						/* a reverse map */
						map = convert_tuples_by_name(old_tupdesc, tupdesc,
													 gettext_noop("could not convert row type"));
						if (map != NULL)
						{
							tuple = do_convert_tuple(tuple, map);
							ExecSetSlotDescriptor(slot, tupdesc);
							ExecStoreTuple(tuple, slot, InvalidBuffer, false);
						}
					}

					/*
					 * Only columns the user modified (or may SELECT) appear
					 * in the error detail; see
					 * ExecBuildSlotValueDescription.
					 */
					insertedCols = GetInsertedColumns(resultRelInfo, estate);
					updatedCols = GetUpdatedColumns(resultRelInfo, estate);
					modifiedCols = bms_union(insertedCols, updatedCols);
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					/* RLS violation: deliberately omit row contents (see above) */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_MERGE_UPDATE_CHECK:
				case WCO_RLS_MERGE_DELETE_CHECK:
					/* MERGE target row failed the policy's USING qual */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					/* ON CONFLICT row failed the policy's USING qual */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2244
2245 /*
2246  * ExecBuildSlotValueDescription -- construct a string representing a tuple
2247  *
2248  * This is intentionally very similar to BuildIndexValueDescription, but
2249  * unlike that function, we truncate long field values (to at most maxfieldlen
2250  * bytes).  That seems necessary here since heap field values could be very
2251  * long, whereas index entries typically aren't so wide.
2252  *
2253  * Also, unlike the case with index entries, we need to be prepared to ignore
2254  * dropped columns.  We used to use the slot's tuple descriptor to decode the
2255  * data, but the slot's descriptor doesn't identify dropped columns, so we
2256  * now need to be passed the relation's descriptor.
2257  *
2258  * Note that, like BuildIndexValueDescription, if the user does not have
2259  * permission to view any of the columns involved, a NULL is returned.  Unlike
2260  * BuildIndexValueDescription, if the user has access to view a subset of the
2261  * column involved, that subset will be returned with a key identifying which
2262  * columns they are.
2263  */
2264 static char *
2265 ExecBuildSlotValueDescription(Oid reloid,
2266                                                           TupleTableSlot *slot,
2267                                                           TupleDesc tupdesc,
2268                                                           Bitmapset *modifiedCols,
2269                                                           int maxfieldlen)
2270 {
2271         StringInfoData buf;
2272         StringInfoData collist;
2273         bool            write_comma = false;
2274         bool            write_comma_collist = false;
2275         int                     i;
2276         AclResult       aclresult;
2277         bool            table_perm = false;
2278         bool            any_perm = false;
2279
2280         /*
2281          * Check if RLS is enabled and should be active for the relation; if so,
2282          * then don't return anything.  Otherwise, go through normal permission
2283          * checks.
2284          */
2285         if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2286                 return NULL;
2287
2288         initStringInfo(&buf);
2289
2290         appendStringInfoChar(&buf, '(');
2291
2292         /*
2293          * Check if the user has permissions to see the row.  Table-level SELECT
2294          * allows access to all columns.  If the user does not have table-level
2295          * SELECT then we check each column and include those the user has SELECT
2296          * rights on.  Additionally, we always include columns the user provided
2297          * data for.
2298          */
2299         aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2300         if (aclresult != ACLCHECK_OK)
2301         {
2302                 /* Set up the buffer for the column list */
2303                 initStringInfo(&collist);
2304                 appendStringInfoChar(&collist, '(');
2305         }
2306         else
2307                 table_perm = any_perm = true;
2308
2309         /* Make sure the tuple is fully deconstructed */
2310         slot_getallattrs(slot);
2311
2312         for (i = 0; i < tupdesc->natts; i++)
2313         {
2314                 bool            column_perm = false;
2315                 char       *val;
2316                 int                     vallen;
2317                 Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2318
2319                 /* ignore dropped columns */
2320                 if (att->attisdropped)
2321                         continue;
2322
2323                 if (!table_perm)
2324                 {
2325                         /*
2326                          * No table-level SELECT, so need to make sure they either have
2327                          * SELECT rights on the column or that they have provided the data
2328                          * for the column.  If not, omit this column from the error
2329                          * message.
2330                          */
2331                         aclresult = pg_attribute_aclcheck(reloid, att->attnum,
2332                                                                                           GetUserId(), ACL_SELECT);
2333                         if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
2334                                                           modifiedCols) || aclresult == ACLCHECK_OK)
2335                         {
2336                                 column_perm = any_perm = true;
2337
2338                                 if (write_comma_collist)
2339                                         appendStringInfoString(&collist, ", ");
2340                                 else
2341                                         write_comma_collist = true;
2342
2343                                 appendStringInfoString(&collist, NameStr(att->attname));
2344                         }
2345                 }
2346
2347                 if (table_perm || column_perm)
2348                 {
2349                         if (slot->tts_isnull[i])
2350                                 val = "null";
2351                         else
2352                         {
2353                                 Oid                     foutoid;
2354                                 bool            typisvarlena;
2355
2356                                 getTypeOutputInfo(att->atttypid,
2357                                                                   &foutoid, &typisvarlena);
2358                                 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2359                         }
2360
2361                         if (write_comma)
2362                                 appendStringInfoString(&buf, ", ");
2363                         else
2364                                 write_comma = true;
2365
2366                         /* truncate if needed */
2367                         vallen = strlen(val);
2368                         if (vallen <= maxfieldlen)
2369                                 appendStringInfoString(&buf, val);
2370                         else
2371                         {
2372                                 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2373                                 appendBinaryStringInfo(&buf, val, vallen);
2374                                 appendStringInfoString(&buf, "...");
2375                         }
2376                 }
2377         }
2378
2379         /* If we end up with zero columns being returned, then return NULL. */
2380         if (!any_perm)
2381                 return NULL;
2382
2383         appendStringInfoChar(&buf, ')');
2384
2385         if (!table_perm)
2386         {
2387                 appendStringInfoString(&collist, ") = ");
2388                 appendStringInfoString(&collist, buf.data);
2389
2390                 return collist.data;
2391         }
2392
2393         return buf.data;
2394 }
2395
2396
2397 /*
2398  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2399  * given ResultRelInfo
2400  */
2401 LockTupleMode
2402 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2403 {
2404         Bitmapset  *keyCols;
2405         Bitmapset  *updatedCols;
2406
2407         /*
2408          * Compute lock mode to use.  If columns that are part of the key have not
2409          * been modified, then we can use a weaker lock, allowing for better
2410          * concurrency.
2411          */
2412         updatedCols = GetUpdatedColumns(relinfo, estate);
2413         keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2414                                                                                  INDEX_ATTR_BITMAP_KEY);
2415
2416         if (bms_overlap(keyCols, updatedCols))
2417                 return LockTupleExclusive;
2418
2419         return LockTupleNoKeyExclusive;
2420 }
2421
2422 /*
2423  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2424  *
2425  * If no such struct, either return NULL or throw error depending on missing_ok
2426  */
2427 ExecRowMark *
2428 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2429 {
2430         ListCell   *lc;
2431
2432         foreach(lc, estate->es_rowMarks)
2433         {
2434                 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2435
2436                 if (erm->rti == rti)
2437                         return erm;
2438         }
2439         if (!missing_ok)
2440                 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2441         return NULL;
2442 }
2443
2444 /*
2445  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2446  *
2447  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2448  * input plan node (not planstate node!).  We need the latter to find out
2449  * the column numbers of the resjunk columns.
2450  */
2451 ExecAuxRowMark *
2452 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2453 {
2454         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2455         char            resname[32];
2456
2457         aerm->rowmark = erm;
2458
2459         /* Look up the resjunk columns associated with this rowmark */
2460         if (erm->markType != ROW_MARK_COPY)
2461         {
2462                 /* need ctid for all methods other than COPY */
2463                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2464                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2465                                                                                                            resname);
2466                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2467                         elog(ERROR, "could not find junk %s column", resname);
2468         }
2469         else
2470         {
2471                 /* need wholerow if COPY */
2472                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2473                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2474                                                                                                                 resname);
2475                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2476                         elog(ERROR, "could not find junk %s column", resname);
2477         }
2478
2479         /* if child rel, need tableoid */
2480         if (erm->rti != erm->prti)
2481         {
2482                 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2483                 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2484                                                                                                            resname);
2485                 if (!AttributeNumberIsValid(aerm->toidAttNo))
2486                         elog(ERROR, "could not find junk %s column", resname);
2487         }
2488
2489         return aerm;
2490 }
2491
2492
2493 /*
2494  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2495  * process the updated version under READ COMMITTED rules.
2496  *
2497  * See backend/executor/README for some info about how this works.
2498  */
2499
2500
2501 /*
2502  * Check a modified tuple to see if we want to process its updated version
2503  * under READ COMMITTED rules.
2504  *
2505  *      estate - outer executor state data
2506  *      epqstate - state for EvalPlanQual rechecking
2507  *      relation - table containing tuple
2508  *      rti - rangetable index of table containing tuple
2509  *      lockmode - requested tuple lock mode
2510  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2511  *      priorXmax - t_xmax from the outdated tuple
2512  *
2513  * *tid is also an output parameter: it's modified to hold the TID of the
2514  * latest version of the tuple (note this may be changed even on failure)
2515  *
2516  * Returns a slot containing the new candidate update/delete tuple, or
2517  * NULL if we determine we shouldn't process the row.
2518  *
2519  * Note: properly, lockmode should be declared as enum LockTupleMode,
2520  * but we use "int" to avoid having to include heapam.h in executor.h.
2521  */
2522 TupleTableSlot *
2523 EvalPlanQual(EState *estate, EPQState *epqstate,
2524                          Relation relation, Index rti, int lockmode,
2525                          ItemPointer tid, TransactionId priorXmax)
2526 {
2527         TupleTableSlot *slot;
2528         HeapTuple       copyTuple;
2529
2530         Assert(rti > 0);
2531
2532         /*
2533          * Get and lock the updated version of the row; if fail, return NULL.
2534          */
2535         copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2536                                                                   tid, priorXmax);
2537
2538         if (copyTuple == NULL)
2539                 return NULL;
2540
2541         /*
2542          * For UPDATE/DELETE we have to return tid of actual row we're executing
2543          * PQ for.
2544          */
2545         *tid = copyTuple->t_self;
2546
2547         /*
2548          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
2549          */
2550         EvalPlanQualBegin(epqstate, estate);
2551
2552         /*
2553          * Free old test tuple, if any, and store new tuple where relation's scan
2554          * node will see it
2555          */
2556         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2557
2558         /*
2559          * Fetch any non-locked source rows
2560          */
2561         EvalPlanQualFetchRowMarks(epqstate);
2562
2563         /*
2564          * Run the EPQ query.  We assume it will return at most one tuple.
2565          */
2566         slot = EvalPlanQualNext(epqstate);
2567
2568         /*
2569          * If we got a tuple, force the slot to materialize the tuple so that it
2570          * is not dependent on any local state in the EPQ query (in particular,
2571          * it's highly likely that the slot contains references to any pass-by-ref
2572          * datums that may be present in copyTuple).  As with the next step, this
2573          * is to guard against early re-use of the EPQ query.
2574          */
2575         if (!TupIsNull(slot))
2576                 (void) ExecMaterializeSlot(slot);
2577
2578         /*
2579          * Clear out the test tuple.  This is needed in case the EPQ query is
2580          * re-used to test a tuple for a different relation.  (Not clear that can
2581          * really happen, but let's be safe.)
2582          */
2583         EvalPlanQualSetTuple(epqstate, rti, NULL);
2584
2585         return slot;
2586 }
2587
2588 /*
2589  * Fetch a copy of the newest version of an outdated tuple
2590  *
2591  *      estate - executor state data
2592  *      relation - table containing tuple
2593  *      lockmode - requested tuple lock mode
2594  *      wait_policy - requested lock wait policy
2595  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2596  *      priorXmax - t_xmax from the outdated tuple
2597  *
2598  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2599  * that there is no newest version (ie, the row was deleted not updated).
2600  * We also return NULL if the tuple is locked and the wait policy is to skip
2601  * such tuples.
2602  *
2603  * If successful, we have locked the newest tuple version, so caller does not
2604  * need to worry about it changing anymore.
2605  *
2606  * Note: properly, lockmode should be declared as enum LockTupleMode,
2607  * but we use "int" to avoid having to include heapam.h in executor.h.
2608  */
2609 HeapTuple
2610 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2611                                   LockWaitPolicy wait_policy,
2612                                   ItemPointer tid, TransactionId priorXmax)
2613 {
2614         HeapTuple       copyTuple = NULL;
2615         HeapTupleData tuple;
2616         SnapshotData SnapshotDirty;
2617
2618         /*
2619          * fetch target tuple
2620          *
2621          * Loop here to deal with updated or busy tuples
2622          */
2623         InitDirtySnapshot(SnapshotDirty);
2624         tuple.t_self = *tid;
2625         for (;;)
2626         {
2627                 Buffer          buffer;
2628
2629                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2630                 {
2631                         HTSU_Result test;
2632                         HeapUpdateFailureData hufd;
2633
2634                         /*
2635                          * If xmin isn't what we're expecting, the slot must have been
2636                          * recycled and reused for an unrelated tuple.  This implies that
2637                          * the latest version of the row was deleted, so we need do
2638                          * nothing.  (Should be safe to examine xmin without getting
2639                          * buffer's content lock.  We assume reading a TransactionId to be
2640                          * atomic, and Xmin never changes in an existing tuple, except to
2641                          * invalid or frozen, and neither of those can match priorXmax.)
2642                          */
2643                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2644                                                                          priorXmax))
2645                         {
2646                                 ReleaseBuffer(buffer);
2647                                 return NULL;
2648                         }
2649
2650                         /* otherwise xmin should not be dirty... */
2651                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2652                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2653
2654                         /*
2655                          * If tuple is being updated by other transaction then we have to
2656                          * wait for its commit/abort, or die trying.
2657                          */
2658                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2659                         {
2660                                 ReleaseBuffer(buffer);
2661                                 switch (wait_policy)
2662                                 {
2663                                         case LockWaitBlock:
2664                                                 XactLockTableWait(SnapshotDirty.xmax,
2665                                                                                   relation, &tuple.t_self,
2666                                                                                   XLTW_FetchUpdated);
2667                                                 break;
2668                                         case LockWaitSkip:
2669                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2670                                                         return NULL;    /* skip instead of waiting */
2671                                                 break;
2672                                         case LockWaitError:
2673                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2674                                                         ereport(ERROR,
2675                                                                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2676                                                                          errmsg("could not obtain lock on row in relation \"%s\"",
2677                                                                                         RelationGetRelationName(relation))));
2678                                                 break;
2679                                 }
2680                                 continue;               /* loop back to repeat heap_fetch */
2681                         }
2682
2683                         /*
2684                          * If tuple was inserted by our own transaction, we have to check
2685                          * cmin against es_output_cid: cmin >= current CID means our
2686                          * command cannot see the tuple, so we should ignore it. Otherwise
2687                          * heap_lock_tuple() will throw an error, and so would any later
2688                          * attempt to update or delete the tuple.  (We need not check cmax
2689                          * because HeapTupleSatisfiesDirty will consider a tuple deleted
2690                          * by our transaction dead, regardless of cmax.) We just checked
2691                          * that priorXmax == xmin, so we can test that variable instead of
2692                          * doing HeapTupleHeaderGetXmin again.
2693                          */
2694                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2695                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2696                         {
2697                                 ReleaseBuffer(buffer);
2698                                 return NULL;
2699                         }
2700
2701                         /*
2702                          * This is a live tuple, so now try to lock it.
2703                          */
2704                         test = heap_lock_tuple(relation, &tuple,
2705                                                                    estate->es_output_cid,
2706                                                                    lockmode, wait_policy,
2707                                                                    false, &buffer, &hufd);
2708                         /* We now have two pins on the buffer, get rid of one */
2709                         ReleaseBuffer(buffer);
2710
2711                         switch (test)
2712                         {
2713                                 case HeapTupleSelfUpdated:
2714
2715                                         /*
2716                                          * The target tuple was already updated or deleted by the
2717                                          * current command, or by a later command in the current
2718                                          * transaction.  We *must* ignore the tuple in the former
2719                                          * case, so as to avoid the "Halloween problem" of
2720                                          * repeated update attempts.  In the latter case it might
2721                                          * be sensible to fetch the updated tuple instead, but
2722                                          * doing so would require changing heap_update and
2723                                          * heap_delete to not complain about updating "invisible"
2724                                          * tuples, which seems pretty scary (heap_lock_tuple will
2725                                          * not complain, but few callers expect
2726                                          * HeapTupleInvisible, and we're not one of them).  So for
2727                                          * now, treat the tuple as deleted and do not process.
2728                                          */
2729                                         ReleaseBuffer(buffer);
2730                                         return NULL;
2731
2732                                 case HeapTupleMayBeUpdated:
2733                                         /* successfully locked */
2734                                         break;
2735
2736                                 case HeapTupleUpdated:
2737                                         ReleaseBuffer(buffer);
2738                                         if (IsolationUsesXactSnapshot())
2739                                                 ereport(ERROR,
2740                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2741                                                                  errmsg("could not serialize access due to concurrent update")));
2742
2743                                         /* Should not encounter speculative tuple on recheck */
2744                                         Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2745                                         if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2746                                         {
2747                                                 /* it was updated, so look at the updated version */
2748                                                 tuple.t_self = hufd.ctid;
2749                                                 /* updated row should have xmin matching this xmax */
2750                                                 priorXmax = hufd.xmax;
2751                                                 continue;
2752                                         }
2753                                         /* tuple was deleted, so give up */
2754                                         return NULL;
2755
2756                                 case HeapTupleWouldBlock:
2757                                         ReleaseBuffer(buffer);
2758                                         return NULL;
2759
2760                                 case HeapTupleInvisible:
2761                                         elog(ERROR, "attempted to lock invisible tuple");
2762
2763                                 default:
2764                                         ReleaseBuffer(buffer);
2765                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2766                                                  test);
2767                                         return NULL;    /* keep compiler quiet */
2768                         }
2769
2770                         /*
2771                          * We got tuple - now copy it for use by recheck query.
2772                          */
2773                         copyTuple = heap_copytuple(&tuple);
2774                         ReleaseBuffer(buffer);
2775                         break;
2776                 }
2777
2778                 /*
2779                  * If the referenced slot was actually empty, the latest version of
2780                  * the row must have been deleted, so we need do nothing.
2781                  */
2782                 if (tuple.t_data == NULL)
2783                 {
2784                         ReleaseBuffer(buffer);
2785                         return NULL;
2786                 }
2787
2788                 /*
2789                  * As above, if xmin isn't what we're expecting, do nothing.
2790                  */
2791                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2792                                                                  priorXmax))
2793                 {
2794                         ReleaseBuffer(buffer);
2795                         return NULL;
2796                 }
2797
2798                 /*
2799                  * If we get here, the tuple was found but failed SnapshotDirty.
2800                  * Assuming the xmin is either a committed xact or our own xact (as it
2801                  * certainly should be if we're trying to modify the tuple), this must
2802                  * mean that the row was updated or deleted by either a committed xact
2803                  * or our own xact.  If it was deleted, we can ignore it; if it was
2804                  * updated then chain up to the next version and repeat the whole
2805                  * process.
2806                  *
2807                  * As above, it should be safe to examine xmax and t_ctid without the
2808                  * buffer content lock, because they can't be changing.
2809                  */
2810                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2811                 {
2812                         /* deleted, so forget about it */
2813                         ReleaseBuffer(buffer);
2814                         return NULL;
2815                 }
2816
2817                 /* updated, so look at the updated row */
2818                 tuple.t_self = tuple.t_data->t_ctid;
2819                 /* updated row should have xmin matching this xmax */
2820                 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2821                 ReleaseBuffer(buffer);
2822                 /* loop back to fetch next in chain */
2823         }
2824
2825         /*
2826          * Return the copied tuple
2827          */
2828         return copyTuple;
2829 }
2830
2831 /*
2832  * EvalPlanQualInit -- initialize during creation of a plan state node
2833  * that might need to invoke EPQ processing.
2834  *
2835  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2836  * with EvalPlanQualSetPlan.
2837  */
2838 void
2839 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2840                                  Plan *subplan, List *auxrowmarks, int epqParam)
2841 {
2842         /* Mark the EPQ state inactive */
2843         epqstate->estate = NULL;
2844         epqstate->planstate = NULL;
2845         epqstate->origslot = NULL;
2846         /* ... and remember data that EvalPlanQualBegin will need */
2847         epqstate->plan = subplan;
2848         epqstate->arowMarks = auxrowmarks;
2849         epqstate->epqParam = epqParam;
2850 }
2851
2852 /*
2853  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2854  *
2855  * We need this so that ModifyTable can deal with multiple subplans.
2856  */
2857 void
2858 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2859 {
2860         /* If we have a live EPQ query, shut it down */
2861         EvalPlanQualEnd(epqstate);
2862         /* And set/change the plan pointer */
2863         epqstate->plan = subplan;
2864         /* The rowmarks depend on the plan, too */
2865         epqstate->arowMarks = auxrowmarks;
2866 }
2867
2868 /*
2869  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2870  *
2871  * NB: passed tuple must be palloc'd; it may get freed later
2872  */
2873 void
2874 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2875 {
2876         EState     *estate = epqstate->estate;
2877
2878         Assert(rti > 0);
2879
2880         /*
2881          * free old test tuple, if any, and store new tuple where relation's scan
2882          * node will see it
2883          */
2884         if (estate->es_epqTuple[rti - 1] != NULL)
2885                 heap_freetuple(estate->es_epqTuple[rti - 1]);
2886         estate->es_epqTuple[rti - 1] = tuple;
2887         estate->es_epqTupleSet[rti - 1] = true;
2888 }
2889
2890 /*
2891  * Fetch back the current test tuple (if any) for the specified RTI
2892  */
2893 HeapTuple
2894 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2895 {
2896         EState     *estate = epqstate->estate;
2897
2898         Assert(rti > 0);
2899
2900         return estate->es_epqTuple[rti - 1];
2901 }
2902
/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
	ListCell   *l;

	Assert(epqstate->origslot != NULL);

	/* Process each auxiliary rowmark attached to the EPQ state */
	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
		ExecRowMark *erm = aerm->rowmark;
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;

		/* This routine handles only non-locking marks (REFERENCE / COPY) */
		if (RowMarkRequiresRowShareLock(erm->markType))
			elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");

		/* clear any leftover test tuple for this rel */
		EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

		/* if child rel, must check whether it produced this row */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			/* tableoid junk attribute identifies which child emitted the row */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->toidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				continue;
			}
		}

		if (erm->markType == ROW_MARK_REFERENCE)
		{
			HeapTuple	copyTuple;

			Assert(erm->relation != NULL);

			/* fetch the tuple's ctid */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->ctidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;

			/* fetch requests on foreign tables must be passed to their FDW */
			if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
			{
				FdwRoutine *fdwroutine;
				bool		updated = false;

				fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
				/* this should have been checked already, but let's be safe */
				if (fdwroutine->RefetchForeignRow == NULL)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cannot lock rows in foreign table \"%s\"",
									RelationGetRelationName(erm->relation))));
				copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
														  erm,
														  datum,
														  &updated);
				if (copyTuple == NULL)
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				/*
				 * Ideally we'd insist on updated == false here, but that
				 * assumes that FDWs can track that exactly, which they might
				 * not be able to.  So just ignore the flag.
				 */
			}
			else
			{
				/* ordinary table, fetch the tuple */
				Buffer		buffer;

				tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
				/* SnapshotAny: we want this exact ctid regardless of visibility */
				if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
								false, NULL))
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				if (HeapTupleHeaderGetNatts(tuple.t_data) <
					RelationGetDescr(erm->relation)->natts)
				{
					/*
					 * Tuple has fewer attributes than the current descriptor;
					 * build a copy with the trailing ones filled in.
					 */
					copyTuple = heap_expand_tuple(&tuple,
												  RelationGetDescr(erm->relation));
				}
				else
				{
					/* successful, copy tuple */
					copyTuple = heap_copytuple(&tuple);
				}
				ReleaseBuffer(buffer);
			}

			/* store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
		}
		else
		{
			HeapTupleHeader td;

			Assert(erm->markType == ROW_MARK_COPY);

			/* fetch the whole-row Var for the relation */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->wholeAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			td = DatumGetHeapTupleHeader(datum);

			/* build a temporary HeapTuple control structure */
			tuple.t_len = HeapTupleHeaderGetDatumLength(td);
			tuple.t_data = td;
			/* relation might be a foreign table, if so provide tableoid */
			tuple.t_tableOid = erm->relid;
			/* also copy t_ctid in case there's valid data there */
			tuple.t_self = td->t_ctid;

			/* copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
		}
	}
}
3046
3047 /*
3048  * Fetch the next row (if any) from EvalPlanQual testing
3049  *
3050  * (In practice, there should never be more than one row...)
3051  */
3052 TupleTableSlot *
3053 EvalPlanQualNext(EPQState *epqstate)
3054 {
3055         MemoryContext oldcontext;
3056         TupleTableSlot *slot;
3057
3058         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
3059         slot = ExecProcNode(epqstate->planstate);
3060         MemoryContextSwitchTo(oldcontext);
3061
3062         return slot;
3063 }
3064
3065 /*
3066  * Initialize or reset an EvalPlanQual state tree
3067  */
3068 void
3069 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
3070 {
3071         EState     *estate = epqstate->estate;
3072
3073         if (estate == NULL)
3074         {
3075                 /* First time through, so create a child EState */
3076                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
3077         }
3078         else
3079         {
3080                 /*
3081                  * We already have a suitable child EPQ tree, so just reset it.
3082                  */
3083                 int                     rtsize = list_length(parentestate->es_range_table);
3084                 PlanState  *planstate = epqstate->planstate;
3085
3086                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
3087
3088                 /* Recopy current values of parent parameters */
3089                 if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3090                 {
3091                         int                     i;
3092
3093                         i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3094
3095                         while (--i >= 0)
3096                         {
3097                                 /* copy value if any, but not execPlan link */
3098                                 estate->es_param_exec_vals[i].value =
3099                                         parentestate->es_param_exec_vals[i].value;
3100                                 estate->es_param_exec_vals[i].isnull =
3101                                         parentestate->es_param_exec_vals[i].isnull;
3102                         }
3103                 }
3104
3105                 /*
3106                  * Mark child plan tree as needing rescan at all scan nodes.  The
3107                  * first ExecProcNode will take care of actually doing the rescan.
3108                  */
3109                 planstate->chgParam = bms_add_member(planstate->chgParam,
3110                                                                                          epqstate->epqParam);
3111         }
3112 }
3113
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
	EState	   *estate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(parentestate->es_range_table);

	epqstate->estate = estate = CreateExecutorState();

	/* All child-EState setup happens in the child's own query context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 *
	 * The ResultRelInfo array management is trickier than it looks.  We
	 * create a fresh array for the child but copy all the content from the
	 * parent.  This is because it's okay for the child to share any
	 * per-relation state the parent has already created --- but if the child
	 * sets up any ResultRelInfo fields, such as its own junkfilter, that
	 * state must *not* propagate back to the parent.  (For one thing, the
	 * pointed-to data is in a memory context that won't last long enough.)
	 */
	estate->es_direction = ForwardScanDirection;
	estate->es_snapshot = parentestate->es_snapshot;
	estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	estate->es_range_table = parentestate->es_range_table;
	estate->es_plannedstmt = parentestate->es_plannedstmt;
	estate->es_junkFilter = parentestate->es_junkFilter;
	estate->es_output_cid = parentestate->es_output_cid;
	if (parentestate->es_num_result_relations > 0)
	{
		int			numResultRelations = parentestate->es_num_result_relations;
		ResultRelInfo *resultRelInfos;

		/* fresh array, shared content -- see comment above */
		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		memcpy(resultRelInfos, parentestate->es_result_relations,
			   numResultRelations * sizeof(ResultRelInfo));
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
	}
	/* es_result_relation_info must NOT be copied */
	/* es_trig_target_relations must NOT be copied */
	estate->es_rowMarks = parentestate->es_rowMarks;
	estate->es_top_eflags = parentestate->es_top_eflags;
	estate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	estate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			estate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			estate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Each EState must have its own es_epqScanDone state, but if we have
	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
	 * sub-rechecks to inherit the values being examined by an outer recheck.
	 */
	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
	if (parentestate->es_epqTuple != NULL)
	{
		/* nested EPQ: share the outer recheck's test-tuple arrays */
		estate->es_epqTuple = parentestate->es_epqTuple;
		estate->es_epqTupleSet = parentestate->es_epqTupleSet;
	}
	else
	{
		/* outermost EPQ: allocate fresh (all-clear) test-tuple arrays */
		estate->es_epqTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
		estate->es_epqTupleSet = (bool *)
			palloc0(rtsize * sizeof(bool));
	}

	/*
	 * Each estate also has its own tuple table.
	 */
	estate->es_tupleTable = NIL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries. Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(estate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, estate, 0);
		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->planstate = ExecInitNode(planTree, estate, 0);

	MemoryContextSwitchTo(oldcontext);
}
3250
3251 /*
3252  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3253  * or if we are done with the current EPQ child.
3254  *
3255  * This is a cut-down version of ExecutorEnd(); basically we want to do most
3256  * of the normal cleanup, but *not* close result relations (which we are
3257  * just sharing from the outer query).  We do, however, have to close any
3258  * trigger target relations that got opened, since those are not shared.
3259  * (There probably shouldn't be any of the latter, but just in case...)
3260  */
3261 void
3262 EvalPlanQualEnd(EPQState *epqstate)
3263 {
3264         EState     *estate = epqstate->estate;
3265         MemoryContext oldcontext;
3266         ListCell   *l;
3267
3268         if (estate == NULL)
3269                 return;                                 /* idle, so nothing to do */
3270
3271         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3272
3273         ExecEndNode(epqstate->planstate);
3274
3275         foreach(l, estate->es_subplanstates)
3276         {
3277                 PlanState  *subplanstate = (PlanState *) lfirst(l);
3278
3279                 ExecEndNode(subplanstate);
3280         }
3281
3282         /* throw away the per-estate tuple table */
3283         ExecResetTupleTable(estate->es_tupleTable, false);
3284
3285         /* close any trigger target relations attached to this EState */
3286         ExecCleanUpTriggerState(estate);
3287
3288         MemoryContextSwitchTo(oldcontext);
3289
3290         FreeExecutorState(estate);
3291
3292         /* Mark EPQState idle */
3293         epqstate->estate = NULL;
3294         epqstate->planstate = NULL;
3295         epqstate->origslot = NULL;
3296 }