/*-------------------------------------------------------------------------
 *
 * execMain.c
 *        top level executor interface routines
 *
 * INTERFACE ROUTINES
 *      ExecutorStart()
 *      ExecutorRun()
 *      ExecutorFinish()
 *      ExecutorEnd()
 *
 *      These four procedures are the external interface to the executor.
 *      In each case, the query descriptor is required as an argument.
 *
 *      ExecutorStart must be called at the beginning of execution of any
 *      query plan and ExecutorEnd must always be called at the end of
 *      execution of a plan (unless it is aborted due to error).
 *
 *      ExecutorRun accepts direction and count arguments that specify whether
 *      the plan is to be executed forwards or backwards, and for how many tuples.
 *      In some cases ExecutorRun may be called multiple times to process all
 *      the tuples for a plan.  It is also acceptable to stop short of executing
 *      the whole plan (but only if it is a SELECT).
 *
 *      ExecutorFinish must be called after the final ExecutorRun call and
 *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
 *      which should also omit ExecutorRun.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *        src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */
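
/*
 * Illustrative sketch (hedged example): a typical caller, such as
 * ProcessQuery() in tcop/pquery.c, drives the four interface routines
 * roughly as below.  The QueryDesc arguments shown are assumptions made for
 * illustration; see CreateQueryDesc() and tcop/pquery.c for the
 * authoritative usage.
 *
 *              QueryDesc  *queryDesc;
 *
 *              queryDesc = CreateQueryDesc(plannedstmt, sourceText,
 *                                                                      GetActiveSnapshot(), InvalidSnapshot,
 *                                                                      dest, params, queryEnv, 0);
 *
 *              ExecutorStart(queryDesc, 0);
 *              ExecutorRun(queryDesc, ForwardScanDirection, 0, true);
 *              ExecutorFinish(queryDesc);
 *              ExecutorEnd(queryDesc);
 *
 *              FreeQueryDesc(queryDesc);
 */
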
#include "postgres.h"

#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/partition.h"
#include "catalog/pg_publication.h"
#include "commands/matview.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteManip.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
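
/*
 * Illustrative sketch (hedged example): an extension normally installs one
 * of these hooks from its _PG_init(), saving any previously installed hook
 * and chaining to it so that multiple plugins can coexist.  The my_/prev_
 * names below are hypothetical; pg_stat_statements follows this pattern.
 *
 *              static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *              static void
 *              my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *              {
 *                      // ... do plugin work before startup ...
 *                      if (prev_ExecutorStart)
 *                              prev_ExecutorStart(queryDesc, eflags);
 *                      else
 *                              standard_ExecutorStart(queryDesc, eflags);
 *              }
 *
 *              void
 *              _PG_init(void)
 *              {
 *                      prev_ExecutorStart = ExecutorStart_hook;
 *                      ExecutorStart_hook = my_ExecutorStart;
 *              }
 */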

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
                        bool use_parallel_mode,
                        CmdType operation,
                        bool sendTuples,
                        uint64 numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest,
                        bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
                                                  Bitmapset *modifiedCols,
                                                  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
                                                          TupleTableSlot *slot,
                                                          TupleDesc tupdesc,
                                                          Bitmapset *modifiedCols,
                                                          int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                                  Plan *planTree);

/*
 * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 */
#define GetInsertedColumns(relinfo, estate) \
        (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
        (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)

/* end of local decls */

117
118 /* ----------------------------------------------------------------
119  *              ExecutorStart
120  *
121  *              This routine must be called at the beginning of any execution of any
122  *              query plan
123  *
124  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
125  * only because some places use QueryDescs for utility commands).  The tupDesc
126  * field of the QueryDesc is filled in to describe the tuples that will be
127  * returned, and the internal fields (estate and planstate) are set up.
128  *
129  * eflags contains flag bits as described in executor.h.
130  *
131  * NB: the CurrentMemoryContext when this is called will become the parent
132  * of the per-query context used for this Executor invocation.
133  *
134  * We provide a function hook variable that lets loadable plugins
135  * get control when ExecutorStart is called.  Such a plugin would
136  * normally call standard_ExecutorStart().
137  *
138  * ----------------------------------------------------------------
139  */
140 void
141 ExecutorStart(QueryDesc *queryDesc, int eflags)
142 {
143         if (ExecutorStart_hook)
144                 (*ExecutorStart_hook) (queryDesc, eflags);
145         else
146                 standard_ExecutorStart(queryDesc, eflags);
147 }
148
149 void
150 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
151 {
152         EState     *estate;
153         MemoryContext oldcontext;
154
155         /* sanity checks: queryDesc must not be started already */
156         Assert(queryDesc != NULL);
157         Assert(queryDesc->estate == NULL);
158
159         /*
160          * If the transaction is read-only, we need to check if any writes are
161          * planned to non-temporary tables.  EXPLAIN is considered read-only.
162          *
163          * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
164          * would require (a) storing the combocid hash in shared memory, rather
165          * than synchronizing it just once at the start of parallelism, and (b) an
166          * alternative to heap_update()'s reliance on xmax for mutual exclusion.
167          * INSERT may have no such troubles, but we forbid it to simplify the
168          * checks.
169          *
170          * We have lower-level defenses in CommandCounterIncrement and elsewhere
171          * against performing unsafe operations in parallel mode, but this gives a
172          * more user-friendly error message.
173          */
174         if ((XactReadOnly || IsInParallelMode()) &&
175                 !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
176                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
177
178         /*
179          * Build EState, switch into per-query memory context for startup.
180          */
181         estate = CreateExecutorState();
182         queryDesc->estate = estate;
183
184         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
185
186         /*
187          * Fill in external parameters, if any, from queryDesc; and allocate
188          * workspace for internal parameters
189          */
190         estate->es_param_list_info = queryDesc->params;
191
192         if (queryDesc->plannedstmt->paramExecTypes != NIL)
193         {
194                 int                     nParamExec;
195
196                 nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
197                 estate->es_param_exec_vals = (ParamExecData *)
198                         palloc0(nParamExec * sizeof(ParamExecData));
199         }
200
201         estate->es_sourceText = queryDesc->sourceText;
202
203         /*
204          * Fill in the query environment, if any, from queryDesc.
205          */
206         estate->es_queryEnv = queryDesc->queryEnv;
207
208         /*
209          * If non-read-only query, set the command ID to mark output tuples with
210          */
211         switch (queryDesc->operation)
212         {
213                 case CMD_SELECT:
214
215                         /*
216                          * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
217                          * tuples
218                          */
219                         if (queryDesc->plannedstmt->rowMarks != NIL ||
220                                 queryDesc->plannedstmt->hasModifyingCTE)
221                                 estate->es_output_cid = GetCurrentCommandId(true);
222
223                         /*
224                          * A SELECT without modifying CTEs can't possibly queue triggers,
225                          * so force skip-triggers mode. This is just a marginal efficiency
226                          * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
227                          * all that expensive, but we might as well do it.
228                          */
229                         if (!queryDesc->plannedstmt->hasModifyingCTE)
230                                 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
231                         break;
232
233                 case CMD_INSERT:
234                 case CMD_DELETE:
235                 case CMD_UPDATE:
236                         estate->es_output_cid = GetCurrentCommandId(true);
237                         break;
238
239                 default:
240                         elog(ERROR, "unrecognized operation code: %d",
241                                  (int) queryDesc->operation);
242                         break;
243         }
244
245         /*
246          * Copy other important information into the EState
247          */
248         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
249         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
250         estate->es_top_eflags = eflags;
251         estate->es_instrument = queryDesc->instrument_options;
252
253         if (queryDesc->plannedstmt)
254                 estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
255
256         /*
257          * Set up an AFTER-trigger statement context, unless told not to, or
258          * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
259          */
260         if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
261                 AfterTriggerBeginQuery();
262
263         /*
264          * Initialize the plan state tree
265          */
266         InitPlan(queryDesc, eflags);
267
268         MemoryContextSwitchTo(oldcontext);
269 }

/* ----------------------------------------------------------------
 *              ExecutorRun
 *
 *              This is the main routine of the executor module. It accepts
 *              the query descriptor from the traffic cop and executes the
 *              query plan.
 *
 *              ExecutorStart must have been called already.
 *
 *              If direction is NoMovementScanDirection then nothing is done
 *              except to start up/shut down the destination.  Otherwise,
 *              we retrieve up to 'count' tuples in the specified direction.
 *
 *              Note: count = 0 is interpreted as no portal limit, i.e., run to
 *              completion.  Also note that the count limit is only applied to
 *              retrieved tuples, not for instance to those inserted/updated/deleted
 *              by a ModifyTable plan node.
 *
 *              There is no return value, but output tuples (if any) are sent to
 *              the destination receiver specified in the QueryDesc; and the number
 *              of tuples processed at the top level can be found in
 *              estate->es_processed.
 *
 *              We provide a function hook variable that lets loadable plugins
 *              get control when ExecutorRun is called.  Such a plugin would
 *              normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, uint64 count,
                        bool execute_once)
{
        if (ExecutorRun_hook)
                (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
        else
                standard_ExecutorRun(queryDesc, direction, count, execute_once);
}
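
/*
 * Illustrative sketch (hedged example): because 'count' limits only the
 * tuples returned and 0 means "run to completion", a cursor-style caller
 * can drain a started query in fixed-size batches by calling ExecutorRun
 * repeatedly with execute_once = false.  (Portals actually drive this via
 * PortalRunSelect(); the loop below is a simplified assumption, relying on
 * es_processed being reset on each call.)
 *
 *              ExecutorStart(queryDesc, 0);
 *              do
 *                      ExecutorRun(queryDesc, ForwardScanDirection, 10, false);
 *              while (queryDesc->estate->es_processed == 10);
 *              ExecutorFinish(queryDesc);
 *              ExecutorEnd(queryDesc);
 */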

void
standard_ExecutorRun(QueryDesc *queryDesc,
                                         ScanDirection direction, uint64 count, bool execute_once)
{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
        bool            sendTuples;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);
        Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /* Allow instrumentation of Executor overall runtime */
        if (queryDesc->totaltime)
                InstrStartNode(queryDesc->totaltime);

        /*
         * extract information from the query descriptor
         */
        operation = queryDesc->operation;
        dest = queryDesc->dest;

        /*
         * startup tuple receiver, if we will be emitting tuples
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;

        sendTuples = (operation == CMD_SELECT ||
                                  queryDesc->plannedstmt->hasReturning);

        if (sendTuples)
                dest->rStartup(dest, operation, queryDesc->tupDesc);

        /*
         * run plan
         */
        if (!ScanDirectionIsNoMovement(direction))
        {
                if (execute_once && queryDesc->already_executed)
                        elog(ERROR, "can't re-execute query flagged for single execution");
                queryDesc->already_executed = true;

                ExecutePlan(estate,
                                        queryDesc->planstate,
                                        queryDesc->plannedstmt->parallelModeNeeded,
                                        operation,
                                        sendTuples,
                                        count,
                                        direction,
                                        dest,
                                        execute_once);
        }

        /*
         * shutdown tuple receiver, if we started it
         */
        if (sendTuples)
                dest->rShutdown(dest);

        if (queryDesc->totaltime)
                InstrStopNode(queryDesc->totaltime, estate->es_processed);

        MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *              ExecutorFinish
 *
 *              This routine must be called after the last ExecutorRun call.
 *              It performs cleanup such as firing AFTER triggers.  It is
 *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
 *              include these actions in the total runtime.
 *
 *              We provide a function hook variable that lets loadable plugins
 *              get control when ExecutorFinish is called.  Such a plugin would
 *              normally call standard_ExecutorFinish().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorFinish(QueryDesc *queryDesc)
{
        if (ExecutorFinish_hook)
                (*ExecutorFinish_hook) (queryDesc);
        else
                standard_ExecutorFinish(queryDesc);
}

void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);
        Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

        /* This should be run once and only once per Executor instance */
        Assert(!estate->es_finished);

        /* Switch into per-query memory context */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /* Allow instrumentation of Executor overall runtime */
        if (queryDesc->totaltime)
                InstrStartNode(queryDesc->totaltime);

        /* Run ModifyTable nodes to completion */
        ExecPostprocessPlan(estate);

        /* Execute queued AFTER triggers, unless told not to */
        if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
                AfterTriggerEndQuery(estate);

        if (queryDesc->totaltime)
                InstrStopNode(queryDesc->totaltime, 0);

        MemoryContextSwitchTo(oldcontext);

        estate->es_finished = true;
}

/* ----------------------------------------------------------------
 *              ExecutorEnd
 *
 *              This routine must be called at the end of execution of any
 *              query plan
 *
 *              We provide a function hook variable that lets loadable plugins
 *              get control when ExecutorEnd is called.  Such a plugin would
 *              normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
        if (ExecutorEnd_hook)
                (*ExecutorEnd_hook) (queryDesc);
        else
                standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
         * Assert is needed because ExecutorFinish is new as of 9.1, and callers
         * might forget to call it.
         */
        Assert(estate->es_finished ||
                   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

        /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        ExecEndPlan(queryDesc->planstate, estate);

        /* do away with our snapshots */
        UnregisterSnapshot(estate->es_snapshot);
        UnregisterSnapshot(estate->es_crosscheck_snapshot);

        /* release JIT context, if allocated */
        if (estate->es_jit)
                jit_release_context(estate->es_jit);

        /*
         * Must switch out of context before destroying it
         */
        MemoryContextSwitchTo(oldcontext);

        /*
         * Release EState and per-query memory context.  This should release
         * everything the executor has allocated.
         */
        FreeExecutorState(estate);

        /* Reset queryDesc fields that no longer point to anything */
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
        queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *              ExecutorRewind
 *
 *              This routine may be called on an open queryDesc to rewind it
 *              to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /* It's probably not sensible to rescan updating queries */
        Assert(queryDesc->operation == CMD_SELECT);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * rescan plan
         */
        ExecReScan(queryDesc->planstate);

        MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *              Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  Otherwise, throws an appropriate
 * error if ereport_on_violation is true, or simply returns false otherwise.
 *
 * Note that this does NOT address row level security policies (aka: RLS).  If
 * rows will be returned to the user as a result of this permission check
 * passing, then RLS also needs to be consulted (and check_enable_rls()).
 *
 * See rewrite/rowsecurity.c.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
        ListCell   *l;
        bool            result = true;

        foreach(l, rangeTable)
        {
                RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

                result = ExecCheckRTEPerms(rte);
                if (!result)
                {
                        Assert(rte->rtekind == RTE_RELATION);
                        if (ereport_on_violation)
                                aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
                                                           get_rel_name(rte->relid));
                        return false;
                }
        }

        if (ExecutorCheckPerms_hook)
                result = (*ExecutorCheckPerms_hook) (rangeTable,
                                                                                         ereport_on_violation);
        return result;
}
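
/*
 * Illustrative sketch (hedged example): a security extension can use
 * ExecutorCheckPerms_hook to layer extra checks on top of the built-in ACL
 * tests; sepgsql does roughly this.  The function name below is
 * hypothetical.
 *
 *              static bool
 *              my_ExecCheckPerms(List *rangeTable, bool ereport_on_violation)
 *              {
 *                      ListCell   *l;
 *
 *                      foreach(l, rangeTable)
 *                      {
 *                              RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 *
 *                              if (rte->rtekind != RTE_RELATION)
 *                                      continue;
 *                              // consult external security policy for rte->relid;
 *                              // on violation: ereport(ERROR, ...) if
 *                              // ereport_on_violation, else return false
 *                      }
 *                      return true;
 *              }
 *
 * installed from _PG_init() with:  ExecutorCheckPerms_hook = my_ExecCheckPerms;
 */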

/*
 * ExecCheckRTEPerms
 *              Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
        AclMode         requiredPerms;
        AclMode         relPerms;
        AclMode         remainingPerms;
        Oid                     relOid;
        Oid                     userid;

        /*
         * Only plain-relation RTEs need to be checked here.  Function RTEs are
         * checked when the function is prepared for execution.  Join, subquery,
         * and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
                return true;

        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
                return true;

        relOid = rte->relid;

        /*
         * userid to check as: current user unless we have a setuid indication.
         *
         * Note: GetUserId() is presently fast enough that there's no harm in
         * calling it separately for each RTE.  If that stops being true, we could
         * call it once in ExecCheckRTPerms and pass the userid down from there.
         * But for now, no need for the extra clutter.
         */
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

        /*
         * We must have *all* the requiredPerms bits, but some of the bits can be
         * satisfied from column-level rather than relation-level permissions.
         * First, remove any bits that are satisfied by relation permissions.
         */
        relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
        remainingPerms = requiredPerms & ~relPerms;
        if (remainingPerms != 0)
        {
                int                     col = -1;

                /*
                 * If we lack any permissions that exist only as relation permissions,
                 * we can fail straight away.
                 */
                if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
                        return false;

                /*
                 * Check to see if we have the needed privileges at column level.
                 *
                 * Note: failures just report a table-level error; it would be nicer
                 * to report a column-level error if we have some but not all of the
                 * column privileges.
                 */
                if (remainingPerms & ACL_SELECT)
                {
                        /*
                         * When the query doesn't explicitly reference any columns (for
                         * example, SELECT COUNT(*) FROM table), allow the query if we
                         * have SELECT on any column of the rel, as per SQL spec.
                         */
                        if (bms_is_empty(rte->selectedCols))
                        {
                                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                                                          ACLMASK_ANY) != ACLCHECK_OK)
                                        return false;
                        }

                        while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
                        {
                                /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
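                                /*
                                 * For example, assuming FirstLowInvalidHeapAttributeNumber
                                 * is -8 (see access/sysattr.h), user column 1 is kept as
                                 * bit 9 of selectedCols, and a whole-row reference
                                 * (attno 0) as bit 8.
                                 */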
                                AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;

                                if (attno == InvalidAttrNumber)
                                {
                                        /* Whole-row reference, must have priv on all cols */
                                        if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                                                                  ACLMASK_ALL) != ACLCHECK_OK)
                                                return false;
                                }
                                else
                                {
                                        if (pg_attribute_aclcheck(relOid, attno, userid,
                                                                                          ACL_SELECT) != ACLCHECK_OK)
                                                return false;
                                }
                        }
                }

                /*
                 * Basically the same for the mod columns, for both INSERT and UPDATE
                 * privilege as specified by remainingPerms.
                 */
                if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
                                                                                                                                          userid,
                                                                                                                                          rte->insertedCols,
                                                                                                                                          ACL_INSERT))
                        return false;

                if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
                                                                                                                                          userid,
                                                                                                                                          rte->updatedCols,
                                                                                                                                          ACL_UPDATE))
                        return false;
        }
        return true;
}

/*
 * ExecCheckRTEPermsModified
 *              Check INSERT or UPDATE access permissions for a single RTE (these
 *              are processed uniformly).
 */
static bool
ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
                                                  AclMode requiredPerms)
{
        int                     col = -1;

        /*
         * When the query doesn't explicitly update any columns, allow the query
         * if we have permission on any column of the rel.  This is to handle
         * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
         */
        if (bms_is_empty(modifiedCols))
        {
                if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
                                                                          ACLMASK_ANY) != ACLCHECK_OK)
                        return false;
        }

        while ((col = bms_next_member(modifiedCols, col)) >= 0)
        {
                /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
                AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;

                if (attno == InvalidAttrNumber)
                {
                        /* whole-row reference can't happen here */
                        elog(ERROR, "whole-row update is not implemented");
                }
                else
                {
                        if (pg_attribute_aclcheck(relOid, attno, userid,
                                                                          requiredPerms) != ACLCHECK_OK)
                                return false;
                }
        }
        return true;
}

/*
 * Check that the query does not imply any writes to non-temp tables;
 * unless we're in parallel mode, in which case don't even allow writes
 * to temp tables.
 *
 * Note: in a Hot Standby this would need to reject writes to temp
 * tables just as we do in parallel mode; but an HS standby can't have created
 * any temp tables in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
        ListCell   *l;

        /*
         * Fail if write permissions are requested in parallel mode for any table
         * (temp or non-temp); otherwise fail only for non-temp tables.
         */
        foreach(l, plannedstmt->rtable)
        {
                RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

                if (rte->rtekind != RTE_RELATION)
                        continue;

                if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
                        continue;

                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;

                PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
        }

        if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
                PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
}


/* ----------------------------------------------------------------
 *              InitPlan
 *
 *              Initializes the query plan: open files, allocate storage
 *              and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
        CmdType         operation = queryDesc->operation;
        PlannedStmt *plannedstmt = queryDesc->plannedstmt;
        Plan       *plan = plannedstmt->planTree;
        List       *rangeTable = plannedstmt->rtable;
        EState     *estate = queryDesc->estate;
        PlanState  *planstate;
        TupleDesc       tupType;
        ListCell   *l;
        int                     i;

        /*
         * Do permissions checks
         */
        ExecCheckRTPerms(rangeTable, true);

        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;
        estate->es_plannedstmt = plannedstmt;

        /*
         * initialize result relation stuff, and open/lock the result rels.
         *
         * We must do this before initializing the plan tree, else we might try to
         * do a lock upgrade if a result rel is also a source rel.
         */
        if (plannedstmt->resultRelations)
        {
                List       *resultRelations = plannedstmt->resultRelations;
                int                     numResultRelations = list_length(resultRelations);
                ResultRelInfo *resultRelInfos;
                ResultRelInfo *resultRelInfo;

                resultRelInfos = (ResultRelInfo *)
                        palloc(numResultRelations * sizeof(ResultRelInfo));
                resultRelInfo = resultRelInfos;
                foreach(l, resultRelations)
                {
                        Index           resultRelationIndex = lfirst_int(l);
                        Oid                     resultRelationOid;
                        Relation        resultRelation;

                        resultRelationOid = getrelid(resultRelationIndex, rangeTable);
                        resultRelation = heap_open(resultRelationOid, RowExclusiveLock);

                        InitResultRelInfo(resultRelInfo,
                                                          resultRelation,
                                                          resultRelationIndex,
                                                          NULL,
                                                          estate->es_instrument);
                        resultRelInfo++;
                }
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
                /* es_result_relation_info is NULL except when within ModifyTable */
                estate->es_result_relation_info = NULL;

                /*
                 * In the partitioned result relation case, lock the non-leaf result
                 * relations too.  A subset of these are the roots of respective
                 * partitioned tables, for which we also allocate ResultRelInfos.
                 */
                estate->es_root_result_relations = NULL;
                estate->es_num_root_result_relations = 0;
                if (plannedstmt->nonleafResultRelations)
                {
                        int                     num_roots = list_length(plannedstmt->rootResultRelations);

                        /*
                         * Firstly, build ResultRelInfos for all the partitioned table
                         * roots, because we will need them to fire the statement-level
                         * triggers, if any.
                         */
                        resultRelInfos = (ResultRelInfo *)
                                palloc(num_roots * sizeof(ResultRelInfo));
                        resultRelInfo = resultRelInfos;
                        foreach(l, plannedstmt->rootResultRelations)
                        {
                                Index           resultRelIndex = lfirst_int(l);
                                Oid                     resultRelOid;
                                Relation        resultRelDesc;

                                resultRelOid = getrelid(resultRelIndex, rangeTable);
                                resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
                                InitResultRelInfo(resultRelInfo,
                                                                  resultRelDesc,
                                                                  lfirst_int(l),
                                                                  NULL,
                                                                  estate->es_instrument);
                                resultRelInfo++;
                        }

                        estate->es_root_result_relations = resultRelInfos;
                        estate->es_num_root_result_relations = num_roots;

                        /* Simply lock the rest of them. */
                        foreach(l, plannedstmt->nonleafResultRelations)
                        {
                                Index           resultRelIndex = lfirst_int(l);

                                /* We locked the roots above. */
                                if (!list_member_int(plannedstmt->rootResultRelations,
                                                                         resultRelIndex))
                                        LockRelationOid(getrelid(resultRelIndex, rangeTable),
                                                                        RowExclusiveLock);
                        }
                }
        }
        else
        {
                /*
                 * if no result relation, then set state appropriately
                 */
                estate->es_result_relations = NULL;
                estate->es_num_result_relations = 0;
                estate->es_result_relation_info = NULL;
                estate->es_root_result_relations = NULL;
                estate->es_num_root_result_relations = 0;
        }

        /*
         * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
         * before we initialize the plan tree, else we'd be risking lock upgrades.
         * While we are at it, build the ExecRowMark list.  Any partitioned child
         * tables are ignored here (because isParent=true) and will be locked by
         * the first Append or MergeAppend node that references them.  (Note that
         * the RowMarks corresponding to partitioned child tables are present in
         * the same list as the rest, i.e., plannedstmt->rowMarks.)
         */
        estate->es_rowMarks = NIL;
        foreach(l, plannedstmt->rowMarks)
        {
                PlanRowMark *rc = (PlanRowMark *) lfirst(l);
                Oid                     relid;
                Relation        relation;
                ExecRowMark *erm;

                /* ignore "parent" rowmarks; they are irrelevant at runtime */
                if (rc->isParent)
                        continue;

                /* get relation's OID (will produce InvalidOid if subquery) */
                relid = getrelid(rc->rti, rangeTable);

                /*
                 * If you change the conditions under which rel locks are acquired
                 * here, be sure to adjust ExecOpenScanRelation to match.
                 */
                switch (rc->markType)
                {
                        case ROW_MARK_EXCLUSIVE:
                        case ROW_MARK_NOKEYEXCLUSIVE:
                        case ROW_MARK_SHARE:
                        case ROW_MARK_KEYSHARE:
                                relation = heap_open(relid, RowShareLock);
                                break;
                        case ROW_MARK_REFERENCE:
                                relation = heap_open(relid, AccessShareLock);
                                break;
                        case ROW_MARK_COPY:
                                /* no physical table access is required */
                                relation = NULL;
                                break;
                        default:
                                elog(ERROR, "unrecognized markType: %d", rc->markType);
                                relation = NULL;        /* keep compiler quiet */
                                break;
                }

                /* Check that relation is a legal target for marking */
                if (relation)
                        CheckValidRowMarkRel(relation, rc->markType);

                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->relid = relid;
                erm->rti = rc->rti;
                erm->prti = rc->prti;
                erm->rowmarkId = rc->rowmarkId;
                erm->markType = rc->markType;
                erm->strength = rc->strength;
                erm->waitPolicy = rc->waitPolicy;
                erm->ermActive = false;
                ItemPointerSetInvalid(&(erm->curCtid));
                erm->ermExtra = NULL;
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }

        /*
         * Initialize the executor's tuple table to empty.
         */
        estate->es_tupleTable = NIL;
        estate->es_trig_tuple_slot = NULL;
        estate->es_trig_oldtup_slot = NULL;
        estate->es_trig_newtup_slot = NULL;

        /* mark EvalPlanQual not active */
        estate->es_epqTuple = NULL;
        estate->es_epqTupleSet = NULL;
        estate->es_epqScanDone = NULL;

        /*
         * Initialize private state information for each SubPlan.  We must do this
         * before running ExecInitNode on the main query tree, since
         * ExecInitSubPlan expects to be able to find these entries.
         */
        Assert(estate->es_subplanstates == NIL);
        i = 1;                                          /* subplan indices count from 1 */
        foreach(l, plannedstmt->subplans)
        {
                Plan       *subplan = (Plan *) lfirst(l);
                PlanState  *subplanstate;
                int                     sp_eflags;

                /*
                 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
                 * it is a parameterless subplan (not initplan), we suggest that it be
                 * prepared to handle REWIND efficiently; otherwise there is no need.
                 */
                sp_eflags = eflags
                        & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
                if (bms_is_member(i, plannedstmt->rewindPlanIDs))
                        sp_eflags |= EXEC_FLAG_REWIND;

                subplanstate = ExecInitNode(subplan, estate, sp_eflags);

                estate->es_subplanstates = lappend(estate->es_subplanstates,
                                                                                   subplanstate);

                i++;
        }

        /*
         * Initialize the private state information for all the nodes in the query
         * tree.  This opens files, allocates storage and leaves us ready to start
         * processing tuples.
         */
        planstate = ExecInitNode(plan, estate, eflags);

        /*
         * Get the tuple descriptor describing the type of tuples to return.
         */
        tupType = ExecGetResultType(planstate);

        /*
         * Initialize the junk filter if needed.  SELECT queries need a filter if
         * there are any junk attrs in the top-level tlist.
         */
        if (operation == CMD_SELECT)
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;

                foreach(tlist, plan->targetlist)
                {
                        TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                        if (tle->resjunk)
                        {
                                junk_filter_needed = true;
                                break;
                        }
                }

                if (junk_filter_needed)
                {
                        JunkFilter *j;

                        j = ExecInitJunkFilter(planstate->plan->targetlist,
                                                                   tupType->tdhasoid,
                                                                   ExecInitExtraTupleSlot(estate, NULL));
                        estate->es_junkFilter = j;

                        /* Want to return the cleaned tuple type */
                        tupType = j->jf_cleanTupType;
                }
        }

        queryDesc->tupDesc = tupType;
        queryDesc->planstate = planstate;
}

/*
 * Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
        Relation        resultRel = resultRelInfo->ri_RelationDesc;
        TriggerDesc *trigDesc = resultRel->trigdesc;
        FdwRoutine *fdwroutine;

        switch (resultRel->rd_rel->relkind)
        {
                case RELKIND_RELATION:
                case RELKIND_PARTITIONED_TABLE:
                        CheckCmdReplicaIdentity(resultRel, operation);
                        break;
                case RELKIND_SEQUENCE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change sequence \"%s\"",
                                                        RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_TOASTVALUE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change TOAST relation \"%s\"",
                                                        RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_VIEW:

                        /*
                         * Okay only if there's a suitable INSTEAD OF trigger.  Messages
                         * here should match rewriteHandler.c's rewriteTargetView, except
                         * that we omit errdetail because we haven't got the information
                         * handy (and given that we really shouldn't get here anyway, it's
                         * not worth great exertion to get).
                         */
                        switch (operation)
                        {
                                case CMD_INSERT:
                                        if (!trigDesc || !trigDesc->trig_insert_instead_row)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                                                 errmsg("cannot insert into view \"%s\"",
                                                                                RelationGetRelationName(resultRel)),
                                                                 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
                                        break;
                                case CMD_UPDATE:
                                        if (!trigDesc || !trigDesc->trig_update_instead_row)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                                                 errmsg("cannot update view \"%s\"",
                                                                                RelationGetRelationName(resultRel)),
                                                                 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
                                        break;
                                case CMD_DELETE:
                                        if (!trigDesc || !trigDesc->trig_delete_instead_row)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                                                 errmsg("cannot delete from view \"%s\"",
                                                                                RelationGetRelationName(resultRel)),
                                                                 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
                                        break;
                                default:
                                        elog(ERROR, "unrecognized CmdType: %d", (int) operation);
                                        break;
                        }
                        break;
                case RELKIND_MATVIEW:
                        if (!MatViewIncrementalMaintenanceIsEnabled())
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("cannot change materialized view \"%s\"",
                                                                RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_FOREIGN_TABLE:
                        /* Okay only if the FDW supports it */
                        fdwroutine = resultRelInfo->ri_FdwRoutine;
                        switch (operation)
                        {
                                case CMD_INSERT:

                                        /*
                                         * If this is a foreign partition that tuples are routed
                                         * into, skip the check; that case is disallowed elsewhere.
1187                                          */
1188                                         if (resultRelInfo->ri_PartitionRoot)
1189                                                 break;
1190                                         if (fdwroutine->ExecForeignInsert == NULL)
1191                                                 ereport(ERROR,
1192                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1193                                                                  errmsg("cannot insert into foreign table \"%s\"",
1194                                                                                 RelationGetRelationName(resultRel))));
1195                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1196                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1197                                                 ereport(ERROR,
1198                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1199                                                                  errmsg("foreign table \"%s\" does not allow inserts",
1200                                                                                 RelationGetRelationName(resultRel))));
1201                                         break;
1202                                 case CMD_UPDATE:
1203                                         if (fdwroutine->ExecForeignUpdate == NULL)
1204                                                 ereport(ERROR,
1205                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1206                                                                  errmsg("cannot update foreign table \"%s\"",
1207                                                                                 RelationGetRelationName(resultRel))));
1208                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1209                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1210                                                 ereport(ERROR,
1211                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1212                                                                  errmsg("foreign table \"%s\" does not allow updates",
1213                                                                                 RelationGetRelationName(resultRel))));
1214                                         break;
1215                                 case CMD_DELETE:
1216                                         if (fdwroutine->ExecForeignDelete == NULL)
1217                                                 ereport(ERROR,
1218                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1219                                                                  errmsg("cannot delete from foreign table \"%s\"",
1220                                                                                 RelationGetRelationName(resultRel))));
1221                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1222                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1223                                                 ereport(ERROR,
1224                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1225                                                                  errmsg("foreign table \"%s\" does not allow deletes",
1226                                                                                 RelationGetRelationName(resultRel))));
1227                                         break;
1228                                 default:
1229                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1230                                         break;
1231                         }
1232                         break;
1233                 default:
1234                         ereport(ERROR,
1235                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1236                                          errmsg("cannot change relation \"%s\"",
1237                                                         RelationGetRelationName(resultRel))));
1238                         break;
1239         }
1240 }
1241
1242 /*
1243  * Check that a proposed rowmark target relation is a legal target
1244  *
1245  * In most cases the parser and/or planner should have noticed this already,
1246  * but they don't cover all cases.
1247  */
1248 static void
1249 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1250 {
1251         FdwRoutine *fdwroutine;
1252
1253         switch (rel->rd_rel->relkind)
1254         {
1255                 case RELKIND_RELATION:
1256                 case RELKIND_PARTITIONED_TABLE:
1257                         /* OK */
1258                         break;
1259                 case RELKIND_SEQUENCE:
1260                         /* Must disallow this because we don't vacuum sequences */
1261                         ereport(ERROR,
1262                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1263                                          errmsg("cannot lock rows in sequence \"%s\"",
1264                                                         RelationGetRelationName(rel))));
1265                         break;
1266                 case RELKIND_TOASTVALUE:
1267                         /* We could allow this, but there seems no good reason to */
1268                         ereport(ERROR,
1269                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1270                                          errmsg("cannot lock rows in TOAST relation \"%s\"",
1271                                                         RelationGetRelationName(rel))));
1272                         break;
1273                 case RELKIND_VIEW:
1274                         /* Should not get here; planner should have expanded the view */
1275                         ereport(ERROR,
1276                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1277                                          errmsg("cannot lock rows in view \"%s\"",
1278                                                         RelationGetRelationName(rel))));
1279                         break;
1280                 case RELKIND_MATVIEW:
1281                         /* Allow referencing a matview, but not actual locking clauses */
1282                         if (markType != ROW_MARK_REFERENCE)
1283                                 ereport(ERROR,
1284                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1285                                                  errmsg("cannot lock rows in materialized view \"%s\"",
1286                                                                 RelationGetRelationName(rel))));
1287                         break;
1288                 case RELKIND_FOREIGN_TABLE:
1289                         /* Okay only if the FDW supports it */
1290                         fdwroutine = GetFdwRoutineForRelation(rel, false);
1291                         if (fdwroutine->RefetchForeignRow == NULL)
1292                                 ereport(ERROR,
1293                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1294                                                  errmsg("cannot lock rows in foreign table \"%s\"",
1295                                                                 RelationGetRelationName(rel))));
1296                         break;
1297                 default:
1298                         ereport(ERROR,
1299                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1300                                          errmsg("cannot lock rows in relation \"%s\"",
1301                                                         RelationGetRelationName(rel))));
1302                         break;
1303         }
1304 }
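/*
 * Editorial sketch (not part of the original file): the switch above, like
 * the one in CheckValidResultRel, shows that a foreign-data wrapper opts
 * into DML and row locking simply by filling in the corresponding
 * FdwRoutine callbacks.  The callback fields are the real FDW API; the
 * example_* functions are hypothetical placeholders.
 */
#ifdef EXECMAIN_EXAMPLES			/* never defined; illustration only */
static FdwRoutine *
example_fdw_handler(void)
{
	FdwRoutine *routine = makeNode(FdwRoutine);

	/* Without this, SELECT ... FOR UPDATE on the foreign table errors out */
	routine->RefetchForeignRow = example_refetch_foreign_row;
	/* Without these, CheckValidResultRel rejects INSERT/UPDATE/DELETE */
	routine->ExecForeignInsert = example_exec_foreign_insert;
	routine->ExecForeignUpdate = example_exec_foreign_update;
	routine->ExecForeignDelete = example_exec_foreign_delete;
	return routine;
}
#endif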
1305
1306 /*
1307  * Initialize ResultRelInfo data for one result relation
1308  *
1309  * Caution: before Postgres 9.1, this function included the relkind checking
1310  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1311  * appropriate.  Be sure callers cover those needs.
1312  */
1313 void
1314 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1315                                   Relation resultRelationDesc,
1316                                   Index resultRelationIndex,
1317                                   Relation partition_root,
1318                                   int instrument_options)
1319 {
1320         List       *partition_check = NIL;
1321
1322         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1323         resultRelInfo->type = T_ResultRelInfo;
1324         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1325         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1326         resultRelInfo->ri_NumIndices = 0;
1327         resultRelInfo->ri_IndexRelationDescs = NULL;
1328         resultRelInfo->ri_IndexRelationInfo = NULL;
1329         /* make a copy so as not to depend on relcache info not changing... */
1330         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1331         if (resultRelInfo->ri_TrigDesc)
1332         {
1333                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1334
1335                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1336                         palloc0(n * sizeof(FmgrInfo));
1337                 resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1338                         palloc0(n * sizeof(ExprState *));
1339                 if (instrument_options)
1340                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1341         }
1342         else
1343         {
1344                 resultRelInfo->ri_TrigFunctions = NULL;
1345                 resultRelInfo->ri_TrigWhenExprs = NULL;
1346                 resultRelInfo->ri_TrigInstrument = NULL;
1347         }
1348         if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1349                 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1350         else
1351                 resultRelInfo->ri_FdwRoutine = NULL;
1352         resultRelInfo->ri_FdwState = NULL;
1353         resultRelInfo->ri_usesFdwDirectModify = false;
1354         resultRelInfo->ri_ConstraintExprs = NULL;
1355         resultRelInfo->ri_junkFilter = NULL;
1356         resultRelInfo->ri_projectReturning = NULL;
1357
1358         /*
1359          * Fetch the partition constraint, which also includes the partition
1360          * constraints of all ancestors that are themselves partitions.  Note
1361          * that it will be checked even in the case of tuple routing where this
1362          * table is the target leaf partition, if there are any BR triggers
1363          * defined on the table.  Although tuple routing implicitly preserves
1364          * the partition constraint of the target partition for a given row,
1365          * BR triggers may change the row so that the constraint is no longer
1366          * satisfied, which we must detect by checking the constraint explicitly.
1367          *
1368          * If this is a partitioned table, the partition constraint (if any) of a
1369          * given row will be checked just before performing tuple-routing.
1370          */
1371         partition_check = RelationGetPartitionQual(resultRelationDesc);
1372
1373         resultRelInfo->ri_PartitionCheck = partition_check;
1374         resultRelInfo->ri_PartitionRoot = partition_root;
1375 }
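/*
 * Editorial sketch (not part of the original file): per the Caution above,
 * a caller that needs index insertion must pair InitResultRelInfo with an
 * explicit ExecOpenIndices call; both of those are the real executor API,
 * while the wrapper itself is hypothetical.
 */
#ifdef EXECMAIN_EXAMPLES
static ResultRelInfo *
example_open_result_rel(EState *estate, Relation rel, Index rti)
{
	ResultRelInfo *rri = makeNode(ResultRelInfo);

	InitResultRelInfo(rri, rel, rti, NULL, estate->es_instrument);
	/* InitResultRelInfo no longer opens indexes; do it ourselves */
	if (rel->rd_rel->relhasindex)
		ExecOpenIndices(rri, false);
	return rri;
}
#endif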
1376
1377 /*
1378  *              ExecGetTriggerResultRel
1379  *
1380  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1381  * triggers are fired on one of the result relations of the query, and so
1382  * we can just return a member of the es_result_relations array, the
1383  * es_root_result_relations array (if any), or the
1384  * es_tuple_routing_result_relations list (if any).  (Note: in self-join
1385  * members with the same OID; if so it doesn't matter which one we pick.)
1386  * However, it is sometimes necessary to fire triggers on other relations;
1387  * this happens mainly when an RI update trigger queues additional triggers
1388  * on other relations, which will be processed in the context of the outer
1389  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
1390  * triggers too; that can avoid repeated re-opening of the relation.  (It
1391  * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1392  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
1393  * in es_trig_target_relations.
1394  */
1395 ResultRelInfo *
1396 ExecGetTriggerResultRel(EState *estate, Oid relid)
1397 {
1398         ResultRelInfo *rInfo;
1399         int                     nr;
1400         ListCell   *l;
1401         Relation        rel;
1402         MemoryContext oldcontext;
1403
1404         /* First, search through the query result relations */
1405         rInfo = estate->es_result_relations;
1406         nr = estate->es_num_result_relations;
1407         while (nr > 0)
1408         {
1409                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1410                         return rInfo;
1411                 rInfo++;
1412                 nr--;
1413         }
1414         /* Second, search through the root result relations, if any */
1415         rInfo = estate->es_root_result_relations;
1416         nr = estate->es_num_root_result_relations;
1417         while (nr > 0)
1418         {
1419                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1420                         return rInfo;
1421                 rInfo++;
1422                 nr--;
1423         }
1424         /*
1425          * Third, search through the result relations that were created during
1426          * tuple routing, if any.
1427          */
1428         foreach(l, estate->es_tuple_routing_result_relations)
1429         {
1430                 rInfo = (ResultRelInfo *) lfirst(l);
1431                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1432                         return rInfo;
1433         }
1434         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1435         foreach(l, estate->es_trig_target_relations)
1436         {
1437                 rInfo = (ResultRelInfo *) lfirst(l);
1438                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1439                         return rInfo;
1440         }
1441         /* Nope, so we need a new one */
1442
1443         /*
1444          * Open the target relation's relcache entry.  We assume that an
1445          * appropriate lock is still held by the backend from whenever the trigger
1446          * event got queued, so we need take no new lock here.  Also, we need not
1447          * recheck the relkind, so no need for CheckValidResultRel.
1448          */
1449         rel = heap_open(relid, NoLock);
1450
1451         /*
1452          * Make the new entry in the right context.
1453          */
1454         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1455         rInfo = makeNode(ResultRelInfo);
1456         InitResultRelInfo(rInfo,
1457                                           rel,
1458                                           0,            /* dummy rangetable index */
1459                                           NULL,
1460                                           estate->es_instrument);
1461         estate->es_trig_target_relations =
1462                 lappend(estate->es_trig_target_relations, rInfo);
1463         MemoryContextSwitchTo(oldcontext);
1464
1465         /*
1466          * Currently, we don't need any index information in ResultRelInfos used
1467          * only for triggers, so no need to call ExecOpenIndices.
1468          */
1469
1470         return rInfo;
1471 }
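/*
 * Editorial sketch (not part of the original file): because new entries are
 * saved in es_trig_target_relations, repeated lookups of the same relation
 * are cheap and yield the same ResultRelInfo.
 */
#ifdef EXECMAIN_EXAMPLES
static void
example_trigger_rel_cache(EState *estate, Oid relid)
{
	ResultRelInfo *first = ExecGetTriggerResultRel(estate, relid);
	ResultRelInfo *second = ExecGetTriggerResultRel(estate, relid);

	/* The second call finds the entry cached by the first one */
	Assert(first == second);
}
#endif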
1472
1473 /*
1474  * Close any relations that have been opened by ExecGetTriggerResultRel().
1475  */
1476 void
1477 ExecCleanUpTriggerState(EState *estate)
1478 {
1479         ListCell   *l;
1480
1481         foreach(l, estate->es_trig_target_relations)
1482         {
1483                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1484
1485                 /* Close indices and then the relation itself */
1486                 ExecCloseIndices(resultRelInfo);
1487                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1488         }
1489 }
1490
1491 /*
1492  *              ExecContextForcesOids
1493  *
1494  * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1495  * we need to ensure that result tuples have space for an OID iff they are
1496  * going to be stored into a relation that has OIDs.  In other contexts
1497  * we are free to choose whether to leave space for OIDs in result tuples
1498  * (we generally don't want to, but we do if a physical-tlist optimization
1499  * is possible).  This routine checks the plan context and returns true if the
1500  * choice is forced, false if the choice is not forced.  In the true case,
1501  * *hasoids is set to the required value.
1502  *
1503  * One reason this is ugly is that all plan nodes in the plan tree will emit
1504  * tuples with space for an OID, though we really only need the topmost node
1505  * to do so.  However, node types like Sort don't project new tuples but just
1506  * return their inputs, and in those cases the requirement propagates down
1507  * to the input node.  Eventually we might make this code smart enough to
1508  * recognize how far down the requirement really goes, but for now we just
1509  * make all plan nodes do the same thing if the top level forces the choice.
1510  *
1511  * We assume that if we are generating tuples for INSERT or UPDATE,
1512  * estate->es_result_relation_info is already set up to describe the target
1513  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1514  * the target relations may have OIDs and some not.  We have to make the
1515  * decisions on a per-relation basis as we initialize each of the subplans of
1516  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1517  * while initializing each subplan.
1518  *
1519  * CREATE TABLE AS is even uglier, because we don't have the target relation's
1520  * descriptor available when this code runs; we have to look aside at the
1521  * flags passed to ExecutorStart().
1522  */
1523 bool
1524 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1525 {
1526         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1527
1528         if (ri != NULL)
1529         {
1530                 Relation        rel = ri->ri_RelationDesc;
1531
1532                 if (rel != NULL)
1533                 {
1534                         *hasoids = rel->rd_rel->relhasoids;
1535                         return true;
1536                 }
1537         }
1538
1539         if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1540         {
1541                 *hasoids = true;
1542                 return true;
1543         }
1544         if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1545         {
1546                 *hasoids = false;
1547                 return true;
1548         }
1549
1550         return false;
1551 }
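/*
 * Editorial sketch (not part of the original file): typical use of
 * ExecContextForcesOids when building a node's result tuple descriptor.
 * ExecTypeFromTL is the real helper; the wrapper is hypothetical.
 */
#ifdef EXECMAIN_EXAMPLES
static TupleDesc
example_result_tupdesc(PlanState *planstate)
{
	bool		hasoids;

	/* If the context doesn't force the choice, we're free to omit OID space */
	if (!ExecContextForcesOids(planstate, &hasoids))
		hasoids = false;
	return ExecTypeFromTL(planstate->plan->targetlist, hasoids);
}
#endif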
1552
1553 /* ----------------------------------------------------------------
1554  *              ExecPostprocessPlan
1555  *
1556  *              Give plan nodes a final chance to execute before shutdown
1557  * ----------------------------------------------------------------
1558  */
1559 static void
1560 ExecPostprocessPlan(EState *estate)
1561 {
1562         ListCell   *lc;
1563
1564         /*
1565          * Make sure nodes run forward.
1566          */
1567         estate->es_direction = ForwardScanDirection;
1568
1569         /*
1570          * Run any secondary ModifyTable nodes to completion, in case the main
1571          * query did not fetch all rows from them.  (We do this to ensure that
1572          * such nodes have predictable results.)
1573          */
1574         foreach(lc, estate->es_auxmodifytables)
1575         {
1576                 PlanState  *ps = (PlanState *) lfirst(lc);
1577
1578                 for (;;)
1579                 {
1580                         TupleTableSlot *slot;
1581
1582                         /* Reset the per-output-tuple exprcontext each time */
1583                         ResetPerTupleExprContext(estate);
1584
1585                         slot = ExecProcNode(ps);
1586
1587                         if (TupIsNull(slot))
1588                                 break;
1589                 }
1590         }
1591 }
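/*
 * Editorial note (not part of the original file): the canonical case for
 * ExecPostprocessPlan is a data-modifying CTE whose output the main query
 * doesn't fully read, e.g.
 *
 *		WITH ins AS (INSERT INTO t VALUES (1), (2) RETURNING *)
 *		SELECT 1;
 *
 * The INSERT's ModifyTable node sits in es_auxmodifytables and is run to
 * completion here, so both rows are inserted no matter how little of "ins"
 * the outer SELECT consumed.
 */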
1592
1593 /* ----------------------------------------------------------------
1594  *              ExecEndPlan
1595  *
1596  *              Cleans up the query plan -- closes files and frees up storage
1597  *
1598  * NOTE: we are no longer very worried about freeing storage per se
1599  * in this code; FreeExecutorState should be guaranteed to release all
1600  * memory that needs to be released.  What we are worried about doing
1601  * is closing relations and dropping buffer pins.  Thus, for example,
1602  * tuple tables must be cleared or dropped to ensure pins are released.
1603  * ----------------------------------------------------------------
1604  */
1605 static void
1606 ExecEndPlan(PlanState *planstate, EState *estate)
1607 {
1608         ResultRelInfo *resultRelInfo;
1609         int                     i;
1610         ListCell   *l;
1611
1612         /*
1613          * shut down the node-type-specific query processing
1614          */
1615         ExecEndNode(planstate);
1616
1617         /*
1618          * for subplans too
1619          */
1620         foreach(l, estate->es_subplanstates)
1621         {
1622                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1623
1624                 ExecEndNode(subplanstate);
1625         }
1626
1627         /*
1628          * destroy the executor's tuple table.  Actually we only care about
1629          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1630          * the TupleTableSlots, since the containing memory context is about to go
1631          * away anyway.
1632          */
1633         ExecResetTupleTable(estate->es_tupleTable, false);
1634
1635         /*
1636          * close the result relation(s) if any, but hold locks until xact commit.
1637          */
1638         resultRelInfo = estate->es_result_relations;
1639         for (i = estate->es_num_result_relations; i > 0; i--)
1640         {
1641                 /* Close indices and then the relation itself */
1642                 ExecCloseIndices(resultRelInfo);
1643                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1644                 resultRelInfo++;
1645         }
1646
1647         /* Close the root target relation(s). */
1648         resultRelInfo = estate->es_root_result_relations;
1649         for (i = estate->es_num_root_result_relations; i > 0; i--)
1650         {
1651                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1652                 resultRelInfo++;
1653         }
1654
1655         /* likewise close any trigger target relations */
1656         ExecCleanUpTriggerState(estate);
1657
1658         /*
1659          * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1660          * locks
1661          */
1662         foreach(l, estate->es_rowMarks)
1663         {
1664                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1665
1666                 if (erm->relation)
1667                         heap_close(erm->relation, NoLock);
1668         }
1669 }
1670
1671 /* ----------------------------------------------------------------
1672  *              ExecutePlan
1673  *
1674  *              Processes the query plan until we have retrieved 'numberTuples' tuples,
1675  *              moving in the specified direction.
1676  *
1677  *              Runs to completion if numberTuples is 0
1678  *
1679  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1680  * user can see it
1681  * ----------------------------------------------------------------
1682  */
1683 static void
1684 ExecutePlan(EState *estate,
1685                         PlanState *planstate,
1686                         bool use_parallel_mode,
1687                         CmdType operation,
1688                         bool sendTuples,
1689                         uint64 numberTuples,
1690                         ScanDirection direction,
1691                         DestReceiver *dest,
1692                         bool execute_once)
1693 {
1694         TupleTableSlot *slot;
1695         uint64          current_tuple_count;
1696
1697         /*
1698          * initialize local variables
1699          */
1700         current_tuple_count = 0;
1701
1702         /*
1703          * Set the direction.
1704          */
1705         estate->es_direction = direction;
1706
1707         /*
1708          * If the plan might potentially be executed multiple times, we must force
1709          * it to run without parallelism, because we might exit early.
1710          */
1711         if (!execute_once)
1712                 use_parallel_mode = false;
1713
1714         estate->es_use_parallel_mode = use_parallel_mode;
1715         if (use_parallel_mode)
1716                 EnterParallelMode();
1717
1718         /*
1719          * Loop until we've processed the proper number of tuples from the plan.
1720          */
1721         for (;;)
1722         {
1723                 /* Reset the per-output-tuple exprcontext */
1724                 ResetPerTupleExprContext(estate);
1725
1726                 /*
1727                  * Execute the plan and obtain a tuple
1728                  */
1729                 slot = ExecProcNode(planstate);
1730
1731                 /*
1732                  * if the tuple is null, then we assume there is nothing more to
1733                  * process, so we just end the loop...
1734                  */
1735                 if (TupIsNull(slot))
1736                 {
1737                         /* Allow nodes to release or shut down resources. */
1738                         (void) ExecShutdownNode(planstate);
1739                         break;
1740                 }
1741
1742                 /*
1743                  * If we have a junk filter, then project a new tuple with the junk
1744                  * removed.
1745                  *
1746                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1747                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1748                  * because that tuple slot has the wrong descriptor.)
1749                  */
1750                 if (estate->es_junkFilter != NULL)
1751                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1752
1753                 /*
1754                  * If we are supposed to send the tuple somewhere, do so. (In
1755                  * practice, this is probably always the case at this point.)
1756                  */
1757                 if (sendTuples)
1758                 {
1759                         /*
1760                          * If we are not able to send the tuple, we assume the destination
1761                          * has closed and no more tuples can be sent. If that's the case,
1762                          * end the loop.
1763                          */
1764                         if (!dest->receiveSlot(slot, dest))
1765                                 break;
1766                 }
1767
1768                 /*
1769                  * Count tuples processed, if this is a SELECT.  (For other operation
1770                  * types, the ModifyTable plan node must count the appropriate
1771                  * events.)
1772                  */
1773                 if (operation == CMD_SELECT)
1774                         (estate->es_processed)++;
1775
1776                 /*
1777                  * check our tuple count; if we've processed the proper number then
1778                  * quit, else loop again and process more tuples.  Zero numberTuples
1779                  * means no limit.
1780                  */
1781                 current_tuple_count++;
1782                 if (numberTuples && numberTuples == current_tuple_count)
1783                 {
1784                         /* Allow nodes to release or shut down resources. */
1785                         (void) ExecShutdownNode(planstate);
1786                         break;
1787                 }
1788         }
1789
1790         if (use_parallel_mode)
1791                 ExitParallelMode();
1792 }
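/*
 * Editorial sketch (not part of the original file): callers drive
 * ExecutePlan through ExecutorRun, which may be invoked repeatedly to fetch
 * a plan's output in batches, as a cursor does.
 */
#ifdef EXECMAIN_EXAMPLES
static void
example_run_in_batches(QueryDesc *queryDesc)
{
	/*
	 * Fetch at most 100 tuples per call.  Passing execute_once = false is
	 * required for repeated calls and, per the logic above, also disables
	 * parallelism, since the plan might stop short of completion.
	 */
	ExecutorRun(queryDesc, ForwardScanDirection, 100, false);
	ExecutorRun(queryDesc, ForwardScanDirection, 100, false);
}
#endif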
1793
1794
1795 /*
1796  * ExecRelCheck --- check that tuple meets constraints for result relation
1797  *
1798  * Returns NULL if OK, else name of failed check constraint
1799  */
1800 static const char *
1801 ExecRelCheck(ResultRelInfo *resultRelInfo,
1802                          TupleTableSlot *slot, EState *estate)
1803 {
1804         Relation        rel = resultRelInfo->ri_RelationDesc;
1805         int                     ncheck = rel->rd_att->constr->num_check;
1806         ConstrCheck *check = rel->rd_att->constr->check;
1807         ExprContext *econtext;
1808         MemoryContext oldContext;
1809         int                     i;
1810
1811         /*
1812          * If first time through for this result relation, build expression
1813          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1814          * memory context so they'll survive throughout the query.
1815          */
1816         if (resultRelInfo->ri_ConstraintExprs == NULL)
1817         {
1818                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1819                 resultRelInfo->ri_ConstraintExprs =
1820                         (ExprState **) palloc(ncheck * sizeof(ExprState *));
1821                 for (i = 0; i < ncheck; i++)
1822                 {
1823                         Expr       *checkconstr;
1824
1825                         checkconstr = stringToNode(check[i].ccbin);
1826                         resultRelInfo->ri_ConstraintExprs[i] =
1827                                 ExecPrepareExpr(checkconstr, estate);
1828                 }
1829                 MemoryContextSwitchTo(oldContext);
1830         }
1831
1832         /*
1833          * We will use the EState's per-tuple context for evaluating constraint
1834          * expressions (creating it if it's not already there).
1835          */
1836         econtext = GetPerTupleExprContext(estate);
1837
1838         /* Arrange for econtext's scan tuple to be the tuple under test */
1839         econtext->ecxt_scantuple = slot;
1840
1841         /* And evaluate the constraints */
1842         for (i = 0; i < ncheck; i++)
1843         {
1844                 ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1845
1846                 /*
1847                  * NOTE: SQL specifies that a NULL result from a constraint expression
1848                  * is not to be treated as a failure.  Therefore, use ExecCheck not
1849                  * ExecQual.
1850                  */
1851                 if (!ExecCheck(checkconstr, econtext))
1852                         return check[i].ccname;
1853         }
1854
1855         /* NULL result means no error */
1856         return NULL;
1857 }
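/*
 * Editorial sketch (not part of the original file): the NULL-handling rule
 * noted above, in a nutshell.  ExecQual and ExecCheck are the real
 * expression API; the wrapper is hypothetical.
 */
#ifdef EXECMAIN_EXAMPLES
static void
example_null_semantics(ExprState *expr, ExprContext *econtext)
{
	bool		as_qual = ExecQual(expr, econtext);		/* NULL counts as failure */
	bool		as_check = ExecCheck(expr, econtext);	/* NULL counts as success */

	(void) as_qual;
	(void) as_check;
}
#endif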
1858
1859 /*
1860  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1861  *
1862  * Exported in executor.h for outside use.
1863  * Returns true if it meets the partition constraint, else returns false.
1864  */
1865 bool
1866 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1867                                    EState *estate)
1868 {
1869         ExprContext *econtext;
1870
1871         /*
1872          * If first time through, build expression state tree for the partition
1873          * check expression.  Keep it in the per-query memory context so it'll
1874          * survive throughout the query.
1875          */
1876         if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1877         {
1878                 List       *qual = resultRelInfo->ri_PartitionCheck;
1879
1880                 resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1881         }
1882
1883         /*
1884          * We will use the EState's per-tuple context for evaluating constraint
1885          * expressions (creating it if it's not already there).
1886          */
1887         econtext = GetPerTupleExprContext(estate);
1888
1889         /* Arrange for econtext's scan tuple to be the tuple under test */
1890         econtext->ecxt_scantuple = slot;
1891
1892         /*
1893          * As in the case of the catalogued constraints, we treat a NULL result
1894          * success here, not a failure.
1895          */
1896         return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1897 }
1898
1899 /*
1900  * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1901  * partition constraint check.
1902  */
1903 void
1904 ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1905                                                         TupleTableSlot *slot,
1906                                                         EState *estate)
1907 {
1908         Relation        rel = resultRelInfo->ri_RelationDesc;
1909         Relation        orig_rel = rel;
1910         TupleDesc       tupdesc = RelationGetDescr(rel);
1911         char       *val_desc;
1912         Bitmapset  *modifiedCols;
1913         Bitmapset  *insertedCols;
1914         Bitmapset  *updatedCols;
1915
1916         /*
1917          * Need to first convert the tuple to the root partitioned table's row
1918          * type. For details, check similar comments in ExecConstraints().
1919          */
1920         if (resultRelInfo->ri_PartitionRoot)
1921         {
1922                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
1923                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
1924                 TupleConversionMap *map;
1925
1926                 rel = resultRelInfo->ri_PartitionRoot;
1927                 tupdesc = RelationGetDescr(rel);
1928                 /* a reverse map */
1929                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
1930                                                                          gettext_noop("could not convert row type"));
1931                 if (map != NULL)
1932                 {
1933                         tuple = do_convert_tuple(tuple, map);
1934                         ExecSetSlotDescriptor(slot, tupdesc);
1935                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
1936                 }
1937         }
1938
1939         insertedCols = GetInsertedColumns(resultRelInfo, estate);
1940         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1941         modifiedCols = bms_union(insertedCols, updatedCols);
1942         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1943                                                                                          slot,
1944                                                                                          tupdesc,
1945                                                                                          modifiedCols,
1946                                                                                          64);
1947         ereport(ERROR,
1948                         (errcode(ERRCODE_CHECK_VIOLATION),
1949                          errmsg("new row for relation \"%s\" violates partition constraint",
1950                                         RelationGetRelationName(orig_rel)),
1951                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1952 }
1953
1954 /*
1955  * ExecConstraints - check constraints of the tuple in 'slot'
1956  *
1957  * This checks the traditional NOT NULL and check constraints, and if
1958  * requested, checks the partition constraint.
1959  *
1960  * Note: 'slot' contains the tuple to check the constraints of, which may
1961  * have been converted from the original input tuple after tuple routing.
1962  * 'resultRelInfo' is the original result relation, before tuple routing.
1963  */
1964 void
1965 ExecConstraints(ResultRelInfo *resultRelInfo,
1966                                 TupleTableSlot *slot, EState *estate,
1967                                 bool check_partition_constraint)
1968 {
1969         Relation        rel = resultRelInfo->ri_RelationDesc;
1970         TupleDesc       tupdesc = RelationGetDescr(rel);
1971         TupleConstr *constr = tupdesc->constr;
1972         Bitmapset  *modifiedCols;
1973         Bitmapset  *insertedCols;
1974         Bitmapset  *updatedCols;
1975
1976         Assert(constr || resultRelInfo->ri_PartitionCheck);
1977
1978         if (constr && constr->has_not_null)
1979         {
1980                 int                     natts = tupdesc->natts;
1981                 int                     attrChk;
1982
1983                 for (attrChk = 1; attrChk <= natts; attrChk++)
1984                 {
1985                         Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);
1986
1987                         if (att->attnotnull && slot_attisnull(slot, attrChk))
1988                         {
1989                                 char       *val_desc;
1990                                 Relation        orig_rel = rel;
1991                                 TupleDesc       orig_tupdesc = RelationGetDescr(rel);
1992
1993                                 /*
1994                                  * If the tuple has been routed, it's been converted to the
1995                                  * partition's rowtype, which might differ from the root
1996                                  * table's.  We must convert it back to the root table's
1997                                  * rowtype so that the val_desc shown in the error message
1998                                  * matches the input tuple.
1999                                  */
2000                                 if (resultRelInfo->ri_PartitionRoot)
2001                                 {
2002                                         HeapTuple       tuple = ExecFetchSlotTuple(slot);
2003                                         TupleConversionMap *map;
2004
2005                                         rel = resultRelInfo->ri_PartitionRoot;
2006                                         tupdesc = RelationGetDescr(rel);
2007                                         /* a reverse map */
2008                                         map = convert_tuples_by_name(orig_tupdesc, tupdesc,
2009                                                                                                  gettext_noop("could not convert row type"));
2010                                         if (map != NULL)
2011                                         {
2012                                                 tuple = do_convert_tuple(tuple, map);
2013                                                 ExecSetSlotDescriptor(slot, tupdesc);
2014                                                 ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2015                                         }
2016                                 }
2017
2018                                 insertedCols = GetInsertedColumns(resultRelInfo, estate);
2019                                 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2020                                 modifiedCols = bms_union(insertedCols, updatedCols);
2021                                 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2022                                                                                                                  slot,
2023                                                                                                                  tupdesc,
2024                                                                                                                  modifiedCols,
2025                                                                                                                  64);
2026
2027                                 ereport(ERROR,
2028                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2029                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2030                                                                 NameStr(att->attname)),
2031                                                  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2032                                                  errtablecol(orig_rel, attrChk)));
2033                         }
2034                 }
2035         }
2036
2037         if (constr && constr->num_check > 0)
2038         {
2039                 const char *failed;
2040
2041                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2042                 {
2043                         char       *val_desc;
2044                         Relation        orig_rel = rel;
2045
2046                         /* See the comment above. */
2047                         if (resultRelInfo->ri_PartitionRoot)
2048                         {
2049                                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
2050                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
2051                                 TupleConversionMap *map;
2052
2053                                 rel = resultRelInfo->ri_PartitionRoot;
2054                                 tupdesc = RelationGetDescr(rel);
2055                                 /* a reverse map */
2056                                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
2057                                                                                          gettext_noop("could not convert row type"));
2058                                 if (map != NULL)
2059                                 {
2060                                         tuple = do_convert_tuple(tuple, map);
2061                                         ExecSetSlotDescriptor(slot, tupdesc);
2062                                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2063                                 }
2064                         }
2065
2066                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2067                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2068                         modifiedCols = bms_union(insertedCols, updatedCols);
2069                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2070                                                                                                          slot,
2071                                                                                                          tupdesc,
2072                                                                                                          modifiedCols,
2073                                                                                                          64);
2074                         ereport(ERROR,
2075                                         (errcode(ERRCODE_CHECK_VIOLATION),
2076                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2077                                                         RelationGetRelationName(orig_rel), failed),
2078                                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2079                                          errtableconstraint(orig_rel, failed)));
2080                 }
2081         }
2082
2083         if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
2084                 !ExecPartitionCheck(resultRelInfo, slot, estate))
2085                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
2086 }
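/*
 * Editorial sketch (not part of the original file): DML paths guard the
 * ExecConstraints call so that it's only made when there is something to
 * check, mirroring the Assert at the top of the function.
 */
#ifdef EXECMAIN_EXAMPLES
static void
example_check_new_row(ResultRelInfo *rri, TupleTableSlot *slot, EState *estate)
{
	if (rri->ri_RelationDesc->rd_att->constr != NULL ||
		rri->ri_PartitionCheck != NIL)
		ExecConstraints(rri, slot, estate, true);
}
#endif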
2087
2088
2089 /*
2090  * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2091  * of the specified kind.
2092  *
2093  * Note that this needs to be called multiple times to ensure that all kinds of
2094  * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2095  * CHECK OPTION set and from row level security policies).  See ExecInsert()
2096  * and ExecUpdate().
2097  */
2098 void
2099 ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
2100                                          TupleTableSlot *slot, EState *estate)
2101 {
2102         Relation        rel = resultRelInfo->ri_RelationDesc;
2103         TupleDesc       tupdesc = RelationGetDescr(rel);
2104         ExprContext *econtext;
2105         ListCell   *l1,
2106                            *l2;
2107
2108         /*
2109          * We will use the EState's per-tuple context for evaluating constraint
2110          * expressions (creating it if it's not already there).
2111          */
2112         econtext = GetPerTupleExprContext(estate);
2113
2114         /* Arrange for econtext's scan tuple to be the tuple under test */
2115         econtext->ecxt_scantuple = slot;
2116
2117         /* Check each of the constraints */
2118         forboth(l1, resultRelInfo->ri_WithCheckOptions,
2119                         l2, resultRelInfo->ri_WithCheckOptionExprs)
2120         {
2121                 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2122                 ExprState  *wcoExpr = (ExprState *) lfirst(l2);
2123
2124                 /*
2125                  * Skip any WCOs which are not the kind we are looking for at this
2126                  * time.
2127                  */
2128                 if (wco->kind != kind)
2129                         continue;
2130
2131                 /*
2132                  * WITH CHECK OPTION checks are intended to ensure that the new tuple
2133                  * is visible (in the case of a view) or that it passes the
2134                  * 'with-check' policy (in the case of row security). If the qual
2135                  * evaluates to NULL or FALSE, then the new tuple won't be included in
2136                  * the view or doesn't pass the 'with-check' policy for the table.
2137                  */
2138                 if (!ExecQual(wcoExpr, econtext))
2139                 {
2140                         char       *val_desc;
2141                         Bitmapset  *modifiedCols;
2142                         Bitmapset  *insertedCols;
2143                         Bitmapset  *updatedCols;
2144
2145                         switch (wco->kind)
2146                         {
2147                                         /*
2148                                          * For WITH CHECK OPTIONs coming from views, we might be
2149                                          * able to provide the details on the row, depending on
2150                                          * the permissions on the relation (that is, if the user
2151                                          * could view it directly anyway).  For RLS violations, we
2152                                          * don't include the data since we don't know if the user
2153                                          * should be able to view the tuple as that depends on the
2154                                          * USING policy.
2155                                          */
2156                                 case WCO_VIEW_CHECK:
2157                                         /* See the comment in ExecConstraints(). */
2158                                         if (resultRelInfo->ri_PartitionRoot)
2159                                         {
2160                                                 HeapTuple       tuple = ExecFetchSlotTuple(slot);
2161                                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
2162                                                 TupleConversionMap *map;
2163
2164                                                 rel = resultRelInfo->ri_PartitionRoot;
2165                                                 tupdesc = RelationGetDescr(rel);
2166                                                 /* a reverse map */
2167                                                 map = convert_tuples_by_name(old_tupdesc, tupdesc,
2168                                                                                                          gettext_noop("could not convert row type"));
2169                                                 if (map != NULL)
2170                                                 {
2171                                                         tuple = do_convert_tuple(tuple, map);
2172                                                         ExecSetSlotDescriptor(slot, tupdesc);
2173                                                         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
2174                                                 }
2175                                         }
2176
2177                                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2178                                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2179                                         modifiedCols = bms_union(insertedCols, updatedCols);
2180                                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2181                                                                                                                          slot,
2182                                                                                                                          tupdesc,
2183                                                                                                                          modifiedCols,
2184                                                                                                                          64);
2185
2186                                         ereport(ERROR,
2187                                                         (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2188                                                          errmsg("new row violates check option for view \"%s\"",
2189                                                                         wco->relname),
2190                                                          val_desc ? errdetail("Failing row contains %s.",
2191                                                                                                   val_desc) : 0));
2192                                         break;
2193                                 case WCO_RLS_INSERT_CHECK:
2194                                 case WCO_RLS_UPDATE_CHECK:
2195                                         if (wco->polname != NULL)
2196                                                 ereport(ERROR,
2197                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2198                                                                  errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2199                                                                                 wco->polname, wco->relname)));
2200                                         else
2201                                                 ereport(ERROR,
2202                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2203                                                                  errmsg("new row violates row-level security policy for table \"%s\"",
2204                                                                                 wco->relname)));
2205                                         break;
2206                                 case WCO_RLS_CONFLICT_CHECK:
2207                                         if (wco->polname != NULL)
2208                                                 ereport(ERROR,
2209                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2210                                                                  errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2211                                                                                 wco->polname, wco->relname)));
2212                                         else
2213                                                 ereport(ERROR,
2214                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2215                                                                  errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2216                                                                                 wco->relname)));
2217                                         break;
2218                                 default:
2219                                         elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
2220                                         break;
2221                         }
2222                 }
2223         }
2224 }
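/*
 * Editorial sketch (not part of the original file): the "called multiple
 * times" note above in practice -- an insert path checks row-level security
 * policies and view WITH CHECK OPTIONs in separate passes, selecting each
 * pass with a different WCO kind.
 */
#ifdef EXECMAIN_EXAMPLES
static void
example_wco_passes(ResultRelInfo *rri, TupleTableSlot *slot, EState *estate)
{
	if (rri->ri_WithCheckOptions != NIL)
	{
		/* row-level security with-check policies for the INSERT... */
		ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, rri, slot, estate);
		/* ...and, separately, any cascaded view WITH CHECK OPTIONs */
		ExecWithCheckOptions(WCO_VIEW_CHECK, rri, slot, estate);
	}
}
#endif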
2225
2226 /*
2227  * ExecBuildSlotValueDescription -- construct a string representing a tuple
2228  *
2229  * This is intentionally very similar to BuildIndexValueDescription, but
2230  * unlike that function, we truncate long field values (to at most maxfieldlen
2231  * bytes).  That seems necessary here since heap field values could be very
2232  * long, whereas index entries typically aren't so wide.
2233  *
2234  * Also, unlike the case with index entries, we need to be prepared to ignore
2235  * dropped columns.  We used to use the slot's tuple descriptor to decode the
2236  * data, but the slot's descriptor doesn't identify dropped columns, so we
2237  * now need to be passed the relation's descriptor.
2238  *
2239  * Note that, like BuildIndexValueDescription, if the user does not have
2240  * permission to view any of the columns involved, a NULL is returned.  Unlike
2241  * BuildIndexValueDescription, if the user has access to view a subset of the
2242  * columns involved, that subset will be returned with a key identifying which
2243  * columns they are.
2244  */
2245 static char *
2246 ExecBuildSlotValueDescription(Oid reloid,
2247                                                           TupleTableSlot *slot,
2248                                                           TupleDesc tupdesc,
2249                                                           Bitmapset *modifiedCols,
2250                                                           int maxfieldlen)
2251 {
2252         StringInfoData buf;
2253         StringInfoData collist;
2254         bool            write_comma = false;
2255         bool            write_comma_collist = false;
2256         int                     i;
2257         AclResult       aclresult;
2258         bool            table_perm = false;
2259         bool            any_perm = false;
2260
2261         /*
2262          * Check if RLS is enabled and should be active for the relation; if so,
2263          * then don't return anything.  Otherwise, go through normal permission
2264          * checks.
2265          */
2266         if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2267                 return NULL;
2268
2269         initStringInfo(&buf);
2270
2271         appendStringInfoChar(&buf, '(');
2272
2273         /*
2274          * Check if the user has permissions to see the row.  Table-level SELECT
2275          * allows access to all columns.  If the user does not have table-level
2276          * SELECT then we check each column and include those the user has SELECT
2277          * rights on.  Additionally, we always include columns the user provided
2278          * data for.
2279          */
2280         aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2281         if (aclresult != ACLCHECK_OK)
2282         {
2283                 /* Set up the buffer for the column list */
2284                 initStringInfo(&collist);
2285                 appendStringInfoChar(&collist, '(');
2286         }
2287         else
2288                 table_perm = any_perm = true;
2289
2290         /* Make sure the tuple is fully deconstructed */
2291         slot_getallattrs(slot);
2292
2293         for (i = 0; i < tupdesc->natts; i++)
2294         {
2295                 bool            column_perm = false;
2296                 char       *val;
2297                 int                     vallen;
2298                 Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2299
2300                 /* ignore dropped columns */
2301                 if (att->attisdropped)
2302                         continue;
2303
2304                 if (!table_perm)
2305                 {
2306                         /*
2307                          * No table-level SELECT, so need to make sure they either have
2308                          * SELECT rights on the column or that they have provided the data
2309                          * for the column.  If not, omit this column from the error
2310                          * message.
2311                          */
2312                         aclresult = pg_attribute_aclcheck(reloid, att->attnum,
2313                                                                                           GetUserId(), ACL_SELECT);
2314                         if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
2315                                                           modifiedCols) || aclresult == ACLCHECK_OK)
2316                         {
2317                                 column_perm = any_perm = true;
2318
2319                                 if (write_comma_collist)
2320                                         appendStringInfoString(&collist, ", ");
2321                                 else
2322                                         write_comma_collist = true;
2323
2324                                 appendStringInfoString(&collist, NameStr(att->attname));
2325                         }
2326                 }
2327
2328                 if (table_perm || column_perm)
2329                 {
2330                         if (slot->tts_isnull[i])
2331                                 val = "null";
2332                         else
2333                         {
2334                                 Oid                     foutoid;
2335                                 bool            typisvarlena;
2336
2337                                 getTypeOutputInfo(att->atttypid,
2338                                                                   &foutoid, &typisvarlena);
2339                                 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2340                         }
2341
2342                         if (write_comma)
2343                                 appendStringInfoString(&buf, ", ");
2344                         else
2345                                 write_comma = true;
2346
2347                         /* truncate if needed */
2348                         vallen = strlen(val);
2349                         if (vallen <= maxfieldlen)
2350                                 appendStringInfoString(&buf, val);
2351                         else
2352                         {
2353                                 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2354                                 appendBinaryStringInfo(&buf, val, vallen);
2355                                 appendStringInfoString(&buf, "...");
2356                         }
2357                 }
2358         }
2359
2360         /* If we end up with zero columns being returned, then return NULL. */
2361         if (!any_perm)
2362                 return NULL;
2363
2364         appendStringInfoChar(&buf, ')');
2365
2366         if (!table_perm)
2367         {
2368                 appendStringInfoString(&collist, ") = ");
2369                 appendStringInfoString(&collist, buf.data);
2370
2371                 return collist.data;
2372         }
2373
2374         return buf.data;
2375 }
2376
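/*
 * A hedged sketch of how the string built above is typically consumed
 * (condensed from the constraint-failure reports earlier in this file;
 * "orig_rel", "failed", and the 64-byte field limit are assumed from that
 * context).  With table-level SELECT the result looks like
 * "(1, null, abc...)"; with only column-level rights it degrades to e.g.
 * "(a, c) = (1, abc)"; with no visible columns it is NULL and the detail
 * line is omitted entirely:
 */
char	   *val_desc;

val_desc = ExecBuildSlotValueDescription(RelationGetRelid(orig_rel),
										 slot, tupdesc, modifiedCols, 64);
ereport(ERROR,
		(errcode(ERRCODE_CHECK_VIOLATION),
		 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
				RelationGetRelationName(orig_rel), failed),
		 /* only leak values the user is entitled to see */
		 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));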
2377
2378 /*
2379  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2380  * given ResultRelInfo
2381  */
2382 LockTupleMode
2383 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2384 {
2385         Bitmapset  *keyCols;
2386         Bitmapset  *updatedCols;
2387
2388         /*
2389          * Compute lock mode to use.  If columns that are part of the key have not
2390          * been modified, then we can use a weaker lock, allowing for better
2391          * concurrency.
2392          */
2393         updatedCols = GetUpdatedColumns(relinfo, estate);
2394         keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2395                                                                                  INDEX_ATTR_BITMAP_KEY);
2396
2397         if (bms_overlap(keyCols, updatedCols))
2398                 return LockTupleExclusive;
2399
2400         return LockTupleNoKeyExclusive;
2401 }
2402
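/*
 * A minimal sketch of the intended call pattern (modeled on
 * GetTupleForTrigger() in trigger.c; "relation", "relinfo", "tuple",
 * "buffer", and "hufd" are that caller's locals): choose the weakest safe
 * lock before locking the row to be updated, so that FOR KEY SHARE lockers
 * such as foreign-key checks are not blocked unnecessarily.
 */
HTSU_Result test;
LockTupleMode lockmode;

/* LockTupleNoKeyExclusive unless the UPDATE modifies any key column */
lockmode = ExecUpdateLockMode(estate, relinfo);

test = heap_lock_tuple(relation, &tuple,
					   estate->es_output_cid,
					   lockmode, LockWaitBlock,
					   false, &buffer, &hufd);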
2403 /*
2404  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2405  *
2406  * If no such struct exists, return NULL or throw an error, depending on missing_ok
2407  */
2408 ExecRowMark *
2409 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2410 {
2411         ListCell   *lc;
2412
2413         foreach(lc, estate->es_rowMarks)
2414         {
2415                 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2416
2417                 if (erm->rti == rti)
2418                         return erm;
2419         }
2420         if (!missing_ok)
2421                 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2422         return NULL;
2423 }
2424
2425 /*
2426  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2427  *
2428  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2429  * input plan node (not planstate node!).  We need the latter to find out
2430  * the column numbers of the resjunk columns.
2431  */
2432 ExecAuxRowMark *
2433 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2434 {
2435         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2436         char            resname[32];
2437
2438         aerm->rowmark = erm;
2439
2440         /* Look up the resjunk columns associated with this rowmark */
2441         if (erm->markType != ROW_MARK_COPY)
2442         {
2443                 /* need ctid for all methods other than COPY */
2444                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2445                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2446                                                                                                            resname);
2447                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2448                         elog(ERROR, "could not find junk %s column", resname);
2449         }
2450         else
2451         {
2452                 /* need wholerow if COPY */
2453                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2454                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2455                                                                                                                 resname);
2456                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2457                         elog(ERROR, "could not find junk %s column", resname);
2458         }
2459
2460         /* if child rel, need tableoid */
2461         if (erm->rti != erm->prti)
2462         {
2463                 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2464                 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2465                                                                                                            resname);
2466                 if (!AttributeNumberIsValid(aerm->toidAttNo))
2467                         elog(ERROR, "could not find junk %s column", resname);
2468         }
2469
2470         return aerm;
2471 }
2472
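/*
 * A condensed sketch of the usual setup sequence (modeled on
 * ExecInitLockRows() in nodeLockRows.c; "lrstate", "node", and "outerPlan"
 * are that caller's locals): each PlanRowMark is resolved to its runtime
 * ExecRowMark and wrapped in an ExecAuxRowMark recording the junk-column
 * numbers found in the subplan's targetlist.
 */
foreach(lc, node->rowMarks)
{
	PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
	ExecRowMark *erm;
	ExecAuxRowMark *aerm;

	/* ignore "parent" rowmarks; they are irrelevant at runtime */
	if (rc->isParent)
		continue;

	erm = ExecFindRowMark(estate, rc->rti, false);
	aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
	lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
}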
2473
2474 /*
2475  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2476  * process the updated version under READ COMMITTED rules.
2477  *
2478  * See backend/executor/README for some info about how this works.
2479  */
2480
2481
2482 /*
2483  * Check a modified tuple to see if we want to process its updated version
2484  * under READ COMMITTED rules.
2485  *
2486  *      estate - outer executor state data
2487  *      epqstate - state for EvalPlanQual rechecking
2488  *      relation - table containing tuple
2489  *      rti - rangetable index of table containing tuple
2490  *      lockmode - requested tuple lock mode
2491  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2492  *      priorXmax - t_xmax from the outdated tuple
2493  *
2494  * *tid is also an output parameter: it's modified to hold the TID of the
2495  * latest version of the tuple (note this may be changed even on failure)
2496  *
2497  * Returns a slot containing the new candidate update/delete tuple, or
2498  * NULL if we determine we shouldn't process the row.
2499  *
2500  * Note: properly, lockmode should be declared as enum LockTupleMode,
2501  * but we use "int" to avoid having to include heapam.h in executor.h.
2502  */
2503 TupleTableSlot *
2504 EvalPlanQual(EState *estate, EPQState *epqstate,
2505                          Relation relation, Index rti, int lockmode,
2506                          ItemPointer tid, TransactionId priorXmax)
2507 {
2508         TupleTableSlot *slot;
2509         HeapTuple       copyTuple;
2510
2511         Assert(rti > 0);
2512
2513         /*
2514          * Get and lock the updated version of the row; if that fails, return NULL.
2515          */
2516         copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2517                                                                   tid, priorXmax);
2518
2519         if (copyTuple == NULL)
2520                 return NULL;
2521
2522         /*
2523          * For UPDATE/DELETE we have to return the tid of the actual row we're
2524          * executing EPQ for.
2525          */
2526         *tid = copyTuple->t_self;
2527
2528         /*
2529          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
2530          */
2531         EvalPlanQualBegin(epqstate, estate);
2532
2533         /*
2534          * Free old test tuple, if any, and store new tuple where relation's scan
2535          * node will see it
2536          */
2537         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2538
2539         /*
2540          * Fetch any non-locked source rows
2541          */
2542         EvalPlanQualFetchRowMarks(epqstate);
2543
2544         /*
2545          * Run the EPQ query.  We assume it will return at most one tuple.
2546          */
2547         slot = EvalPlanQualNext(epqstate);
2548
2549         /*
2550          * If we got a tuple, force the slot to materialize the tuple so that it
2551          * is not dependent on any local state in the EPQ query (in particular,
2552          * it's highly likely that the slot contains references to any pass-by-ref
2553          * datums that may be present in copyTuple).  As with the next step, this
2554          * is to guard against early re-use of the EPQ query.
2555          */
2556         if (!TupIsNull(slot))
2557                 (void) ExecMaterializeSlot(slot);
2558
2559         /*
2560          * Clear out the test tuple.  This is needed in case the EPQ query is
2561          * re-used to test a tuple for a different relation.  (Not clear that can
2562          * really happen, but let's be safe.)
2563          */
2564         EvalPlanQualSetTuple(epqstate, rti, NULL);
2565
2566         return slot;
2567 }
2568
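/*
 * A hedged sketch of the main call site (condensed from the
 * HeapTupleUpdated handling in ExecUpdate(), nodeModifyTable.c; "hufd",
 * "lockmode", and the "lreplace" label belong to that caller): on a
 * concurrent update under READ COMMITTED, chase the new row version, and
 * if EvalPlanQual approves it, loop back and retry the UPDATE against it.
 */
case HeapTupleUpdated:
	if (IsolationUsesXactSnapshot())
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to concurrent update")));
	if (!ItemPointerEquals(tupleid, &hufd.ctid))
	{
		TupleTableSlot *epqslot;

		epqslot = EvalPlanQual(estate, epqstate,
							   resultRelationDesc,
							   resultRelInfo->ri_RangeTableIndex,
							   lockmode, &hufd.ctid, hufd.xmax);
		if (!TupIsNull(epqslot))
		{
			*tupleid = hufd.ctid;
			slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
			tuple = ExecMaterializeSlot(slot);
			goto lreplace;		/* retry the update on the new version */
		}
	}
	/* tuple already deleted (or EPQ said no); nothing more to do */
	return NULL;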
2569 /*
2570  * Fetch a copy of the newest version of an outdated tuple
2571  *
2572  *      estate - executor state data
2573  *      relation - table containing tuple
2574  *      lockmode - requested tuple lock mode
2575  *      wait_policy - requested lock wait policy
2576  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2577  *      priorXmax - t_xmax from the outdated tuple
2578  *
2579  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2580  * that there is no newest version (ie, the row was deleted, not updated).
2581  * We also return NULL if the tuple is locked and the wait policy is to skip
2582  * such tuples.
2583  *
2584  * If successful, we have locked the newest tuple version, so caller does not
2585  * need to worry about it changing anymore.
2586  *
2587  * Note: properly, lockmode should be declared as enum LockTupleMode,
2588  * but we use "int" to avoid having to include heapam.h in executor.h.
2589  */
2590 HeapTuple
2591 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2592                                   LockWaitPolicy wait_policy,
2593                                   ItemPointer tid, TransactionId priorXmax)
2594 {
2595         HeapTuple       copyTuple = NULL;
2596         HeapTupleData tuple;
2597         SnapshotData SnapshotDirty;
2598
2599         /*
2600          * fetch target tuple
2601          *
2602          * Loop here to deal with updated or busy tuples
2603          */
2604         InitDirtySnapshot(SnapshotDirty);
2605         tuple.t_self = *tid;
2606         for (;;)
2607         {
2608                 Buffer          buffer;
2609
2610                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2611                 {
2612                         HTSU_Result test;
2613                         HeapUpdateFailureData hufd;
2614
2615                         /*
2616                          * If xmin isn't what we're expecting, the slot must have been
2617                          * recycled and reused for an unrelated tuple.  This implies that
2618                          * the latest version of the row was deleted, so we need do
2619                          * nothing.  (Should be safe to examine xmin without getting
2620                          * buffer's content lock.  We assume reading a TransactionId to be
2621                          * atomic, and Xmin never changes in an existing tuple, except to
2622                          * invalid or frozen, and neither of those can match priorXmax.)
2623                          */
2624                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2625                                                                          priorXmax))
2626                         {
2627                                 ReleaseBuffer(buffer);
2628                                 return NULL;
2629                         }
2630
2631                         /* otherwise xmin should not be dirty... */
2632                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2633                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2634
2635                         /*
2636                          * If the tuple is being updated by another transaction, we have
2637                          * to wait for its commit/abort, or die trying.
2638                          */
2639                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2640                         {
2641                                 ReleaseBuffer(buffer);
2642                                 switch (wait_policy)
2643                                 {
2644                                         case LockWaitBlock:
2645                                                 XactLockTableWait(SnapshotDirty.xmax,
2646                                                                                   relation, &tuple.t_self,
2647                                                                                   XLTW_FetchUpdated);
2648                                                 break;
2649                                         case LockWaitSkip:
2650                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2651                                                         return NULL;    /* skip instead of waiting */
2652                                                 break;
2653                                         case LockWaitError:
2654                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2655                                                         ereport(ERROR,
2656                                                                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2657                                                                          errmsg("could not obtain lock on row in relation \"%s\"",
2658                                                                                         RelationGetRelationName(relation))));
2659                                                 break;
2660                                 }
2661                                 continue;               /* loop back to repeat heap_fetch */
2662                         }
2663
2664                         /*
2665                          * If tuple was inserted by our own transaction, we have to check
2666                          * cmin against es_output_cid: cmin >= current CID means our
2667                          * command cannot see the tuple, so we should ignore it. Otherwise
2668                          * heap_lock_tuple() will throw an error, and so would any later
2669                          * attempt to update or delete the tuple.  (We need not check cmax
2670                          * because HeapTupleSatisfiesDirty will consider a tuple deleted
2671                          * by our transaction dead, regardless of cmax.) We just checked
2672                          * that priorXmax == xmin, so we can test that variable instead of
2673                          * doing HeapTupleHeaderGetXmin again.
2674                          */
2675                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2676                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2677                         {
2678                                 ReleaseBuffer(buffer);
2679                                 return NULL;
2680                         }
2681
2682                         /*
2683                          * This is a live tuple, so now try to lock it.
2684                          */
2685                         test = heap_lock_tuple(relation, &tuple,
2686                                                                    estate->es_output_cid,
2687                                                                    lockmode, wait_policy,
2688                                                                    false, &buffer, &hufd);
2689                         /* We now have two pins on the buffer, get rid of one */
2690                         ReleaseBuffer(buffer);
2691
2692                         switch (test)
2693                         {
2694                                 case HeapTupleSelfUpdated:
2695
2696                                         /*
2697                                          * The target tuple was already updated or deleted by the
2698                                          * current command, or by a later command in the current
2699                                          * transaction.  We *must* ignore the tuple in the former
2700                                          * case, so as to avoid the "Halloween problem" of
2701                                          * repeated update attempts.  In the latter case it might
2702                                          * be sensible to fetch the updated tuple instead, but
2703                                          * doing so would require changing heap_update and
2704                                          * heap_delete to not complain about updating "invisible"
2705                                          * tuples, which seems pretty scary (heap_lock_tuple will
2706                                          * not complain, but few callers expect
2707                                          * HeapTupleInvisible, and we're not one of them).  So for
2708                                          * now, treat the tuple as deleted and do not process.
2709                                          */
2710                                         ReleaseBuffer(buffer);
2711                                         return NULL;
2712
2713                                 case HeapTupleMayBeUpdated:
2714                                         /* successfully locked */
2715                                         break;
2716
2717                                 case HeapTupleUpdated:
2718                                         ReleaseBuffer(buffer);
2719                                         if (IsolationUsesXactSnapshot())
2720                                                 ereport(ERROR,
2721                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2722                                                                  errmsg("could not serialize access due to concurrent update")));
2723
2724                                         /* Should not encounter speculative tuple on recheck */
2725                                         Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2726                                         if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2727                                         {
2728                                                 /* it was updated, so look at the updated version */
2729                                                 tuple.t_self = hufd.ctid;
2730                                                 /* updated row should have xmin matching this xmax */
2731                                                 priorXmax = hufd.xmax;
2732                                                 continue;
2733                                         }
2734                                         /* tuple was deleted, so give up */
2735                                         return NULL;
2736
2737                                 case HeapTupleWouldBlock:
2738                                         ReleaseBuffer(buffer);
2739                                         return NULL;
2740
2741                                 case HeapTupleInvisible:
2742                                         elog(ERROR, "attempted to lock invisible tuple");
2743
2744                                 default:
2745                                         ReleaseBuffer(buffer);
2746                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2747                                                  test);
2748                                         return NULL;    /* keep compiler quiet */
2749                         }
2750
2751                         /*
2752                          * We got the tuple; now copy it for use by the recheck query.
2753                          */
2754                         copyTuple = heap_copytuple(&tuple);
2755                         ReleaseBuffer(buffer);
2756                         break;
2757                 }
2758
2759                 /*
2760                  * If the referenced slot was actually empty, the latest version of
2761                  * the row must have been deleted, so we need do nothing.
2762                  */
2763                 if (tuple.t_data == NULL)
2764                 {
2765                         ReleaseBuffer(buffer);
2766                         return NULL;
2767                 }
2768
2769                 /*
2770                  * As above, if xmin isn't what we're expecting, do nothing.
2771                  */
2772                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2773                                                                  priorXmax))
2774                 {
2775                         ReleaseBuffer(buffer);
2776                         return NULL;
2777                 }
2778
2779                 /*
2780                  * If we get here, the tuple was found but failed SnapshotDirty.
2781                  * Assuming the xmin is either a committed xact or our own xact (as it
2782                  * certainly should be if we're trying to modify the tuple), this must
2783                  * mean that the row was updated or deleted by either a committed xact
2784                  * or our own xact.  If it was deleted, we can ignore it; if it was
2785                  * updated then chain up to the next version and repeat the whole
2786                  * process.
2787                  *
2788                  * As above, it should be safe to examine xmax and t_ctid without the
2789                  * buffer content lock, because they can't be changing.
2790                  */
2791                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2792                 {
2793                         /* deleted, so forget about it */
2794                         ReleaseBuffer(buffer);
2795                         return NULL;
2796                 }
2797
2798                 /* updated, so look at the updated row */
2799                 tuple.t_self = tuple.t_data->t_ctid;
2800                 /* updated row should have xmin matching this xmax */
2801                 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2802                 ReleaseBuffer(buffer);
2803                 /* loop back to fetch next in chain */
2804         }
2805
2806         /*
2807          * Return the copied tuple
2808          */
2809         return copyTuple;
2810 }
2811
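/*
 * A minimal sketch of the other main caller (condensed from ExecLockRows()
 * in nodeLockRows.c; "erm", "hufd", and the "lnext" label are that
 * function's locals): when heap_lock_tuple() reports HeapTupleUpdated,
 * chase and lock the newest version, or drop the row if it is gone.
 */
if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
	goto lnext;				/* tuple was deleted, so skip it */

/* it was updated; fetch and lock the updated version */
copyTuple = EvalPlanQualFetch(estate, erm->relation,
							  lockmode, erm->waitPolicy,
							  &hufd.ctid, hufd.xmax);
if (copyTuple == NULL)
	goto lnext;				/* no usable version; skip this row too */

/* remember the locked version's TID for the EPQ recheck */
tuple.t_self = copyTuple->t_self;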
2812 /*
2813  * EvalPlanQualInit -- initialize during creation of a plan state node
2814  * that might need to invoke EPQ processing.
2815  *
2816  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2817  * with EvalPlanQualSetPlan.
2818  */
2819 void
2820 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2821                                  Plan *subplan, List *auxrowmarks, int epqParam)
2822 {
2823         /* Mark the EPQ state inactive */
2824         epqstate->estate = NULL;
2825         epqstate->planstate = NULL;
2826         epqstate->origslot = NULL;
2827         /* ... and remember data that EvalPlanQualBegin will need */
2828         epqstate->plan = subplan;
2829         epqstate->arowMarks = auxrowmarks;
2830         epqstate->epqParam = epqParam;
2831 }
2832
2833 /*
2834  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2835  *
2836  * We need this so that ModifyTable can deal with multiple subplans.
2837  */
2838 void
2839 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2840 {
2841         /* If we have a live EPQ query, shut it down */
2842         EvalPlanQualEnd(epqstate);
2843         /* And set/change the plan pointer */
2844         epqstate->plan = subplan;
2845         /* The rowmarks depend on the plan, too */
2846         epqstate->arowMarks = auxrowmarks;
2847 }
2848
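/*
 * A hedged sketch of how these two are paired in practice (modeled on
 * nodeModifyTable.c): Init runs once at executor startup with no subplan,
 * and SetPlan re-targets the EPQ state each time ModifyTable switches to
 * the next child subplan.
 */
/* at ExecInitModifyTable() time: */
EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);

/* ... and whenever ExecModifyTable() advances to another subplan: */
EvalPlanQualSetPlan(&node->mt_epqstate, subplan,
					node->mt_arowmarks[node->mt_whichplan]);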
2849 /*
2850  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2851  *
2852  * NB: passed tuple must be palloc'd; it may get freed later
2853  */
2854 void
2855 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2856 {
2857         EState     *estate = epqstate->estate;
2858
2859         Assert(rti > 0);
2860
2861         /*
2862          * free old test tuple, if any, and store new tuple where relation's scan
2863          * node will see it
2864          */
2865         if (estate->es_epqTuple[rti - 1] != NULL)
2866                 heap_freetuple(estate->es_epqTuple[rti - 1]);
2867         estate->es_epqTuple[rti - 1] = tuple;
2868         estate->es_epqTupleSet[rti - 1] = true;
2869 }
2870
2871 /*
2872  * Fetch back the current test tuple (if any) for the specified RTI
2873  */
2874 HeapTuple
2875 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2876 {
2877         EState     *estate = epqstate->estate;
2878
2879         Assert(rti > 0);
2880
2881         return estate->es_epqTuple[rti - 1];
2882 }
2883
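/*
 * For context, a condensed sketch of how scan nodes consume these test
 * tuples (modeled on ExecScanFetch() in execScan.c; "node", "scanrelid",
 * and "recheckMtd" are that function's locals): while an EPQ recheck is
 * active, a scan over rangetable entry "scanrelid" returns the stored test
 * tuple exactly once instead of scanning the underlying relation.
 */
if (estate->es_epqTupleSet[scanrelid - 1])
{
	TupleTableSlot *slot = node->ss_ScanTupleSlot;

	/* each test tuple is returned at most once per recheck cycle */
	if (estate->es_epqScanDone[scanrelid - 1])
		return ExecClearTuple(slot);
	estate->es_epqScanDone[scanrelid - 1] = true;

	/* a NULL test tuple means this rel contributes no row this time */
	if (estate->es_epqTuple[scanrelid - 1] == NULL)
		return ExecClearTuple(slot);

	ExecStoreTuple(estate->es_epqTuple[scanrelid - 1],
				   slot, InvalidBuffer, false);
	/* the tuple is still subject to the node's own qual recheck */
	if (!(*recheckMtd) (node, slot))
		ExecClearTuple(slot);
	return slot;
}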
2884 /*
2885  * Fetch the current row values for any non-locked relations that need
2886  * to be scanned by an EvalPlanQual operation.  origslot must have been set
2887  * to contain the current result row (top-level row) that we need to recheck.
2888  */
2889 void
2890 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2891 {
2892         ListCell   *l;
2893
2894         Assert(epqstate->origslot != NULL);
2895
2896         foreach(l, epqstate->arowMarks)
2897         {
2898                 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2899                 ExecRowMark *erm = aerm->rowmark;
2900                 Datum           datum;
2901                 bool            isNull;
2902                 HeapTupleData tuple;
2903
2904                 if (RowMarkRequiresRowShareLock(erm->markType))
2905                         elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2906
2907                 /* clear any leftover test tuple for this rel */
2908                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2909
2910                 /* if child rel, must check whether it produced this row */
2911                 if (erm->rti != erm->prti)
2912                 {
2913                         Oid                     tableoid;
2914
2915                         datum = ExecGetJunkAttribute(epqstate->origslot,
2916                                                                                  aerm->toidAttNo,
2917                                                                                  &isNull);
2918                         /* non-locked rels could be on the inside of outer joins */
2919                         if (isNull)
2920                                 continue;
2921                         tableoid = DatumGetObjectId(datum);
2922
2923                         Assert(OidIsValid(erm->relid));
2924                         if (tableoid != erm->relid)
2925                         {
2926                                 /* this child is inactive right now */
2927                                 continue;
2928                         }
2929                 }
2930
2931                 if (erm->markType == ROW_MARK_REFERENCE)
2932                 {
2933                         HeapTuple       copyTuple;
2934
2935                         Assert(erm->relation != NULL);
2936
2937                         /* fetch the tuple's ctid */
2938                         datum = ExecGetJunkAttribute(epqstate->origslot,
2939                                                                                  aerm->ctidAttNo,
2940                                                                                  &isNull);
2941                         /* non-locked rels could be on the inside of outer joins */
2942                         if (isNull)
2943                                 continue;
2944
2945                         /* fetch requests on foreign tables must be passed to their FDW */
2946                         if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2947                         {
2948                                 FdwRoutine *fdwroutine;
2949                                 bool            updated = false;
2950
2951                                 fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2952                                 /* this should have been checked already, but let's be safe */
2953                                 if (fdwroutine->RefetchForeignRow == NULL)
2954                                         ereport(ERROR,
2955                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2956                                                          errmsg("cannot lock rows in foreign table \"%s\"",
2957                                                                         RelationGetRelationName(erm->relation))));
2958                                 copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
2959                                                                                                                   erm,
2960                                                                                                                   datum,
2961                                                                                                                   &updated);
2962                                 if (copyTuple == NULL)
2963                                         elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2964
2965                                 /*
2966                                  * Ideally we'd insist on updated == false here, but that
2967                                  * assumes that FDWs can track that exactly, which they might
2968                                  * not be able to.  So just ignore the flag.
2969                                  */
2970                         }
2971                         else
2972                         {
2973                                 /* ordinary table, fetch the tuple */
2974                                 Buffer          buffer;
2975
2976                                 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2977                                 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2978                                                                 false, NULL))
2979                                         elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2980
2981                                 /* successful, copy tuple */
2982                                 copyTuple = heap_copytuple(&tuple);
2983                                 ReleaseBuffer(buffer);
2984                         }
2985
2986                         /* store tuple */
2987                         EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
2988                 }
2989                 else
2990                 {
2991                         HeapTupleHeader td;
2992
2993                         Assert(erm->markType == ROW_MARK_COPY);
2994
2995                         /* fetch the whole-row Var for the relation */
2996                         datum = ExecGetJunkAttribute(epqstate->origslot,
2997                                                                                  aerm->wholeAttNo,
2998                                                                                  &isNull);
2999                         /* non-locked rels could be on the inside of outer joins */
3000                         if (isNull)
3001                                 continue;
3002                         td = DatumGetHeapTupleHeader(datum);
3003
3004                         /* build a temporary HeapTuple control structure */
3005                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
3006                         tuple.t_data = td;
3007                         /* relation might be a foreign table, if so provide tableoid */
3008                         tuple.t_tableOid = erm->relid;
3009                         /* also copy t_ctid in case there's valid data there */
3010                         tuple.t_self = td->t_ctid;
3011
3012                         /* copy and store tuple */
3013                         EvalPlanQualSetTuple(epqstate, erm->rti,
3014                                                                  heap_copytuple(&tuple));
3015                 }
3016         }
3017 }
3018
3019 /*
3020  * Fetch the next row (if any) from EvalPlanQual testing
3021  *
3022  * (In practice, there should never be more than one row...)
3023  */
3024 TupleTableSlot *
3025 EvalPlanQualNext(EPQState *epqstate)
3026 {
3027         MemoryContext oldcontext;
3028         TupleTableSlot *slot;
3029
3030         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
3031         slot = ExecProcNode(epqstate->planstate);
3032         MemoryContextSwitchTo(oldcontext);
3033
3034         return slot;
3035 }
3036
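/*
 * A hedged sketch of the full recheck drive sequence (condensed from the
 * EPQ branch of ExecLockRows() in nodeLockRows.c; "node", "erm",
 * "copyTuple", "slot", and the "lnext" label are that caller's locals):
 * install the locked tuples, let EPQ re-fetch the non-locked rels, then
 * re-run the quals against the new row versions.
 */
EvalPlanQualBegin(&node->lr_epqstate, estate);

/* transfer already-locked tuples into the EPQ state (one per rowmark) */
EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, copyTuple);

/* fetch current rows for any non-locked relations */
node->lr_epqstate.origslot = slot;
EvalPlanQualFetchRowMarks(&node->lr_epqstate);

/* and finally re-evaluate the quals */
slot = EvalPlanQualNext(&node->lr_epqstate);
if (TupIsNull(slot))
	goto lnext;				/* updated row no longer passes, so skip it */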
3037 /*
3038  * Initialize or reset an EvalPlanQual state tree
3039  */
3040 void
3041 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
3042 {
3043         EState     *estate = epqstate->estate;
3044
3045         if (estate == NULL)
3046         {
3047                 /* First time through, so create a child EState */
3048                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
3049         }
3050         else
3051         {
3052                 /*
3053                  * We already have a suitable child EPQ tree, so just reset it.
3054                  */
3055                 int                     rtsize = list_length(parentestate->es_range_table);
3056                 PlanState  *planstate = epqstate->planstate;
3057
3058                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
3059
3060                 /* Recopy current values of parent parameters */
3061                 if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3062                 {
3063                         int                     i;
3064
3065                         i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3066
3067                         while (--i >= 0)
3068                         {
3069                                 /* copy value if any, but not execPlan link */
3070                                 estate->es_param_exec_vals[i].value =
3071                                         parentestate->es_param_exec_vals[i].value;
3072                                 estate->es_param_exec_vals[i].isnull =
3073                                         parentestate->es_param_exec_vals[i].isnull;
3074                         }
3075                 }
3076
3077                 /*
3078                  * Mark child plan tree as needing rescan at all scan nodes.  The
3079                  * first ExecProcNode will take care of actually doing the rescan.
3080                  */
3081                 planstate->chgParam = bms_add_member(planstate->chgParam,
3082                                                                                          epqstate->epqParam);
3083         }
3084 }
3085
3086 /*
3087  * Start execution of an EvalPlanQual plan tree.
3088  *
3089  * This is a cut-down version of ExecutorStart(): we copy some state from
3090  * the top-level estate rather than initializing it fresh.
3091  */
3092 static void
3093 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
3094 {
3095         EState     *estate;
3096         int                     rtsize;
3097         MemoryContext oldcontext;
3098         ListCell   *l;
3099
3100         rtsize = list_length(parentestate->es_range_table);
3101
3102         epqstate->estate = estate = CreateExecutorState();
3103
3104         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3105
3106         /*
3107          * Child EPQ EStates share the parent's copy of unchanging state such as
3108          * the snapshot, rangetable, result-rel info, and external Param info.
3109          * They need their own copies of local state, including a tuple table,
3110          * es_param_exec_vals, etc.
3111          *
3112          * The ResultRelInfo array management is trickier than it looks.  We
3113          * create a fresh array for the child but copy all the content from the
3114          * parent.  This is because it's okay for the child to share any
3115          * per-relation state the parent has already created --- but if the child
3116          * sets up any ResultRelInfo fields, such as its own junkfilter, that
3117          * state must *not* propagate back to the parent.  (For one thing, the
3118          * pointed-to data is in a memory context that won't last long enough.)
3119          */
3120         estate->es_direction = ForwardScanDirection;
3121         estate->es_snapshot = parentestate->es_snapshot;
3122         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
3123         estate->es_range_table = parentestate->es_range_table;
3124         estate->es_plannedstmt = parentestate->es_plannedstmt;
3125         estate->es_junkFilter = parentestate->es_junkFilter;
3126         estate->es_output_cid = parentestate->es_output_cid;
3127         if (parentestate->es_num_result_relations > 0)
3128         {
3129                 int                     numResultRelations = parentestate->es_num_result_relations;
3130                 ResultRelInfo *resultRelInfos;
3131
3132                 resultRelInfos = (ResultRelInfo *)
3133                         palloc(numResultRelations * sizeof(ResultRelInfo));
3134                 memcpy(resultRelInfos, parentestate->es_result_relations,
3135                            numResultRelations * sizeof(ResultRelInfo));
3136                 estate->es_result_relations = resultRelInfos;
3137                 estate->es_num_result_relations = numResultRelations;
3138         }
3139         /* es_result_relation_info must NOT be copied */
3140         /* es_trig_target_relations must NOT be copied */
3141         estate->es_rowMarks = parentestate->es_rowMarks;
3142         estate->es_top_eflags = parentestate->es_top_eflags;
3143         estate->es_instrument = parentestate->es_instrument;
3144         /* es_auxmodifytables must NOT be copied */
3145
3146         /*
3147          * The external param list is simply shared from parent.  The internal
3148          * param workspace has to be local state, but we copy the initial values
3149          * from the parent, so as to have access to any param values that were
3150          * already set from other parts of the parent's plan tree.
3151          */
3152         estate->es_param_list_info = parentestate->es_param_list_info;
3153         if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3154         {
3155                 int                     i;
3156
3157                 i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3158                 estate->es_param_exec_vals = (ParamExecData *)
3159                         palloc0(i * sizeof(ParamExecData));
3160                 while (--i >= 0)
3161                 {
3162                         /* copy value if any, but not execPlan link */
3163                         estate->es_param_exec_vals[i].value =
3164                                 parentestate->es_param_exec_vals[i].value;
3165                         estate->es_param_exec_vals[i].isnull =
3166                                 parentestate->es_param_exec_vals[i].isnull;
3167                 }
3168         }
3169
3170         /*
3171          * Each EState must have its own es_epqScanDone state, but if we have
3172          * nested EPQ checks they should share es_epqTuple arrays.  This allows
3173          * sub-rechecks to inherit the values being examined by an outer recheck.
3174          */
3175         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
3176         if (parentestate->es_epqTuple != NULL)
3177         {
3178                 estate->es_epqTuple = parentestate->es_epqTuple;
3179                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
3180         }
3181         else
3182         {
3183                 estate->es_epqTuple = (HeapTuple *)
3184                         palloc0(rtsize * sizeof(HeapTuple));
3185                 estate->es_epqTupleSet = (bool *)
3186                         palloc0(rtsize * sizeof(bool));
3187         }
3188
3189         /*
3190          * Each estate also has its own tuple table.
3191          */
3192         estate->es_tupleTable = NIL;
3193
3194         /*
3195          * Initialize private state information for each SubPlan.  We must do this
3196          * before running ExecInitNode on the main query tree, since
3197          * ExecInitSubPlan expects to be able to find these entries. Some of the
3198          * SubPlans might not be used in the part of the plan tree we intend to
3199          * run, but since it's not easy to tell which, we just initialize them
3200          * all.
3201          */
3202         Assert(estate->es_subplanstates == NIL);
3203         foreach(l, parentestate->es_plannedstmt->subplans)
3204         {
3205                 Plan       *subplan = (Plan *) lfirst(l);
3206                 PlanState  *subplanstate;
3207
3208                 subplanstate = ExecInitNode(subplan, estate, 0);
3209                 estate->es_subplanstates = lappend(estate->es_subplanstates,
3210                                                                                    subplanstate);
3211         }
3212
3213         /*
3214          * Initialize the private state information for all the nodes in the part
3215          * of the plan tree we need to run.  This opens files, allocates storage
3216          * and leaves us ready to start processing tuples.
3217          */
3218         epqstate->planstate = ExecInitNode(planTree, estate, 0);
3219
3220         MemoryContextSwitchTo(oldcontext);
3221 }
3222
3223 /*
3224  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3225  * or if we are done with the current EPQ child.
3226  *
3227  * This is a cut-down version of ExecutorEnd(); basically we want to do most
3228  * of the normal cleanup, but *not* close result relations (which we are
3229  * just sharing from the outer query).  We do, however, have to close any
3230  * trigger target relations that got opened, since those are not shared.
3231  * (There probably shouldn't be any of the latter, but just in case...)
3232  */
3233 void
3234 EvalPlanQualEnd(EPQState *epqstate)
3235 {
3236         EState     *estate = epqstate->estate;
3237         MemoryContext oldcontext;
3238         ListCell   *l;
3239
3240         if (estate == NULL)
3241                 return;                                 /* idle, so nothing to do */
3242
3243         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3244
3245         ExecEndNode(epqstate->planstate);
3246
3247         foreach(l, estate->es_subplanstates)
3248         {
3249                 PlanState  *subplanstate = (PlanState *) lfirst(l);
3250
3251                 ExecEndNode(subplanstate);
3252         }
3253
3254         /* throw away the per-estate tuple table */
3255         ExecResetTupleTable(estate->es_tupleTable, false);
3256
3257         /* close any trigger target relations attached to this EState */
3258         ExecCleanUpTriggerState(estate);
3259
3260         MemoryContextSwitchTo(oldcontext);
3261
3262         FreeExecutorState(estate);
3263
3264         /* Mark EPQState idle */
3265         epqstate->estate = NULL;
3266         epqstate->planstate = NULL;
3267         epqstate->origslot = NULL;
3268         epqstate->origslot = NULL;
3269 }
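/*
 * A minimal usage sketch (modeled on ExecEndLockRows() in nodeLockRows.c):
 * the owning plan node simply shuts its EPQ state down during ExecEndNode
 * processing; EvalPlanQualEnd() is a no-op if no recheck ever ran.
 */
void
ExecEndLockRows(LockRowsState *node)
{
	EvalPlanQualEnd(&node->lr_epqstate);
	ExecEndNode(outerPlanState(node));
}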