1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorFinish()
10  *      ExecutorEnd()
11  *
12  *      These four procedures are the external interface to the executor.
13  *      In each case, the query descriptor is required as an argument.
14  *
15  *      ExecutorStart must be called at the beginning of execution of any
16  *      query plan and ExecutorEnd must always be called at the end of
17  *      execution of a plan (unless it is aborted due to error).
18  *
19  *      ExecutorRun accepts direction and count arguments that specify whether
20  *      the plan is to be executed forwards or backwards, and for how many tuples.
21  *      In some cases ExecutorRun may be called multiple times to process all
22  *      the tuples for a plan.  It is also acceptable to stop short of executing
23  *      the whole plan (but only if it is a SELECT).
24  *
25  *      ExecutorFinish must be called after the final ExecutorRun call and
26  *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *      which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *        src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
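/*
 * A minimal caller-side sketch of the protocol described above, assuming a
 * QueryDesc already built with CreateQueryDesc() and a snapshot set up by
 * the caller.  The function name is hypothetical and the block is compiled
 * out by default; it only illustrates the required call order.
 */
#ifdef EXECMAIN_USAGE_SKETCH
static void
run_to_completion_sketch(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, 0);	/* eflags = 0: plain execution */
	ExecutorRun(queryDesc, ForwardScanDirection, 0, true);	/* count = 0: run to completion */
	ExecutorFinish(queryDesc);	/* fire queued AFTER triggers, etc. */
	ExecutorEnd(queryDesc);		/* release executor resources */
}
#endif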
38 #include "postgres.h"
39
40 #include "access/heapam.h"
41 #include "access/htup_details.h"
42 #include "access/sysattr.h"
43 #include "access/tableam.h"
44 #include "access/transam.h"
45 #include "access/xact.h"
46 #include "catalog/namespace.h"
47 #include "catalog/pg_publication.h"
48 #include "commands/matview.h"
49 #include "commands/trigger.h"
50 #include "executor/execdebug.h"
51 #include "executor/nodeSubplan.h"
52 #include "foreign/fdwapi.h"
53 #include "jit/jit.h"
54 #include "mb/pg_wchar.h"
55 #include "miscadmin.h"
56 #include "parser/parsetree.h"
57 #include "storage/bufmgr.h"
58 #include "storage/lmgr.h"
59 #include "tcop/utility.h"
60 #include "utils/acl.h"
61 #include "utils/lsyscache.h"
62 #include "utils/memutils.h"
63 #include "utils/partcache.h"
64 #include "utils/rls.h"
65 #include "utils/ruleutils.h"
66 #include "utils/snapmgr.h"
67
68
69 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
70 ExecutorStart_hook_type ExecutorStart_hook = NULL;
71 ExecutorRun_hook_type ExecutorRun_hook = NULL;
72 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
73 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
74
75 /* Hook for plugin to get control in ExecCheckRTPerms() */
76 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
77
78 /* decls for local routines only used within this module */
79 static void InitPlan(QueryDesc *queryDesc, int eflags);
80 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
81 static void ExecPostprocessPlan(EState *estate);
82 static void ExecEndPlan(PlanState *planstate, EState *estate);
83 static void ExecutePlan(EState *estate, PlanState *planstate,
84                                                 bool use_parallel_mode,
85                                                 CmdType operation,
86                                                 bool sendTuples,
87                                                 uint64 numberTuples,
88                                                 ScanDirection direction,
89                                                 DestReceiver *dest,
90                                                 bool execute_once);
91 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
92 static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
93                                                                           Bitmapset *modifiedCols,
94                                                                           AclMode requiredPerms);
95 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
96 static char *ExecBuildSlotValueDescription(Oid reloid,
97                                                                                    TupleTableSlot *slot,
98                                                                                    TupleDesc tupdesc,
99                                                                                    Bitmapset *modifiedCols,
100                                                                                    int maxfieldlen);
101 static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
102
103 /*
104  * Note that GetAllUpdatedColumns() also exists in commands/trigger.c.  There does
105  * not appear to be any good header to put it into, given the structures that
106  * it uses, so we let them be duplicated.  Be sure to update both if one needs
107  * to be changed, however.
108  */
109 #define GetInsertedColumns(relinfo, estate) \
110         (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->insertedCols)
111 #define GetUpdatedColumns(relinfo, estate) \
112         (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols)
113 #define GetAllUpdatedColumns(relinfo, estate) \
114         (bms_union(exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols, \
115                            exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->extraUpdatedCols))
116
117 /* end of local decls */
118
119
120 /* ----------------------------------------------------------------
121  *              ExecutorStart
122  *
123  *              This routine must be called at the beginning of any execution of any
124  *              query plan
125  *
126  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
127  * only because some places use QueryDescs for utility commands).  The tupDesc
128  * field of the QueryDesc is filled in to describe the tuples that will be
129  * returned, and the internal fields (estate and planstate) are set up.
130  *
131  * eflags contains flag bits as described in executor.h.
132  *
133  * NB: the CurrentMemoryContext when this is called will become the parent
134  * of the per-query context used for this Executor invocation.
135  *
136  * We provide a function hook variable that lets loadable plugins
137  * get control when ExecutorStart is called.  Such a plugin would
138  * normally call standard_ExecutorStart().
139  *
140  * ----------------------------------------------------------------
141  */
142 void
143 ExecutorStart(QueryDesc *queryDesc, int eflags)
144 {
145         if (ExecutorStart_hook)
146                 (*ExecutorStart_hook) (queryDesc, eflags);
147         else
148                 standard_ExecutorStart(queryDesc, eflags);
149 }
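/*
 * A minimal sketch of the plugin side of the hook above, following the usual
 * save-previous-and-chain convention used by extensions such as auto_explain.
 * The static names are hypothetical and the block is compiled out by default.
 */
#ifdef EXECMAIN_HOOK_SKETCH
static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL;

static void
sketch_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	/* plugin-specific setup would go here */
	if (prev_ExecutorStart_hook)
		prev_ExecutorStart_hook(queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
	prev_ExecutorStart_hook = ExecutorStart_hook;
	ExecutorStart_hook = sketch_ExecutorStart;
}
#endif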
150
151 void
152 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
153 {
154         EState     *estate;
155         MemoryContext oldcontext;
156
157         /* sanity checks: queryDesc must not be started already */
158         Assert(queryDesc != NULL);
159         Assert(queryDesc->estate == NULL);
160
161         /*
162          * If the transaction is read-only, we need to check if any writes are
163          * planned to non-temporary tables.  EXPLAIN is considered read-only.
164          *
165          * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
166          * would require (a) storing the combocid hash in shared memory, rather
167          * than synchronizing it just once at the start of parallelism, and (b) an
168          * alternative to heap_update()'s reliance on xmax for mutual exclusion.
169          * INSERT may have no such troubles, but we forbid it to simplify the
170          * checks.
171          *
172          * We have lower-level defenses in CommandCounterIncrement and elsewhere
173          * against performing unsafe operations in parallel mode, but this gives a
174          * more user-friendly error message.
175          */
176         if ((XactReadOnly || IsInParallelMode()) &&
177                 !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
178                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
179
180         /*
181          * Build EState, switch into per-query memory context for startup.
182          */
183         estate = CreateExecutorState();
184         queryDesc->estate = estate;
185
186         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
187
188         /*
189          * Fill in external parameters, if any, from queryDesc; and allocate
190          * workspace for internal parameters
191          */
192         estate->es_param_list_info = queryDesc->params;
193
194         if (queryDesc->plannedstmt->paramExecTypes != NIL)
195         {
196                 int                     nParamExec;
197
198                 nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
199                 estate->es_param_exec_vals = (ParamExecData *)
200                         palloc0(nParamExec * sizeof(ParamExecData));
201         }
202
203         estate->es_sourceText = queryDesc->sourceText;
204
205         /*
206          * Fill in the query environment, if any, from queryDesc.
207          */
208         estate->es_queryEnv = queryDesc->queryEnv;
209
210         /*
211          * If non-read-only query, set the command ID to mark output tuples with
212          */
213         switch (queryDesc->operation)
214         {
215                 case CMD_SELECT:
216
217                         /*
218                          * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
219                          * tuples
220                          */
221                         if (queryDesc->plannedstmt->rowMarks != NIL ||
222                                 queryDesc->plannedstmt->hasModifyingCTE)
223                                 estate->es_output_cid = GetCurrentCommandId(true);
224
225                         /*
226                          * A SELECT without modifying CTEs can't possibly queue triggers,
227                          * so force skip-triggers mode. This is just a marginal efficiency
228                          * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
229                          * all that expensive, but we might as well do it.
230                          */
231                         if (!queryDesc->plannedstmt->hasModifyingCTE)
232                                 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
233                         break;
234
235                 case CMD_INSERT:
236                 case CMD_DELETE:
237                 case CMD_UPDATE:
238                         estate->es_output_cid = GetCurrentCommandId(true);
239                         break;
240
241                 default:
242                         elog(ERROR, "unrecognized operation code: %d",
243                                  (int) queryDesc->operation);
244                         break;
245         }
246
247         /*
248          * Copy other important information into the EState
249          */
250         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
251         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
252         estate->es_top_eflags = eflags;
253         estate->es_instrument = queryDesc->instrument_options;
254         estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
255
256         /*
257          * Set up an AFTER-trigger statement context, unless told not to, or
258          * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
259          */
260         if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
261                 AfterTriggerBeginQuery();
262
263         /*
264          * Initialize the plan state tree
265          */
266         InitPlan(queryDesc, eflags);
267
268         MemoryContextSwitchTo(oldcontext);
269 }
270
271 /* ----------------------------------------------------------------
272  *              ExecutorRun
273  *
274  *              This is the main routine of the executor module. It accepts
275  *              the query descriptor from the traffic cop and executes the
276  *              query plan.
277  *
278  *              ExecutorStart must have been called already.
279  *
280  *              If direction is NoMovementScanDirection then nothing is done
281  *              except to start up/shut down the destination.  Otherwise,
282  *              we retrieve up to 'count' tuples in the specified direction.
283  *
284  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
285  *              completion.  Also note that the count limit is only applied to
286  *              retrieved tuples, not for instance to those inserted/updated/deleted
287  *              by a ModifyTable plan node.
288  *
289  *              There is no return value, but output tuples (if any) are sent to
290  *              the destination receiver specified in the QueryDesc; and the number
291  *              of tuples processed at the top level can be found in
292  *              estate->es_processed.
293  *
294  *              We provide a function hook variable that lets loadable plugins
295  *              get control when ExecutorRun is called.  Such a plugin would
296  *              normally call standard_ExecutorRun().
297  *
298  * ----------------------------------------------------------------
299  */
300 void
301 ExecutorRun(QueryDesc *queryDesc,
302                         ScanDirection direction, uint64 count,
303                         bool execute_once)
304 {
305         if (ExecutorRun_hook)
306                 (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
307         else
308                 standard_ExecutorRun(queryDesc, direction, count, execute_once);
309 }
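/*
 * A minimal sketch of batched retrieval using the count argument, roughly as
 * a portal would drive it.  The function name and batch size are arbitrary;
 * execute_once must be false whenever ExecutorRun may be called again, and
 * the block is compiled out by default.
 */
#ifdef EXECMAIN_BATCH_SKETCH
static void
fetch_in_batches_sketch(QueryDesc *queryDesc)
{
	for (;;)
	{
		ExecutorRun(queryDesc, ForwardScanDirection, 100, false);

		/* es_processed is reset for each run; a short batch means we're done */
		if (queryDesc->estate->es_processed < 100)
			break;
	}
}
#endif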
310
311 void
312 standard_ExecutorRun(QueryDesc *queryDesc,
313                                          ScanDirection direction, uint64 count, bool execute_once)
314 {
315         EState     *estate;
316         CmdType         operation;
317         DestReceiver *dest;
318         bool            sendTuples;
319         MemoryContext oldcontext;
320
321         /* sanity checks */
322         Assert(queryDesc != NULL);
323
324         estate = queryDesc->estate;
325
326         Assert(estate != NULL);
327         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
328
329         /*
330          * Switch into per-query memory context
331          */
332         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
333
334         /* Allow instrumentation of Executor overall runtime */
335         if (queryDesc->totaltime)
336                 InstrStartNode(queryDesc->totaltime);
337
338         /*
339          * extract information from the query descriptor.
340          */
341         operation = queryDesc->operation;
342         dest = queryDesc->dest;
343
344         /*
345          * startup tuple receiver, if we will be emitting tuples
346          */
347         estate->es_processed = 0;
348
349         sendTuples = (operation == CMD_SELECT ||
350                                   queryDesc->plannedstmt->hasReturning);
351
352         if (sendTuples)
353                 dest->rStartup(dest, operation, queryDesc->tupDesc);
354
355         /*
356          * run plan
357          */
358         if (!ScanDirectionIsNoMovement(direction))
359         {
360                 if (execute_once && queryDesc->already_executed)
361                         elog(ERROR, "can't re-execute query flagged for single execution");
362                 queryDesc->already_executed = true;
363
364                 ExecutePlan(estate,
365                                         queryDesc->planstate,
366                                         queryDesc->plannedstmt->parallelModeNeeded,
367                                         operation,
368                                         sendTuples,
369                                         count,
370                                         direction,
371                                         dest,
372                                         execute_once);
373         }
374
375         /*
376          * shutdown tuple receiver, if we started it
377          */
378         if (sendTuples)
379                 dest->rShutdown(dest);
380
381         if (queryDesc->totaltime)
382                 InstrStopNode(queryDesc->totaltime, estate->es_processed);
383
384         MemoryContextSwitchTo(oldcontext);
385 }
386
387 /* ----------------------------------------------------------------
388  *              ExecutorFinish
389  *
390  *              This routine must be called after the last ExecutorRun call.
391  *              It performs cleanup such as firing AFTER triggers.  It is
392  *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
393  *              include these actions in the total runtime.
394  *
395  *              We provide a function hook variable that lets loadable plugins
396  *              get control when ExecutorFinish is called.  Such a plugin would
397  *              normally call standard_ExecutorFinish().
398  *
399  * ----------------------------------------------------------------
400  */
401 void
402 ExecutorFinish(QueryDesc *queryDesc)
403 {
404         if (ExecutorFinish_hook)
405                 (*ExecutorFinish_hook) (queryDesc);
406         else
407                 standard_ExecutorFinish(queryDesc);
408 }
409
410 void
411 standard_ExecutorFinish(QueryDesc *queryDesc)
412 {
413         EState     *estate;
414         MemoryContext oldcontext;
415
416         /* sanity checks */
417         Assert(queryDesc != NULL);
418
419         estate = queryDesc->estate;
420
421         Assert(estate != NULL);
422         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
423
424         /* This should be run once and only once per Executor instance */
425         Assert(!estate->es_finished);
426
427         /* Switch into per-query memory context */
428         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
429
430         /* Allow instrumentation of Executor overall runtime */
431         if (queryDesc->totaltime)
432                 InstrStartNode(queryDesc->totaltime);
433
434         /* Run ModifyTable nodes to completion */
435         ExecPostprocessPlan(estate);
436
437         /* Execute queued AFTER triggers, unless told not to */
438         if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
439                 AfterTriggerEndQuery(estate);
440
441         if (queryDesc->totaltime)
442                 InstrStopNode(queryDesc->totaltime, 0);
443
444         MemoryContextSwitchTo(oldcontext);
445
446         estate->es_finished = true;
447 }
448
449 /* ----------------------------------------------------------------
450  *              ExecutorEnd
451  *
452  *              This routine must be called at the end of execution of any
453  *              query plan
454  *
455  *              We provide a function hook variable that lets loadable plugins
456  *              get control when ExecutorEnd is called.  Such a plugin would
457  *              normally call standard_ExecutorEnd().
458  *
459  * ----------------------------------------------------------------
460  */
461 void
462 ExecutorEnd(QueryDesc *queryDesc)
463 {
464         if (ExecutorEnd_hook)
465                 (*ExecutorEnd_hook) (queryDesc);
466         else
467                 standard_ExecutorEnd(queryDesc);
468 }
469
470 void
471 standard_ExecutorEnd(QueryDesc *queryDesc)
472 {
473         EState     *estate;
474         MemoryContext oldcontext;
475
476         /* sanity checks */
477         Assert(queryDesc != NULL);
478
479         estate = queryDesc->estate;
480
481         Assert(estate != NULL);
482
483         /*
484          * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
485          * Assert is needed because ExecutorFinish is new as of 9.1, and callers
486          * might forget to call it.
487          */
488         Assert(estate->es_finished ||
489                    (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
490
491         /*
492          * Switch into per-query memory context to run ExecEndPlan
493          */
494         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
495
496         ExecEndPlan(queryDesc->planstate, estate);
497
498         /* do away with our snapshots */
499         UnregisterSnapshot(estate->es_snapshot);
500         UnregisterSnapshot(estate->es_crosscheck_snapshot);
501
502         /*
503          * Must switch out of context before destroying it
504          */
505         MemoryContextSwitchTo(oldcontext);
506
507         /*
508          * Release EState and per-query memory context.  This should release
509          * everything the executor has allocated.
510          */
511         FreeExecutorState(estate);
512
513         /* Reset queryDesc fields that no longer point to anything */
514         queryDesc->tupDesc = NULL;
515         queryDesc->estate = NULL;
516         queryDesc->planstate = NULL;
517         queryDesc->totaltime = NULL;
518 }
519
520 /* ----------------------------------------------------------------
521  *              ExecutorRewind
522  *
523  *              This routine may be called on an open queryDesc to rewind it
524  *              to the start.
525  * ----------------------------------------------------------------
526  */
527 void
528 ExecutorRewind(QueryDesc *queryDesc)
529 {
530         EState     *estate;
531         MemoryContext oldcontext;
532
533         /* sanity checks */
534         Assert(queryDesc != NULL);
535
536         estate = queryDesc->estate;
537
538         Assert(estate != NULL);
539
540         /* It's probably not sensible to rescan updating queries */
541         Assert(queryDesc->operation == CMD_SELECT);
542
543         /*
544          * Switch into per-query memory context
545          */
546         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
547
548         /*
549          * rescan plan
550          */
551         ExecReScan(queryDesc->planstate);
552
553         MemoryContextSwitchTo(oldcontext);
554 }
555
556
557 /*
558  * ExecCheckRTPerms
559  *              Check access permissions for all relations listed in a range table.
560  *
561  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
562  * error if ereport_on_violation is true, or simply returns false otherwise.
563  *
564  * Note that this does NOT address row level security policies (aka: RLS).  If
565  * rows will be returned to the user as a result of this permission check
566  * passing, then RLS also needs to be consulted (and check_enable_rls()).
567  *
568  * See rewrite/rowsecurity.c.
569  */
570 bool
571 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
572 {
573         ListCell   *l;
574         bool            result = true;
575
576         foreach(l, rangeTable)
577         {
578                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
579
580                 result = ExecCheckRTEPerms(rte);
581                 if (!result)
582                 {
583                         Assert(rte->rtekind == RTE_RELATION);
584                         if (ereport_on_violation)
585                                 aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
586                                                            get_rel_name(rte->relid));
587                         return false;
588                 }
589         }
590
591         if (ExecutorCheckPerms_hook)
592                 result = (*ExecutorCheckPerms_hook) (rangeTable,
593                                                                                          ereport_on_violation);
594         return result;
595 }
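/*
 * A minimal sketch of an ExecutorCheckPerms_hook implementation: a plugin
 * that vetoes access to one particular relation.  The names and the
 * hard-coded OID are hypothetical, and the block is compiled out by default.
 */
#ifdef EXECMAIN_PERMS_HOOK_SKETCH
static bool
sketch_ExecutorCheckPerms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *lc;

	foreach(lc, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);

		if (rte->rtekind == RTE_RELATION && rte->relid == 12345)
		{
			if (ereport_on_violation)
				ereport(ERROR,
						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
						 errmsg("access to relation %u is not allowed", rte->relid)));
			return false;
		}
	}
	return true;
}
#endif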
596
597 /*
598  * ExecCheckRTEPerms
599  *              Check access permissions for a single RTE.
600  */
601 static bool
602 ExecCheckRTEPerms(RangeTblEntry *rte)
603 {
604         AclMode         requiredPerms;
605         AclMode         relPerms;
606         AclMode         remainingPerms;
607         Oid                     relOid;
608         Oid                     userid;
609
610         /*
611          * Only plain-relation RTEs need to be checked here.  Function RTEs are
612          * checked when the function is prepared for execution.  Join, subquery,
613          * and special RTEs need no checks.
614          */
615         if (rte->rtekind != RTE_RELATION)
616                 return true;
617
618         /*
619          * No work if requiredPerms is empty.
620          */
621         requiredPerms = rte->requiredPerms;
622         if (requiredPerms == 0)
623                 return true;
624
625         relOid = rte->relid;
626
627         /*
628          * userid to check as: current user unless we have a setuid indication.
629          *
630          * Note: GetUserId() is presently fast enough that there's no harm in
631          * calling it separately for each RTE.  If that stops being true, we could
632          * call it once in ExecCheckRTPerms and pass the userid down from there.
633          * But for now, no need for the extra clutter.
634          */
635         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
636
637         /*
638          * We must have *all* the requiredPerms bits, but some of the bits can be
639          * satisfied from column-level rather than relation-level permissions.
640          * First, remove any bits that are satisfied by relation permissions.
641          */
642         relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
643         remainingPerms = requiredPerms & ~relPerms;
644         if (remainingPerms != 0)
645         {
646                 int                     col = -1;
647
648                 /*
649                  * If we lack any permissions that exist only as relation permissions,
650                  * we can fail straight away.
651                  */
652                 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
653                         return false;
654
655                 /*
656                  * Check to see if we have the needed privileges at column level.
657                  *
658                  * Note: failures just report a table-level error; it would be nicer
659                  * to report a column-level error if we have some but not all of the
660                  * column privileges.
661                  */
662                 if (remainingPerms & ACL_SELECT)
663                 {
664                         /*
665                          * When the query doesn't explicitly reference any columns (for
666                          * example, SELECT COUNT(*) FROM table), allow the query if we
667                          * have SELECT on any column of the rel, as per SQL spec.
668                          */
669                         if (bms_is_empty(rte->selectedCols))
670                         {
671                                 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
672                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
673                                         return false;
674                         }
675
676                         while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
677                         {
678                                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
679                                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
680
681                                 if (attno == InvalidAttrNumber)
682                                 {
683                                         /* Whole-row reference, must have priv on all cols */
684                                         if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
685                                                                                                   ACLMASK_ALL) != ACLCHECK_OK)
686                                                 return false;
687                                 }
688                                 else
689                                 {
690                                         if (pg_attribute_aclcheck(relOid, attno, userid,
691                                                                                           ACL_SELECT) != ACLCHECK_OK)
692                                                 return false;
693                                 }
694                         }
695                 }
696
697                 /*
698                  * Basically the same for the mod columns, for both INSERT and UPDATE
699                  * privilege as specified by remainingPerms.
700                  */
701                 if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
702                                                                                                                                           userid,
703                                                                                                                                           rte->insertedCols,
704                                                                                                                                           ACL_INSERT))
705                         return false;
706
707                 if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
708                                                                                                                                           userid,
709                                                                                                                                           rte->updatedCols,
710                                                                                                                                           ACL_UPDATE))
711                         return false;
712         }
713         return true;
714 }
715
716 /*
717  * ExecCheckRTEPermsModified
718  *              Check INSERT or UPDATE access permissions for a single RTE (these
719  *              are processed uniformly).
720  */
721 static bool
722 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
723                                                   AclMode requiredPerms)
724 {
725         int                     col = -1;
726
727         /*
728          * When the query doesn't explicitly update any columns, allow the query
729          * if we have permission on any column of the rel.  This is to handle
730          * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
731          */
732         if (bms_is_empty(modifiedCols))
733         {
734                 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
735                                                                           ACLMASK_ANY) != ACLCHECK_OK)
736                         return false;
737         }
738
739         while ((col = bms_next_member(modifiedCols, col)) >= 0)
740         {
741                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
742                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
743
744                 if (attno == InvalidAttrNumber)
745                 {
746                         /* whole-row reference can't happen here */
747                         elog(ERROR, "whole-row update is not implemented");
748                 }
749                 else
750                 {
751                         if (pg_attribute_aclcheck(relOid, attno, userid,
752                                                                           requiredPerms) != ACLCHECK_OK)
753                                 return false;
754                 }
755         }
756         return true;
757 }
758
759 /*
760  * Check that the query does not imply any writes to non-temp tables;
761  * unless we're in parallel mode, in which case don't even allow writes
762  * to temp tables.
763  *
764  * Note: in a Hot Standby this would need to reject writes to temp
765  * tables just as we do in parallel mode; but an HS standby can't have created
766  * any temp tables in the first place, so no need to check that.
767  */
768 static void
769 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
770 {
771         ListCell   *l;
772
773         /*
774          * Fail if write permissions are requested in parallel mode for table
775          * (temp or non-temp), otherwise fail for any non-temp table.
776          */
777         foreach(l, plannedstmt->rtable)
778         {
779                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
780
781                 if (rte->rtekind != RTE_RELATION)
782                         continue;
783
784                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
785                         continue;
786
787                 if (isTempNamespace(get_rel_namespace(rte->relid)))
788                         continue;
789
790                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
791         }
792
793         if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
794                 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
795 }
796
797
798 /* ----------------------------------------------------------------
799  *              InitPlan
800  *
801  *              Initializes the query plan: open files, allocate storage
802  *              and start up the rule manager
803  * ----------------------------------------------------------------
804  */
805 static void
806 InitPlan(QueryDesc *queryDesc, int eflags)
807 {
808         CmdType         operation = queryDesc->operation;
809         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
810         Plan       *plan = plannedstmt->planTree;
811         List       *rangeTable = plannedstmt->rtable;
812         EState     *estate = queryDesc->estate;
813         PlanState  *planstate;
814         TupleDesc       tupType;
815         ListCell   *l;
816         int                     i;
817
818         /*
819          * Do permissions checks
820          */
821         ExecCheckRTPerms(rangeTable, true);
822
823         /*
824          * initialize the node's execution state
825          */
826         ExecInitRangeTable(estate, rangeTable);
827
828         estate->es_plannedstmt = plannedstmt;
829
830         /*
831          * Initialize ResultRelInfo data structures, and open the result rels.
832          */
833         if (plannedstmt->resultRelations)
834         {
835                 List       *resultRelations = plannedstmt->resultRelations;
836                 int                     numResultRelations = list_length(resultRelations);
837                 ResultRelInfo *resultRelInfos;
838                 ResultRelInfo *resultRelInfo;
839
840                 resultRelInfos = (ResultRelInfo *)
841                         palloc(numResultRelations * sizeof(ResultRelInfo));
842                 resultRelInfo = resultRelInfos;
843                 foreach(l, resultRelations)
844                 {
845                         Index           resultRelationIndex = lfirst_int(l);
846                         Relation        resultRelation;
847
848                         resultRelation = ExecGetRangeTableRelation(estate,
849                                                                                                            resultRelationIndex);
850                         InitResultRelInfo(resultRelInfo,
851                                                           resultRelation,
852                                                           resultRelationIndex,
853                                                           NULL,
854                                                           estate->es_instrument);
855                         resultRelInfo++;
856                 }
857                 estate->es_result_relations = resultRelInfos;
858                 estate->es_num_result_relations = numResultRelations;
859
860                 /* es_result_relation_info is NULL except when within ModifyTable */
861                 estate->es_result_relation_info = NULL;
862
863                 /*
864                  * In the partitioned result relation case, also build ResultRelInfos
865                  * for all the partitioned table roots, because we will need them to
866                  * fire statement-level triggers, if any.
867                  */
868                 if (plannedstmt->rootResultRelations)
869                 {
870                         int                     num_roots = list_length(plannedstmt->rootResultRelations);
871
872                         resultRelInfos = (ResultRelInfo *)
873                                 palloc(num_roots * sizeof(ResultRelInfo));
874                         resultRelInfo = resultRelInfos;
875                         foreach(l, plannedstmt->rootResultRelations)
876                         {
877                                 Index           resultRelIndex = lfirst_int(l);
878                                 Relation        resultRelDesc;
879
880                                 resultRelDesc = ExecGetRangeTableRelation(estate,
881                                                                                                                   resultRelIndex);
882                                 InitResultRelInfo(resultRelInfo,
883                                                                   resultRelDesc,
884                                                                   resultRelIndex,
885                                                                   NULL,
886                                                                   estate->es_instrument);
887                                 resultRelInfo++;
888                         }
889
890                         estate->es_root_result_relations = resultRelInfos;
891                         estate->es_num_root_result_relations = num_roots;
892                 }
893                 else
894                 {
895                         estate->es_root_result_relations = NULL;
896                         estate->es_num_root_result_relations = 0;
897                 }
898         }
899         else
900         {
901                 /*
902                  * if no result relation, then set state appropriately
903                  */
904                 estate->es_result_relations = NULL;
905                 estate->es_num_result_relations = 0;
906                 estate->es_result_relation_info = NULL;
907                 estate->es_root_result_relations = NULL;
908                 estate->es_num_root_result_relations = 0;
909         }
910
911         /*
912          * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
913          */
914         if (plannedstmt->rowMarks)
915         {
916                 estate->es_rowmarks = (ExecRowMark **)
917                         palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
918                 foreach(l, plannedstmt->rowMarks)
919                 {
920                         PlanRowMark *rc = (PlanRowMark *) lfirst(l);
921                         Oid                     relid;
922                         Relation        relation;
923                         ExecRowMark *erm;
924
925                         /* ignore "parent" rowmarks; they are irrelevant at runtime */
926                         if (rc->isParent)
927                                 continue;
928
929                         /* get relation's OID (will produce InvalidOid if subquery) */
930                         relid = exec_rt_fetch(rc->rti, estate)->relid;
931
932                         /* open relation, if we need to access it for this mark type */
933                         switch (rc->markType)
934                         {
935                                 case ROW_MARK_EXCLUSIVE:
936                                 case ROW_MARK_NOKEYEXCLUSIVE:
937                                 case ROW_MARK_SHARE:
938                                 case ROW_MARK_KEYSHARE:
939                                 case ROW_MARK_REFERENCE:
940                                         relation = ExecGetRangeTableRelation(estate, rc->rti);
941                                         break;
942                                 case ROW_MARK_COPY:
943                                         /* no physical table access is required */
944                                         relation = NULL;
945                                         break;
946                                 default:
947                                         elog(ERROR, "unrecognized markType: %d", rc->markType);
948                                         relation = NULL;        /* keep compiler quiet */
949                                         break;
950                         }
951
952                         /* Check that relation is a legal target for marking */
953                         if (relation)
954                                 CheckValidRowMarkRel(relation, rc->markType);
955
956                         erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
957                         erm->relation = relation;
958                         erm->relid = relid;
959                         erm->rti = rc->rti;
960                         erm->prti = rc->prti;
961                         erm->rowmarkId = rc->rowmarkId;
962                         erm->markType = rc->markType;
963                         erm->strength = rc->strength;
964                         erm->waitPolicy = rc->waitPolicy;
965                         erm->ermActive = false;
966                         ItemPointerSetInvalid(&(erm->curCtid));
967                         erm->ermExtra = NULL;
968
969                         Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
970                                    estate->es_rowmarks[erm->rti - 1] == NULL);
971
972                         estate->es_rowmarks[erm->rti - 1] = erm;
973                 }
974         }
975
976         /*
977          * Initialize the executor's tuple table to empty.
978          */
979         estate->es_tupleTable = NIL;
980
981         /* signal that this EState is not used for EPQ */
982         estate->es_epq_active = NULL;
983
984         /*
985          * Initialize private state information for each SubPlan.  We must do this
986          * before running ExecInitNode on the main query tree, since
987          * ExecInitSubPlan expects to be able to find these entries.
988          */
989         Assert(estate->es_subplanstates == NIL);
990         i = 1;                                          /* subplan indices count from 1 */
991         foreach(l, plannedstmt->subplans)
992         {
993                 Plan       *subplan = (Plan *) lfirst(l);
994                 PlanState  *subplanstate;
995                 int                     sp_eflags;
996
997                 /*
998                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
999                  * it is a parameterless subplan (not initplan), we suggest that it be
1000                  * prepared to handle REWIND efficiently; otherwise there is no need.
1001                  */
1002                 sp_eflags = eflags
1003                         & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
1004                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
1005                         sp_eflags |= EXEC_FLAG_REWIND;
1006
1007                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
1008
1009                 estate->es_subplanstates = lappend(estate->es_subplanstates,
1010                                                                                    subplanstate);
1011
1012                 i++;
1013         }
1014
1015         /*
1016          * Initialize the private state information for all the nodes in the query
1017          * tree.  This opens files, allocates storage and leaves us ready to start
1018          * processing tuples.
1019          */
1020         planstate = ExecInitNode(plan, estate, eflags);
1021
1022         /*
1023          * Get the tuple descriptor describing the type of tuples to return.
1024          */
1025         tupType = ExecGetResultType(planstate);
1026
1027         /*
1028          * Initialize the junk filter if needed.  SELECT queries need a filter if
1029          * there are any junk attrs in the top-level tlist.
1030          */
1031         if (operation == CMD_SELECT)
1032         {
1033                 bool            junk_filter_needed = false;
1034                 ListCell   *tlist;
1035
1036                 foreach(tlist, plan->targetlist)
1037                 {
1038                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1039
1040                         if (tle->resjunk)
1041                         {
1042                                 junk_filter_needed = true;
1043                                 break;
1044                         }
1045                 }
1046
1047                 if (junk_filter_needed)
1048                 {
1049                         JunkFilter *j;
1050                         TupleTableSlot *slot;
1051
1052                         slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
1053                         j = ExecInitJunkFilter(planstate->plan->targetlist,
1054                                                                    slot);
1055                         estate->es_junkFilter = j;
1056
1057                         /* Want to return the cleaned tuple type */
1058                         tupType = j->jf_cleanTupType;
1059                 }
1060         }
1061
1062         queryDesc->tupDesc = tupType;
1063         queryDesc->planstate = planstate;
1064 }
1065
1066 /*
1067  * Check that a proposed result relation is a legal target for the operation
1068  *
1069  * Generally the parser and/or planner should have noticed any such mistake
1070  * already, but let's make sure.
1071  *
1072  * Note: when changing this function, you probably also need to look at
1073  * CheckValidRowMarkRel.
1074  */
1075 void
1076 CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
1077 {
1078         Relation        resultRel = resultRelInfo->ri_RelationDesc;
1079         TriggerDesc *trigDesc = resultRel->trigdesc;
1080         FdwRoutine *fdwroutine;
1081
1082         switch (resultRel->rd_rel->relkind)
1083         {
1084                 case RELKIND_RELATION:
1085                 case RELKIND_PARTITIONED_TABLE:
1086                         CheckCmdReplicaIdentity(resultRel, operation);
1087                         break;
1088                 case RELKIND_SEQUENCE:
1089                         ereport(ERROR,
1090                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1091                                          errmsg("cannot change sequence \"%s\"",
1092                                                         RelationGetRelationName(resultRel))));
1093                         break;
1094                 case RELKIND_TOASTVALUE:
1095                         ereport(ERROR,
1096                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1097                                          errmsg("cannot change TOAST relation \"%s\"",
1098                                                         RelationGetRelationName(resultRel))));
1099                         break;
1100                 case RELKIND_VIEW:
1101
1102                         /*
1103                          * Okay only if there's a suitable INSTEAD OF trigger.  Messages
1104                          * here should match rewriteHandler.c's rewriteTargetView, except
1105                          * that we omit errdetail because we haven't got the information
1106                          * handy (and given that we really shouldn't get here anyway, it's
1107                          * not worth great exertion to get).
1108                          */
1109                         switch (operation)
1110                         {
1111                                 case CMD_INSERT:
1112                                         if (!trigDesc || !trigDesc->trig_insert_instead_row)
1113                                                 ereport(ERROR,
1114                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1115                                                                  errmsg("cannot insert into view \"%s\"",
1116                                                                                 RelationGetRelationName(resultRel)),
1117                                                                  errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1118                                         break;
1119                                 case CMD_UPDATE:
1120                                         if (!trigDesc || !trigDesc->trig_update_instead_row)
1121                                                 ereport(ERROR,
1122                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1123                                                                  errmsg("cannot update view \"%s\"",
1124                                                                                 RelationGetRelationName(resultRel)),
1125                                                                  errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1126                                         break;
1127                                 case CMD_DELETE:
1128                                         if (!trigDesc || !trigDesc->trig_delete_instead_row)
1129                                                 ereport(ERROR,
1130                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1131                                                                  errmsg("cannot delete from view \"%s\"",
1132                                                                                 RelationGetRelationName(resultRel)),
1133                                                                  errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1134                                         break;
1135                                 default:
1136                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1137                                         break;
1138                         }
1139                         break;
1140                 case RELKIND_MATVIEW:
1141                         if (!MatViewIncrementalMaintenanceIsEnabled())
1142                                 ereport(ERROR,
1143                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1144                                                  errmsg("cannot change materialized view \"%s\"",
1145                                                                 RelationGetRelationName(resultRel))));
1146                         break;
1147                 case RELKIND_FOREIGN_TABLE:
1148                         /* Okay only if the FDW supports it */
1149                         fdwroutine = resultRelInfo->ri_FdwRoutine;
1150                         switch (operation)
1151                         {
1152                                 case CMD_INSERT:
1153                                         if (fdwroutine->ExecForeignInsert == NULL)
1154                                                 ereport(ERROR,
1155                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1156                                                                  errmsg("cannot insert into foreign table \"%s\"",
1157                                                                                 RelationGetRelationName(resultRel))));
1158                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1159                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1160                                                 ereport(ERROR,
1161                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1162                                                                  errmsg("foreign table \"%s\" does not allow inserts",
1163                                                                                 RelationGetRelationName(resultRel))));
1164                                         break;
1165                                 case CMD_UPDATE:
1166                                         if (fdwroutine->ExecForeignUpdate == NULL)
1167                                                 ereport(ERROR,
1168                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1169                                                                  errmsg("cannot update foreign table \"%s\"",
1170                                                                                 RelationGetRelationName(resultRel))));
1171                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1172                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1173                                                 ereport(ERROR,
1174                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1175                                                                  errmsg("foreign table \"%s\" does not allow updates",
1176                                                                                 RelationGetRelationName(resultRel))));
1177                                         break;
1178                                 case CMD_DELETE:
1179                                         if (fdwroutine->ExecForeignDelete == NULL)
1180                                                 ereport(ERROR,
1181                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1182                                                                  errmsg("cannot delete from foreign table \"%s\"",
1183                                                                                 RelationGetRelationName(resultRel))));
1184                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1185                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1186                                                 ereport(ERROR,
1187                                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1188                                                                  errmsg("foreign table \"%s\" does not allow deletes",
1189                                                                                 RelationGetRelationName(resultRel))));
1190                                         break;
1191                                 default:
1192                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1193                                         break;
1194                         }
1195                         break;
1196                 default:
1197                         ereport(ERROR,
1198                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1199                                          errmsg("cannot change relation \"%s\"",
1200                                                         RelationGetRelationName(resultRel))));
1201                         break;
1202         }
1203 }
1204
1205 /*
1206  * Check that a proposed rowmark target relation is a legal target
1207  *
1208  * In most cases parser and/or planner should have noticed this already, but
1209  * they don't cover all cases.
1210  */
1211 static void
1212 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1213 {
1214         FdwRoutine *fdwroutine;
1215
1216         switch (rel->rd_rel->relkind)
1217         {
1218                 case RELKIND_RELATION:
1219                 case RELKIND_PARTITIONED_TABLE:
1220                         /* OK */
1221                         break;
1222                 case RELKIND_SEQUENCE:
1223                         /* Must disallow this because we don't vacuum sequences */
1224                         ereport(ERROR,
1225                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1226                                          errmsg("cannot lock rows in sequence \"%s\"",
1227                                                         RelationGetRelationName(rel))));
1228                         break;
1229                 case RELKIND_TOASTVALUE:
1230                         /* We could allow this, but there seems no good reason to */
1231                         ereport(ERROR,
1232                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1233                                          errmsg("cannot lock rows in TOAST relation \"%s\"",
1234                                                         RelationGetRelationName(rel))));
1235                         break;
1236                 case RELKIND_VIEW:
1237                         /* Should not get here; planner should have expanded the view */
1238                         ereport(ERROR,
1239                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1240                                          errmsg("cannot lock rows in view \"%s\"",
1241                                                         RelationGetRelationName(rel))));
1242                         break;
1243                 case RELKIND_MATVIEW:
1244                         /* Allow referencing a matview, but not actual locking clauses */
1245                         if (markType != ROW_MARK_REFERENCE)
1246                                 ereport(ERROR,
1247                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1248                                                  errmsg("cannot lock rows in materialized view \"%s\"",
1249                                                                 RelationGetRelationName(rel))));
1250                         break;
1251                 case RELKIND_FOREIGN_TABLE:
1252                         /* Okay only if the FDW supports it */
1253                         fdwroutine = GetFdwRoutineForRelation(rel, false);
1254                         if (fdwroutine->RefetchForeignRow == NULL)
1255                                 ereport(ERROR,
1256                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1257                                                  errmsg("cannot lock rows in foreign table \"%s\"",
1258                                                                 RelationGetRelationName(rel))));
1259                         break;
1260                 default:
1261                         ereport(ERROR,
1262                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1263                                          errmsg("cannot lock rows in relation \"%s\"",
1264                                                         RelationGetRelationName(rel))));
1265                         break;
1266         }
1267 }
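
/*
 * For illustration: a statement such as
 *
 *              SELECT * FROM some_matview FOR UPDATE;
 *
 * uses a true locking mark rather than ROW_MARK_REFERENCE, so if it reaches
 * the executor it takes the RELKIND_MATVIEW branch above and fails.  (Which
 * statements actually get this far, rather than being rejected earlier by the
 * parser or planner, is not established here; this check is only a backstop.)
 */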
1268
1269 /*
1270  * Initialize ResultRelInfo data for one result relation
1271  *
1272  * Caution: before Postgres 9.1, this function included the relkind checking
1273  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1274  * appropriate.  Be sure callers cover those needs.
1275  */
1276 void
1277 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1278                                   Relation resultRelationDesc,
1279                                   Index resultRelationIndex,
1280                                   Relation partition_root,
1281                                   int instrument_options)
1282 {
1283         List       *partition_check = NIL;
1284
1285         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1286         resultRelInfo->type = T_ResultRelInfo;
1287         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1288         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1289         resultRelInfo->ri_NumIndices = 0;
1290         resultRelInfo->ri_IndexRelationDescs = NULL;
1291         resultRelInfo->ri_IndexRelationInfo = NULL;
1292         /* make a copy so as not to depend on relcache info not changing... */
1293         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1294         if (resultRelInfo->ri_TrigDesc)
1295         {
1296                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1297
1298                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1299                         palloc0(n * sizeof(FmgrInfo));
1300                 resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1301                         palloc0(n * sizeof(ExprState *));
1302                 if (instrument_options)
1303                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1304         }
1305         else
1306         {
1307                 resultRelInfo->ri_TrigFunctions = NULL;
1308                 resultRelInfo->ri_TrigWhenExprs = NULL;
1309                 resultRelInfo->ri_TrigInstrument = NULL;
1310         }
1311         if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1312                 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1313         else
1314                 resultRelInfo->ri_FdwRoutine = NULL;
1315
1316         /* The following fields are set later if needed */
1317         resultRelInfo->ri_FdwState = NULL;
1318         resultRelInfo->ri_usesFdwDirectModify = false;
1319         resultRelInfo->ri_ConstraintExprs = NULL;
1320         resultRelInfo->ri_GeneratedExprs = NULL;
1321         resultRelInfo->ri_junkFilter = NULL;
1322         resultRelInfo->ri_projectReturning = NULL;
1323         resultRelInfo->ri_onConflictArbiterIndexes = NIL;
1324         resultRelInfo->ri_onConflict = NULL;
1325         resultRelInfo->ri_ReturningSlot = NULL;
1326         resultRelInfo->ri_TrigOldSlot = NULL;
1327         resultRelInfo->ri_TrigNewSlot = NULL;
1328
1329         /*
1330          * Partition constraint, which also includes the partition constraint of
1331          * all the ancestors that are partitions.  Note that it will be checked
1332          * even in the case of tuple-routing where this table is the target leaf
1333          * partition, if there are any BR triggers defined on the table.  Although
1334          * tuple-routing implicitly preserves the partition constraint of the
1335          * target partition for a given row, the BR triggers may change the row
1336          * such that the constraint is no longer satisfied, which we must catch
1337          * by checking the constraint explicitly.
1338          *
1339          * If this is a partitioned table, the partition constraint (if any) of a
1340          * given row will be checked just before performing tuple-routing.
1341          */
1342         partition_check = RelationGetPartitionQual(resultRelationDesc);
1343
1344         resultRelInfo->ri_PartitionCheck = partition_check;
1345         resultRelInfo->ri_PartitionRoot = partition_root;
1346         resultRelInfo->ri_PartitionInfo = NULL; /* may be set later */
1347         resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
1348 }
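
/*
 * A minimal call pattern, mirroring the trigger-target case below (rangetable
 * index 0 and a NULL partition root are the "dummy" values used when the
 * relation is not a named result relation of the query):
 *
 *              ResultRelInfo *rInfo = makeNode(ResultRelInfo);
 *
 *              InitResultRelInfo(rInfo,
 *                                                rel,          (already-opened Relation)
 *                                                0,            (dummy rangetable index)
 *                                                NULL,         (no partition root)
 *                                                estate->es_instrument);
 */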
1349
1350 /*
1351  * ExecGetTriggerResultRel
1352  *              Get a ResultRelInfo for a trigger target relation.
1353  *
1354  * Most of the time, triggers are fired on one of the result relations of the
1355  * query, and so we can just return a member of the es_result_relations array,
1356  * or the es_root_result_relations array (if any), or the
1357  * es_tuple_routing_result_relations list (if any).  (Note: in self-join
1358  * situations there might be multiple members with the same OID; if so it
1359  * doesn't matter which one we pick.)
1360  *
1361  * However, it is sometimes necessary to fire triggers on other relations;
1362  * this happens mainly when an RI update trigger queues additional triggers
1363  * on other relations, which will be processed in the context of the outer
1364  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
1365  * triggers too; that can avoid repeated re-opening of the relation.  (It
1366  * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1367  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
1368  * in es_trig_target_relations.
1369  */
1370 ResultRelInfo *
1371 ExecGetTriggerResultRel(EState *estate, Oid relid)
1372 {
1373         ResultRelInfo *rInfo;
1374         int                     nr;
1375         ListCell   *l;
1376         Relation        rel;
1377         MemoryContext oldcontext;
1378
1379         /* First, search through the query result relations */
1380         rInfo = estate->es_result_relations;
1381         nr = estate->es_num_result_relations;
1382         while (nr > 0)
1383         {
1384                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1385                         return rInfo;
1386                 rInfo++;
1387                 nr--;
1388         }
1389         /* Second, search through the root result relations, if any */
1390         rInfo = estate->es_root_result_relations;
1391         nr = estate->es_num_root_result_relations;
1392         while (nr > 0)
1393         {
1394                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1395                         return rInfo;
1396                 rInfo++;
1397                 nr--;
1398         }
1399
1400         /*
1401          * Third, search through the result relations that were created during
1402          * tuple routing, if any.
1403          */
1404         foreach(l, estate->es_tuple_routing_result_relations)
1405         {
1406                 rInfo = (ResultRelInfo *) lfirst(l);
1407                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1408                         return rInfo;
1409         }
1410
1411         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1412         foreach(l, estate->es_trig_target_relations)
1413         {
1414                 rInfo = (ResultRelInfo *) lfirst(l);
1415                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1416                         return rInfo;
1417         }
1418         /* Nope, so we need a new one */
1419
1420         /*
1421          * Open the target relation's relcache entry.  We assume that an
1422          * appropriate lock is still held by the backend from whenever the trigger
1423          * event got queued, so we need take no new lock here.  Also, we need not
1424          * recheck the relkind, so no need for CheckValidResultRel.
1425          */
1426         rel = table_open(relid, NoLock);
1427
1428         /*
1429          * Make the new entry in the right context.
1430          */
1431         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1432         rInfo = makeNode(ResultRelInfo);
1433         InitResultRelInfo(rInfo,
1434                                           rel,
1435                                           0,            /* dummy rangetable index */
1436                                           NULL,
1437                                           estate->es_instrument);
1438         estate->es_trig_target_relations =
1439                 lappend(estate->es_trig_target_relations, rInfo);
1440         MemoryContextSwitchTo(oldcontext);
1441
1442         /*
1443          * Currently, we don't need any index information in ResultRelInfos used
1444          * only for triggers, so no need to call ExecOpenIndices.
1445          */
1446
1447         return rInfo;
1448 }
1449
1450 /*
1451  * Close any relations that have been opened by ExecGetTriggerResultRel().
1452  */
1453 void
1454 ExecCleanUpTriggerState(EState *estate)
1455 {
1456         ListCell   *l;
1457
1458         foreach(l, estate->es_trig_target_relations)
1459         {
1460                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1461
1462                 /*
1463                  * Assert this is a "dummy" ResultRelInfo, see above.  Otherwise we
1464                  * might be issuing a duplicate close against a Relation opened by
1465                  * ExecGetRangeTableRelation.
1466                  */
1467                 Assert(resultRelInfo->ri_RangeTableIndex == 0);
1468
1469                 /*
1470                  * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1471                  * these rels, we needn't call ExecCloseIndices either.
1472                  */
1473                 Assert(resultRelInfo->ri_NumIndices == 0);
1474
1475                 table_close(resultRelInfo->ri_RelationDesc, NoLock);
1476         }
1477 }
1478
1479 /* ----------------------------------------------------------------
1480  *              ExecPostprocessPlan
1481  *
1482  *              Give plan nodes a final chance to execute before shutdown
1483  * ----------------------------------------------------------------
1484  */
1485 static void
1486 ExecPostprocessPlan(EState *estate)
1487 {
1488         ListCell   *lc;
1489
1490         /*
1491          * Make sure nodes run forward.
1492          */
1493         estate->es_direction = ForwardScanDirection;
1494
1495         /*
1496          * Run any secondary ModifyTable nodes to completion, in case the main
1497          * query did not fetch all rows from them.  (We do this to ensure that
1498          * such nodes have predictable results.)
1499          */
1500         foreach(lc, estate->es_auxmodifytables)
1501         {
1502                 PlanState  *ps = (PlanState *) lfirst(lc);
1503
1504                 for (;;)
1505                 {
1506                         TupleTableSlot *slot;
1507
1508                         /* Reset the per-output-tuple exprcontext each time */
1509                         ResetPerTupleExprContext(estate);
1510
1511                         slot = ExecProcNode(ps);
1512
1513                         if (TupIsNull(slot))
1514                                 break;
1515                 }
1516         }
1517 }
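
/*
 * The typical source of es_auxmodifytables is a data-modifying CTE whose
 * result the main query does not fully read, for example (a sketch):
 *
 *              WITH moved AS (DELETE FROM src RETURNING *)
 *              SELECT 1;
 *
 * Running the DELETE's ModifyTable node to completion here ensures the CTE's
 * side effects happen even though the outer query never scanned "moved".
 */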
1518
1519 /* ----------------------------------------------------------------
1520  *              ExecEndPlan
1521  *
1522  *              Cleans up the query plan -- closes files and frees up storage
1523  *
1524  * NOTE: we are no longer very worried about freeing storage per se
1525  * in this code; FreeExecutorState should be guaranteed to release all
1526  * memory that needs to be released.  What we are worried about doing
1527  * is closing relations and dropping buffer pins.  Thus, for example,
1528  * tuple tables must be cleared or dropped to ensure pins are released.
1529  * ----------------------------------------------------------------
1530  */
1531 static void
1532 ExecEndPlan(PlanState *planstate, EState *estate)
1533 {
1534         ResultRelInfo *resultRelInfo;
1535         Index           num_relations;
1536         Index           i;
1537         ListCell   *l;
1538
1539         /*
1540          * shut down the node-type-specific query processing
1541          */
1542         ExecEndNode(planstate);
1543
1544         /*
1545          * for subplans too
1546          */
1547         foreach(l, estate->es_subplanstates)
1548         {
1549                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1550
1551                 ExecEndNode(subplanstate);
1552         }
1553
1554         /*
1555          * destroy the executor's tuple table.  Actually we only care about
1556          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1557          * the TupleTableSlots, since the containing memory context is about to go
1558          * away anyway.
1559          */
1560         ExecResetTupleTable(estate->es_tupleTable, false);
1561
1562         /*
1563          * close indexes of result relation(s) if any.  (Rels themselves get
1564          * closed next.)
1565          */
1566         resultRelInfo = estate->es_result_relations;
1567         for (i = estate->es_num_result_relations; i > 0; i--)
1568         {
1569                 ExecCloseIndices(resultRelInfo);
1570                 resultRelInfo++;
1571         }
1572
1573         /*
1574          * close whatever rangetable Relations have been opened.  We do not
1575          * release any locks we might hold on those rels.
1576          */
1577         num_relations = estate->es_range_table_size;
1578         for (i = 0; i < num_relations; i++)
1579         {
1580                 if (estate->es_relations[i])
1581                         table_close(estate->es_relations[i], NoLock);
1582         }
1583
1584         /* likewise close any trigger target relations */
1585         ExecCleanUpTriggerState(estate);
1586 }
1587
1588 /* ----------------------------------------------------------------
1589  *              ExecutePlan
1590  *
1591  *              Processes the query plan until we have retrieved 'numberTuples' tuples,
1592  *              moving in the specified direction.
1593  *
1594  *              Runs to completion if numberTuples is 0
1595  *
1596  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1597  * user can see it
1598  * ----------------------------------------------------------------
1599  */
1600 static void
1601 ExecutePlan(EState *estate,
1602                         PlanState *planstate,
1603                         bool use_parallel_mode,
1604                         CmdType operation,
1605                         bool sendTuples,
1606                         uint64 numberTuples,
1607                         ScanDirection direction,
1608                         DestReceiver *dest,
1609                         bool execute_once)
1610 {
1611         TupleTableSlot *slot;
1612         uint64          current_tuple_count;
1613
1614         /*
1615          * initialize local variables
1616          */
1617         current_tuple_count = 0;
1618
1619         /*
1620          * Set the direction.
1621          */
1622         estate->es_direction = direction;
1623
1624         /*
1625          * If the plan might potentially be executed multiple times, we must force
1626          * it to run without parallelism, because we might exit early.
1627          */
1628         if (!execute_once)
1629                 use_parallel_mode = false;
1630
1631         estate->es_use_parallel_mode = use_parallel_mode;
1632         if (use_parallel_mode)
1633                 EnterParallelMode();
1634
1635         /*
1636          * Loop until we've processed the proper number of tuples from the plan.
1637          */
1638         for (;;)
1639         {
1640                 /* Reset the per-output-tuple exprcontext */
1641                 ResetPerTupleExprContext(estate);
1642
1643                 /*
1644                  * Execute the plan and obtain a tuple
1645                  */
1646                 slot = ExecProcNode(planstate);
1647
1648                 /*
1649                  * if the tuple is null, then we assume there is nothing more to
1650                  * process so we just end the loop...
1651                  */
1652                 if (TupIsNull(slot))
1653                 {
1654                         /*
1655                          * If we know we won't need to back up, we can release resources
1656                          * at this point.
1657                          */
1658                         if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
1659                                 (void) ExecShutdownNode(planstate);
1660                         break;
1661                 }
1662
1663                 /*
1664                  * If we have a junk filter, then project a new tuple with the junk
1665                  * removed.
1666                  *
1667                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1668                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1669                  * because that tuple slot has the wrong descriptor.)
1670                  */
1671                 if (estate->es_junkFilter != NULL)
1672                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1673
1674                 /*
1675                  * If we are supposed to send the tuple somewhere, do so. (In
1676                  * practice, this is probably always the case at this point.)
1677                  */
1678                 if (sendTuples)
1679                 {
1680                         /*
1681                          * If we are not able to send the tuple, we assume the destination
1682                          * has closed and no more tuples can be sent. If that's the case,
1683                          * end the loop.
1684                          */
1685                         if (!dest->receiveSlot(slot, dest))
1686                                 break;
1687                 }
1688
1689                 /*
1690                  * Count tuples processed, if this is a SELECT.  (For other operation
1691                  * types, the ModifyTable plan node must count the appropriate
1692                  * events.)
1693                  */
1694                 if (operation == CMD_SELECT)
1695                         (estate->es_processed)++;
1696
1697                 /*
1698                  * check our tuple count; if we've processed the proper number then
1699                  * quit, else loop again and process more tuples.  Zero numberTuples
1700                  * means no limit.
1701                  */
1702                 current_tuple_count++;
1703                 if (numberTuples && numberTuples == current_tuple_count)
1704                 {
1705                         /*
1706                          * If we know we won't need to back up, we can release resources
1707                          * at this point.
1708                          */
1709                         if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
1710                                 (void) ExecShutdownNode(planstate);
1711                         break;
1712                 }
1713         }
1714
1715         if (use_parallel_mode)
1716                 ExitParallelMode();
1717 }
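
/*
 * Typical usage, for illustration: a cursor read in chunks reaches here
 * repeatedly with numberTuples set to the fetch count and execute_once false,
 * which is why parallelism is forced off above (we might stop early and be
 * called again later).  A query run to completion in a single call typically
 * passes numberTuples = 0 and execute_once = true.
 */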
1718
1719
1720 /*
1721  * ExecRelCheck --- check that tuple meets constraints for result relation
1722  *
1723  * Returns NULL if OK, else name of failed check constraint
1724  */
1725 static const char *
1726 ExecRelCheck(ResultRelInfo *resultRelInfo,
1727                          TupleTableSlot *slot, EState *estate)
1728 {
1729         Relation        rel = resultRelInfo->ri_RelationDesc;
1730         int                     ncheck = rel->rd_att->constr->num_check;
1731         ConstrCheck *check = rel->rd_att->constr->check;
1732         ExprContext *econtext;
1733         MemoryContext oldContext;
1734         int                     i;
1735
1736         /*
1737          * If first time through for this result relation, build expression
1738          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1739          * memory context so they'll survive throughout the query.
1740          */
1741         if (resultRelInfo->ri_ConstraintExprs == NULL)
1742         {
1743                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1744                 resultRelInfo->ri_ConstraintExprs =
1745                         (ExprState **) palloc(ncheck * sizeof(ExprState *));
1746                 for (i = 0; i < ncheck; i++)
1747                 {
1748                         Expr       *checkconstr;
1749
1750                         checkconstr = stringToNode(check[i].ccbin);
1751                         resultRelInfo->ri_ConstraintExprs[i] =
1752                                 ExecPrepareExpr(checkconstr, estate);
1753                 }
1754                 MemoryContextSwitchTo(oldContext);
1755         }
1756
1757         /*
1758          * We will use the EState's per-tuple context for evaluating constraint
1759          * expressions (creating it if it's not already there).
1760          */
1761         econtext = GetPerTupleExprContext(estate);
1762
1763         /* Arrange for econtext's scan tuple to be the tuple under test */
1764         econtext->ecxt_scantuple = slot;
1765
1766         /* And evaluate the constraints */
1767         for (i = 0; i < ncheck; i++)
1768         {
1769                 ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1770
1771                 /*
1772                  * NOTE: SQL specifies that a NULL result from a constraint expression
1773                  * is not to be treated as a failure.  Therefore, use ExecCheck not
1774                  * ExecQual.
1775                  */
1776                 if (!ExecCheck(checkconstr, econtext))
1777                         return check[i].ccname;
1778         }
1779
1780         /* NULL result means no error */
1781         return NULL;
1782 }
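
/*
 * Example of the NULL-is-not-failure rule above: with CHECK (price > 0), a
 * row whose price is NULL makes the expression evaluate to NULL, and per SQL
 * that counts as satisfying the constraint, so ExecCheck (unlike ExecQual)
 * does not report it as a failure.
 */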
1783
1784 /*
1785  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1786  *
1787  * Returns true if it meets the partition constraint.  If the constraint
1788  * fails and we're asked to emit an error, do so and don't return; otherwise
1789  * return false.
1790  */
1791 bool
1792 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1793                                    EState *estate, bool emitError)
1794 {
1795         ExprContext *econtext;
1796         bool            success;
1797
1798         /*
1799          * If first time through, build expression state tree for the partition
1800          * check expression.  Keep it in the per-query memory context so it will
1801          * survive throughout the query.
1802          */
1803         if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1804         {
1805                 List       *qual = resultRelInfo->ri_PartitionCheck;
1806
1807                 resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1808         }
1809
1810         /*
1811          * We will use the EState's per-tuple context for evaluating constraint
1812          * expressions (creating it if it's not already there).
1813          */
1814         econtext = GetPerTupleExprContext(estate);
1815
1816         /* Arrange for econtext's scan tuple to be the tuple under test */
1817         econtext->ecxt_scantuple = slot;
1818
1819         /*
1820          * As in the case of the catalogued constraints, we treat a NULL result as
1821          * success here, not a failure.
1822          */
1823         success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1824
1825         /* if asked to emit error, don't actually return on failure */
1826         if (!success && emitError)
1827                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1828
1829         return success;
1830 }
1831
1832 /*
1833  * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1834  * partition constraint check.
1835  */
1836 void
1837 ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1838                                                         TupleTableSlot *slot,
1839                                                         EState *estate)
1840 {
1841         Oid                     root_relid;
1842         TupleDesc       tupdesc;
1843         char       *val_desc;
1844         Bitmapset  *modifiedCols;
1845
1846         /*
1847          * If the tuple has been routed, it's been converted to the partition's
1848          * rowtype, which might differ from the root table's.  We must convert it
1849          * back to the root table's rowtype so that val_desc in the error message
1850          * matches the input tuple.
1851          */
1852         if (resultRelInfo->ri_PartitionRoot)
1853         {
1854                 TupleDesc       old_tupdesc;
1855                 AttrNumber *map;
1856
1857                 root_relid = RelationGetRelid(resultRelInfo->ri_PartitionRoot);
1858                 tupdesc = RelationGetDescr(resultRelInfo->ri_PartitionRoot);
1859
1860                 old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1861                 /* a reverse map */
1862                 map = convert_tuples_by_name_map_if_req(old_tupdesc, tupdesc);
1863
1864                 /*
1865                  * Partition-specific slot's tupdesc can't be changed, so allocate a
1866                  * new one.
1867                  */
1868                 if (map != NULL)
1869                         slot = execute_attr_map_slot(map, slot,
1870                                                                                  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1871         }
1872         else
1873         {
1874                 root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1875                 tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1876         }
1877
1878         modifiedCols = bms_union(GetInsertedColumns(resultRelInfo, estate),
1879                                                          GetUpdatedColumns(resultRelInfo, estate));
1880
1881         val_desc = ExecBuildSlotValueDescription(root_relid,
1882                                                                                          slot,
1883                                                                                          tupdesc,
1884                                                                                          modifiedCols,
1885                                                                                          64);
1886         ereport(ERROR,
1887                         (errcode(ERRCODE_CHECK_VIOLATION),
1888                          errmsg("new row for relation \"%s\" violates partition constraint",
1889                                         RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
1890                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
1891 }
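
/*
 * The resulting error has the shape (relation name and values shown here only
 * for illustration):
 *
 *              ERROR:  new row for relation "part1" violates partition constraint
 *              DETAIL:  Failing row contains (...).
 *
 * The DETAIL line is omitted entirely when ExecBuildSlotValueDescription
 * returns NULL because the user may not see any of the columns.
 */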
1892
1893 /*
1894  * ExecConstraints - check constraints of the tuple in 'slot'
1895  *
1896  * This checks the traditional NOT NULL and check constraints.
1897  *
1898  * The partition constraint is *NOT* checked.
1899  *
1900  * Note: 'slot' contains the tuple to check the constraints of, which may
1901  * have been converted from the original input tuple after tuple routing.
1902  * 'resultRelInfo' is the final result relation, after tuple routing.
1903  */
1904 void
1905 ExecConstraints(ResultRelInfo *resultRelInfo,
1906                                 TupleTableSlot *slot, EState *estate)
1907 {
1908         Relation        rel = resultRelInfo->ri_RelationDesc;
1909         TupleDesc       tupdesc = RelationGetDescr(rel);
1910         TupleConstr *constr = tupdesc->constr;
1911         Bitmapset  *modifiedCols;
1912         Bitmapset  *insertedCols;
1913         Bitmapset  *updatedCols;
1914
1915         Assert(constr || resultRelInfo->ri_PartitionCheck);
1916
1917         if (constr && constr->has_not_null)
1918         {
1919                 int                     natts = tupdesc->natts;
1920                 int                     attrChk;
1921
1922                 for (attrChk = 1; attrChk <= natts; attrChk++)
1923                 {
1924                         Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);
1925
1926                         if (att->attnotnull && slot_attisnull(slot, attrChk))
1927                         {
1928                                 char       *val_desc;
1929                                 Relation        orig_rel = rel;
1930                                 TupleDesc       orig_tupdesc = RelationGetDescr(rel);
1931
1932                                 /*
1933                                  * If the tuple has been routed, it's been converted to the
1934                                  * partition's rowtype, which might differ from the root
1935                                  * table's.  We must convert it back to the root table's
1936                                  * rowtype so that val_desc shown in the error message matches the
1937                                  * input tuple.
1938                                  */
1939                                 if (resultRelInfo->ri_PartitionRoot)
1940                                 {
1941                                         AttrNumber *map;
1942
1943                                         rel = resultRelInfo->ri_PartitionRoot;
1944                                         tupdesc = RelationGetDescr(rel);
1945                                         /* a reverse map */
1946                                         map = convert_tuples_by_name_map_if_req(orig_tupdesc,
1947                                                                                                                         tupdesc);
1948
1949                                         /*
1950                                          * Partition-specific slot's tupdesc can't be changed, so
1951                                          * allocate a new one.
1952                                          */
1953                                         if (map != NULL)
1954                                                 slot = execute_attr_map_slot(map, slot,
1955                                                                                                          MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1956                                 }
1957
1958                                 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1959                                 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1960                                 modifiedCols = bms_union(insertedCols, updatedCols);
1961                                 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1962                                                                                                                  slot,
1963                                                                                                                  tupdesc,
1964                                                                                                                  modifiedCols,
1965                                                                                                                  64);
1966
1967                                 ereport(ERROR,
1968                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1969                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1970                                                                 NameStr(att->attname)),
1971                                                  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1972                                                  errtablecol(orig_rel, attrChk)));
1973                         }
1974                 }
1975         }
1976
1977         if (constr && constr->num_check > 0)
1978         {
1979                 const char *failed;
1980
1981                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1982                 {
1983                         char       *val_desc;
1984                         Relation        orig_rel = rel;
1985
1986                         /* See the comment above. */
1987                         if (resultRelInfo->ri_PartitionRoot)
1988                         {
1989                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
1990                                 AttrNumber *map;
1991
1992                                 rel = resultRelInfo->ri_PartitionRoot;
1993                                 tupdesc = RelationGetDescr(rel);
1994                                 /* a reverse map */
1995                                 map = convert_tuples_by_name_map_if_req(old_tupdesc,
1996                                                                                                                 tupdesc);
1997
1998                                 /*
1999                                  * Partition-specific slot's tupdesc can't be changed, so
2000                                  * allocate a new one.
2001                                  */
2002                                 if (map != NULL)
2003                                         slot = execute_attr_map_slot(map, slot,
2004                                                                                                  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2005                         }
2006
2007                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2008                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2009                         modifiedCols = bms_union(insertedCols, updatedCols);
2010                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2011                                                                                                          slot,
2012                                                                                                          tupdesc,
2013                                                                                                          modifiedCols,
2014                                                                                                          64);
2015                         ereport(ERROR,
2016                                         (errcode(ERRCODE_CHECK_VIOLATION),
2017                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2018                                                         RelationGetRelationName(orig_rel), failed),
2019                                          val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2020                                          errtableconstraint(orig_rel, failed)));
2021                 }
2022         }
2023 }
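
/*
 * Examples of the errors raised here (column, relation, and constraint names
 * are for illustration only):
 *
 *              ERROR:  null value in column "b" violates not-null constraint
 *              DETAIL:  Failing row contains (1, null).
 *
 *              ERROR:  new row for relation "t" violates check constraint "t_a_check"
 *              DETAIL:  Failing row contains (0, x).
 */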
2024
2025 /*
2026  * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2027  * of the specified kind.
2028  *
2029  * Note that this needs to be called multiple times to ensure that all kinds of
2030  * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2031  * CHECK OPTION set and from row level security policies).  See ExecInsert()
2032  * and ExecUpdate().
2033  */
2034 void
2035 ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
2036                                          TupleTableSlot *slot, EState *estate)
2037 {
2038         Relation        rel = resultRelInfo->ri_RelationDesc;
2039         TupleDesc       tupdesc = RelationGetDescr(rel);
2040         ExprContext *econtext;
2041         ListCell   *l1,
2042                            *l2;
2043
2044         /*
2045          * We will use the EState's per-tuple context for evaluating constraint
2046          * expressions (creating it if it's not already there).
2047          */
2048         econtext = GetPerTupleExprContext(estate);
2049
2050         /* Arrange for econtext's scan tuple to be the tuple under test */
2051         econtext->ecxt_scantuple = slot;
2052
2053         /* Check each of the constraints */
2054         forboth(l1, resultRelInfo->ri_WithCheckOptions,
2055                         l2, resultRelInfo->ri_WithCheckOptionExprs)
2056         {
2057                 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2058                 ExprState  *wcoExpr = (ExprState *) lfirst(l2);
2059
2060                 /*
2061                  * Skip any WCOs which are not the kind we are looking for at this
2062                  * time.
2063                  */
2064                 if (wco->kind != kind)
2065                         continue;
2066
2067                 /*
2068                  * WITH CHECK OPTION checks are intended to ensure that the new tuple
2069                  * is visible (in the case of a view) or that it passes the
2070                  * 'with-check' policy (in the case of row security). If the qual
2071                  * evaluates to NULL or FALSE, then the new tuple won't be included in
2072                  * the view or doesn't pass the 'with-check' policy for the table.
2073                  */
2074                 if (!ExecQual(wcoExpr, econtext))
2075                 {
2076                         char       *val_desc;
2077                         Bitmapset  *modifiedCols;
2078                         Bitmapset  *insertedCols;
2079                         Bitmapset  *updatedCols;
2080
2081                         switch (wco->kind)
2082                         {
2083                                         /*
2084                                          * For WITH CHECK OPTIONs coming from views, we might be
2085                                          * able to provide the details on the row, depending on
2086                                          * the permissions on the relation (that is, if the user
2087                                          * could view it directly anyway).  For RLS violations, we
2088                                          * don't include the data since we don't know if the user
2089                                          * should be able to view the tuple as that depends on the
2090                                          * USING policy.
2091                                          */
2092                                 case WCO_VIEW_CHECK:
2093                                         /* See the comment in ExecConstraints(). */
2094                                         if (resultRelInfo->ri_PartitionRoot)
2095                                         {
2096                                                 TupleDesc       old_tupdesc = RelationGetDescr(rel);
2097                                                 AttrNumber *map;
2098
2099                                                 rel = resultRelInfo->ri_PartitionRoot;
2100                                                 tupdesc = RelationGetDescr(rel);
2101                                                 /* a reverse map */
2102                                                 map = convert_tuples_by_name_map_if_req(old_tupdesc,
2103                                                                                                                                 tupdesc);
2104
2105                                                 /*
2106                                                  * Partition-specific slot's tupdesc can't be changed,
2107                                                  * so allocate a new one.
2108                                                  */
2109                                                 if (map != NULL)
2110                                                         slot = execute_attr_map_slot(map, slot,
2111                                                                                                                  MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2112                                         }
2113
2114                                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
2115                                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
2116                                         modifiedCols = bms_union(insertedCols, updatedCols);
2117                                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2118                                                                                                                          slot,
2119                                                                                                                          tupdesc,
2120                                                                                                                          modifiedCols,
2121                                                                                                                          64);
2122
2123                                         ereport(ERROR,
2124                                                         (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2125                                                          errmsg("new row violates check option for view \"%s\"",
2126                                                                         wco->relname),
2127                                                          val_desc ? errdetail("Failing row contains %s.",
2128                                                                                                   val_desc) : 0));
2129                                         break;
2130                                 case WCO_RLS_INSERT_CHECK:
2131                                 case WCO_RLS_UPDATE_CHECK:
2132                                         if (wco->polname != NULL)
2133                                                 ereport(ERROR,
2134                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2135                                                                  errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2136                                                                                 wco->polname, wco->relname)));
2137                                         else
2138                                                 ereport(ERROR,
2139                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2140                                                                  errmsg("new row violates row-level security policy for table \"%s\"",
2141                                                                                 wco->relname)));
2142                                         break;
2143                                 case WCO_RLS_CONFLICT_CHECK:
2144                                         if (wco->polname != NULL)
2145                                                 ereport(ERROR,
2146                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2147                                                                  errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2148                                                                                 wco->polname, wco->relname)));
2149                                         else
2150                                                 ereport(ERROR,
2151                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2152                                                                  errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2153                                                                                 wco->relname)));
2154                                         break;
2155                                 default:
2156                                         elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
2157                                         break;
2158                         }
2159                 }
2160         }
2161 }
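
/*
 * For example, a WCO_VIEW_CHECK is generated for an auto-updatable view
 * defined WITH CHECK OPTION; an INSERT through such a view whose new row
 * would not be visible through the view's WHERE clause fails here with
 * "new row violates check option for view ...".  The RLS kinds fail in the
 * analogous way against the table's row-level security policies.
 */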
2162
2163 /*
2164  * ExecBuildSlotValueDescription -- construct a string representing a tuple
2165  *
2166  * This is intentionally very similar to BuildIndexValueDescription, but
2167  * unlike that function, we truncate long field values (to at most maxfieldlen
2168  * bytes).  That seems necessary here since heap field values could be very
2169  * long, whereas index entries typically aren't so wide.
2170  *
2171  * Also, unlike the case with index entries, we need to be prepared to ignore
2172  * dropped columns.  We used to use the slot's tuple descriptor to decode the
2173  * data, but the slot's descriptor doesn't identify dropped columns, so we
2174  * now need to be passed the relation's descriptor.
2175  *
2176  * Note that, like BuildIndexValueDescription, if the user does not have
2177  * permission to view any of the columns involved, a NULL is returned.  Unlike
2178  * BuildIndexValueDescription, if the user has access to view a subset of the
2179  * column involved, that subset will be returned with a key identifying which
2180  * columns involved, that subset will be returned with a key identifying which
2181  */
2182 static char *
2183 ExecBuildSlotValueDescription(Oid reloid,
2184                                                           TupleTableSlot *slot,
2185                                                           TupleDesc tupdesc,
2186                                                           Bitmapset *modifiedCols,
2187                                                           int maxfieldlen)
2188 {
2189         StringInfoData buf;
2190         StringInfoData collist;
2191         bool            write_comma = false;
2192         bool            write_comma_collist = false;
2193         int                     i;
2194         AclResult       aclresult;
2195         bool            table_perm = false;
2196         bool            any_perm = false;
2197
2198         /*
2199          * Check if RLS is enabled and should be active for the relation; if so,
2200          * then don't return anything.  Otherwise, go through normal permission
2201          * checks.
2202          */
2203         if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2204                 return NULL;
2205
2206         initStringInfo(&buf);
2207
2208         appendStringInfoChar(&buf, '(');
2209
2210         /*
2211          * Check if the user has permissions to see the row.  Table-level SELECT
2212          * allows access to all columns.  If the user does not have table-level
2213          * SELECT then we check each column and include those the user has SELECT
2214          * rights on.  Additionally, we always include columns the user provided
2215          * data for.
2216          */
2217         aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2218         if (aclresult != ACLCHECK_OK)
2219         {
2220                 /* Set up the buffer for the column list */
2221                 initStringInfo(&collist);
2222                 appendStringInfoChar(&collist, '(');
2223         }
2224         else
2225                 table_perm = any_perm = true;
2226
2227         /* Make sure the tuple is fully deconstructed */
2228         slot_getallattrs(slot);
2229
2230         for (i = 0; i < tupdesc->natts; i++)
2231         {
2232                 bool            column_perm = false;
2233                 char       *val;
2234                 int                     vallen;
2235                 Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2236
2237                 /* ignore dropped columns */
2238                 if (att->attisdropped)
2239                         continue;
2240
2241                 if (!table_perm)
2242                 {
2243                         /*
2244                          * No table-level SELECT, so need to make sure they either have
2245                          * SELECT rights on the column or that they have provided the data
2246                          * for the column.  If not, omit this column from the error
2247                          * message.
2248                          */
2249                         aclresult = pg_attribute_aclcheck(reloid, att->attnum,
2250                                                                                           GetUserId(), ACL_SELECT);
2251                         if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
2252                                                           modifiedCols) || aclresult == ACLCHECK_OK)
2253                         {
2254                                 column_perm = any_perm = true;
2255
2256                                 if (write_comma_collist)
2257                                         appendStringInfoString(&collist, ", ");
2258                                 else
2259                                         write_comma_collist = true;
2260
2261                                 appendStringInfoString(&collist, NameStr(att->attname));
2262                         }
2263                 }
2264
2265                 if (table_perm || column_perm)
2266                 {
2267                         if (slot->tts_isnull[i])
2268                                 val = "null";
2269                         else
2270                         {
2271                                 Oid                     foutoid;
2272                                 bool            typisvarlena;
2273
2274                                 getTypeOutputInfo(att->atttypid,
2275                                                                   &foutoid, &typisvarlena);
2276                                 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2277                         }
2278
2279                         if (write_comma)
2280                                 appendStringInfoString(&buf, ", ");
2281                         else
2282                                 write_comma = true;
2283
2284                         /* truncate if needed */
2285                         vallen = strlen(val);
2286                         if (vallen <= maxfieldlen)
2287                                 appendBinaryStringInfo(&buf, val, vallen);
2288                         else
2289                         {
2290                                 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2291                                 appendBinaryStringInfo(&buf, val, vallen);
2292                                 appendStringInfoString(&buf, "...");
2293                         }
2294                 }
2295         }
2296
2297         /* If we end up with zero columns being returned, then return NULL. */
2298         if (!any_perm)
2299                 return NULL;
2300
2301         appendStringInfoChar(&buf, ')');
2302
2303         if (!table_perm)
2304         {
2305                 appendStringInfoString(&collist, ") = ");
2306                 appendBinaryStringInfo(&collist, buf.data, buf.len);
2307
2308                 return collist.data;
2309         }
2310
2311         return buf.data;
2312 }
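
/*
 * Output shape, for illustration: with table-level SELECT the result looks
 * like
 *
 *              (1, null, some long va...)
 *
 * whereas with access to only a subset of the columns it carries the key
 * described above, e.g.
 *
 *              (a, c) = (1, some long va...)
 *
 * and NULL is returned if no column at all may be shown.
 */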
2313
2314
2315 /*
2316  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2317  * given ResultRelInfo
2318  */
2319 LockTupleMode
2320 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2321 {
2322         Bitmapset  *keyCols;
2323         Bitmapset  *updatedCols;
2324
2325         /*
2326          * Compute lock mode to use.  If columns that are part of the key have not
2327          * been modified, then we can use a weaker lock, allowing for better
2328          * concurrency.
2329          */
2330         updatedCols = GetAllUpdatedColumns(relinfo, estate);
2331         keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2332                                                                                  INDEX_ATTR_BITMAP_KEY);
2333
2334         if (bms_overlap(keyCols, updatedCols))
2335                 return LockTupleExclusive;
2336
2337         return LockTupleNoKeyExclusive;
2338 }
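
/*
 * For example, an UPDATE that changes only columns not belonging to any key
 * (as computed by RelationGetIndexAttrBitmap with INDEX_ATTR_BITMAP_KEY) gets
 * LockTupleNoKeyExclusive, which still allows concurrent KEY SHARE lockers
 * such as foreign-key checks; touching a key column escalates to
 * LockTupleExclusive.
 */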
2339
2340 /*
2341  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2342  *
2343  * If no such struct, either return NULL or throw error depending on missing_ok
2344  */
2345 ExecRowMark *
2346 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2347 {
2348         if (rti > 0 && rti <= estate->es_range_table_size &&
2349                 estate->es_rowmarks != NULL)
2350         {
2351                 ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2352
2353                 if (erm)
2354                         return erm;
2355         }
2356         if (!missing_ok)
2357                 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2358         return NULL;
2359 }
2360
2361 /*
2362  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2363  *
2364  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2365  * input plan node (not planstate node!).  We need the latter to find out
2366  * the column numbers of the resjunk columns.
2367  */
2368 ExecAuxRowMark *
2369 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2370 {
2371         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2372         char            resname[32];
2373
2374         aerm->rowmark = erm;
2375
2376         /* Look up the resjunk columns associated with this rowmark */
2377         if (erm->markType != ROW_MARK_COPY)
2378         {
2379                 /* need ctid for all methods other than COPY */
2380                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2381                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2382                                                                                                            resname);
2383                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2384                         elog(ERROR, "could not find junk %s column", resname);
2385         }
2386         else
2387         {
2388                 /* need wholerow if COPY */
2389                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2390                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2391                                                                                                                 resname);
2392                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2393                         elog(ERROR, "could not find junk %s column", resname);
2394         }
2395
2396         /* if child rel, need tableoid */
2397         if (erm->rti != erm->prti)
2398         {
2399                 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2400                 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2401                                                                                                            resname);
2402                 if (!AttributeNumberIsValid(aerm->toidAttNo))
2403                         elog(ERROR, "could not find junk %s column", resname);
2404         }
2405
2406         return aerm;
2407 }
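
/*
 * Illustrative sketch, not part of upstream execMain.c: how a node's
 * ExecInit routine typically pairs ExecFindRowMark() with
 * ExecBuildAuxRowMark() for each PlanRowMark it owns, along the lines of
 * ExecInitLockRows() and ExecInitModifyTable().  The helper name and the
 * EXEC_MAIN_USAGE_SKETCHES guard are invented for illustration only.
 */
#ifdef EXEC_MAIN_USAGE_SKETCHES
static List *
sketch_build_aux_rowmarks(EState *estate, List *rowMarks, List *targetlist)
{
	List	   *aux = NIL;
	ListCell   *lc;

	foreach(lc, rowMarks)
	{
		PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* the ExecRowMark was built during InitPlan; find it, then wrap it */
		erm = ExecFindRowMark(estate, rc->rti, false);
		aux = lappend(aux, ExecBuildAuxRowMark(erm, targetlist));
	}

	return aux;
}
#endif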
2408
2409
2410 /*
2411  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2412  * process the updated version under READ COMMITTED rules.
2413  *
2414  * See backend/executor/README for some info about how this works.
2415  */
2416
2417
2418 /*
2419  * Check the updated version of a tuple to see if we want to process it under
2420  * READ COMMITTED rules.
2421  *
2422  *      epqstate - state for EvalPlanQual rechecking
2423  *      relation - table containing tuple
2424  *      rti - rangetable index of table containing tuple
2425  *      inputslot - tuple for processing - this can be the slot from
2426  *              EvalPlanQualSlot(), for increased efficiency.
2427  *
2428  * This tests whether the tuple in inputslot still matches the relevant
2429  * quals. For that result to be useful, the input tuple typically has to be
2430  * the last row version (otherwise the recheck proves little) and
2431  * locked (otherwise the result might be out of date). That's typically
2432  * achieved by using table_tuple_lock() with the
2433  * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2434  *
2435  * Returns a slot containing the new candidate update/delete tuple, or
2436  * NULL if we determine we shouldn't process the row.
2437  */
2438 TupleTableSlot *
2439 EvalPlanQual(EPQState *epqstate, Relation relation,
2440                          Index rti, TupleTableSlot *inputslot)
2441 {
2442         TupleTableSlot *slot;
2443         TupleTableSlot *testslot;
2444
2445         Assert(rti > 0);
2446
2447         /*
2448          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
2449          */
2450         EvalPlanQualBegin(epqstate);
2451
2452         /*
2453          * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2454          * an unnecessary copy.
2455          */
2456         testslot = EvalPlanQualSlot(epqstate, relation, rti);
2457         if (testslot != inputslot)
2458                 ExecCopySlot(testslot, inputslot);
2459
2460         /*
2461          * Run the EPQ query.  We assume it will return at most one tuple.
2462          */
2463         slot = EvalPlanQualNext(epqstate);
2464
2465         /*
2466          * If we got a tuple, force the slot to materialize the tuple so that it
2467          * is not dependent on any local state in the EPQ query (in particular,
2468          * it's highly likely that the slot contains references to any pass-by-ref
2469          * datums that may be present in the test tuple).  As with the next step,
2470          * this is to guard against early re-use of the EPQ query.
2471          */
2472         if (!TupIsNull(slot))
2473                 ExecMaterializeSlot(slot);
2474
2475         /*
2476          * Clear out the test tuple.  This is needed in case the EPQ query is
2477          * re-used to test a tuple for a different relation.  (Not clear that can
2478          * really happen, but let's be safe.)
2479          */
2480         ExecClearTuple(testslot);
2481
2482         return slot;
2483 }
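
/*
 * Illustrative sketch, not part of upstream execMain.c: the typical READ
 * COMMITTED caller pattern, roughly what the UPDATE/DELETE paths in
 * nodeModifyTable.c do when the table AM reports TM_Updated.  The latest
 * row version is locked directly into the EPQ slot (avoiding a copy) and
 * then rechecked with EvalPlanQual().  The helper name, its simplified
 * error handling, and the EXEC_MAIN_USAGE_SKETCHES guard are invented for
 * illustration only.
 */
#ifdef EXEC_MAIN_USAGE_SKETCHES
static TupleTableSlot *
sketch_recheck_after_concurrent_update(EPQState *epqstate, EState *estate,
									   ResultRelInfo *relinfo,
									   ItemPointer tid,
									   LockTupleMode lockmode)
{
	Relation	rel = relinfo->ri_RelationDesc;
	Index		rti = relinfo->ri_RangeTableIndex;
	TupleTableSlot *epqslot = EvalPlanQualSlot(epqstate, rel, rti);
	TM_FailureData tmfd;
	TM_Result	result;

	/* lock the *latest* version of the row, storing it in the EPQ slot */
	result = table_tuple_lock(rel, tid, estate->es_snapshot, epqslot,
							  estate->es_output_cid, lockmode,
							  LockWaitBlock,
							  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
							  &tmfd);
	if (result != TM_Ok)
		return NULL;			/* simplified: row gone or lock not obtained */

	/* re-evaluate the query quals against the locked row version */
	return EvalPlanQual(epqstate, rel, rti, epqslot);
}
#endif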
2484
2485 /*
2486  * EvalPlanQualInit -- initialize during creation of a plan state node
2487  * that might need to invoke EPQ processing.
2488  *
2489  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2490  * with EvalPlanQualSetPlan.
2491  */
2492 void
2493 EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2494                                  Plan *subplan, List *auxrowmarks, int epqParam)
2495 {
2496         Index           rtsize = parentestate->es_range_table_size;
2497
2498         /* initialize data not changing over EPQState's lifetime */
2499         epqstate->parentestate = parentestate;
2500         epqstate->epqParam = epqParam;
2501
2502         /*
2503          * Allocate space to reference a slot for each potential rti - do so now
2504          * rather than in EvalPlanQualBegin(), as done for other dynamically
2505          * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2506          * that *may* need EPQ later, without forcing the overhead of
2507          * EvalPlanQualBegin().
2508          */
2509         epqstate->tuple_table = NIL;
2510         epqstate->relsubs_slot = (TupleTableSlot **)
2511                 palloc0(rtsize * sizeof(TupleTableSlot *));
2512
2513         /* ... and remember data that EvalPlanQualBegin will need */
2514         epqstate->plan = subplan;
2515         epqstate->arowMarks = auxrowmarks;
2516
2517         /* ... and mark the EPQ state inactive */
2518         epqstate->origslot = NULL;
2519         epqstate->recheckestate = NULL;
2520         epqstate->recheckplanstate = NULL;
2521         epqstate->relsubs_rowmark = NULL;
2522         epqstate->relsubs_done = NULL;
2523 }
2524
2525 /*
2526  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2527  *
2528  * We need this so that ModifyTable can deal with multiple subplans.
2529  */
2530 void
2531 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2532 {
2533         /* If we have a live EPQ query, shut it down */
2534         EvalPlanQualEnd(epqstate);
2535         /* And set/change the plan pointer */
2536         epqstate->plan = subplan;
2537         /* The rowmarks depend on the plan, too */
2538         epqstate->arowMarks = auxrowmarks;
2539 }
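
/*
 * Illustrative sketch, not part of upstream execMain.c: the ModifyTable
 * style of usage, where the EPQ state is created once at ExecInit time
 * with no subplan, and the current subplan plus its aux rowmarks are
 * installed whenever execution switches to the next child plan.  The
 * helper names and the EXEC_MAIN_USAGE_SKETCHES guard are invented for
 * illustration only; compare nodeModifyTable.c for the real usage.
 */
#ifdef EXEC_MAIN_USAGE_SKETCHES
static void
sketch_epq_init_deferred(EPQState *epqstate, EState *estate, int epqParam)
{
	/* subplan and rowmarks are not known yet; supply them later */
	EvalPlanQualInit(epqstate, estate, NULL, NIL, epqParam);
}

static void
sketch_epq_switch_subplan(EPQState *epqstate, PlanState *subplanstate,
						  List *auxrowmarks)
{
	/* shuts down any live EPQ query, then installs the new subplan */
	EvalPlanQualSetPlan(epqstate, subplanstate->plan, auxrowmarks);
}
#endif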
2540
2541 /*
2542  * Return, and create if necessary, a slot for an EPQ test tuple.
2543  *
2544  * Note this only requires EvalPlanQualInit() to have been called;
2545  * EvalPlanQualBegin() is not necessary.
2546  */
2547 TupleTableSlot *
2548 EvalPlanQualSlot(EPQState *epqstate,
2549                                  Relation relation, Index rti)
2550 {
2551         TupleTableSlot **slot;
2552
2553         Assert(relation);
2554         Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2555         slot = &epqstate->relsubs_slot[rti - 1];
2556
2557         if (*slot == NULL)
2558         {
2559                 MemoryContext oldcontext;
2560
2561                 oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2562                 *slot = table_slot_create(relation, &epqstate->tuple_table);
2563                 MemoryContextSwitchTo(oldcontext);
2564         }
2565
2566         return *slot;
2567 }
2568
2569 /*
2570  * Fetch the current row value for a non-locked relation, identified by rti,
2571  * that needs to be scanned by an EvalPlanQual operation.  origslot must have
2572  * been set to contain the current result row (top-level row) that we need to
2573  * recheck.  Returns true if a substitution tuple was found, false if not.
2574  */
2575 bool
2576 EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2577 {
2578         ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2579         ExecRowMark *erm;
2580         Datum           datum;
2581         bool            isNull;
2582
2583         Assert(earm != NULL && epqstate->origslot != NULL);
2584         erm = earm->rowmark;
2585
2586         if (RowMarkRequiresRowShareLock(erm->markType))
2587                 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2588
2589         /* if child rel, must check whether it produced this row */
2590         if (erm->rti != erm->prti)
2591         {
2592                 Oid                     tableoid;
2593
2594                 datum = ExecGetJunkAttribute(epqstate->origslot,
2595                                                                          earm->toidAttNo,
2596                                                                          &isNull);
2597                 /* non-locked rels could be on the inside of outer joins */
2598                 if (isNull)
2599                         return false;
2600
2601                 tableoid = DatumGetObjectId(datum);
2602
2603                 Assert(OidIsValid(erm->relid));
2604                 if (tableoid != erm->relid)
2605                 {
2606                         /* this child is inactive right now */
2607                         return false;
2608                 }
2609         }
2610
2611         if (erm->markType == ROW_MARK_REFERENCE)
2612         {
2613                 Assert(erm->relation != NULL);
2614
2615                 /* fetch the tuple's ctid */
2616                 datum = ExecGetJunkAttribute(epqstate->origslot,
2617                                                                          earm->ctidAttNo,
2618                                                                          &isNull);
2619                 /* non-locked rels could be on the inside of outer joins */
2620                 if (isNull)
2621                         return false;
2622
2623                 /* fetch requests on foreign tables must be passed to their FDW */
2624                 if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2625                 {
2626                         FdwRoutine *fdwroutine;
2627                         bool            updated = false;
2628
2629                         fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2630                         /* this should have been checked already, but let's be safe */
2631                         if (fdwroutine->RefetchForeignRow == NULL)
2632                                 ereport(ERROR,
2633                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2634                                                  errmsg("cannot lock rows in foreign table \"%s\"",
2635                                                                 RelationGetRelationName(erm->relation))));
2636
2637                         fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2638                                                                                   erm,
2639                                                                                   datum,
2640                                                                                   slot,
2641                                                                                   &updated);
2642                         if (TupIsNull(slot))
2643                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2644
2645                         /*
2646                          * Ideally we'd insist on updated == false here, but that assumes
2647                          * that FDWs can track that exactly, which they might not be able
2648                          * to.  So just ignore the flag.
2649                          */
2650                         return true;
2651                 }
2652                 else
2653                 {
2654                         /* ordinary table, fetch the tuple */
2655                         if (!table_tuple_fetch_row_version(erm->relation,
2656                                                                                            (ItemPointer) DatumGetPointer(datum),
2657                                                                                            SnapshotAny, slot))
2658                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2659                         return true;
2660                 }
2661         }
2662         else
2663         {
2664                 Assert(erm->markType == ROW_MARK_COPY);
2665
2666                 /* fetch the whole-row Var for the relation */
2667                 datum = ExecGetJunkAttribute(epqstate->origslot,
2668                                                                          earm->wholeAttNo,
2669                                                                          &isNull);
2670                 /* non-locked rels could be on the inside of outer joins */
2671                 if (isNull)
2672                         return false;
2673
2674                 ExecStoreHeapTupleDatum(datum, slot);
2675                 return true;
2676         }
2677 }
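
/*
 * Illustrative sketch, not part of upstream execMain.c: roughly how a scan
 * node's EPQ path consumes EvalPlanQualFetchRowMark() during a recheck,
 * substituting the rowmark-supplied tuple instead of rescanning; compare
 * ExecScanFetch() in execScan.c.  The helper name and the
 * EXEC_MAIN_USAGE_SKETCHES guard are invented for illustration only.
 */
#ifdef EXEC_MAIN_USAGE_SKETCHES
static TupleTableSlot *
sketch_epq_scan_fetch(ScanState *node)
{
	EState	   *estate = node->ps.state;
	EPQState   *epqstate = estate->es_epq_active;
	Index		scanrelid = ((Scan *) node->ps.plan)->scanrelid;
	TupleTableSlot *slot = node->ss_ScanTupleSlot;

	/* we already substituted a tuple for this rel; act as if scan is done */
	if (epqstate->relsubs_done[scanrelid - 1])
		return ExecClearTuple(slot);

	if (epqstate->relsubs_rowmark[scanrelid - 1] != NULL)
	{
		/* remember not to return the substitute tuple more than once */
		epqstate->relsubs_done[scanrelid - 1] = true;

		/* no tuple available (e.g. outer-join null side): return empty */
		if (!EvalPlanQualFetchRowMark(epqstate, scanrelid, slot))
			return ExecClearTuple(slot);

		return slot;
	}

	/* simplified: the real code falls through to the normal scan here */
	return NULL;
}
#endif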
2678
2679 /*
2680  * Fetch the next row (if any) from EvalPlanQual testing
2681  *
2682  * (In practice, there should never be more than one row...)
2683  */
2684 TupleTableSlot *
2685 EvalPlanQualNext(EPQState *epqstate)
2686 {
2687         MemoryContext oldcontext;
2688         TupleTableSlot *slot;
2689
2690         oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2691         slot = ExecProcNode(epqstate->recheckplanstate);
2692         MemoryContextSwitchTo(oldcontext);
2693
2694         return slot;
2695 }
2696
2697 /*
2698  * Initialize or reset an EvalPlanQual state tree
2699  */
2700 void
2701 EvalPlanQualBegin(EPQState *epqstate)
2702 {
2703         EState     *parentestate = epqstate->parentestate;
2704         EState     *recheckestate = epqstate->recheckestate;
2705
2706         if (recheckestate == NULL)
2707         {
2708                 /* First time through, so create a child EState */
2709                 EvalPlanQualStart(epqstate, epqstate->plan);
2710         }
2711         else
2712         {
2713                 /*
2714                  * We already have a suitable child EPQ tree, so just reset it.
2715                  */
2716                 Index           rtsize = parentestate->es_range_table_size;
2717                 PlanState  *rcplanstate = epqstate->recheckplanstate;
2718
2719                 MemSet(epqstate->relsubs_done, 0, rtsize * sizeof(bool));
2720
2721                 /* Recopy current values of parent parameters */
2722                 if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2723                 {
2724                         int                     i;
2725
2726                         /*
2727                          * Force evaluation of any InitPlan outputs that could be needed
2728                          * by the subplan, just in case they got reset since
2729                          * EvalPlanQualStart (see comments therein).
2730                          */
2731                         ExecSetParamPlanMulti(rcplanstate->plan->extParam,
2732                                                                   GetPerTupleExprContext(parentestate));
2733
2734                         i = list_length(parentestate->es_plannedstmt->paramExecTypes);
2735
2736                         while (--i >= 0)
2737                         {
2738                                 /* copy value if any, but not execPlan link */
2739                                 recheckestate->es_param_exec_vals[i].value =
2740                                         parentestate->es_param_exec_vals[i].value;
2741                                 recheckestate->es_param_exec_vals[i].isnull =
2742                                         parentestate->es_param_exec_vals[i].isnull;
2743                         }
2744                 }
2745
2746                 /*
2747                  * Mark child plan tree as needing rescan at all scan nodes.  The
2748                  * first ExecProcNode will take care of actually doing the rescan.
2749                  */
2750                 rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
2751                                                                                            epqstate->epqParam);
2752         }
2753 }
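
/*
 * Illustrative sketch, not part of upstream execMain.c: EPQ can also be
 * driven "by hand", the way nodeLockRows.c does it, rather than through
 * EvalPlanQual(): store the test tuples via EvalPlanQualSlot(), call
 * EvalPlanQualBegin(), point origslot at the row being rechecked, and then
 * pull the recheck result with EvalPlanQualNext().  The helper name and
 * the EXEC_MAIN_USAGE_SKETCHES guard are invented for illustration only.
 */
#ifdef EXEC_MAIN_USAGE_SKETCHES
static TupleTableSlot *
sketch_manual_epq_recheck(EPQState *epqstate, TupleTableSlot *origslot)
{
	TupleTableSlot *result;

	/* test tuples are assumed to already sit in the per-rel EPQ slots */
	EvalPlanQualBegin(epqstate);

	/* junk attributes of the original row are needed to fetch rowmarks */
	epqstate->origslot = origslot;

	result = EvalPlanQualNext(epqstate);
	if (!TupIsNull(result))
		ExecMaterializeSlot(result);	/* detach from EPQ-local state */

	return result;
}
#endif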
2754
2755 /*
2756  * Start execution of an EvalPlanQual plan tree.
2757  *
2758  * This is a cut-down version of ExecutorStart(): we copy some state from
2759  * the top-level estate rather than initializing it fresh.
2760  */
2761 static void
2762 EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
2763 {
2764         EState     *parentestate = epqstate->parentestate;
2765         Index           rtsize = parentestate->es_range_table_size;
2766         EState     *rcestate;
2767         MemoryContext oldcontext;
2768         ListCell   *l;
2769
2770         epqstate->recheckestate = rcestate = CreateExecutorState();
2771
2772         oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);
2773
2774         /* signal that this is an EState for executing EPQ */
2775         rcestate->es_epq_active = epqstate;
2776
2777         /*
2778          * Child EPQ EStates share the parent's copy of unchanging state such as
2779          * the snapshot, rangetable, result-rel info, and external Param info.
2780          * They need their own copies of local state, including a tuple table,
2781          * es_param_exec_vals, etc.
2782          *
2783          * The ResultRelInfo array management is trickier than it looks.  We
2784          * create fresh arrays for the child but copy all the content from the
2785          * parent.  This is because it's okay for the child to share any
2786          * per-relation state the parent has already created --- but if the child
2787          * sets up any ResultRelInfo fields, such as its own junkfilter, that
2788          * state must *not* propagate back to the parent.  (For one thing, the
2789          * pointed-to data is in a memory context that won't last long enough.)
2790          */
2791         rcestate->es_direction = ForwardScanDirection;
2792         rcestate->es_snapshot = parentestate->es_snapshot;
2793         rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2794         rcestate->es_range_table = parentestate->es_range_table;
2795         rcestate->es_range_table_size = parentestate->es_range_table_size;
2796         rcestate->es_relations = parentestate->es_relations;
2797         rcestate->es_queryEnv = parentestate->es_queryEnv;
2798         rcestate->es_rowmarks = parentestate->es_rowmarks;
2799         rcestate->es_plannedstmt = parentestate->es_plannedstmt;
2800         rcestate->es_junkFilter = parentestate->es_junkFilter;
2801         rcestate->es_output_cid = parentestate->es_output_cid;
2802         if (parentestate->es_num_result_relations > 0)
2803         {
2804                 int                     numResultRelations = parentestate->es_num_result_relations;
2805                 int                     numRootResultRels = parentestate->es_num_root_result_relations;
2806                 ResultRelInfo *resultRelInfos;
2807
2808                 resultRelInfos = (ResultRelInfo *)
2809                         palloc(numResultRelations * sizeof(ResultRelInfo));
2810                 memcpy(resultRelInfos, parentestate->es_result_relations,
2811                            numResultRelations * sizeof(ResultRelInfo));
2812                 rcestate->es_result_relations = resultRelInfos;
2813                 rcestate->es_num_result_relations = numResultRelations;
2814
2815                 /* Also transfer partitioned root result relations. */
2816                 if (numRootResultRels > 0)
2817                 {
2818                         resultRelInfos = (ResultRelInfo *)
2819                                 palloc(numRootResultRels * sizeof(ResultRelInfo));
2820                         memcpy(resultRelInfos, parentestate->es_root_result_relations,
2821                                    numRootResultRels * sizeof(ResultRelInfo));
2822                         rcestate->es_root_result_relations = resultRelInfos;
2823                         rcestate->es_num_root_result_relations = numRootResultRels;
2824                 }
2825         }
2826         /* es_result_relation_info must NOT be copied */
2827         /* es_trig_target_relations must NOT be copied */
2828         rcestate->es_top_eflags = parentestate->es_top_eflags;
2829         rcestate->es_instrument = parentestate->es_instrument;
2830         /* es_auxmodifytables must NOT be copied */
2831
2832         /*
2833          * The external param list is simply shared from parent.  The internal
2834          * param workspace has to be local state, but we copy the initial values
2835          * from the parent, so as to have access to any param values that were
2836          * already set from other parts of the parent's plan tree.
2837          */
2838         rcestate->es_param_list_info = parentestate->es_param_list_info;
2839         if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2840         {
2841                 int                     i;
2842
2843                 /*
2844                  * Force evaluation of any InitPlan outputs that could be needed by
2845                  * the subplan.  (With more complexity, maybe we could postpone this
2846                  * till the subplan actually demands them, but it doesn't seem worth
2847                  * the trouble; this is a corner case already, since usually the
2848                  * InitPlans would have been evaluated before reaching EvalPlanQual.)
2849                  *
2850                  * This will not touch output params of InitPlans that occur somewhere
2851                  * within the subplan tree, only those that are attached to the
2852                  * ModifyTable node or above it and are referenced within the subplan.
2853                  * That's OK though, because the planner would only attach such
2854                  * InitPlans to a lower-level SubqueryScan node, and EPQ execution
2855                  * will not descend into a SubqueryScan.
2856                  *
2857                  * The EState's per-output-tuple econtext is sufficiently short-lived
2858                  * for this, since it should get reset before there is any chance of
2859                  * doing EvalPlanQual again.
2860                  */
2861                 ExecSetParamPlanMulti(planTree->extParam,
2862                                                           GetPerTupleExprContext(parentestate));
2863
2864                 /* now make the internal param workspace ... */
2865                 i = list_length(parentestate->es_plannedstmt->paramExecTypes);
2866                 rcestate->es_param_exec_vals = (ParamExecData *)
2867                         palloc0(i * sizeof(ParamExecData));
2868                 /* ... and copy down all values, whether really needed or not */
2869                 while (--i >= 0)
2870                 {
2871                         /* copy value if any, but not execPlan link */
2872                         rcestate->es_param_exec_vals[i].value =
2873                                 parentestate->es_param_exec_vals[i].value;
2874                         rcestate->es_param_exec_vals[i].isnull =
2875                                 parentestate->es_param_exec_vals[i].isnull;
2876                 }
2877         }
2878
2879         /*
2880          * Initialize private state information for each SubPlan.  We must do this
2881          * before running ExecInitNode on the main query tree, since
2882          * ExecInitSubPlan expects to be able to find these entries. Some of the
2883          * SubPlans might not be used in the part of the plan tree we intend to
2884          * run, but since it's not easy to tell which, we just initialize them
2885          * all.
2886          */
2887         Assert(rcestate->es_subplanstates == NIL);
2888         foreach(l, parentestate->es_plannedstmt->subplans)
2889         {
2890                 Plan       *subplan = (Plan *) lfirst(l);
2891                 PlanState  *subplanstate;
2892
2893                 subplanstate = ExecInitNode(subplan, rcestate, 0);
2894                 rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
2895                                                                                          subplanstate);
2896         }
2897
2898         /*
2899          * These arrays are reused across different plans set with
2900          * EvalPlanQualSetPlan(), which is safe because they all use the same
2901          * parent EState.  Therefore we can reuse them if they are already allocated.
2902          */
2903         if (epqstate->relsubs_rowmark == NULL)
2904         {
2905                 Assert(epqstate->relsubs_done == NULL);
2906                 epqstate->relsubs_rowmark = (ExecAuxRowMark **)
2907                         palloc0(rtsize * sizeof(ExecAuxRowMark *));
2908                 epqstate->relsubs_done = (bool *)
2909                         palloc0(rtsize * sizeof(bool));
2910         }
2911         else
2912         {
2913                 Assert(epqstate->relsubs_done != NULL);
2914                 memset(epqstate->relsubs_rowmark, 0,
2915                            rtsize * sizeof(ExecAuxRowMark *));
2916                 memset(epqstate->relsubs_done, 0,
2917                            rtsize * sizeof(bool));
2918         }
2919
2920         /*
2921          * Build an RTI-indexed array of rowmarks, so that
2922          * EvalPlanQualFetchRowMark() can efficiently access the rowmark to be
2923          * fetched.
2924          */
2925         foreach(l, epqstate->arowMarks)
2926         {
2927                 ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);
2928
2929                 epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
2930         }
2931
2932         /*
2933          * Initialize the private state information for all the nodes in the part
2934          * of the plan tree we need to run.  This opens files, allocates storage
2935          * and leaves us ready to start processing tuples.
2936          */
2937         epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);
2938
2939         MemoryContextSwitchTo(oldcontext);
2940 }
2941
2942 /*
2943  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2944  * or if we are done with the current EPQ child.
2945  *
2946  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2947  * of the normal cleanup, but *not* close result relations (which we are
2948  * just sharing from the outer query).  We do, however, have to close any
2949  * trigger target relations that got opened, since those are not shared.
2950  * (There probably shouldn't be any of the latter, but just in case...)
2951  */
2952 void
2953 EvalPlanQualEnd(EPQState *epqstate)
2954 {
2955         EState     *estate = epqstate->recheckestate;
2956         Index           rtsize;
2957         MemoryContext oldcontext;
2958         ListCell   *l;
2959
2960         rtsize = epqstate->parentestate->es_range_table_size;
2961
2962         /*
2963          * We may have a tuple table, even if EPQ wasn't started, because we allow
2964          * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
2965          */
2966         if (epqstate->tuple_table != NIL)
2967         {
2968                 memset(epqstate->relsubs_slot, 0,
2969                            rtsize * sizeof(TupleTableSlot *));
2970                 ExecResetTupleTable(epqstate->tuple_table, true);
2971                 epqstate->tuple_table = NIL;
2972         }
2973
2974         /* EPQ wasn't started, nothing further to do */
2975         if (estate == NULL)
2976                 return;
2977
2978         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2979
2980         ExecEndNode(epqstate->recheckplanstate);
2981
2982         foreach(l, estate->es_subplanstates)
2983         {
2984                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2985
2986                 ExecEndNode(subplanstate);
2987         }
2988
2989         /* throw away the per-estate tuple table, some node may have used it */
2990         ExecResetTupleTable(estate->es_tupleTable, false);
2991
2992         /* close any trigger target relations attached to this EState */
2993         ExecCleanUpTriggerState(estate);
2994
2995         MemoryContextSwitchTo(oldcontext);
2996
2997         FreeExecutorState(estate);
2998
2999         /* Mark EPQState idle */
3000         epqstate->recheckestate = NULL;
3001         epqstate->recheckplanstate = NULL;
3002         epqstate->origslot = NULL;
3003 }