]> granicus.if.org Git - postgresql/blob - src/backend/executor/execMain.c
Fix interaction of parallel query with prepared statements.
[postgresql] / src / backend / executor / execMain.c
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorFinish()
10  *      ExecutorEnd()
11  *
12  *      These four procedures are the external interface to the executor.
13  *      In each case, the query descriptor is required as an argument.
14  *
15  *      ExecutorStart must be called at the beginning of execution of any
16  *      query plan and ExecutorEnd must always be called at the end of
17  *      execution of a plan (unless it is aborted due to error).
18  *
19  *      ExecutorRun accepts direction and count arguments that specify whether
20  *      the plan is to be executed forwards, backwards, and for how many tuples.
21  *      In some cases ExecutorRun may be called multiple times to process all
22  *      the tuples for a plan.  It is also acceptable to stop short of executing
23  *      the whole plan (but only if it is a SELECT).
24  *
25  *      ExecutorFinish must be called after the final ExecutorRun call and
26  *      before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *      which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *        src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
38 #include "postgres.h"
39
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "commands/matview.h"
46 #include "commands/trigger.h"
47 #include "executor/execdebug.h"
48 #include "foreign/fdwapi.h"
49 #include "mb/pg_wchar.h"
50 #include "miscadmin.h"
51 #include "optimizer/clauses.h"
52 #include "parser/parsetree.h"
53 #include "storage/bufmgr.h"
54 #include "storage/lmgr.h"
55 #include "tcop/utility.h"
56 #include "utils/acl.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/rls.h"
60 #include "utils/snapmgr.h"
61 #include "utils/tqual.h"
62
63
64 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
65 ExecutorStart_hook_type ExecutorStart_hook = NULL;
66 ExecutorRun_hook_type ExecutorRun_hook = NULL;
67 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
68 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
69
70 /* Hook for plugin to get control in ExecCheckRTPerms() */
71 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
72
73 /* decls for local routines only used within this module */
74 static void InitPlan(QueryDesc *queryDesc, int eflags);
75 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
76 static void ExecPostprocessPlan(EState *estate);
77 static void ExecEndPlan(PlanState *planstate, EState *estate);
78 static void ExecutePlan(EState *estate, PlanState *planstate,
79                         bool use_parallel_mode,
80                         CmdType operation,
81                         bool sendTuples,
82                         uint64 numberTuples,
83                         ScanDirection direction,
84                         DestReceiver *dest);
85 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
86 static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
87                                                   Bitmapset *modifiedCols,
88                                                   AclMode requiredPerms);
89 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
90 static char *ExecBuildSlotValueDescription(Oid reloid,
91                                                           TupleTableSlot *slot,
92                                                           TupleDesc tupdesc,
93                                                           Bitmapset *modifiedCols,
94                                                           int maxfieldlen);
95 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
96                                   Plan *planTree);
97
98 /*
99  * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
100  * not appear to be any good header to put it into, given the structures that
101  * it uses, so we let them be duplicated.  Be sure to update both if one needs
102  * to be changed, however.
103  */
104 #define GetInsertedColumns(relinfo, estate) \
105         (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
106 #define GetUpdatedColumns(relinfo, estate) \
107         (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
108
109 /* end of local decls */
110
111
112 /* ----------------------------------------------------------------
113  *              ExecutorStart
114  *
115  *              This routine must be called at the beginning of any execution of any
116  *              query plan
117  *
118  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
119  * only because some places use QueryDescs for utility commands).  The tupDesc
120  * field of the QueryDesc is filled in to describe the tuples that will be
121  * returned, and the internal fields (estate and planstate) are set up.
122  *
123  * eflags contains flag bits as described in executor.h.
124  *
125  * NB: the CurrentMemoryContext when this is called will become the parent
126  * of the per-query context used for this Executor invocation.
127  *
128  * We provide a function hook variable that lets loadable plugins
129  * get control when ExecutorStart is called.  Such a plugin would
130  * normally call standard_ExecutorStart().
131  *
132  * ----------------------------------------------------------------
133  */
134 void
135 ExecutorStart(QueryDesc *queryDesc, int eflags)
136 {
137         if (ExecutorStart_hook)
138                 (*ExecutorStart_hook) (queryDesc, eflags);
139         else
140                 standard_ExecutorStart(queryDesc, eflags);
141 }
142
143 void
144 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
145 {
146         EState     *estate;
147         MemoryContext oldcontext;
148
149         /* sanity checks: queryDesc must not be started already */
150         Assert(queryDesc != NULL);
151         Assert(queryDesc->estate == NULL);
152
153         /*
154          * If the transaction is read-only, we need to check if any writes are
155          * planned to non-temporary tables.  EXPLAIN is considered read-only.
156          *
157          * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
158          * would require (a) storing the combocid hash in shared memory, rather
159          * than synchronizing it just once at the start of parallelism, and (b) an
160          * alternative to heap_update()'s reliance on xmax for mutual exclusion.
161          * INSERT may have no such troubles, but we forbid it to simplify the
162          * checks.
163          *
164          * We have lower-level defenses in CommandCounterIncrement and elsewhere
165          * against performing unsafe operations in parallel mode, but this gives a
166          * more user-friendly error message.
167          */
168         if ((XactReadOnly || IsInParallelMode()) &&
169                 !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
170                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
171
172         /*
173          * Build EState, switch into per-query memory context for startup.
174          */
175         estate = CreateExecutorState();
176         queryDesc->estate = estate;
177
178         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
179
180         /*
181          * Fill in external parameters, if any, from queryDesc; and allocate
182          * workspace for internal parameters
183          */
184         estate->es_param_list_info = queryDesc->params;
185
186         if (queryDesc->plannedstmt->nParamExec > 0)
187                 estate->es_param_exec_vals = (ParamExecData *)
188                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
189
190         /*
191          * If non-read-only query, set the command ID to mark output tuples with
192          */
193         switch (queryDesc->operation)
194         {
195                 case CMD_SELECT:
196
197                         /*
198                          * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
199                          * tuples
200                          */
201                         if (queryDesc->plannedstmt->rowMarks != NIL ||
202                                 queryDesc->plannedstmt->hasModifyingCTE)
203                                 estate->es_output_cid = GetCurrentCommandId(true);
204
205                         /*
206                          * A SELECT without modifying CTEs can't possibly queue triggers,
207                          * so force skip-triggers mode. This is just a marginal efficiency
208                          * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
209                          * all that expensive, but we might as well do it.
210                          */
211                         if (!queryDesc->plannedstmt->hasModifyingCTE)
212                                 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
213                         break;
214
215                 case CMD_INSERT:
216                 case CMD_DELETE:
217                 case CMD_UPDATE:
218                         estate->es_output_cid = GetCurrentCommandId(true);
219                         break;
220
221                 default:
222                         elog(ERROR, "unrecognized operation code: %d",
223                                  (int) queryDesc->operation);
224                         break;
225         }
226
227         /*
228          * Copy other important information into the EState
229          */
230         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
231         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
232         estate->es_top_eflags = eflags;
233         estate->es_instrument = queryDesc->instrument_options;
234
235         /*
236          * Initialize the plan state tree
237          */
238         InitPlan(queryDesc, eflags);
239
240         /*
241          * Set up an AFTER-trigger statement context, unless told not to, or
242          * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
243          */
244         if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
245                 AfterTriggerBeginQuery();
246
247         MemoryContextSwitchTo(oldcontext);
248 }
249
250 /* ----------------------------------------------------------------
251  *              ExecutorRun
252  *
253  *              This is the main routine of the executor module. It accepts
254  *              the query descriptor from the traffic cop and executes the
255  *              query plan.
256  *
257  *              ExecutorStart must have been called already.
258  *
259  *              If direction is NoMovementScanDirection then nothing is done
260  *              except to start up/shut down the destination.  Otherwise,
261  *              we retrieve up to 'count' tuples in the specified direction.
262  *
263  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
264  *              completion.  Also note that the count limit is only applied to
265  *              retrieved tuples, not for instance to those inserted/updated/deleted
266  *              by a ModifyTable plan node.
267  *
268  *              There is no return value, but output tuples (if any) are sent to
269  *              the destination receiver specified in the QueryDesc; and the number
270  *              of tuples processed at the top level can be found in
271  *              estate->es_processed.
272  *
273  *              We provide a function hook variable that lets loadable plugins
274  *              get control when ExecutorRun is called.  Such a plugin would
275  *              normally call standard_ExecutorRun().
276  *
277  * ----------------------------------------------------------------
278  */
279 void
280 ExecutorRun(QueryDesc *queryDesc,
281                         ScanDirection direction, uint64 count)
282 {
283         if (ExecutorRun_hook)
284                 (*ExecutorRun_hook) (queryDesc, direction, count);
285         else
286                 standard_ExecutorRun(queryDesc, direction, count);
287 }
288
289 void
290 standard_ExecutorRun(QueryDesc *queryDesc,
291                                          ScanDirection direction, uint64 count)
292 {
293         EState     *estate;
294         CmdType         operation;
295         DestReceiver *dest;
296         bool            sendTuples;
297         MemoryContext oldcontext;
298
299         /* sanity checks */
300         Assert(queryDesc != NULL);
301
302         estate = queryDesc->estate;
303
304         Assert(estate != NULL);
305         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
306
307         /*
308          * Switch into per-query memory context
309          */
310         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
311
312         /* Allow instrumentation of Executor overall runtime */
313         if (queryDesc->totaltime)
314                 InstrStartNode(queryDesc->totaltime);
315
316         /*
317          * extract information from the query descriptor and the query feature.
318          */
319         operation = queryDesc->operation;
320         dest = queryDesc->dest;
321
322         /*
323          * startup tuple receiver, if we will be emitting tuples
324          */
325         estate->es_processed = 0;
326         estate->es_lastoid = InvalidOid;
327
328         sendTuples = (operation == CMD_SELECT ||
329                                   queryDesc->plannedstmt->hasReturning);
330
331         if (sendTuples)
332                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
333
334         /*
335          * run plan
336          */
337         if (!ScanDirectionIsNoMovement(direction))
338                 ExecutePlan(estate,
339                                         queryDesc->planstate,
340                                         queryDesc->plannedstmt->parallelModeNeeded,
341                                         operation,
342                                         sendTuples,
343                                         count,
344                                         direction,
345                                         dest);
346
347         /*
348          * shutdown tuple receiver, if we started it
349          */
350         if (sendTuples)
351                 (*dest->rShutdown) (dest);
352
353         if (queryDesc->totaltime)
354                 InstrStopNode(queryDesc->totaltime, estate->es_processed);
355
356         MemoryContextSwitchTo(oldcontext);
357 }
358
359 /* ----------------------------------------------------------------
360  *              ExecutorFinish
361  *
362  *              This routine must be called after the last ExecutorRun call.
363  *              It performs cleanup such as firing AFTER triggers.  It is
364  *              separate from ExecutorEnd because EXPLAIN ANALYZE needs to
365  *              include these actions in the total runtime.
366  *
367  *              We provide a function hook variable that lets loadable plugins
368  *              get control when ExecutorFinish is called.  Such a plugin would
369  *              normally call standard_ExecutorFinish().
370  *
371  * ----------------------------------------------------------------
372  */
373 void
374 ExecutorFinish(QueryDesc *queryDesc)
375 {
376         if (ExecutorFinish_hook)
377                 (*ExecutorFinish_hook) (queryDesc);
378         else
379                 standard_ExecutorFinish(queryDesc);
380 }
381
382 void
383 standard_ExecutorFinish(QueryDesc *queryDesc)
384 {
385         EState     *estate;
386         MemoryContext oldcontext;
387
388         /* sanity checks */
389         Assert(queryDesc != NULL);
390
391         estate = queryDesc->estate;
392
393         Assert(estate != NULL);
394         Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
395
396         /* This should be run once and only once per Executor instance */
397         Assert(!estate->es_finished);
398
399         /* Switch into per-query memory context */
400         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
401
402         /* Allow instrumentation of Executor overall runtime */
403         if (queryDesc->totaltime)
404                 InstrStartNode(queryDesc->totaltime);
405
406         /* Run ModifyTable nodes to completion */
407         ExecPostprocessPlan(estate);
408
409         /* Execute queued AFTER triggers, unless told not to */
410         if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
411                 AfterTriggerEndQuery(estate);
412
413         if (queryDesc->totaltime)
414                 InstrStopNode(queryDesc->totaltime, 0);
415
416         MemoryContextSwitchTo(oldcontext);
417
418         estate->es_finished = true;
419 }
420
421 /* ----------------------------------------------------------------
422  *              ExecutorEnd
423  *
424  *              This routine must be called at the end of execution of any
425  *              query plan
426  *
427  *              We provide a function hook variable that lets loadable plugins
428  *              get control when ExecutorEnd is called.  Such a plugin would
429  *              normally call standard_ExecutorEnd().
430  *
431  * ----------------------------------------------------------------
432  */
433 void
434 ExecutorEnd(QueryDesc *queryDesc)
435 {
436         if (ExecutorEnd_hook)
437                 (*ExecutorEnd_hook) (queryDesc);
438         else
439                 standard_ExecutorEnd(queryDesc);
440 }
441
442 void
443 standard_ExecutorEnd(QueryDesc *queryDesc)
444 {
445         EState     *estate;
446         MemoryContext oldcontext;
447
448         /* sanity checks */
449         Assert(queryDesc != NULL);
450
451         estate = queryDesc->estate;
452
453         Assert(estate != NULL);
454
455         /*
456          * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
457          * Assert is needed because ExecutorFinish is new as of 9.1, and callers
458          * might forget to call it.
459          */
460         Assert(estate->es_finished ||
461                    (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
462
463         /*
464          * Switch into per-query memory context to run ExecEndPlan
465          */
466         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
467
468         ExecEndPlan(queryDesc->planstate, estate);
469
470         /* do away with our snapshots */
471         UnregisterSnapshot(estate->es_snapshot);
472         UnregisterSnapshot(estate->es_crosscheck_snapshot);
473
474         /*
475          * Must switch out of context before destroying it
476          */
477         MemoryContextSwitchTo(oldcontext);
478
479         /*
480          * Release EState and per-query memory context.  This should release
481          * everything the executor has allocated.
482          */
483         FreeExecutorState(estate);
484
485         /* Reset queryDesc fields that no longer point to anything */
486         queryDesc->tupDesc = NULL;
487         queryDesc->estate = NULL;
488         queryDesc->planstate = NULL;
489         queryDesc->totaltime = NULL;
490 }
491
492 /* ----------------------------------------------------------------
493  *              ExecutorRewind
494  *
495  *              This routine may be called on an open queryDesc to rewind it
496  *              to the start.
497  * ----------------------------------------------------------------
498  */
499 void
500 ExecutorRewind(QueryDesc *queryDesc)
501 {
502         EState     *estate;
503         MemoryContext oldcontext;
504
505         /* sanity checks */
506         Assert(queryDesc != NULL);
507
508         estate = queryDesc->estate;
509
510         Assert(estate != NULL);
511
512         /* It's probably not sensible to rescan updating queries */
513         Assert(queryDesc->operation == CMD_SELECT);
514
515         /*
516          * Switch into per-query memory context
517          */
518         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
519
520         /*
521          * rescan plan
522          */
523         ExecReScan(queryDesc->planstate);
524
525         MemoryContextSwitchTo(oldcontext);
526 }
527
528
529 /*
530  * ExecCheckRTPerms
531  *              Check access permissions for all relations listed in a range table.
532  *
533  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
534  * error if ereport_on_violation is true, or simply returns false otherwise.
535  *
536  * Note that this does NOT address row level security policies (aka: RLS).  If
537  * rows will be returned to the user as a result of this permission check
538  * passing, then RLS also needs to be consulted (and check_enable_rls()).
539  *
540  * See rewrite/rowsecurity.c.
541  */
542 bool
543 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
544 {
545         ListCell   *l;
546         bool            result = true;
547
548         foreach(l, rangeTable)
549         {
550                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
551
552                 result = ExecCheckRTEPerms(rte);
553                 if (!result)
554                 {
555                         Assert(rte->rtekind == RTE_RELATION);
556                         if (ereport_on_violation)
557                                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
558                                                            get_rel_name(rte->relid));
559                         return false;
560                 }
561         }
562
563         if (ExecutorCheckPerms_hook)
564                 result = (*ExecutorCheckPerms_hook) (rangeTable,
565                                                                                          ereport_on_violation);
566         return result;
567 }
568
569 /*
570  * ExecCheckRTEPerms
571  *              Check access permissions for a single RTE.
572  */
573 static bool
574 ExecCheckRTEPerms(RangeTblEntry *rte)
575 {
576         AclMode         requiredPerms;
577         AclMode         relPerms;
578         AclMode         remainingPerms;
579         Oid                     relOid;
580         Oid                     userid;
581
582         /*
583          * Only plain-relation RTEs need to be checked here.  Function RTEs are
584          * checked by init_fcache when the function is prepared for execution.
585          * Join, subquery, and special RTEs need no checks.
586          */
587         if (rte->rtekind != RTE_RELATION)
588                 return true;
589
590         /*
591          * No work if requiredPerms is empty.
592          */
593         requiredPerms = rte->requiredPerms;
594         if (requiredPerms == 0)
595                 return true;
596
597         relOid = rte->relid;
598
599         /*
600          * userid to check as: current user unless we have a setuid indication.
601          *
602          * Note: GetUserId() is presently fast enough that there's no harm in
603          * calling it separately for each RTE.  If that stops being true, we could
604          * call it once in ExecCheckRTPerms and pass the userid down from there.
605          * But for now, no need for the extra clutter.
606          */
607         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
608
609         /*
610          * We must have *all* the requiredPerms bits, but some of the bits can be
611          * satisfied from column-level rather than relation-level permissions.
612          * First, remove any bits that are satisfied by relation permissions.
613          */
614         relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
615         remainingPerms = requiredPerms & ~relPerms;
616         if (remainingPerms != 0)
617         {
618                 int                     col = -1;
619
620                 /*
621                  * If we lack any permissions that exist only as relation permissions,
622                  * we can fail straight away.
623                  */
624                 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
625                         return false;
626
627                 /*
628                  * Check to see if we have the needed privileges at column level.
629                  *
630                  * Note: failures just report a table-level error; it would be nicer
631                  * to report a column-level error if we have some but not all of the
632                  * column privileges.
633                  */
634                 if (remainingPerms & ACL_SELECT)
635                 {
636                         /*
637                          * When the query doesn't explicitly reference any columns (for
638                          * example, SELECT COUNT(*) FROM table), allow the query if we
639                          * have SELECT on any column of the rel, as per SQL spec.
640                          */
641                         if (bms_is_empty(rte->selectedCols))
642                         {
643                                 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
644                                                                                           ACLMASK_ANY) != ACLCHECK_OK)
645                                         return false;
646                         }
647
648                         while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
649                         {
650                                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
651                                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
652
653                                 if (attno == InvalidAttrNumber)
654                                 {
655                                         /* Whole-row reference, must have priv on all cols */
656                                         if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
657                                                                                                   ACLMASK_ALL) != ACLCHECK_OK)
658                                                 return false;
659                                 }
660                                 else
661                                 {
662                                         if (pg_attribute_aclcheck(relOid, attno, userid,
663                                                                                           ACL_SELECT) != ACLCHECK_OK)
664                                                 return false;
665                                 }
666                         }
667                 }
668
669                 /*
670                  * Basically the same for the mod columns, for both INSERT and UPDATE
671                  * privilege as specified by remainingPerms.
672                  */
673                 if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
674                                                                                                                                           userid,
675                                                                                                                    rte->insertedCols,
676                                                                                                                                  ACL_INSERT))
677                         return false;
678
679                 if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
680                                                                                                                                           userid,
681                                                                                                                         rte->updatedCols,
682                                                                                                                                  ACL_UPDATE))
683                         return false;
684         }
685         return true;
686 }
687
688 /*
689  * ExecCheckRTEPermsModified
690  *              Check INSERT or UPDATE access permissions for a single RTE (these
691  *              are processed uniformly).
692  */
693 static bool
694 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
695                                                   AclMode requiredPerms)
696 {
697         int                     col = -1;
698
699         /*
700          * When the query doesn't explicitly update any columns, allow the query
701          * if we have permission on any column of the rel.  This is to handle
702          * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
703          */
704         if (bms_is_empty(modifiedCols))
705         {
706                 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
707                                                                           ACLMASK_ANY) != ACLCHECK_OK)
708                         return false;
709         }
710
711         while ((col = bms_next_member(modifiedCols, col)) >= 0)
712         {
713                 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
714                 AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
715
716                 if (attno == InvalidAttrNumber)
717                 {
718                         /* whole-row reference can't happen here */
719                         elog(ERROR, "whole-row update is not implemented");
720                 }
721                 else
722                 {
723                         if (pg_attribute_aclcheck(relOid, attno, userid,
724                                                                           requiredPerms) != ACLCHECK_OK)
725                                 return false;
726                 }
727         }
728         return true;
729 }
730
731 /*
732  * Check that the query does not imply any writes to non-temp tables;
733  * unless we're in parallel mode, in which case don't even allow writes
734  * to temp tables.
735  *
736  * Note: in a Hot Standby slave this would need to reject writes to temp
737  * tables just as we do in parallel mode; but an HS slave can't have created
738  * any temp tables in the first place, so no need to check that.
739  */
740 static void
741 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
742 {
743         ListCell   *l;
744
745         /*
746          * Fail if write permissions are requested in parallel mode for table
747          * (temp or non-temp), otherwise fail for any non-temp table.
748          */
749         foreach(l, plannedstmt->rtable)
750         {
751                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
752
753                 if (rte->rtekind != RTE_RELATION)
754                         continue;
755
756                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
757                         continue;
758
759                 if (isTempNamespace(get_rel_namespace(rte->relid)))
760                         continue;
761
762                 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
763         }
764
765         if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
766                 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
767 }
768
769
770 /* ----------------------------------------------------------------
771  *              InitPlan
772  *
773  *              Initializes the query plan: open files, allocate storage
774  *              and start up the rule manager
775  * ----------------------------------------------------------------
776  */
777 static void
778 InitPlan(QueryDesc *queryDesc, int eflags)
779 {
780         CmdType         operation = queryDesc->operation;
781         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
782         Plan       *plan = plannedstmt->planTree;
783         List       *rangeTable = plannedstmt->rtable;
784         EState     *estate = queryDesc->estate;
785         PlanState  *planstate;
786         TupleDesc       tupType;
787         ListCell   *l;
788         int                     i;
789
790         /*
791          * Do permissions checks
792          */
793         ExecCheckRTPerms(rangeTable, true);
794
795         /*
796          * initialize the node's execution state
797          */
798         estate->es_range_table = rangeTable;
799         estate->es_plannedstmt = plannedstmt;
800
801         /*
802          * initialize result relation stuff, and open/lock the result rels.
803          *
804          * We must do this before initializing the plan tree, else we might try to
805          * do a lock upgrade if a result rel is also a source rel.
806          */
807         if (plannedstmt->resultRelations)
808         {
809                 List       *resultRelations = plannedstmt->resultRelations;
810                 int                     numResultRelations = list_length(resultRelations);
811                 ResultRelInfo *resultRelInfos;
812                 ResultRelInfo *resultRelInfo;
813
814                 resultRelInfos = (ResultRelInfo *)
815                         palloc(numResultRelations * sizeof(ResultRelInfo));
816                 resultRelInfo = resultRelInfos;
817                 foreach(l, resultRelations)
818                 {
819                         Index           resultRelationIndex = lfirst_int(l);
820                         Oid                     resultRelationOid;
821                         Relation        resultRelation;
822
823                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
824                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
825                         InitResultRelInfo(resultRelInfo,
826                                                           resultRelation,
827                                                           resultRelationIndex,
828                                                           estate->es_instrument);
829                         resultRelInfo++;
830                 }
831                 estate->es_result_relations = resultRelInfos;
832                 estate->es_num_result_relations = numResultRelations;
833                 /* es_result_relation_info is NULL except when within ModifyTable */
834                 estate->es_result_relation_info = NULL;
835         }
836         else
837         {
838                 /*
839                  * if no result relation, then set state appropriately
840                  */
841                 estate->es_result_relations = NULL;
842                 estate->es_num_result_relations = 0;
843                 estate->es_result_relation_info = NULL;
844         }
845
846         /*
847          * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
848          * before we initialize the plan tree, else we'd be risking lock upgrades.
849          * While we are at it, build the ExecRowMark list.
850          */
851         estate->es_rowMarks = NIL;
852         foreach(l, plannedstmt->rowMarks)
853         {
854                 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
855                 Oid                     relid;
856                 Relation        relation;
857                 ExecRowMark *erm;
858
859                 /* ignore "parent" rowmarks; they are irrelevant at runtime */
860                 if (rc->isParent)
861                         continue;
862
863                 /* get relation's OID (will produce InvalidOid if subquery) */
864                 relid = getrelid(rc->rti, rangeTable);
865
866                 /*
867                  * If you change the conditions under which rel locks are acquired
868                  * here, be sure to adjust ExecOpenScanRelation to match.
869                  */
870                 switch (rc->markType)
871                 {
872                         case ROW_MARK_EXCLUSIVE:
873                         case ROW_MARK_NOKEYEXCLUSIVE:
874                         case ROW_MARK_SHARE:
875                         case ROW_MARK_KEYSHARE:
876                                 relation = heap_open(relid, RowShareLock);
877                                 break;
878                         case ROW_MARK_REFERENCE:
879                                 relation = heap_open(relid, AccessShareLock);
880                                 break;
881                         case ROW_MARK_COPY:
882                                 /* no physical table access is required */
883                                 relation = NULL;
884                                 break;
885                         default:
886                                 elog(ERROR, "unrecognized markType: %d", rc->markType);
887                                 relation = NULL;        /* keep compiler quiet */
888                                 break;
889                 }
890
891                 /* Check that relation is a legal target for marking */
892                 if (relation)
893                         CheckValidRowMarkRel(relation, rc->markType);
894
895                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
896                 erm->relation = relation;
897                 erm->relid = relid;
898                 erm->rti = rc->rti;
899                 erm->prti = rc->prti;
900                 erm->rowmarkId = rc->rowmarkId;
901                 erm->markType = rc->markType;
902                 erm->strength = rc->strength;
903                 erm->waitPolicy = rc->waitPolicy;
904                 erm->ermActive = false;
905                 ItemPointerSetInvalid(&(erm->curCtid));
906                 erm->ermExtra = NULL;
907                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
908         }
909
910         /*
911          * Initialize the executor's tuple table to empty.
912          */
913         estate->es_tupleTable = NIL;
914         estate->es_trig_tuple_slot = NULL;
915         estate->es_trig_oldtup_slot = NULL;
916         estate->es_trig_newtup_slot = NULL;
917
918         /* mark EvalPlanQual not active */
919         estate->es_epqTuple = NULL;
920         estate->es_epqTupleSet = NULL;
921         estate->es_epqScanDone = NULL;
922
923         /*
924          * Initialize private state information for each SubPlan.  We must do this
925          * before running ExecInitNode on the main query tree, since
926          * ExecInitSubPlan expects to be able to find these entries.
927          */
928         Assert(estate->es_subplanstates == NIL);
929         i = 1;                                          /* subplan indices count from 1 */
930         foreach(l, plannedstmt->subplans)
931         {
932                 Plan       *subplan = (Plan *) lfirst(l);
933                 PlanState  *subplanstate;
934                 int                     sp_eflags;
935
936                 /*
937                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
938                  * it is a parameterless subplan (not initplan), we suggest that it be
939                  * prepared to handle REWIND efficiently; otherwise there is no need.
940                  */
941                 sp_eflags = eflags
942                         & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
943                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
944                         sp_eflags |= EXEC_FLAG_REWIND;
945
946                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
947
948                 estate->es_subplanstates = lappend(estate->es_subplanstates,
949                                                                                    subplanstate);
950
951                 i++;
952         }
953
954         /*
955          * Initialize the private state information for all the nodes in the query
956          * tree.  This opens files, allocates storage and leaves us ready to start
957          * processing tuples.
958          */
959         planstate = ExecInitNode(plan, estate, eflags);
960
961         /*
962          * Get the tuple descriptor describing the type of tuples to return.
963          */
964         tupType = ExecGetResultType(planstate);
965
966         /*
967          * Initialize the junk filter if needed.  SELECT queries need a filter if
968          * there are any junk attrs in the top-level tlist.
969          */
970         if (operation == CMD_SELECT)
971         {
972                 bool            junk_filter_needed = false;
973                 ListCell   *tlist;
974
975                 foreach(tlist, plan->targetlist)
976                 {
977                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
978
979                         if (tle->resjunk)
980                         {
981                                 junk_filter_needed = true;
982                                 break;
983                         }
984                 }
985
986                 if (junk_filter_needed)
987                 {
988                         JunkFilter *j;
989
990                         j = ExecInitJunkFilter(planstate->plan->targetlist,
991                                                                    tupType->tdhasoid,
992                                                                    ExecInitExtraTupleSlot(estate));
993                         estate->es_junkFilter = j;
994
995                         /* Want to return the cleaned tuple type */
996                         tupType = j->jf_cleanTupType;
997                 }
998         }
999
1000         queryDesc->tupDesc = tupType;
1001         queryDesc->planstate = planstate;
1002 }
1003
1004 /*
1005  * Check that a proposed result relation is a legal target for the operation
1006  *
1007  * Generally the parser and/or planner should have noticed any such mistake
1008  * already, but let's make sure.
1009  *
1010  * Note: when changing this function, you probably also need to look at
1011  * CheckValidRowMarkRel.
1012  */
1013 void
1014 CheckValidResultRel(Relation resultRel, CmdType operation)
1015 {
1016         TriggerDesc *trigDesc = resultRel->trigdesc;
1017         FdwRoutine *fdwroutine;
1018
1019         switch (resultRel->rd_rel->relkind)
1020         {
1021                 case RELKIND_RELATION:
1022                         /* OK */
1023                         break;
1024                 case RELKIND_SEQUENCE:
1025                         ereport(ERROR,
1026                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1027                                          errmsg("cannot change sequence \"%s\"",
1028                                                         RelationGetRelationName(resultRel))));
1029                         break;
1030                 case RELKIND_TOASTVALUE:
1031                         ereport(ERROR,
1032                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1033                                          errmsg("cannot change TOAST relation \"%s\"",
1034                                                         RelationGetRelationName(resultRel))));
1035                         break;
1036                 case RELKIND_VIEW:
1037
1038                         /*
1039                          * Okay only if there's a suitable INSTEAD OF trigger.  Messages
1040                          * here should match rewriteHandler.c's rewriteTargetView, except
1041                          * that we omit errdetail because we haven't got the information
1042                          * handy (and given that we really shouldn't get here anyway, it's
1043                          * not worth great exertion to get).
1044                          */
1045                         switch (operation)
1046                         {
1047                                 case CMD_INSERT:
1048                                         if (!trigDesc || !trigDesc->trig_insert_instead_row)
1049                                                 ereport(ERROR,
1050                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1051                                                    errmsg("cannot insert into view \"%s\"",
1052                                                                   RelationGetRelationName(resultRel)),
1053                                                    errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1054                                         break;
1055                                 case CMD_UPDATE:
1056                                         if (!trigDesc || !trigDesc->trig_update_instead_row)
1057                                                 ereport(ERROR,
1058                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1059                                                    errmsg("cannot update view \"%s\"",
1060                                                                   RelationGetRelationName(resultRel)),
1061                                                    errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1062                                         break;
1063                                 case CMD_DELETE:
1064                                         if (!trigDesc || !trigDesc->trig_delete_instead_row)
1065                                                 ereport(ERROR,
1066                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1067                                                    errmsg("cannot delete from view \"%s\"",
1068                                                                   RelationGetRelationName(resultRel)),
1069                                                    errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1070                                         break;
1071                                 default:
1072                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1073                                         break;
1074                         }
1075                         break;
1076                 case RELKIND_MATVIEW:
1077                         if (!MatViewIncrementalMaintenanceIsEnabled())
1078                                 ereport(ERROR,
1079                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1080                                                  errmsg("cannot change materialized view \"%s\"",
1081                                                                 RelationGetRelationName(resultRel))));
1082                         break;
1083                 case RELKIND_FOREIGN_TABLE:
1084                         /* Okay only if the FDW supports it */
1085                         fdwroutine = GetFdwRoutineForRelation(resultRel, false);
1086                         switch (operation)
1087                         {
1088                                 case CMD_INSERT:
1089                                         if (fdwroutine->ExecForeignInsert == NULL)
1090                                                 ereport(ERROR,
1091                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1092                                                         errmsg("cannot insert into foreign table \"%s\"",
1093                                                                    RelationGetRelationName(resultRel))));
1094                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1095                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1096                                                 ereport(ERROR,
1097                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1098                                                 errmsg("foreign table \"%s\" does not allow inserts",
1099                                                            RelationGetRelationName(resultRel))));
1100                                         break;
1101                                 case CMD_UPDATE:
1102                                         if (fdwroutine->ExecForeignUpdate == NULL)
1103                                                 ereport(ERROR,
1104                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1105                                                                  errmsg("cannot update foreign table \"%s\"",
1106                                                                                 RelationGetRelationName(resultRel))));
1107                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1108                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1109                                                 ereport(ERROR,
1110                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1111                                                 errmsg("foreign table \"%s\" does not allow updates",
1112                                                            RelationGetRelationName(resultRel))));
1113                                         break;
1114                                 case CMD_DELETE:
1115                                         if (fdwroutine->ExecForeignDelete == NULL)
1116                                                 ereport(ERROR,
1117                                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1118                                                         errmsg("cannot delete from foreign table \"%s\"",
1119                                                                    RelationGetRelationName(resultRel))));
1120                                         if (fdwroutine->IsForeignRelUpdatable != NULL &&
1121                                                 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1122                                                 ereport(ERROR,
1123                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1124                                                 errmsg("foreign table \"%s\" does not allow deletes",
1125                                                            RelationGetRelationName(resultRel))));
1126                                         break;
1127                                 default:
1128                                         elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1129                                         break;
1130                         }
1131                         break;
1132                 default:
1133                         ereport(ERROR,
1134                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1135                                          errmsg("cannot change relation \"%s\"",
1136                                                         RelationGetRelationName(resultRel))));
1137                         break;
1138         }
1139 }
1140
1141 /*
1142  * Check that a proposed rowmark target relation is a legal target
1143  *
1144  * In most cases parser and/or planner should have noticed this already, but
1145  * they don't cover all cases.
1146  */
1147 static void
1148 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1149 {
1150         FdwRoutine *fdwroutine;
1151
1152         switch (rel->rd_rel->relkind)
1153         {
1154                 case RELKIND_RELATION:
1155                         /* OK */
1156                         break;
1157                 case RELKIND_SEQUENCE:
1158                         /* Must disallow this because we don't vacuum sequences */
1159                         ereport(ERROR,
1160                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1161                                          errmsg("cannot lock rows in sequence \"%s\"",
1162                                                         RelationGetRelationName(rel))));
1163                         break;
1164                 case RELKIND_TOASTVALUE:
1165                         /* We could allow this, but there seems no good reason to */
1166                         ereport(ERROR,
1167                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1168                                          errmsg("cannot lock rows in TOAST relation \"%s\"",
1169                                                         RelationGetRelationName(rel))));
1170                         break;
1171                 case RELKIND_VIEW:
1172                         /* Should not get here; planner should have expanded the view */
1173                         ereport(ERROR,
1174                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1175                                          errmsg("cannot lock rows in view \"%s\"",
1176                                                         RelationGetRelationName(rel))));
1177                         break;
1178                 case RELKIND_MATVIEW:
1179                         /* Allow referencing a matview, but not actual locking clauses */
1180                         if (markType != ROW_MARK_REFERENCE)
1181                                 ereport(ERROR,
1182                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1183                                            errmsg("cannot lock rows in materialized view \"%s\"",
1184                                                           RelationGetRelationName(rel))));
1185                         break;
1186                 case RELKIND_FOREIGN_TABLE:
1187                         /* Okay only if the FDW supports it */
1188                         fdwroutine = GetFdwRoutineForRelation(rel, false);
1189                         if (fdwroutine->RefetchForeignRow == NULL)
1190                                 ereport(ERROR,
1191                                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1192                                                  errmsg("cannot lock rows in foreign table \"%s\"",
1193                                                                 RelationGetRelationName(rel))));
1194                         break;
1195                 default:
1196                         ereport(ERROR,
1197                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1198                                          errmsg("cannot lock rows in relation \"%s\"",
1199                                                         RelationGetRelationName(rel))));
1200                         break;
1201         }
1202 }
1203
1204 /*
1205  * Initialize ResultRelInfo data for one result relation
1206  *
1207  * Caution: before Postgres 9.1, this function included the relkind checking
1208  * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1209  * appropriate.  Be sure callers cover those needs.
1210  */
1211 void
1212 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1213                                   Relation resultRelationDesc,
1214                                   Index resultRelationIndex,
1215                                   int instrument_options)
1216 {
1217         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1218         resultRelInfo->type = T_ResultRelInfo;
1219         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1220         resultRelInfo->ri_RelationDesc = resultRelationDesc;
1221         resultRelInfo->ri_NumIndices = 0;
1222         resultRelInfo->ri_IndexRelationDescs = NULL;
1223         resultRelInfo->ri_IndexRelationInfo = NULL;
1224         /* make a copy so as not to depend on relcache info not changing... */
1225         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1226         if (resultRelInfo->ri_TrigDesc)
1227         {
1228                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
1229
1230                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1231                         palloc0(n * sizeof(FmgrInfo));
1232                 resultRelInfo->ri_TrigWhenExprs = (List **)
1233                         palloc0(n * sizeof(List *));
1234                 if (instrument_options)
1235                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1236         }
1237         else
1238         {
1239                 resultRelInfo->ri_TrigFunctions = NULL;
1240                 resultRelInfo->ri_TrigWhenExprs = NULL;
1241                 resultRelInfo->ri_TrigInstrument = NULL;
1242         }
1243         if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1244                 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1245         else
1246                 resultRelInfo->ri_FdwRoutine = NULL;
1247         resultRelInfo->ri_FdwState = NULL;
1248         resultRelInfo->ri_usesFdwDirectModify = false;
1249         resultRelInfo->ri_ConstraintExprs = NULL;
1250         resultRelInfo->ri_junkFilter = NULL;
1251         resultRelInfo->ri_projectReturning = NULL;
1252 }
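/*
 * Hypothetical usage sketch (not part of the executor proper; the function
 * and variable names are invented): per the caution above, a caller that
 * needs index maintenance must pair InitResultRelInfo with ExecOpenIndices
 * itself, after opening and (if appropriate) relkind-checking the relation.
 */
#ifdef NOT_USED
static ResultRelInfo *
example_open_result_rel(EState *estate, Oid example_relid)
{
	Relation	rel = heap_open(example_relid, RowExclusiveLock);
	ResultRelInfo *rri = makeNode(ResultRelInfo);

	InitResultRelInfo(rri, rel, 1 /* rangetable index */, estate->es_instrument);
	/* relkind checks and index opening are the caller's job now */
	ExecOpenIndices(rri, false);

	return rri;
}
#endif   /* NOT_USED */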
1253
1254 /*
1255  *              ExecGetTriggerResultRel
1256  *
1257  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1258  * triggers are fired on one of the result relations of the query, and so
1259  * we can just return a member of the es_result_relations array.  (Note: in
1260  * self-join situations there might be multiple members with the same OID;
1261  * if so it doesn't matter which one we pick.)  However, it is sometimes
1262  * necessary to fire triggers on other relations; this happens mainly when an
1263  * RI update trigger queues additional triggers on other relations, which will
1264  * be processed in the context of the outer query.  For efficiency's sake,
1265  * we want to have a ResultRelInfo for those triggers too; that can avoid
1266  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
1267  * ANALYZE to report the runtimes of such triggers.)  So we make additional
1268  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1269  */
1270 ResultRelInfo *
1271 ExecGetTriggerResultRel(EState *estate, Oid relid)
1272 {
1273         ResultRelInfo *rInfo;
1274         int                     nr;
1275         ListCell   *l;
1276         Relation        rel;
1277         MemoryContext oldcontext;
1278
1279         /* First, search through the query result relations */
1280         rInfo = estate->es_result_relations;
1281         nr = estate->es_num_result_relations;
1282         while (nr > 0)
1283         {
1284                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1285                         return rInfo;
1286                 rInfo++;
1287                 nr--;
1288         }
1289         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1290         foreach(l, estate->es_trig_target_relations)
1291         {
1292                 rInfo = (ResultRelInfo *) lfirst(l);
1293                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1294                         return rInfo;
1295         }
1296         /* Nope, so we need a new one */
1297
1298         /*
1299          * Open the target relation's relcache entry.  We assume that an
1300          * appropriate lock is still held by the backend from whenever the trigger
1301          * event got queued, so we need take no new lock here.  Also, we need not
1302          * recheck the relkind, so no need for CheckValidResultRel.
1303          */
1304         rel = heap_open(relid, NoLock);
1305
1306         /*
1307          * Make the new entry in the right context.
1308          */
1309         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1310         rInfo = makeNode(ResultRelInfo);
1311         InitResultRelInfo(rInfo,
1312                                           rel,
1313                                           0,            /* dummy rangetable index */
1314                                           estate->es_instrument);
1315         estate->es_trig_target_relations =
1316                 lappend(estate->es_trig_target_relations, rInfo);
1317         MemoryContextSwitchTo(oldcontext);
1318
1319         /*
1320          * Currently, we don't need any index information in ResultRelInfos used
1321          * only for triggers, so no need to call ExecOpenIndices.
1322          */
1323
1324         return rInfo;
1325 }
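/*
 * Hypothetical usage sketch (invented helper name): trigger-firing code that
 * only knows the target table's OID can obtain, or lazily create, the
 * matching ResultRelInfo this way.
 */
#ifdef NOT_USED
static TriggerDesc *
example_trigdesc_for(EState *estate, Oid trig_relid)
{
	ResultRelInfo *rInfo = ExecGetTriggerResultRel(estate, trig_relid);

	return rInfo->ri_TrigDesc;
}
#endif   /* NOT_USED */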
1326
1327 /*
1328  *              ExecContextForcesOids
1329  *
1330  * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1331  * we need to ensure that result tuples have space for an OID iff they are
1332  * going to be stored into a relation that has OIDs.  In other contexts
1333  * we are free to choose whether to leave space for OIDs in result tuples
1334  * (we generally don't want to, but we do if a physical-tlist optimization
1335  * is possible).  This routine checks the plan context and returns TRUE if the
1336  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1337  * *hasoids is set to the required value.
1338  *
1339  * One reason this is ugly is that all plan nodes in the plan tree will emit
1340  * tuples with space for an OID, though we really only need the topmost node
1341  * to do so.  However, node types like Sort don't project new tuples but just
1342  * return their inputs, and in those cases the requirement propagates down
1343  * to the input node.  Eventually we might make this code smart enough to
1344  * recognize how far down the requirement really goes, but for now we just
1345  * make all plan nodes do the same thing if the top level forces the choice.
1346  *
1347  * We assume that if we are generating tuples for INSERT or UPDATE,
1348  * estate->es_result_relation_info is already set up to describe the target
1349  * relation.  Note that in an UPDATE that spans an inheritance tree, some of
1350  * the target relations may have OIDs and some not.  We have to make the
1351  * decisions on a per-relation basis as we initialize each of the subplans of
1352  * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1353  * while initializing each subplan.
1354  *
1355  * CREATE TABLE AS is even uglier, because we don't have the target relation's
1356  * descriptor available when this code runs; we have to look aside at the
1357  * flags passed to ExecutorStart().
1358  */
1359 bool
1360 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1361 {
1362         ResultRelInfo *ri = planstate->state->es_result_relation_info;
1363
1364         if (ri != NULL)
1365         {
1366                 Relation        rel = ri->ri_RelationDesc;
1367
1368                 if (rel != NULL)
1369                 {
1370                         *hasoids = rel->rd_rel->relhasoids;
1371                         return true;
1372                 }
1373         }
1374
1375         if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1376         {
1377                 *hasoids = true;
1378                 return true;
1379         }
1380         if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1381         {
1382                 *hasoids = false;
1383                 return true;
1384         }
1385
1386         return false;
1387 }
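/*
 * Hypothetical usage sketch (invented helper name): a plan node deciding
 * whether its result tuple descriptor needs room for an OID.  When the
 * context does not force the choice, this sketch defaults to "no OIDs".
 */
#ifdef NOT_USED
static bool
example_choose_hasoids(PlanState *planstate)
{
	bool		hasoids;

	if (ExecContextForcesOids(planstate, &hasoids))
		return hasoids;			/* forced by INSERT/UPDATE/CTAS context */

	return false;				/* free choice: don't reserve OID space */
}
#endif   /* NOT_USED */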
1388
1389 /* ----------------------------------------------------------------
1390  *              ExecPostprocessPlan
1391  *
1392  *              Give plan nodes a final chance to execute before shutdown
1393  * ----------------------------------------------------------------
1394  */
1395 static void
1396 ExecPostprocessPlan(EState *estate)
1397 {
1398         ListCell   *lc;
1399
1400         /*
1401          * Make sure nodes run forward.
1402          */
1403         estate->es_direction = ForwardScanDirection;
1404
1405         /*
1406          * Run any secondary ModifyTable nodes to completion, in case the main
1407          * query did not fetch all rows from them.  (We do this to ensure that
1408          * such nodes have predictable results.)
1409          */
1410         foreach(lc, estate->es_auxmodifytables)
1411         {
1412                 PlanState  *ps = (PlanState *) lfirst(lc);
1413
1414                 for (;;)
1415                 {
1416                         TupleTableSlot *slot;
1417
1418                         /* Reset the per-output-tuple exprcontext each time */
1419                         ResetPerTupleExprContext(estate);
1420
1421                         slot = ExecProcNode(ps);
1422
1423                         if (TupIsNull(slot))
1424                                 break;
1425                 }
1426         }
1427 }
1428
1429 /* ----------------------------------------------------------------
1430  *              ExecEndPlan
1431  *
1432  *              Cleans up the query plan -- closes files and frees up storage
1433  *
1434  * NOTE: we are no longer very worried about freeing storage per se
1435  * in this code; FreeExecutorState should be guaranteed to release all
1436  * memory that needs to be released.  What we are worried about doing
1437  * is closing relations and dropping buffer pins.  Thus, for example,
1438  * tuple tables must be cleared or dropped to ensure pins are released.
1439  * ----------------------------------------------------------------
1440  */
1441 static void
1442 ExecEndPlan(PlanState *planstate, EState *estate)
1443 {
1444         ResultRelInfo *resultRelInfo;
1445         int                     i;
1446         ListCell   *l;
1447
1448         /*
1449          * shut down the node-type-specific query processing
1450          */
1451         ExecEndNode(planstate);
1452
1453         /*
1454          * for subplans too
1455          */
1456         foreach(l, estate->es_subplanstates)
1457         {
1458                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1459
1460                 ExecEndNode(subplanstate);
1461         }
1462
1463         /*
1464          * destroy the executor's tuple table.  Actually we only care about
1465          * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1466          * the TupleTableSlots, since the containing memory context is about to go
1467          * away anyway.
1468          */
1469         ExecResetTupleTable(estate->es_tupleTable, false);
1470
1471         /*
1472          * close the result relation(s) if any, but hold locks until xact commit.
1473          */
1474         resultRelInfo = estate->es_result_relations;
1475         for (i = estate->es_num_result_relations; i > 0; i--)
1476         {
1477                 /* Close indices and then the relation itself */
1478                 ExecCloseIndices(resultRelInfo);
1479                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1480                 resultRelInfo++;
1481         }
1482
1483         /*
1484          * likewise close any trigger target relations
1485          */
1486         foreach(l, estate->es_trig_target_relations)
1487         {
1488                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1489                 /* Close indices and then the relation itself */
1490                 ExecCloseIndices(resultRelInfo);
1491                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1492         }
1493
1494         /*
1495          * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1496          * locks
1497          */
1498         foreach(l, estate->es_rowMarks)
1499         {
1500                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1501
1502                 if (erm->relation)
1503                         heap_close(erm->relation, NoLock);
1504         }
1505 }
1506
1507 /* ----------------------------------------------------------------
1508  *              ExecutePlan
1509  *
1510  *              Processes the query plan until we have retrieved 'numberTuples' tuples,
1511  *              moving in the specified direction.
1512  *
1513  *              Runs to completion if numberTuples is 0
1514  *
1515  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1516  * user can see it
1517  * ----------------------------------------------------------------
1518  */
1519 static void
1520 ExecutePlan(EState *estate,
1521                         PlanState *planstate,
1522                         bool use_parallel_mode,
1523                         CmdType operation,
1524                         bool sendTuples,
1525                         uint64 numberTuples,
1526                         ScanDirection direction,
1527                         DestReceiver *dest)
1528 {
1529         TupleTableSlot *slot;
1530         uint64          current_tuple_count;
1531
1532         /*
1533          * initialize local variables
1534          */
1535         current_tuple_count = 0;
1536
1537         /*
1538          * Set the direction.
1539          */
1540         estate->es_direction = direction;
1541
1542         /*
1543          * If a tuple count was supplied, we must run without parallelism because
1544          * we might exit early.  Writing into a relation also disables parallelism,
1545          * since no database changes are allowed in parallel mode.
1546          */
1547         if (numberTuples || dest->mydest == DestIntoRel)
1548                 use_parallel_mode = false;
1549
1550         /*
1551          * If parallelism is still allowed, enter parallel mode before running
1552          * the plan.
1553          */
1554         if (use_parallel_mode)
1555                 EnterParallelMode();
1556
1557         /*
1558          * Loop until we've processed the proper number of tuples from the plan.
1559          */
1560         for (;;)
1561         {
1562                 /* Reset the per-output-tuple exprcontext */
1563                 ResetPerTupleExprContext(estate);
1564
1565                 /*
1566                  * Execute the plan and obtain a tuple
1567                  */
1568                 slot = ExecProcNode(planstate);
1569
1570                 /*
1571                  * if the tuple is null, then we assume there is nothing more to
1572                  * process so we just end the loop...
1573                  */
1574                 if (TupIsNull(slot))
1575                 {
1576                         /* Allow nodes to release or shut down resources. */
1577                         (void) ExecShutdownNode(planstate);
1578                         break;
1579                 }
1580
1581                 /*
1582                  * If we have a junk filter, then project a new tuple with the junk
1583                  * removed.
1584                  *
1585                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1586                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1587                  * because that tuple slot has the wrong descriptor.)
1588                  */
1589                 if (estate->es_junkFilter != NULL)
1590                         slot = ExecFilterJunk(estate->es_junkFilter, slot);
1591
1592                 /*
1593                  * If we are supposed to send the tuple somewhere, do so. (In
1594                  * practice, this is probably always the case at this point.)
1595                  */
1596                 if (sendTuples)
1597                 {
1598                         /*
1599                          * If we are not able to send the tuple, we assume the destination
1600                          * has closed and no more tuples can be sent. If that's the case,
1601                          * end the loop.
1602                          */
1603                         if (!((*dest->receiveSlot) (slot, dest)))
1604                                 break;
1605                 }
1606
1607                 /*
1608                  * Count tuples processed, if this is a SELECT.  (For other operation
1609                  * types, the ModifyTable plan node must count the appropriate
1610                  * events.)
1611                  */
1612                 if (operation == CMD_SELECT)
1613                         (estate->es_processed)++;
1614
1615                 /*
1616                  * Check our tuple count.  If we've processed the proper number then
1617                  * quit, else loop again and process more tuples.  Zero numberTuples
1618                  * means no limit.
1619                  */
1620                 current_tuple_count++;
1621                 if (numberTuples && numberTuples == current_tuple_count)
1622                         break;
1623         }
1624
1625         if (use_parallel_mode)
1626                 ExitParallelMode();
1627 }
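/*
 * Hypothetical usage sketch (invented helper name, queryDesc assumed to have
 * been created and started elsewhere): callers normally reach this logic via
 * ExecutorRun, whose count argument maps onto numberTuples above, with zero
 * meaning "run to completion".
 */
#ifdef NOT_USED
static void
example_run_first_n(QueryDesc *queryDesc, uint64 n)
{
	ExecutorRun(queryDesc, ForwardScanDirection, n);
	ExecutorFinish(queryDesc);
	ExecutorEnd(queryDesc);
}
#endif   /* NOT_USED */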
1628
1629
1630 /*
1631  * ExecRelCheck --- check that tuple meets constraints for result relation
1632  *
1633  * Returns NULL if OK, else name of failed check constraint
1634  */
1635 static const char *
1636 ExecRelCheck(ResultRelInfo *resultRelInfo,
1637                          TupleTableSlot *slot, EState *estate)
1638 {
1639         Relation        rel = resultRelInfo->ri_RelationDesc;
1640         int                     ncheck = rel->rd_att->constr->num_check;
1641         ConstrCheck *check = rel->rd_att->constr->check;
1642         ExprContext *econtext;
1643         MemoryContext oldContext;
1644         List       *qual;
1645         int                     i;
1646
1647         /*
1648          * If first time through for this result relation, build expression
1649          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1650          * memory context so they'll survive throughout the query.
1651          */
1652         if (resultRelInfo->ri_ConstraintExprs == NULL)
1653         {
1654                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1655                 resultRelInfo->ri_ConstraintExprs =
1656                         (List **) palloc(ncheck * sizeof(List *));
1657                 for (i = 0; i < ncheck; i++)
1658                 {
1659                         /* ExecQual wants implicit-AND form */
1660                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1661                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1662                                 ExecPrepareExpr((Expr *) qual, estate);
1663                 }
1664                 MemoryContextSwitchTo(oldContext);
1665         }
1666
1667         /*
1668          * We will use the EState's per-tuple context for evaluating constraint
1669          * expressions (creating it if it's not already there).
1670          */
1671         econtext = GetPerTupleExprContext(estate);
1672
1673         /* Arrange for econtext's scan tuple to be the tuple under test */
1674         econtext->ecxt_scantuple = slot;
1675
1676         /* And evaluate the constraints */
1677         for (i = 0; i < ncheck; i++)
1678         {
1679                 qual = resultRelInfo->ri_ConstraintExprs[i];
1680
1681                 /*
1682                  * NOTE: SQL specifies that a NULL result from a constraint expression
1683                  * is not to be treated as a failure.  Therefore, tell ExecQual to
1684                  * return TRUE for NULL.
1685                  */
1686                 if (!ExecQual(qual, econtext, true))
1687                         return check[i].ccname;
1688         }
1689
1690         /* NULL result means no error */
1691         return NULL;
1692 }
1693
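/*
 * ExecConstraints - check that a tuple satisfies the NOT NULL and CHECK
 * constraints of the result relation, ereporting an error on the first
 * violation found.
 *
 * The relation is expected to have at least one such constraint; see the
 * Assert below.
 */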
1694 void
1695 ExecConstraints(ResultRelInfo *resultRelInfo,
1696                                 TupleTableSlot *slot, EState *estate)
1697 {
1698         Relation        rel = resultRelInfo->ri_RelationDesc;
1699         TupleDesc       tupdesc = RelationGetDescr(rel);
1700         TupleConstr *constr = tupdesc->constr;
1701         Bitmapset  *modifiedCols;
1702         Bitmapset  *insertedCols;
1703         Bitmapset  *updatedCols;
1704
1705         Assert(constr);
1706
1707         if (constr->has_not_null)
1708         {
1709                 int                     natts = tupdesc->natts;
1710                 int                     attrChk;
1711
1712                 for (attrChk = 1; attrChk <= natts; attrChk++)
1713                 {
1714                         if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1715                                 slot_attisnull(slot, attrChk))
1716                         {
1717                                 char       *val_desc;
1718
1719                                 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1720                                 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1721                                 modifiedCols = bms_union(insertedCols, updatedCols);
1722                                 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1723                                                                                                                  slot,
1724                                                                                                                  tupdesc,
1725                                                                                                                  modifiedCols,
1726                                                                                                                  64);
1727
1728                                 ereport(ERROR,
1729                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1730                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1731                                                           NameStr(tupdesc->attrs[attrChk - 1]->attname)),
1732                                                  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1733                                                  errtablecol(rel, attrChk)));
1734                         }
1735                 }
1736         }
1737
1738         if (constr->num_check > 0)
1739         {
1740                 const char *failed;
1741
1742                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1743                 {
1744                         char       *val_desc;
1745
1746                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
1747                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1748                         modifiedCols = bms_union(insertedCols, updatedCols);
1749                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1750                                                                                                          slot,
1751                                                                                                          tupdesc,
1752                                                                                                          modifiedCols,
1753                                                                                                          64);
1754                         ereport(ERROR,
1755                                         (errcode(ERRCODE_CHECK_VIOLATION),
1756                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1757                                                         RelationGetRelationName(rel), failed),
1758                           val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1759                                          errtableconstraint(rel, failed)));
1760                 }
1761         }
1762 }
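/*
 * Hypothetical usage sketch (invented helper name): DML code typically
 * guards the call on the relation actually having constraints, which is
 * what the Assert above relies on.
 */
#ifdef NOT_USED
static void
example_check_constraints(ResultRelInfo *resultRelInfo,
						  TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;

	if (rel->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);
}
#endif   /* NOT_USED */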
1763
1764 /*
1765  * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
1766  * of the specified kind.
1767  *
1768  * Note that this needs to be called multiple times to ensure that all kinds of
1769  * WITH CHECK OPTIONs are handled (both those from views which have the WITH
1770  * CHECK OPTION set and from row level security policies).  See ExecInsert()
1771  * and ExecUpdate().
1772  */
1773 void
1774 ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
1775                                          TupleTableSlot *slot, EState *estate)
1776 {
1777         Relation        rel = resultRelInfo->ri_RelationDesc;
1778         TupleDesc       tupdesc = RelationGetDescr(rel);
1779         ExprContext *econtext;
1780         ListCell   *l1,
1781                            *l2;
1782
1783         /*
1784          * We will use the EState's per-tuple context for evaluating constraint
1785          * expressions (creating it if it's not already there).
1786          */
1787         econtext = GetPerTupleExprContext(estate);
1788
1789         /* Arrange for econtext's scan tuple to be the tuple under test */
1790         econtext->ecxt_scantuple = slot;
1791
1792         /* Check each of the constraints */
1793         forboth(l1, resultRelInfo->ri_WithCheckOptions,
1794                         l2, resultRelInfo->ri_WithCheckOptionExprs)
1795         {
1796                 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
1797                 ExprState  *wcoExpr = (ExprState *) lfirst(l2);
1798
1799                 /*
1800                  * Skip any WCOs which are not the kind we are looking for at this
1801                  * time.
1802                  */
1803                 if (wco->kind != kind)
1804                         continue;
1805
1806                 /*
1807                  * WITH CHECK OPTION checks are intended to ensure that the new tuple
1808                  * is visible (in the case of a view) or that it passes the
1809                  * 'with-check' policy (in the case of row security). If the qual
1810                  * evaluates to NULL or FALSE, then the new tuple won't be included in
1811                  * the view or doesn't pass the 'with-check' policy for the table.  We
1812                  * need ExecQual to return FALSE for NULL to handle the view case (the
1813                  * opposite of what we do above for CHECK constraints).
1814                  */
1815                 if (!ExecQual((List *) wcoExpr, econtext, false))
1816                 {
1817                         char       *val_desc;
1818                         Bitmapset  *modifiedCols;
1819                         Bitmapset  *insertedCols;
1820                         Bitmapset  *updatedCols;
1821
1822                         switch (wco->kind)
1823                         {
1824                                         /*
1825                                          * For WITH CHECK OPTIONs coming from views, we might be
1826                                          * able to provide the details on the row, depending on
1827                                          * the permissions on the relation (that is, if the user
1828                                          * could view it directly anyway).  For RLS violations, we
1829                                          * don't include the data since we don't know if the user
1830                                          * should be able to view the tuple, as that depends on
1831                                          * the USING policy.
1832                                          */
1833                                 case WCO_VIEW_CHECK:
1834                                         insertedCols = GetInsertedColumns(resultRelInfo, estate);
1835                                         updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1836                                         modifiedCols = bms_union(insertedCols, updatedCols);
1837                                         val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1838                                                                                                                          slot,
1839                                                                                                                          tupdesc,
1840                                                                                                                          modifiedCols,
1841                                                                                                                          64);
1842
1843                                         ereport(ERROR,
1844                                                         (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
1845                                           errmsg("new row violates check option for view \"%s\"",
1846                                                          wco->relname),
1847                                                          val_desc ? errdetail("Failing row contains %s.",
1848                                                                                                   val_desc) : 0));
1849                                         break;
1850                                 case WCO_RLS_INSERT_CHECK:
1851                                 case WCO_RLS_UPDATE_CHECK:
1852                                         if (wco->polname != NULL)
1853                                                 ereport(ERROR,
1854                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1855                                                                  errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
1856                                                                                 wco->polname, wco->relname)));
1857                                         else
1858                                                 ereport(ERROR,
1859                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1860                                                                  errmsg("new row violates row-level security policy for table \"%s\"",
1861                                                                                 wco->relname)));
1862                                         break;
1863                                 case WCO_RLS_CONFLICT_CHECK:
1864                                         if (wco->polname != NULL)
1865                                                 ereport(ERROR,
1866                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1867                                                                  errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
1868                                                                                 wco->polname, wco->relname)));
1869                                         else
1870                                                 ereport(ERROR,
1871                                                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1872                                                                  errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
1873                                                                                 wco->relname)));
1874                                         break;
1875                                 default:
1876                                         elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
1877                                         break;
1878                         }
1879                 }
1880         }
1881 }
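/*
 * Hypothetical usage sketch (invented helper name): as noted above, an
 * INSERT-style caller invokes this once per applicable WCO kind, so that
 * row-security policies and view CHECK OPTIONs are reported separately.
 */
#ifdef NOT_USED
static void
example_check_wcos(ResultRelInfo *resultRelInfo,
				   TupleTableSlot *slot, EState *estate)
{
	/* row-level security "with check" policies for the new row... */
	ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, resultRelInfo, slot, estate);
	/* ...and the view's WITH CHECK OPTION, checked in a separate call */
	ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
}
#endif   /* NOT_USED */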
1882
1883 /*
1884  * ExecBuildSlotValueDescription -- construct a string representing a tuple
1885  *
1886  * This is intentionally very similar to BuildIndexValueDescription, but
1887  * unlike that function, we truncate long field values (to at most maxfieldlen
1888  * bytes).  That seems necessary here since heap field values could be very
1889  * long, whereas index entries typically aren't so wide.
1890  *
1891  * Also, unlike the case with index entries, we need to be prepared to ignore
1892  * dropped columns.  We used to use the slot's tuple descriptor to decode the
1893  * data, but the slot's descriptor doesn't identify dropped columns, so we
1894  * now need to be passed the relation's descriptor.
1895  *
1896  * Note that, like BuildIndexValueDescription, if the user does not have
1897  * permission to view any of the columns involved, a NULL is returned.  Unlike
1898  * BuildIndexValueDescription, if the user has access to view a subset of the
1899  * columns involved, that subset will be returned with a key identifying which
1900  * columns they are.
1901  */
1902 static char *
1903 ExecBuildSlotValueDescription(Oid reloid,
1904                                                           TupleTableSlot *slot,
1905                                                           TupleDesc tupdesc,
1906                                                           Bitmapset *modifiedCols,
1907                                                           int maxfieldlen)
1908 {
1909         StringInfoData buf;
1910         StringInfoData collist;
1911         bool            write_comma = false;
1912         bool            write_comma_collist = false;
1913         int                     i;
1914         AclResult       aclresult;
1915         bool            table_perm = false;
1916         bool            any_perm = false;
1917
1918         /*
1919          * Check if RLS is enabled and should be active for the relation; if so,
1920          * then don't return anything.  Otherwise, go through normal permission
1921          * checks.
1922          */
1923         if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
1924                 return NULL;
1925
1926         initStringInfo(&buf);
1927
1928         appendStringInfoChar(&buf, '(');
1929
1930         /*
1931          * Check if the user has permissions to see the row.  Table-level SELECT
1932          * allows access to all columns.  If the user does not have table-level
1933          * SELECT then we check each column and include those the user has SELECT
1934          * rights on.  Additionally, we always include columns the user provided
1935          * data for.
1936          */
1937         aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
1938         if (aclresult != ACLCHECK_OK)
1939         {
1940                 /* Set up the buffer for the column list */
1941                 initStringInfo(&collist);
1942                 appendStringInfoChar(&collist, '(');
1943         }
1944         else
1945                 table_perm = any_perm = true;
1946
1947         /* Make sure the tuple is fully deconstructed */
1948         slot_getallattrs(slot);
1949
1950         for (i = 0; i < tupdesc->natts; i++)
1951         {
1952                 bool            column_perm = false;
1953                 char       *val;
1954                 int                     vallen;
1955
1956                 /* ignore dropped columns */
1957                 if (tupdesc->attrs[i]->attisdropped)
1958                         continue;
1959
1960                 if (!table_perm)
1961                 {
1962                         /*
1963                          * No table-level SELECT, so need to make sure they either have
1964                          * SELECT rights on the column or that they have provided the data
1965                          * for the column.  If not, omit this column from the error
1966                          * message.
1967                          */
1968                         aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
1969                                                                                           GetUserId(), ACL_SELECT);
1970                         if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
1971                                                           modifiedCols) || aclresult == ACLCHECK_OK)
1972                         {
1973                                 column_perm = any_perm = true;
1974
1975                                 if (write_comma_collist)
1976                                         appendStringInfoString(&collist, ", ");
1977                                 else
1978                                         write_comma_collist = true;
1979
1980                                 appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
1981                         }
1982                 }
1983
1984                 if (table_perm || column_perm)
1985                 {
1986                         if (slot->tts_isnull[i])
1987                                 val = "null";
1988                         else
1989                         {
1990                                 Oid                     foutoid;
1991                                 bool            typisvarlena;
1992
1993                                 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
1994                                                                   &foutoid, &typisvarlena);
1995                                 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
1996                         }
1997
1998                         if (write_comma)
1999                                 appendStringInfoString(&buf, ", ");
2000                         else
2001                                 write_comma = true;
2002
2003                         /* truncate if needed */
2004                         vallen = strlen(val);
2005                         if (vallen <= maxfieldlen)
2006                                 appendStringInfoString(&buf, val);
2007                         else
2008                         {
2009                                 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2010                                 appendBinaryStringInfo(&buf, val, vallen);
2011                                 appendStringInfoString(&buf, "...");
2012                         }
2013                 }
2014         }
2015
2016         /* If we end up with zero columns being returned, then return NULL. */
2017         if (!any_perm)
2018                 return NULL;
2019
2020         appendStringInfoChar(&buf, ')');
2021
2022         if (!table_perm)
2023         {
2024                 appendStringInfoString(&collist, ") = ");
2025                 appendStringInfoString(&collist, buf.data);
2026
2027                 return collist.data;
2028         }
2029
2030         return buf.data;
2031 }
2032
2033
2034 /*
2035  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2036  * given ResultRelInfo
2037  */
2038 LockTupleMode
2039 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2040 {
2041         Bitmapset  *keyCols;
2042         Bitmapset  *updatedCols;
2043
2044         /*
2045          * Compute lock mode to use.  If columns that are part of the key have not
2046          * been modified, then we can use a weaker lock, allowing for better
2047          * concurrency.
2048          */
2049         updatedCols = GetUpdatedColumns(relinfo, estate);
2050         keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2051                                                                                  INDEX_ATTR_BITMAP_KEY);
2052
2053         if (bms_overlap(keyCols, updatedCols))
2054                 return LockTupleExclusive;
2055
2056         return LockTupleNoKeyExclusive;
2057 }
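/*
 * Hypothetical usage sketch (invented helper name): an UPDATE path picks the
 * tuple lock strength this way before locking or rechecking the row.
 */
#ifdef NOT_USED
static LockTupleMode
example_choose_lockmode(EState *estate, ResultRelInfo *resultRelInfo)
{
	/* a weaker lock is possible when no key columns are being updated */
	return ExecUpdateLockMode(estate, resultRelInfo);
}
#endif   /* NOT_USED */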
2058
2059 /*
2060  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2061  *
2062  * If no such struct, either return NULL or throw error depending on missing_ok
2063  */
2064 ExecRowMark *
2065 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2066 {
2067         ListCell   *lc;
2068
2069         foreach(lc, estate->es_rowMarks)
2070         {
2071                 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2072
2073                 if (erm->rti == rti)
2074                         return erm;
2075         }
2076         if (!missing_ok)
2077                 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2078         return NULL;
2079 }
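/*
 * Hypothetical usage sketch (invented helper name): with missing_ok = true
 * the caller treats the absence of a rowmark as "nothing to lock" rather
 * than an error.
 */
#ifdef NOT_USED
static bool
example_rte_needs_locking(EState *estate, Index rti)
{
	return ExecFindRowMark(estate, rti, true) != NULL;
}
#endif   /* NOT_USED */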
2080
2081 /*
2082  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2083  *
2084  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2085  * input plan node (not planstate node!).  We need the latter to find out
2086  * the column numbers of the resjunk columns.
2087  */
2088 ExecAuxRowMark *
2089 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2090 {
2091         ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2092         char            resname[32];
2093
2094         aerm->rowmark = erm;
2095
2096         /* Look up the resjunk columns associated with this rowmark */
2097         if (erm->markType != ROW_MARK_COPY)
2098         {
2099                 /* need ctid for all methods other than COPY */
2100                 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2101                 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2102                                                                                                            resname);
2103                 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2104                         elog(ERROR, "could not find junk %s column", resname);
2105         }
2106         else
2107         {
2108                 /* need wholerow if COPY */
2109                 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2110                 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2111                                                                                                                 resname);
2112                 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2113                         elog(ERROR, "could not find junk %s column", resname);
2114         }
2115
2116         /* if child rel, need tableoid */
2117         if (erm->rti != erm->prti)
2118         {
2119                 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2120                 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2121                                                                                                            resname);
2122                 if (!AttributeNumberIsValid(aerm->toidAttNo))
2123                         elog(ERROR, "could not find junk %s column", resname);
2124         }
2125
2126         return aerm;
2127 }
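/*
 * Hypothetical usage sketch (invented helper name): node initialization code
 * typically pairs the two functions above, resolving each PlanRowMark to its
 * ExecRowMark and then building an ExecAuxRowMark from the subplan's
 * targetlist.
 */
#ifdef NOT_USED
static List *
example_build_aux_rowmarks(EState *estate, List *planRowMarks, Plan *subplan)
{
	List	   *aux = NIL;
	ListCell   *lc;

	foreach(lc, planRowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
		ExecRowMark *erm = ExecFindRowMark(estate, rc->rti, false);

		aux = lappend(aux, ExecBuildAuxRowMark(erm, subplan->targetlist));
	}

	return aux;
}
#endif   /* NOT_USED */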
2128
2129
2130 /*
2131  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2132  * process the updated version under READ COMMITTED rules.
2133  *
2134  * See backend/executor/README for some info about how this works.
2135  */
2136
2137
2138 /*
2139  * Check a modified tuple to see if we want to process its updated version
2140  * under READ COMMITTED rules.
2141  *
2142  *      estate - outer executor state data
2143  *      epqstate - state for EvalPlanQual rechecking
2144  *      relation - table containing tuple
2145  *      rti - rangetable index of table containing tuple
2146  *      lockmode - requested tuple lock mode
2147  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2148  *      priorXmax - t_xmax from the outdated tuple
2149  *
2150  * *tid is also an output parameter: it's modified to hold the TID of the
2151  * latest version of the tuple (note this may be changed even on failure)
2152  *
2153  * Returns a slot containing the new candidate update/delete tuple, or
2154  * NULL if we determine we shouldn't process the row.
2155  *
2156  * Note: properly, lockmode should be declared as enum LockTupleMode,
2157  * but we use "int" to avoid having to include heapam.h in executor.h.
2158  */
2159 TupleTableSlot *
2160 EvalPlanQual(EState *estate, EPQState *epqstate,
2161                          Relation relation, Index rti, int lockmode,
2162                          ItemPointer tid, TransactionId priorXmax)
2163 {
2164         TupleTableSlot *slot;
2165         HeapTuple       copyTuple;
2166
2167         Assert(rti > 0);
2168
2169         /*
2170          * Get and lock the updated version of the row; if fail, return NULL.
2171          */
2172         copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2173                                                                   tid, priorXmax);
2174
2175         if (copyTuple == NULL)
2176                 return NULL;
2177
2178         /*
2179          * For UPDATE/DELETE we have to return the tid of the actual row we're
2180          * executing EPQ for.
2181          */
2182         *tid = copyTuple->t_self;
2183
2184         /*
2185          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
2186          */
2187         EvalPlanQualBegin(epqstate, estate);
2188
2189         /*
2190          * Free old test tuple, if any, and store new tuple where relation's scan
2191          * node will see it
2192          */
2193         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2194
2195         /*
2196          * Fetch any non-locked source rows
2197          */
2198         EvalPlanQualFetchRowMarks(epqstate);
2199
2200         /*
2201          * Run the EPQ query.  We assume it will return at most one tuple.
2202          */
2203         slot = EvalPlanQualNext(epqstate);
2204
2205         /*
2206          * If we got a tuple, force the slot to materialize the tuple so that it
2207          * is not dependent on any local state in the EPQ query (in particular,
2208          * it's highly likely that the slot contains references to any pass-by-ref
2209          * datums that may be present in copyTuple).  As with the next step, this
2210          * is to guard against early re-use of the EPQ query.
2211          */
2212         if (!TupIsNull(slot))
2213                 (void) ExecMaterializeSlot(slot);
2214
2215         /*
2216          * Clear out the test tuple.  This is needed in case the EPQ query is
2217          * re-used to test a tuple for a different relation.  (Not clear that can
2218          * really happen, but let's be safe.)
2219          */
2220         EvalPlanQualSetTuple(epqstate, rti, NULL);
2221
2222         return slot;
2223 }
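/*
 * Hypothetical usage sketch (invented helper name): after heap_update or
 * heap_delete reports HeapTupleUpdated under READ COMMITTED, the caller
 * rechecks the newest row version roughly like this, using the ctid/xmax
 * reported in the HeapUpdateFailureData.  The lock mode shown is just an
 * example.
 */
#ifdef NOT_USED
static bool
example_epq_recheck(EState *estate, EPQState *epqstate, Relation relation,
					Index rti, HeapUpdateFailureData *hufd)
{
	TupleTableSlot *epqslot;

	epqslot = EvalPlanQual(estate, epqstate, relation, rti,
						   LockTupleExclusive, &hufd->ctid, hufd->xmax);

	/* a non-null slot means the latest version still passes the quals */
	return !TupIsNull(epqslot);
}
#endif   /* NOT_USED */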
2224
2225 /*
2226  * Fetch a copy of the newest version of an outdated tuple
2227  *
2228  *      estate - executor state data
2229  *      relation - table containing tuple
2230  *      lockmode - requested tuple lock mode
2231  *      wait_policy - requested lock wait policy
2232  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
2233  *      priorXmax - t_xmax from the outdated tuple
2234  *
2235  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2236  * that there is no newest version (ie, the row was deleted, not updated).
2237  * We also return NULL if the tuple is locked and the wait policy is to skip
2238  * such tuples.
2239  *
2240  * If successful, we have locked the newest tuple version, so caller does not
2241  * need to worry about it changing anymore.
2242  *
2243  * Note: properly, lockmode should be declared as enum LockTupleMode,
2244  * but we use "int" to avoid having to include heapam.h in executor.h.
2245  */
2246 HeapTuple
2247 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2248                                   LockWaitPolicy wait_policy,
2249                                   ItemPointer tid, TransactionId priorXmax)
2250 {
2251         HeapTuple       copyTuple = NULL;
2252         HeapTupleData tuple;
2253         SnapshotData SnapshotDirty;
2254
2255         /*
2256          * fetch target tuple
2257          *
2258          * Loop here to deal with updated or busy tuples
2259          */
2260         InitDirtySnapshot(SnapshotDirty);
2261         tuple.t_self = *tid;
2262         for (;;)
2263         {
2264                 Buffer          buffer;
2265
2266                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2267                 {
2268                         HTSU_Result test;
2269                         HeapUpdateFailureData hufd;
2270
2271                         /*
2272                          * If xmin isn't what we're expecting, the slot must have been
2273                          * recycled and reused for an unrelated tuple.  This implies that
2274                          * the latest version of the row was deleted, so we need do
2275                          * nothing.  (Should be safe to examine xmin without getting
2276                          * buffer's content lock.  We assume reading a TransactionId to be
2277                          * atomic, and Xmin never changes in an existing tuple, except to
2278                          * invalid or frozen, and neither of those can match priorXmax.)
2279                          */
2280                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2281                                                                          priorXmax))
2282                         {
2283                                 ReleaseBuffer(buffer);
2284                                 return NULL;
2285                         }
2286
2287                         /* otherwise xmin should not be dirty... */
2288                         if (TransactionIdIsValid(SnapshotDirty.xmin))
2289                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2290
2291                         /*
2292                          * If tuple is being updated by other transaction then we have to
2293                          * wait for its commit/abort, or die trying.
2294                          */
2295                         if (TransactionIdIsValid(SnapshotDirty.xmax))
2296                         {
2297                                 ReleaseBuffer(buffer);
2298                                 switch (wait_policy)
2299                                 {
2300                                         case LockWaitBlock:
2301                                                 XactLockTableWait(SnapshotDirty.xmax,
2302                                                                                   relation, &tuple.t_self,
2303                                                                                   XLTW_FetchUpdated);
2304                                                 break;
2305                                         case LockWaitSkip:
2306                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2307                                                         return NULL;            /* skip instead of waiting */
2308                                                 break;
2309                                         case LockWaitError:
2310                                                 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2311                                                         ereport(ERROR,
2312                                                                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2313                                                                          errmsg("could not obtain lock on row in relation \"%s\"",
2314                                                                                 RelationGetRelationName(relation))));
2315                                                 break;
2316                                 }
2317                                 continue;               /* loop back to repeat heap_fetch */
2318                         }
2319
2320                         /*
2321                          * If tuple was inserted by our own transaction, we have to check
2322                          * cmin against es_output_cid: cmin >= current CID means our
2323                          * command cannot see the tuple, so we should ignore it. Otherwise
2324                          * heap_lock_tuple() will throw an error, and so would any later
2325                          * attempt to update or delete the tuple.  (We need not check cmax
2326                          * because HeapTupleSatisfiesDirty will consider a tuple deleted
2327                          * by our transaction dead, regardless of cmax.) We just checked
2328                          * that priorXmax == xmin, so we can test that variable instead of
2329                          * doing HeapTupleHeaderGetXmin again.
2330                          */
2331                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2332                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2333                         {
2334                                 ReleaseBuffer(buffer);
2335                                 return NULL;
2336                         }
2337
2338                         /*
2339                          * This is a live tuple, so now try to lock it.
2340                          */
2341                         test = heap_lock_tuple(relation, &tuple,
2342                                                                    estate->es_output_cid,
2343                                                                    lockmode, wait_policy,
2344                                                                    false, &buffer, &hufd);
2345                         /* We now have two pins on the buffer, get rid of one */
2346                         ReleaseBuffer(buffer);
2347
2348                         switch (test)
2349                         {
2350                                 case HeapTupleSelfUpdated:
2351
2352                                         /*
2353                                          * The target tuple was already updated or deleted by the
2354                                          * current command, or by a later command in the current
2355                                          * transaction.  We *must* ignore the tuple in the former
2356                                          * case, so as to avoid the "Halloween problem" of
2357                                          * repeated update attempts.  In the latter case it might
2358                                          * be sensible to fetch the updated tuple instead, but
2359                                          * doing so would require changing heap_update and
2360                                          * heap_delete to not complain about updating "invisible"
2361                                          * tuples, which seems pretty scary (heap_lock_tuple will
2362                                          * not complain, but few callers expect
2363                                          * HeapTupleInvisible, and we're not one of them).  So for
2364                                          * now, treat the tuple as deleted and do not process.
2365                                          */
2366                                         ReleaseBuffer(buffer);
2367                                         return NULL;
2368
2369                                 case HeapTupleMayBeUpdated:
2370                                         /* successfully locked */
2371                                         break;
2372
2373                                 case HeapTupleUpdated:
2374                                         ReleaseBuffer(buffer);
2375                                         if (IsolationUsesXactSnapshot())
2376                                                 ereport(ERROR,
2377                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2378                                                                  errmsg("could not serialize access due to concurrent update")));
2379
2380                                         /* Should not encounter speculative tuple on recheck */
2381                                         Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2382                                         if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2383                                         {
2384                                                 /* it was updated, so look at the updated version */
2385                                                 tuple.t_self = hufd.ctid;
2386                                                 /* updated row should have xmin matching this xmax */
2387                                                 priorXmax = hufd.xmax;
2388                                                 continue;
2389                                         }
2390                                         /* tuple was deleted, so give up */
2391                                         return NULL;
2392
2393                                 case HeapTupleWouldBlock:
2394                                         ReleaseBuffer(buffer);
2395                                         return NULL;
2396
2397                                 case HeapTupleInvisible:
2398                                         elog(ERROR, "attempted to lock invisible tuple");
2399
2400                                 default:
2401                                         ReleaseBuffer(buffer);
2402                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2403                                                  test);
2404                                         return NULL;    /* keep compiler quiet */
2405                         }
2406
2407                         /*
2408                          * We got the tuple - now copy it for use by the recheck query.
2409                          */
2410                         copyTuple = heap_copytuple(&tuple);
2411                         ReleaseBuffer(buffer);
2412                         break;
2413                 }
2414
2415                 /*
2416                  * If the referenced slot was actually empty, the latest version of
2417                  * the row must have been deleted, so we need do nothing.
2418                  */
2419                 if (tuple.t_data == NULL)
2420                 {
2421                         ReleaseBuffer(buffer);
2422                         return NULL;
2423                 }
2424
2425                 /*
2426                  * As above, if xmin isn't what we're expecting, do nothing.
2427                  */
2428                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2429                                                                  priorXmax))
2430                 {
2431                         ReleaseBuffer(buffer);
2432                         return NULL;
2433                 }
2434
2435                 /*
2436                  * If we get here, the tuple was found but failed SnapshotDirty.
2437                  * Assuming the xmin is either a committed xact or our own xact (as it
2438                  * certainly should be if we're trying to modify the tuple), this must
2439                  * mean that the row was updated or deleted by either a committed xact
2440                  * or our own xact.  If it was deleted, we can ignore it; if it was
2441                  * updated then chain up to the next version and repeat the whole
2442                  * process.
2443                  *
2444                  * As above, it should be safe to examine xmax and t_ctid without the
2445                  * buffer content lock, because they can't be changing.
2446                  */
2447                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2448                 {
2449                         /* deleted, so forget about it */
2450                         ReleaseBuffer(buffer);
2451                         return NULL;
2452                 }
2453
2454                 /* updated, so look at the updated row */
2455                 tuple.t_self = tuple.t_data->t_ctid;
2456                 /* updated row should have xmin matching this xmax */
2457                 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2458                 ReleaseBuffer(buffer);
2459                 /* loop back to fetch next in chain */
2460         }
2461
2462         /*
2463          * Return the copied tuple
2464          */
2465         return copyTuple;
2466 }
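
/*
 * Illustrative sketch, assuming caller-local variables (estate, epqstate,
 * relation, rti, lockmode, tid, priorXmax, slot, copyTuple): a caller that
 * has just detected a concurrent update normally chains the EPQ entry points
 * in this file together roughly like this; the exact sequence used by
 * EvalPlanQual() earlier in this file may differ in detail.
 *
 *      copyTuple = EvalPlanQualFetch(estate, relation, lockmode,
 *                                    LockWaitBlock, tid, priorXmax);
 *      if (copyTuple != NULL)
 *      {
 *          EvalPlanQualBegin(epqstate, estate);
 *          EvalPlanQualSetTuple(epqstate, rti, copyTuple);
 *          EvalPlanQualFetchRowMarks(epqstate);
 *          slot = EvalPlanQualNext(epqstate);
 *          (an empty slot here means the updated row no longer passes the quals)
 *      }
 */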
2467
2468 /*
2469  * EvalPlanQualInit -- initialize during creation of a plan state node
2470  * that might need to invoke EPQ processing.
2471  *
2472  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2473  * with EvalPlanQualSetPlan.
2474  */
2475 void
2476 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2477                                  Plan *subplan, List *auxrowmarks, int epqParam)
2478 {
2479         /* Mark the EPQ state inactive */
2480         epqstate->estate = NULL;
2481         epqstate->planstate = NULL;
2482         epqstate->origslot = NULL;
2483         /* ... and remember data that EvalPlanQualBegin will need */
2484         epqstate->plan = subplan;
2485         epqstate->arowMarks = auxrowmarks;
2486         epqstate->epqParam = epqParam;
2487 }
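
/*
 * Minimal usage sketch, assuming a hypothetical LockRows-style node with
 * fields lr_epqstate and epqParam (names are illustrative; see
 * nodeLockRows.c for the real caller): the node's ExecInit routine sets up
 * its EPQState once, before any recheck is ever needed.
 *
 *      EvalPlanQualInit(&lrstate->lr_epqstate, estate,
 *                       outerPlan(node), NIL, node->epqParam);
 */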
2488
2489 /*
2490  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2491  *
2492  * We need this so that ModifyTable can deal with multiple subplans.
2493  */
2494 void
2495 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2496 {
2497         /* If we have a live EPQ query, shut it down */
2498         EvalPlanQualEnd(epqstate);
2499         /* And set/change the plan pointer */
2500         epqstate->plan = subplan;
2501         /* The rowmarks depend on the plan, too */
2502         epqstate->arowMarks = auxrowmarks;
2503 }
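
/*
 * Minimal sketch of the multi-subplan pattern mentioned above, with
 * illustrative field names (see nodeModifyTable.c for the real usage):
 * ModifyTable initializes its EPQState with no subplan and later points it
 * at whichever subplan is currently running.
 *
 *      EvalPlanQualInit(&mtstate->mt_epqstate, estate,
 *                       NULL, NIL, node->epqParam);
 *      ...
 *      EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, auxrowmarks);
 */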
2504
2505 /*
2506  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2507  *
2508  * NB: passed tuple must be palloc'd; it may get freed later
2509  */
2510 void
2511 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2512 {
2513         EState     *estate = epqstate->estate;
2514
2515         Assert(rti > 0);
2516
2517         /*
2518          * free old test tuple, if any, and store new tuple where relation's scan
2519          * node will see it
2520          */
2521         if (estate->es_epqTuple[rti - 1] != NULL)
2522                 heap_freetuple(estate->es_epqTuple[rti - 1]);
2523         estate->es_epqTuple[rti - 1] = tuple;
2524         estate->es_epqTupleSet[rti - 1] = true;
2525 }
2526
2527 /*
2528  * Fetch back the current test tuple (if any) for the specified RTI
2529  */
2530 HeapTuple
2531 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2532 {
2533         EState     *estate = epqstate->estate;
2534
2535         Assert(rti > 0);
2536
2537         return estate->es_epqTuple[rti - 1];
2538 }
2539
2540 /*
2541  * Fetch the current row values for any non-locked relations that need
2542  * to be scanned by an EvalPlanQual operation.  origslot must have been set
2543  * to contain the current result row (top-level row) that we need to recheck.
2544  */
2545 void
2546 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2547 {
2548         ListCell   *l;
2549
2550         Assert(epqstate->origslot != NULL);
2551
2552         foreach(l, epqstate->arowMarks)
2553         {
2554                 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2555                 ExecRowMark *erm = aerm->rowmark;
2556                 Datum           datum;
2557                 bool            isNull;
2558                 HeapTupleData tuple;
2559
2560                 if (RowMarkRequiresRowShareLock(erm->markType))
2561                         elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2562
2563                 /* clear any leftover test tuple for this rel */
2564                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2565
2566                 /* if child rel, must check whether it produced this row */
2567                 if (erm->rti != erm->prti)
2568                 {
2569                         Oid                     tableoid;
2570
2571                         datum = ExecGetJunkAttribute(epqstate->origslot,
2572                                                                                  aerm->toidAttNo,
2573                                                                                  &isNull);
2574                         /* non-locked rels could be on the inside of outer joins */
2575                         if (isNull)
2576                                 continue;
2577                         tableoid = DatumGetObjectId(datum);
2578
2579                         Assert(OidIsValid(erm->relid));
2580                         if (tableoid != erm->relid)
2581                         {
2582                                 /* this child is inactive right now */
2583                                 continue;
2584                         }
2585                 }
2586
2587                 if (erm->markType == ROW_MARK_REFERENCE)
2588                 {
2589                         HeapTuple       copyTuple;
2590
2591                         Assert(erm->relation != NULL);
2592
2593                         /* fetch the tuple's ctid */
2594                         datum = ExecGetJunkAttribute(epqstate->origslot,
2595                                                                                  aerm->ctidAttNo,
2596                                                                                  &isNull);
2597                         /* non-locked rels could be on the inside of outer joins */
2598                         if (isNull)
2599                                 continue;
2600
2601                         /* fetch requests on foreign tables must be passed to their FDW */
2602                         if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2603                         {
2604                                 FdwRoutine *fdwroutine;
2605                                 bool            updated = false;
2606
2607                                 fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2608                                 /* this should have been checked already, but let's be safe */
2609                                 if (fdwroutine->RefetchForeignRow == NULL)
2610                                         ereport(ERROR,
2611                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2612                                                    errmsg("cannot lock rows in foreign table \"%s\"",
2613                                                                   RelationGetRelationName(erm->relation))));
2614                                 copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
2615                                                                                                                   erm,
2616                                                                                                                   datum,
2617                                                                                                                   &updated);
2618                                 if (copyTuple == NULL)
2619                                         elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2620
2621                                 /*
2622                                  * Ideally we'd insist on updated == false here, but that
2623                                  * assumes that FDWs can track that exactly, which they might
2624                                  * not be able to.  So just ignore the flag.
2625                                  */
2626                         }
2627                         else
2628                         {
2629                                 /* ordinary table, fetch the tuple */
2630                                 Buffer          buffer;
2631
2632                                 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2633                                 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2634                                                                 false, NULL))
2635                                         elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2636
2637                                 /* successful, copy tuple */
2638                                 copyTuple = heap_copytuple(&tuple);
2639                                 ReleaseBuffer(buffer);
2640                         }
2641
2642                         /* store tuple */
2643                         EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
2644                 }
2645                 else
2646                 {
2647                         HeapTupleHeader td;
2648
2649                         Assert(erm->markType == ROW_MARK_COPY);
2650
2651                         /* fetch the whole-row Var for the relation */
2652                         datum = ExecGetJunkAttribute(epqstate->origslot,
2653                                                                                  aerm->wholeAttNo,
2654                                                                                  &isNull);
2655                         /* non-locked rels could be on the inside of outer joins */
2656                         if (isNull)
2657                                 continue;
2658                         td = DatumGetHeapTupleHeader(datum);
2659
2660                         /* build a temporary HeapTuple control structure */
2661                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2662                         tuple.t_data = td;
2663                         /* relation might be a foreign table, if so provide tableoid */
2664                         tuple.t_tableOid = erm->relid;
2665                         /* also copy t_ctid in case there's valid data there */
2666                         tuple.t_self = td->t_ctid;
2667
2668                         /* copy and store tuple */
2669                         EvalPlanQualSetTuple(epqstate, erm->rti,
2670                                                                  heap_copytuple(&tuple));
2671                 }
2672         }
2673 }
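
/*
 * Simplified sketch of the consumer side (abridged from the logic of
 * ExecScanFetch() in execScan.c): during a recheck, a scan node returns the
 * installed test tuple for its RT index exactly once, using es_epqScanDone[]
 * to remember that it has already done so.
 *
 *      if (estate->es_epqTupleSet[scanrelid - 1])
 *      {
 *          if (estate->es_epqScanDone[scanrelid - 1])
 *              return ExecClearTuple(slot);
 *          estate->es_epqScanDone[scanrelid - 1] = true;
 *          ... store es_epqTuple[scanrelid - 1] into slot and return it ...
 *      }
 */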
2674
2675 /*
2676  * Fetch the next row (if any) from EvalPlanQual testing
2677  *
2678  * (In practice, there should never be more than one row...)
2679  */
2680 TupleTableSlot *
2681 EvalPlanQualNext(EPQState *epqstate)
2682 {
2683         MemoryContext oldcontext;
2684         TupleTableSlot *slot;
2685
2686         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2687         slot = ExecProcNode(epqstate->planstate);
2688         MemoryContextSwitchTo(oldcontext);
2689
2690         return slot;
2691 }
2692
2693 /*
2694  * Initialize or reset an EvalPlanQual state tree
2695  */
2696 void
2697 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2698 {
2699         EState     *estate = epqstate->estate;
2700
2701         if (estate == NULL)
2702         {
2703                 /* First time through, so create a child EState */
2704                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2705         }
2706         else
2707         {
2708                 /*
2709                  * We already have a suitable child EPQ tree, so just reset it.
2710                  */
2711                 int                     rtsize = list_length(parentestate->es_range_table);
2712                 PlanState  *planstate = epqstate->planstate;
2713
2714                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2715
2716                 /* Recopy current values of parent parameters */
2717                 if (parentestate->es_plannedstmt->nParamExec > 0)
2718                 {
2719                         int                     i = parentestate->es_plannedstmt->nParamExec;
2720
2721                         while (--i >= 0)
2722                         {
2723                                 /* copy value if any, but not execPlan link */
2724                                 estate->es_param_exec_vals[i].value =
2725                                         parentestate->es_param_exec_vals[i].value;
2726                                 estate->es_param_exec_vals[i].isnull =
2727                                         parentestate->es_param_exec_vals[i].isnull;
2728                         }
2729                 }
2730
2731                 /*
2732                  * Mark child plan tree as needing rescan at all scan nodes.  The
2733                  * first ExecProcNode will take care of actually doing the rescan.
2734                  */
2735                 planstate->chgParam = bms_add_member(planstate->chgParam,
2736                                                                                          epqstate->epqParam);
2737         }
2738 }
2739
2740 /*
2741  * Start execution of an EvalPlanQual plan tree.
2742  *
2743  * This is a cut-down version of ExecutorStart(): we copy some state from
2744  * the top-level estate rather than initializing it fresh.
2745  */
2746 static void
2747 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2748 {
2749         EState     *estate;
2750         int                     rtsize;
2751         MemoryContext oldcontext;
2752         ListCell   *l;
2753
2754         rtsize = list_length(parentestate->es_range_table);
2755
2756         epqstate->estate = estate = CreateExecutorState();
2757
2758         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2759
2760         /*
2761          * Child EPQ EStates share the parent's copy of unchanging state such as
2762          * the snapshot, rangetable, result-rel info, and external Param info.
2763          * They need their own copies of local state, including a tuple table,
2764          * es_param_exec_vals, etc.
2765          *
2766          * The ResultRelInfo array management is trickier than it looks.  We
2767          * create a fresh array for the child but copy all the content from the
2768          * parent.  This is because it's okay for the child to share any
2769          * per-relation state the parent has already created --- but if the child
2770          * sets up any ResultRelInfo fields, such as its own junkfilter, that
2771          * state must *not* propagate back to the parent.  (For one thing, the
2772          * pointed-to data is in a memory context that won't last long enough.)
2773          */
2774         estate->es_direction = ForwardScanDirection;
2775         estate->es_snapshot = parentestate->es_snapshot;
2776         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2777         estate->es_range_table = parentestate->es_range_table;
2778         estate->es_plannedstmt = parentestate->es_plannedstmt;
2779         estate->es_junkFilter = parentestate->es_junkFilter;
2780         estate->es_output_cid = parentestate->es_output_cid;
2781         if (parentestate->es_num_result_relations > 0)
2782         {
2783                 int                     numResultRelations = parentestate->es_num_result_relations;
2784                 ResultRelInfo *resultRelInfos;
2785
2786                 resultRelInfos = (ResultRelInfo *)
2787                         palloc(numResultRelations * sizeof(ResultRelInfo));
2788                 memcpy(resultRelInfos, parentestate->es_result_relations,
2789                            numResultRelations * sizeof(ResultRelInfo));
2790                 estate->es_result_relations = resultRelInfos;
2791                 estate->es_num_result_relations = numResultRelations;
2792         }
2793         /* es_result_relation_info must NOT be copied */
2794         /* es_trig_target_relations must NOT be copied */
2795         estate->es_rowMarks = parentestate->es_rowMarks;
2796         estate->es_top_eflags = parentestate->es_top_eflags;
2797         estate->es_instrument = parentestate->es_instrument;
2798         /* es_auxmodifytables must NOT be copied */
2799
2800         /*
2801          * The external param list is simply shared from parent.  The internal
2802          * param workspace has to be local state, but we copy the initial values
2803          * from the parent, so as to have access to any param values that were
2804          * already set from other parts of the parent's plan tree.
2805          */
2806         estate->es_param_list_info = parentestate->es_param_list_info;
2807         if (parentestate->es_plannedstmt->nParamExec > 0)
2808         {
2809                 int                     i = parentestate->es_plannedstmt->nParamExec;
2810
2811                 estate->es_param_exec_vals = (ParamExecData *)
2812                         palloc0(i * sizeof(ParamExecData));
2813                 while (--i >= 0)
2814                 {
2815                         /* copy value if any, but not execPlan link */
2816                         estate->es_param_exec_vals[i].value =
2817                                 parentestate->es_param_exec_vals[i].value;
2818                         estate->es_param_exec_vals[i].isnull =
2819                                 parentestate->es_param_exec_vals[i].isnull;
2820                 }
2821         }
2822
2823         /*
2824          * Each EState must have its own es_epqScanDone state, but if we have
2825          * nested EPQ checks they should share es_epqTuple arrays.  This allows
2826          * sub-rechecks to inherit the values being examined by an outer recheck.
2827          */
2828         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2829         if (parentestate->es_epqTuple != NULL)
2830         {
2831                 estate->es_epqTuple = parentestate->es_epqTuple;
2832                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
2833         }
2834         else
2835         {
2836                 estate->es_epqTuple = (HeapTuple *)
2837                         palloc0(rtsize * sizeof(HeapTuple));
2838                 estate->es_epqTupleSet = (bool *)
2839                         palloc0(rtsize * sizeof(bool));
2840         }
2841
2842         /*
2843          * Each estate also has its own tuple table.
2844          */
2845         estate->es_tupleTable = NIL;
2846
2847         /*
2848          * Initialize private state information for each SubPlan.  We must do this
2849          * before running ExecInitNode on the main query tree, since
2850          * ExecInitSubPlan expects to be able to find these entries. Some of the
2851          * SubPlans might not be used in the part of the plan tree we intend to
2852          * run, but since it's not easy to tell which, we just initialize them
2853          * all.
2854          */
2855         Assert(estate->es_subplanstates == NIL);
2856         foreach(l, parentestate->es_plannedstmt->subplans)
2857         {
2858                 Plan       *subplan = (Plan *) lfirst(l);
2859                 PlanState  *subplanstate;
2860
2861                 subplanstate = ExecInitNode(subplan, estate, 0);
2862                 estate->es_subplanstates = lappend(estate->es_subplanstates,
2863                                                                                    subplanstate);
2864         }
2865
2866         /*
2867          * Initialize the private state information for all the nodes in the part
2868          * of the plan tree we need to run.  This opens files, allocates storage
2869          * and leaves us ready to start processing tuples.
2870          */
2871         epqstate->planstate = ExecInitNode(planTree, estate, 0);
2872
2873         MemoryContextSwitchTo(oldcontext);
2874 }
2875
2876 /*
2877  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2878  * or if we are done with the current EPQ child.
2879  *
2880  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2881  * of the normal cleanup, but *not* close result relations (which we are
2882  * just sharing from the outer query).  We do, however, have to close any
2883  * trigger target relations that got opened, since those are not shared.
2884  * (There probably shouldn't be any of the latter, but just in case...)
2885  */
2886 void
2887 EvalPlanQualEnd(EPQState *epqstate)
2888 {
2889         EState     *estate = epqstate->estate;
2890         MemoryContext oldcontext;
2891         ListCell   *l;
2892
2893         if (estate == NULL)
2894                 return;                                 /* idle, so nothing to do */
2895
2896         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2897
2898         ExecEndNode(epqstate->planstate);
2899
2900         foreach(l, estate->es_subplanstates)
2901         {
2902                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2903
2904                 ExecEndNode(subplanstate);
2905         }
2906
2907         /* throw away the per-estate tuple table */
2908         ExecResetTupleTable(estate->es_tupleTable, false);
2909
2910         /* close any trigger target relations attached to this EState */
2911         foreach(l, estate->es_trig_target_relations)
2912         {
2913                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2914
2915                 /* Close indices and then the relation itself */
2916                 ExecCloseIndices(resultRelInfo);
2917                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2918         }
2919
2920         MemoryContextSwitchTo(oldcontext);
2921
2922         FreeExecutorState(estate);
2923
2924         /* Mark EPQState idle */
2925         epqstate->estate = NULL;
2926         epqstate->planstate = NULL;
2927         epqstate->origslot = NULL;
2928 }
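
/*
 * Minimal usage sketch, assuming the same hypothetical LockRows-style node
 * as above: the node's ExecEnd routine simply calls
 *
 *      EvalPlanQualEnd(&lrstate->lr_epqstate);
 *
 * This is safe even if no EPQ recheck ever ran, because the function returns
 * immediately while epqstate->estate is still NULL.
 */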