]> granicus.if.org Git - postgresql/blobdiff - src/backend/executor/execMain.c
Add a Gather executor node.
[postgresql] / src / backend / executor / execMain.c
index 2b811d9606e4afc19e35ef2eba3366e49460b239..37b7bbd413b3c629d95d2e27f0e9ea01adfebf04 100644 (file)
  * INTERFACE ROUTINES
  *     ExecutorStart()
  *     ExecutorRun()
+ *     ExecutorFinish()
  *     ExecutorEnd()
  *
- *     The old ExecutorMain() has been replaced by ExecutorStart(),
- *     ExecutorRun() and ExecutorEnd()
+ *     These four procedures are the external interface to the executor.
+ *     In each case, the query descriptor is required as an argument.
  *
- *     These three procedures are the external interfaces to the executor.
- *     In each case, the query descriptor and the execution state is required
- *      as arguments
- *
- *     ExecutorStart() must be called at the beginning of any execution of any
- *     query plan and ExecutorEnd() should always be called at the end of
- *     execution of a plan.
+ *     ExecutorStart must be called at the beginning of execution of any
+ *     query plan and ExecutorEnd must always be called at the end of
+ *     execution of a plan (unless it is aborted due to error).
  *
  *     ExecutorRun accepts direction and count arguments that specify whether
  *     the plan is to be executed forwards, backwards, and for how many tuples.
+ *     In some cases ExecutorRun may be called multiple times to process all
+ *     the tuples for a plan.  It is also acceptable to stop short of executing
+ *     the whole plan (but only if it is a SELECT).
+ *
+ *     ExecutorFinish must be called after the final ExecutorRun call and
+ *     before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
+ *     which should also omit ExecutorRun.
  *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.168 2002/06/26 21:58:56 momjian Exp $
+ *       src/backend/executor/execMain.c
  *
  *-------------------------------------------------------------------------
  */
 #include "postgres.h"
 
-#include "access/heapam.h"
-#include "catalog/heap.h"
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "access/transam.h"
+#include "access/xact.h"
 #include "catalog/namespace.h"
-#include "commands/tablecmds.h"
+#include "commands/matview.h"
 #include "commands/trigger.h"
 #include "executor/execdebug.h"
-#include "executor/execdefs.h"
+#include "foreign/fdwapi.h"
+#include "mb/pg_wchar.h"
 #include "miscadmin.h"
-#include "optimizer/var.h"
+#include "optimizer/clauses.h"
 #include "parser/parsetree.h"
+#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
+#include "tcop/utility.h"
 #include "utils/acl.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/rls.h"
+#include "utils/snapmgr.h"
+#include "utils/tqual.h"
+
+
+/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
+ExecutorStart_hook_type ExecutorStart_hook = NULL;
+ExecutorRun_hook_type ExecutorRun_hook = NULL;
+ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
+ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
 
+/* Hook for plugin to get control in ExecCheckRTPerms() */
+ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
 
 /* decls for local routines only used within this module */
-static TupleDesc InitPlan(CmdType operation,
-                Query *parseTree,
-                Plan *plan,
-                EState *estate);
-static void initResultRelInfo(ResultRelInfo *resultRelInfo,
-                                 Index resultRelationIndex,
-                                 List *rangeTable,
-                                 CmdType operation);
-static void EndPlan(Plan *plan, EState *estate);
-static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
+static void InitPlan(QueryDesc *queryDesc, int eflags);
+static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
+static void ExecPostprocessPlan(EState *estate);
+static void ExecEndPlan(PlanState *planstate, EState *estate);
+static void ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
-                       DestReceiver *destfunc);
-static void ExecSelect(TupleTableSlot *slot,
-                        DestReceiver *destfunc,
-                        EState *estate);
-static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
-                  EState *estate);
-static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
-                  EState *estate);
-static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
-                       EState *estate);
-static TupleTableSlot *EvalPlanQualNext(EState *estate);
-static void EndEvalPlanQual(EState *estate);
-static void ExecCheckQueryPerms(CmdType operation, Query *parseTree,
-                                       Plan *plan);
-static void ExecCheckPlanPerms(Plan *plan, List *rangeTable,
-                                  CmdType operation);
-static void ExecCheckRTPerms(List *rangeTable, CmdType operation);
-static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
+                       DestReceiver *dest);
+static bool ExecCheckRTEPerms(RangeTblEntry *rte);
+static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
+                                                 Bitmapset *modifiedCols,
+                                                 AclMode requiredPerms);
+static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
+static char *ExecBuildSlotValueDescription(Oid reloid,
+                                                         TupleTableSlot *slot,
+                                                         TupleDesc tupdesc,
+                                                         Bitmapset *modifiedCols,
+                                                         int maxfieldlen);
+static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
+                                 Plan *planTree);
+
+/*
+ * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
+ * not appear to be any good header to put it into, given the structures that
+ * it uses, so we let them be duplicated.  Be sure to update both if one needs
+ * to be changed, however.
+ */
+#define GetInsertedColumns(relinfo, estate) \
+       (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
+#define GetUpdatedColumns(relinfo, estate) \
+       (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
 
 /* end of local decls */
 
@@ -89,50 +114,141 @@ static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
  *             This routine must be called at the beginning of any execution of any
  *             query plan
  *
- *             returns a TupleDesc which describes the attributes of the tuples to
- *             be returned by the query.  (Same value is saved in queryDesc)
+ * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
+ * only because some places use QueryDescs for utility commands).  The tupDesc
+ * field of the QueryDesc is filled in to describe the tuples that will be
+ * returned, and the internal fields (estate and planstate) are set up.
+ *
+ * eflags contains flag bits as described in executor.h.
+ *
+ * NB: the CurrentMemoryContext when this is called will become the parent
+ * of the per-query context used for this Executor invocation.
+ *
+ * We provide a function hook variable that lets loadable plugins
+ * get control when ExecutorStart is called.  Such a plugin would
+ * normally call standard_ExecutorStart().
  *
- * NB: the CurrentMemoryContext when this is called must be the context
- * to be used as the per-query context for the query plan.     ExecutorRun()
- * and ExecutorEnd() must be called in this same memory context.
  * ----------------------------------------------------------------
  */
-TupleDesc
-ExecutorStart(QueryDesc *queryDesc, EState *estate)
+void
+ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
-       TupleDesc       result;
+       if (ExecutorStart_hook)
+               (*ExecutorStart_hook) (queryDesc, eflags);
+       else
+               standard_ExecutorStart(queryDesc, eflags);
+}
 
-       /* sanity checks */
+void
+standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
+       EState     *estate;
+       MemoryContext oldcontext;
+
+       /* sanity checks: queryDesc must not be started already */
        Assert(queryDesc != NULL);
+       Assert(queryDesc->estate == NULL);
 
-       if (queryDesc->plantree->nParamExec > 0)
-       {
+       /*
+        * If the transaction is read-only, we need to check if any writes are
+        * planned to non-temporary tables.  EXPLAIN is considered read-only.
+        *
+        * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
+        * would require (a) storing the combocid hash in shared memory, rather
+        * than synchronizing it just once at the start of parallelism, and (b) an
+        * alternative to heap_update()'s reliance on xmax for mutual exclusion.
+        * INSERT may have no such troubles, but we forbid it to simplify the
+        * checks.
+        *
+        * We have lower-level defenses in CommandCounterIncrement and elsewhere
+        * against performing unsafe operations in parallel mode, but this gives a
+        * more user-friendly error message.
+        */
+       if ((XactReadOnly || IsInParallelMode()) &&
+               !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
+               ExecCheckXactReadOnly(queryDesc->plannedstmt);
+
+       /*
+        * Build EState, switch into per-query memory context for startup.
+        */
+       estate = CreateExecutorState();
+       queryDesc->estate = estate;
+
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+       /*
+        * Fill in external parameters, if any, from queryDesc; and allocate
+        * workspace for internal parameters
+        */
+       estate->es_param_list_info = queryDesc->params;
+
+       if (queryDesc->plannedstmt->nParamExec > 0)
                estate->es_param_exec_vals = (ParamExecData *)
-                       palloc(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
-               MemSet(estate->es_param_exec_vals, 0,
-                          queryDesc->plantree->nParamExec * sizeof(ParamExecData));
+                       palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
+
+       /*
+        * If non-read-only query, set the command ID to mark output tuples with
+        */
+       switch (queryDesc->operation)
+       {
+               case CMD_SELECT:
+
+                       /*
+                        * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
+                        * tuples
+                        */
+                       if (queryDesc->plannedstmt->rowMarks != NIL ||
+                               queryDesc->plannedstmt->hasModifyingCTE)
+                               estate->es_output_cid = GetCurrentCommandId(true);
+
+                       /*
+                        * A SELECT without modifying CTEs can't possibly queue triggers,
+                        * so force skip-triggers mode. This is just a marginal efficiency
+                        * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
+                        * all that expensive, but we might as well do it.
+                        */
+                       if (!queryDesc->plannedstmt->hasModifyingCTE)
+                               eflags |= EXEC_FLAG_SKIP_TRIGGERS;
+                       break;
+
+               case CMD_INSERT:
+               case CMD_DELETE:
+               case CMD_UPDATE:
+                       estate->es_output_cid = GetCurrentCommandId(true);
+                       break;
+
+               default:
+                       elog(ERROR, "unrecognized operation code: %d",
+                                (int) queryDesc->operation);
+                       break;
        }
 
        /*
-        * Make our own private copy of the current query snapshot data.
-        *
-        * This "freezes" our idea of which tuples are good and which are not
-        * for the life of this query, even if it outlives the current command
-        * and current snapshot.
+        * Copy other important information into the EState
         */
-       estate->es_snapshot = CopyQuerySnapshot();
+       estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
+       estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
+       estate->es_top_eflags = eflags;
+       estate->es_instrument = queryDesc->instrument_options;
 
        /*
-        * Initialize the plan
+        * Initialize the plan state tree
         */
-       result = InitPlan(queryDesc->operation,
-                                         queryDesc->parsetree,
-                                         queryDesc->plantree,
-                                         estate);
+       InitPlan(queryDesc, eflags);
 
-       queryDesc->tupDesc = result;
+       /*
+        * Set up an AFTER-trigger statement context, unless told not to, or
+        * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
+        */
+       if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
+               AfterTriggerBeginQuery();
 
-       return result;
+       /* Enter parallel mode, if required by the query. */
+       if (queryDesc->plannedstmt->parallelModeNeeded &&
+               !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
+               EnterParallelMode();
+
+       MemoryContextSwitchTo(oldcontext);
 }
 
 /* ----------------------------------------------------------------
@@ -148,294 +264,519 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
  *             except to start up/shut down the destination.  Otherwise,
  *             we retrieve up to 'count' tuples in the specified direction.
  *
- *             Note: count = 0 is interpreted as no portal limit, e.g. run to
- *             completion.
+ *             Note: count = 0 is interpreted as no portal limit, i.e., run to
+ *             completion.  Also note that the count limit is only applied to
+ *             retrieved tuples, not for instance to those inserted/updated/deleted
+ *             by a ModifyTable plan node.
+ *
+ *             There is no return value, but output tuples (if any) are sent to
+ *             the destination receiver specified in the QueryDesc; and the number
+ *             of tuples processed at the top level can be found in
+ *             estate->es_processed.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorRun is called.  Such a plugin would
+ *             normally call standard_ExecutorRun().
  *
  * ----------------------------------------------------------------
  */
-TupleTableSlot *
-ExecutorRun(QueryDesc *queryDesc, EState *estate,
+void
+ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
 {
+       if (ExecutorRun_hook)
+               (*ExecutorRun_hook) (queryDesc, direction, count);
+       else
+               standard_ExecutorRun(queryDesc, direction, count);
+}
+
+void
+standard_ExecutorRun(QueryDesc *queryDesc,
+                                        ScanDirection direction, long count)
+{
+       EState     *estate;
        CmdType         operation;
-       Plan       *plan;
-       CommandDest dest;
-       DestReceiver *destfunc;
-       TupleTableSlot *result;
+       DestReceiver *dest;
+       bool            sendTuples;
+       MemoryContext oldcontext;
+
+       /* sanity checks */
+       Assert(queryDesc != NULL);
+
+       estate = queryDesc->estate;
+
+       Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
 
        /*
-        * sanity checks
+        * Switch into per-query memory context
         */
-       Assert(queryDesc != NULL);
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
 
        /*
-        * extract information from the query descriptor and the query
-        * feature.
+        * extract information from the query descriptor and the query feature.
         */
        operation = queryDesc->operation;
-       plan = queryDesc->plantree;
        dest = queryDesc->dest;
 
        /*
-        * startup tuple receiver
+        * startup tuple receiver, if we will be emitting tuples
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;
 
-       destfunc = DestToFunction(dest);
-       (*destfunc->setup) (destfunc, (int) operation,
-                                               queryDesc->portalName, queryDesc->tupDesc);
+       sendTuples = (operation == CMD_SELECT ||
+                                 queryDesc->plannedstmt->hasReturning);
+
+       if (sendTuples)
+               (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
 
        /*
         * run plan
         */
-       if (direction == NoMovementScanDirection)
-               result = NULL;
-       else
-               result = ExecutePlan(estate,
-                                                        plan,
-                                                        operation,
-                                                        count,
-                                                        direction,
-                                                        destfunc);
+       if (!ScanDirectionIsNoMovement(direction))
+               ExecutePlan(estate,
+                                       queryDesc->planstate,
+                                       operation,
+                                       sendTuples,
+                                       count,
+                                       direction,
+                                       dest);
+
+       /* Allow nodes to release or shut down resources. */
+       (void) ExecShutdownNode(queryDesc->planstate);
 
        /*
-        * shutdown receiver
+        * shutdown tuple receiver, if we started it
         */
-       (*destfunc->cleanup) (destfunc);
+       if (sendTuples)
+               (*dest->rShutdown) (dest);
 
-       return result;
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, estate->es_processed);
+
+       MemoryContextSwitchTo(oldcontext);
 }
 
 /* ----------------------------------------------------------------
- *             ExecutorEnd
+ *             ExecutorFinish
+ *
+ *             This routine must be called after the last ExecutorRun call.
+ *             It performs cleanup such as firing AFTER triggers.  It is
+ *             separate from ExecutorEnd because EXPLAIN ANALYZE needs to
+ *             include these actions in the total runtime.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorFinish is called.  Such a plugin would
+ *             normally call standard_ExecutorFinish().
  *
- *             This routine must be called at the end of execution of any
- *             query plan
  * ----------------------------------------------------------------
  */
 void
-ExecutorEnd(QueryDesc *queryDesc, EState *estate)
+ExecutorFinish(QueryDesc *queryDesc)
+{
+       if (ExecutorFinish_hook)
+               (*ExecutorFinish_hook) (queryDesc);
+       else
+               standard_ExecutorFinish(queryDesc);
+}
+
+void
+standard_ExecutorFinish(QueryDesc *queryDesc)
 {
+       EState     *estate;
+       MemoryContext oldcontext;
+
        /* sanity checks */
        Assert(queryDesc != NULL);
 
-       EndPlan(queryDesc->plantree, estate);
+       estate = queryDesc->estate;
 
-       if (estate->es_snapshot != NULL)
-       {
-               if (estate->es_snapshot->xcnt > 0)
-                       pfree(estate->es_snapshot->xip);
-               pfree(estate->es_snapshot);
-               estate->es_snapshot = NULL;
-       }
+       Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
 
-       if (estate->es_param_exec_vals != NULL)
-       {
-               pfree(estate->es_param_exec_vals);
-               estate->es_param_exec_vals = NULL;
-       }
-}
+       /* This should be run once and only once per Executor instance */
+       Assert(!estate->es_finished);
 
+       /* Switch into per-query memory context */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-/*
- * ExecCheckQueryPerms
- *             Check access permissions for all relations referenced in a query.
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
+
+       /* Run ModifyTable nodes to completion */
+       ExecPostprocessPlan(estate);
+
+       /* Execute queued AFTER triggers, unless told not to */
+       if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
+               AfterTriggerEndQuery(estate);
+
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, 0);
+
+       MemoryContextSwitchTo(oldcontext);
+
+       estate->es_finished = true;
+}
+
+/* ----------------------------------------------------------------
+ *             ExecutorEnd
+ *
+ *             This routine must be called at the end of execution of any
+ *             query plan
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorEnd is called.  Such a plugin would
+ *             normally call standard_ExecutorEnd().
+ *
+ * ----------------------------------------------------------------
  */
-static void
-ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
+void
+ExecutorEnd(QueryDesc *queryDesc)
+{
+       if (ExecutorEnd_hook)
+               (*ExecutorEnd_hook) (queryDesc);
+       else
+               standard_ExecutorEnd(queryDesc);
+}
+
+void
+standard_ExecutorEnd(QueryDesc *queryDesc)
 {
+       EState     *estate;
+       MemoryContext oldcontext;
+
+       /* sanity checks */
+       Assert(queryDesc != NULL);
+
+       estate = queryDesc->estate;
+
+       Assert(estate != NULL);
+
        /*
-        * Check RTEs in the query's primary rangetable.
+        * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+        * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+        * might forget to call it.
         */
-       ExecCheckRTPerms(parseTree->rtable, operation);
+       Assert(estate->es_finished ||
+                  (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
 
        /*
-        * Search for subplans and APPEND nodes to check their rangetables.
+        * Switch into per-query memory context to run ExecEndPlan
         */
-       ExecCheckPlanPerms(plan, parseTree->rtable, operation);
-}
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-/*
- * ExecCheckPlanPerms
- *             Recursively scan the plan tree to check access permissions in
- *             subplans.
- */
-static void
-ExecCheckPlanPerms(Plan *plan, List *rangeTable, CmdType operation)
-{
-       List       *subp;
+       ExecEndPlan(queryDesc->planstate, estate);
 
-       if (plan == NULL)
-               return;
+       /* do away with our snapshots */
+       UnregisterSnapshot(estate->es_snapshot);
+       UnregisterSnapshot(estate->es_crosscheck_snapshot);
 
-       /* Check subplans, which we assume are plain SELECT queries */
+       /*
+        * Must switch out of context before destroying it
+        */
+       MemoryContextSwitchTo(oldcontext);
 
-       foreach(subp, plan->initPlan)
-       {
-               SubPlan    *subplan = (SubPlan *) lfirst(subp);
+       /* Exit parallel mode, if it was required by the query. */
+       if (queryDesc->plannedstmt->parallelModeNeeded &&
+               !(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY))
+               ExitParallelMode();
 
-               ExecCheckRTPerms(subplan->rtable, CMD_SELECT);
-               ExecCheckPlanPerms(subplan->plan, subplan->rtable, CMD_SELECT);
-       }
-       foreach(subp, plan->subPlan)
-       {
-               SubPlan    *subplan = (SubPlan *) lfirst(subp);
+       /*
+        * Release EState and per-query memory context.  This should release
+        * everything the executor has allocated.
+        */
+       FreeExecutorState(estate);
 
-               ExecCheckRTPerms(subplan->rtable, CMD_SELECT);
-               ExecCheckPlanPerms(subplan->plan, subplan->rtable, CMD_SELECT);
-       }
+       /* Reset queryDesc fields that no longer point to anything */
+       queryDesc->tupDesc = NULL;
+       queryDesc->estate = NULL;
+       queryDesc->planstate = NULL;
+       queryDesc->totaltime = NULL;
+}
 
-       /* Check lower plan nodes */
+/* ----------------------------------------------------------------
+ *             ExecutorRewind
+ *
+ *             This routine may be called on an open queryDesc to rewind it
+ *             to the start.
+ * ----------------------------------------------------------------
+ */
+void
+ExecutorRewind(QueryDesc *queryDesc)
+{
+       EState     *estate;
+       MemoryContext oldcontext;
 
-       ExecCheckPlanPerms(plan->lefttree, rangeTable, operation);
-       ExecCheckPlanPerms(plan->righttree, rangeTable, operation);
+       /* sanity checks */
+       Assert(queryDesc != NULL);
 
-       /* Do node-type-specific checks */
+       estate = queryDesc->estate;
 
-       switch (nodeTag(plan))
-       {
-               case T_SubqueryScan:
-                       {
-                               SubqueryScan *scan = (SubqueryScan *) plan;
-                               RangeTblEntry *rte;
+       Assert(estate != NULL);
 
-                               /* Recursively check the subquery */
-                               rte = rt_fetch(scan->scan.scanrelid, rangeTable);
-                               Assert(rte->rtekind == RTE_SUBQUERY);
-                               ExecCheckQueryPerms(operation, rte->subquery, scan->subplan);
-                               break;
-                       }
-               case T_Append:
-                       {
-                               Append     *app = (Append *) plan;
-                               List       *appendplans;
+       /* It's probably not sensible to rescan updating queries */
+       Assert(queryDesc->operation == CMD_SELECT);
 
-                               foreach(appendplans, app->appendplans)
-                               {
-                                       ExecCheckPlanPerms((Plan *) lfirst(appendplans),
-                                                                          rangeTable,
-                                                                          operation);
-                               }
-                               break;
-                       }
+       /*
+        * Switch into per-query memory context
+        */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-               default:
-                       break;
-       }
+       /*
+        * rescan plan
+        */
+       ExecReScan(queryDesc->planstate);
+
+       MemoryContextSwitchTo(oldcontext);
 }
 
+
 /*
  * ExecCheckRTPerms
  *             Check access permissions for all relations listed in a range table.
+ *
+ * Returns true if permissions are adequate.  Otherwise, throws an appropriate
+ * error if ereport_on_violation is true, or simply returns false otherwise.
+ *
+ * Note that this does NOT address row level security policies (aka: RLS).  If
+ * rows will be returned to the user as a result of this permission check
+ * passing, then RLS also needs to be consulted (and check_enable_rls()).
+ *
+ * See rewrite/rowsecurity.c.
  */
-static void
-ExecCheckRTPerms(List *rangeTable, CmdType operation)
+bool
+ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
 {
-       List       *lp;
+       ListCell   *l;
+       bool            result = true;
 
-       foreach(lp, rangeTable)
+       foreach(l, rangeTable)
        {
-               RangeTblEntry *rte = lfirst(lp);
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 
-               ExecCheckRTEPerms(rte, operation);
+               result = ExecCheckRTEPerms(rte);
+               if (!result)
+               {
+                       Assert(rte->rtekind == RTE_RELATION);
+                       if (ereport_on_violation)
+                               aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
+                                                          get_rel_name(rte->relid));
+                       return false;
+               }
        }
+
+       if (ExecutorCheckPerms_hook)
+               result = (*ExecutorCheckPerms_hook) (rangeTable,
+                                                                                        ereport_on_violation);
+       return result;
 }
 
 /*
  * ExecCheckRTEPerms
  *             Check access permissions for a single RTE.
  */
-static void
-ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
+static bool
+ExecCheckRTEPerms(RangeTblEntry *rte)
 {
+       AclMode         requiredPerms;
+       AclMode         relPerms;
+       AclMode         remainingPerms;
        Oid                     relOid;
        Oid                     userid;
-       AclResult       aclcheck_result;
-
-       /*
-        * Only plain-relation RTEs need to be checked here.  Subquery RTEs
-        * will be checked when ExecCheckPlanPerms finds the SubqueryScan node,
-        * and function RTEs are checked by init_fcache when the function is
-        * prepared for execution.  Join and special RTEs need no checks.
-        */
+
+       /*
+        * Only plain-relation RTEs need to be checked here.  Function RTEs are
+        * checked by init_fcache when the function is prepared for execution.
+        * Join, subquery, and special RTEs need no checks.
+        */
        if (rte->rtekind != RTE_RELATION)
-               return;
+               return true;
+
+       /*
+        * No work if requiredPerms is empty.
+        */
+       requiredPerms = rte->requiredPerms;
+       if (requiredPerms == 0)
+               return true;
 
        relOid = rte->relid;
 
        /*
-        * userid to check as: current user unless we have a setuid
-        * indication.
+        * userid to check as: current user unless we have a setuid indication.
         *
         * Note: GetUserId() is presently fast enough that there's no harm in
-        * calling it separately for each RTE.  If that stops being true, we
-        * could call it once in ExecCheckQueryPerms and pass the userid down
-        * from there.  But for now, no need for the extra clutter.
+        * calling it separately for each RTE.  If that stops being true, we could
+        * call it once in ExecCheckRTPerms and pass the userid down from there.
+        * But for now, no need for the extra clutter.
         */
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
 
-#define CHECK(MODE)            pg_class_aclcheck(relOid, userid, MODE)
-
-       if (rte->checkForRead)
+       /*
+        * We must have *all* the requiredPerms bits, but some of the bits can be
+        * satisfied from column-level rather than relation-level permissions.
+        * First, remove any bits that are satisfied by relation permissions.
+        */
+       relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
+       remainingPerms = requiredPerms & ~relPerms;
+       if (remainingPerms != 0)
        {
-               aclcheck_result = CHECK(ACL_SELECT);
-               if (aclcheck_result != ACLCHECK_OK)
-                       aclcheck_error(aclcheck_result, get_rel_name(relOid));
-       }
+               int                     col = -1;
+
+               /*
+                * If we lack any permissions that exist only as relation permissions,
+                * we can fail straight away.
+                */
+               if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
+                       return false;
 
-       if (rte->checkForWrite)
-       {
                /*
-                * Note: write access in a SELECT context means SELECT FOR UPDATE.
-                * Right now we don't distinguish that from true update as far as
-                * permissions checks are concerned.
+                * Check to see if we have the needed privileges at column level.
+                *
+                * Note: failures just report a table-level error; it would be nicer
+                * to report a column-level error if we have some but not all of the
+                * column privileges.
                 */
-               switch (operation)
+               if (remainingPerms & ACL_SELECT)
                {
-                       case CMD_INSERT:
-                               aclcheck_result = CHECK(ACL_INSERT);
-                               break;
-                       case CMD_SELECT:
-                       case CMD_UPDATE:
-                               aclcheck_result = CHECK(ACL_UPDATE);
-                               break;
-                       case CMD_DELETE:
-                               aclcheck_result = CHECK(ACL_DELETE);
-                               break;
-                       default:
-                               elog(ERROR, "ExecCheckRTEPerms: bogus operation %d",
-                                        operation);
-                               aclcheck_result = ACLCHECK_OK;  /* keep compiler quiet */
-                               break;
+                       /*
+                        * When the query doesn't explicitly reference any columns (for
+                        * example, SELECT COUNT(*) FROM table), allow the query if we
+                        * have SELECT on any column of the rel, as per SQL spec.
+                        */
+                       if (bms_is_empty(rte->selectedCols))
+                       {
+                               if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                                       return false;
+                       }
+
+                       while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
+                       {
+                               /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
+                               AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
+
+                               if (attno == InvalidAttrNumber)
+                               {
+                                       /* Whole-row reference, must have priv on all cols */
+                                       if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                                 ACLMASK_ALL) != ACLCHECK_OK)
+                                               return false;
+                               }
+                               else
+                               {
+                                       if (pg_attribute_aclcheck(relOid, attno, userid,
+                                                                                         ACL_SELECT) != ACLCHECK_OK)
+                                               return false;
+                               }
+                       }
                }
-               if (aclcheck_result != ACLCHECK_OK)
-                       aclcheck_error(aclcheck_result, get_rel_name(relOid));
+
+               /*
+                * Basically the same for the mod columns, for both INSERT and UPDATE
+                * privilege as specified by remainingPerms.
+                */
+               if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
+                                                                                                                                         userid,
+                                                                                                                  rte->insertedCols,
+                                                                                                                                ACL_INSERT))
+                       return false;
+
+               if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
+                                                                                                                                         userid,
+                                                                                                                       rte->updatedCols,
+                                                                                                                                ACL_UPDATE))
+                       return false;
        }
+       return true;
 }
 
-
-/* ===============================================================
- * ===============================================================
-                                                static routines follow
- * ===============================================================
- * ===============================================================
+/*
+ * ExecCheckRTEPermsModified
+ *             Check INSERT or UPDATE access permissions for a single RTE (these
+ *             are processed uniformly).
  */
-
-typedef struct execRowMark
+static bool
+ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
+                                                 AclMode requiredPerms)
 {
-       Relation        relation;
-       Index           rti;
-       char            resname[32];
-} execRowMark;
+       int                     col = -1;
+
+       /*
+        * When the query doesn't explicitly update any columns, allow the query
+        * if we have permission on any column of the rel.  This is to handle
+        * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
+        */
+       if (bms_is_empty(modifiedCols))
+       {
+               if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
+                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                       return false;
+       }
+
+       while ((col = bms_next_member(modifiedCols, col)) >= 0)
+       {
+               /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
+               AttrNumber      attno = col + FirstLowInvalidHeapAttributeNumber;
+
+               if (attno == InvalidAttrNumber)
+               {
+                       /* whole-row reference can't happen here */
+                       elog(ERROR, "whole-row update is not implemented");
+               }
+               else
+               {
+                       if (pg_attribute_aclcheck(relOid, attno, userid,
+                                                                         requiredPerms) != ACLCHECK_OK)
+                               return false;
+               }
+       }
+       return true;
+}
 
-typedef struct evalPlanQual
+/*
+ * Check that the query does not imply any writes to non-temp tables;
+ * unless we're in parallel mode, in which case don't even allow writes
+ * to temp tables.
+ *
+ * Note: in a Hot Standby slave this would need to reject writes to temp
+ * tables just as we do in parallel mode; but an HS slave can't have created
+ * any temp tables in the first place, so no need to check that.
+ */
+static void
+ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 {
-       Plan       *plan;
-       Index           rti;
-       EState          estate;
-       struct evalPlanQual *free;
-} evalPlanQual;
+       ListCell   *l;
+
+       /*
+        * Fail if write permissions are requested in parallel mode for table
+        * (temp or non-temp), otherwise fail for any non-temp table.
+        */
+       foreach(l, plannedstmt->rtable)
+       {
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+
+               if (rte->rtekind != RTE_RELATION)
+                       continue;
+
+               if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
+                       continue;
+
+               if (isTempNamespace(get_rel_namespace(rte->relid)))
+                       continue;
+
+               PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
+       }
+
+       if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
+               PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
+}
+
 
 /* ----------------------------------------------------------------
  *             InitPlan
@@ -444,77 +785,64 @@ typedef struct evalPlanQual
  *             and start up the rule manager
  * ----------------------------------------------------------------
  */
-static TupleDesc
-InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
+static void
+InitPlan(QueryDesc *queryDesc, int eflags)
 {
-       List       *rangeTable;
-       Relation        intoRelationDesc;
+       CmdType         operation = queryDesc->operation;
+       PlannedStmt *plannedstmt = queryDesc->plannedstmt;
+       Plan       *plan = plannedstmt->planTree;
+       List       *rangeTable = plannedstmt->rtable;
+       EState     *estate = queryDesc->estate;
+       PlanState  *planstate;
        TupleDesc       tupType;
+       ListCell   *l;
+       int                     i;
 
        /*
-        * Do permissions checks.
-        */
-       ExecCheckQueryPerms(operation, parseTree, plan);
-
-       /*
-        * get information from query descriptor
+        * Do permissions checks
         */
-       rangeTable = parseTree->rtable;
+       ExecCheckRTPerms(rangeTable, true);
 
        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;
+       estate->es_plannedstmt = plannedstmt;
 
        /*
-        * if there is a result relation, initialize result relation stuff
+        * initialize result relation stuff, and open/lock the result rels.
+        *
+        * We must do this before initializing the plan tree, else we might try to
+        * do a lock upgrade if a result rel is also a source rel.
         */
-       if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
+       if (plannedstmt->resultRelations)
        {
-               List       *resultRelations = parseTree->resultRelations;
-               int                     numResultRelations;
+               List       *resultRelations = plannedstmt->resultRelations;
+               int                     numResultRelations = list_length(resultRelations);
                ResultRelInfo *resultRelInfos;
+               ResultRelInfo *resultRelInfo;
 
-               if (resultRelations != NIL)
-               {
-                       /*
-                        * Multiple result relations (due to inheritance)
-                        * parseTree->resultRelations identifies them all
-                        */
-                       ResultRelInfo *resultRelInfo;
-
-                       numResultRelations = length(resultRelations);
-                       resultRelInfos = (ResultRelInfo *)
-                               palloc(numResultRelations * sizeof(ResultRelInfo));
-                       resultRelInfo = resultRelInfos;
-                       while (resultRelations != NIL)
-                       {
-                               initResultRelInfo(resultRelInfo,
-                                                                 lfirsti(resultRelations),
-                                                                 rangeTable,
-                                                                 operation);
-                               resultRelInfo++;
-                               resultRelations = lnext(resultRelations);
-                       }
-               }
-               else
+               resultRelInfos = (ResultRelInfo *)
+                       palloc(numResultRelations * sizeof(ResultRelInfo));
+               resultRelInfo = resultRelInfos;
+               foreach(l, resultRelations)
                {
-                       /*
-                        * Single result relation identified by
-                        * parseTree->resultRelation
-                        */
-                       numResultRelations = 1;
-                       resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
-                       initResultRelInfo(resultRelInfos,
-                                                         parseTree->resultRelation,
-                                                         rangeTable,
-                                                         operation);
+                       Index           resultRelationIndex = lfirst_int(l);
+                       Oid                     resultRelationOid;
+                       Relation        resultRelation;
+
+                       resultRelationOid = getrelid(resultRelationIndex, rangeTable);
+                       resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
+                       InitResultRelInfo(resultRelInfo,
+                                                         resultRelation,
+                                                         resultRelationIndex,
+                                                         estate->es_instrument);
+                       resultRelInfo++;
                }
-
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
-               /* Initialize to first or only result rel */
-               estate->es_result_relation_info = resultRelInfos;
+               /* es_result_relation_info is NULL except when within ModifyTable */
+               estate->es_result_relation_info = NULL;
        }
        else
        {
@@ -527,267 +855,376 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
        }
 
        /*
-        * Have to lock relations selected for update
+        * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
+        * before we initialize the plan tree, else we'd be risking lock upgrades.
+        * While we are at it, build the ExecRowMark list.
         */
-       estate->es_rowMark = NIL;
-       if (parseTree->rowMarks != NIL)
+       estate->es_rowMarks = NIL;
+       foreach(l, plannedstmt->rowMarks)
        {
-               List       *l;
+               PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+               Oid                     relid;
+               Relation        relation;
+               ExecRowMark *erm;
+
+               /* ignore "parent" rowmarks; they are irrelevant at runtime */
+               if (rc->isParent)
+                       continue;
+
+               /* get relation's OID (will produce InvalidOid if subquery) */
+               relid = getrelid(rc->rti, rangeTable);
 
-               foreach(l, parseTree->rowMarks)
+               /*
+                * If you change the conditions under which rel locks are acquired
+                * here, be sure to adjust ExecOpenScanRelation to match.
+                */
+               switch (rc->markType)
                {
-                       Index           rti = lfirsti(l);
-                       Oid                     relid = getrelid(rti, rangeTable);
-                       Relation        relation;
-                       execRowMark *erm;
-
-                       relation = heap_open(relid, RowShareLock);
-                       erm = (execRowMark *) palloc(sizeof(execRowMark));
-                       erm->relation = relation;
-                       erm->rti = rti;
-                       sprintf(erm->resname, "ctid%u", rti);
-                       estate->es_rowMark = lappend(estate->es_rowMark, erm);
+                       case ROW_MARK_EXCLUSIVE:
+                       case ROW_MARK_NOKEYEXCLUSIVE:
+                       case ROW_MARK_SHARE:
+                       case ROW_MARK_KEYSHARE:
+                               relation = heap_open(relid, RowShareLock);
+                               break;
+                       case ROW_MARK_REFERENCE:
+                               relation = heap_open(relid, AccessShareLock);
+                               break;
+                       case ROW_MARK_COPY:
+                               /* no physical table access is required */
+                               relation = NULL;
+                               break;
+                       default:
+                               elog(ERROR, "unrecognized markType: %d", rc->markType);
+                               relation = NULL;        /* keep compiler quiet */
+                               break;
                }
+
+               /* Check that relation is a legal target for marking */
+               if (relation)
+                       CheckValidRowMarkRel(relation, rc->markType);
+
+               erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
+               erm->relation = relation;
+               erm->relid = relid;
+               erm->rti = rc->rti;
+               erm->prti = rc->prti;
+               erm->rowmarkId = rc->rowmarkId;
+               erm->markType = rc->markType;
+               erm->strength = rc->strength;
+               erm->waitPolicy = rc->waitPolicy;
+               erm->ermActive = false;
+               ItemPointerSetInvalid(&(erm->curCtid));
+               erm->ermExtra = NULL;
+               estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }
 
        /*
-        * initialize the executor "tuple" table.  We need slots for all the
-        * plan nodes, plus possibly output slots for the junkfilter(s). At
-        * this point we aren't sure if we need junkfilters, so just add slots
-        * for them unconditionally.
+        * Initialize the executor's tuple table to empty.
+        */
+       estate->es_tupleTable = NIL;
+       estate->es_trig_tuple_slot = NULL;
+       estate->es_trig_oldtup_slot = NULL;
+       estate->es_trig_newtup_slot = NULL;
+
+       /* mark EvalPlanQual not active */
+       estate->es_epqTuple = NULL;
+       estate->es_epqTupleSet = NULL;
+       estate->es_epqScanDone = NULL;
+
+       /*
+        * Initialize private state information for each SubPlan.  We must do this
+        * before running ExecInitNode on the main query tree, since
+        * ExecInitSubPlan expects to be able to find these entries.
         */
+       Assert(estate->es_subplanstates == NIL);
+       i = 1;                                          /* subplan indices count from 1 */
+       foreach(l, plannedstmt->subplans)
        {
-               int                     nSlots = ExecCountSlotsNode(plan);
+               Plan       *subplan = (Plan *) lfirst(l);
+               PlanState  *subplanstate;
+               int                     sp_eflags;
 
-               if (parseTree->resultRelations != NIL)
-                       nSlots += length(parseTree->resultRelations);
-               else
-                       nSlots += 1;
-               estate->es_tupleTable = ExecCreateTupleTable(nSlots);
-       }
+               /*
+                * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
+                * it is a parameterless subplan (not initplan), we suggest that it be
+                * prepared to handle REWIND efficiently; otherwise there is no need.
+                */
+               sp_eflags = eflags
+                       & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
+               if (bms_is_member(i, plannedstmt->rewindPlanIDs))
+                       sp_eflags |= EXEC_FLAG_REWIND;
 
-       /* mark EvalPlanQual not active */
-       estate->es_origPlan = plan;
-       estate->es_evalPlanQual = NULL;
-       estate->es_evTuple = NULL;
-       estate->es_evTupleNull = NULL;
-       estate->es_useEvalPlan = false;
+               subplanstate = ExecInitNode(subplan, estate, sp_eflags);
+
+               estate->es_subplanstates = lappend(estate->es_subplanstates,
+                                                                                  subplanstate);
+
+               i++;
+       }
 
        /*
-        * initialize the private state information for all the nodes in the
-        * query tree.  This opens files, allocates storage and leaves us
-        * ready to start processing tuples.
+        * Initialize the private state information for all the nodes in the query
+        * tree.  This opens files, allocates storage and leaves us ready to start
+        * processing tuples.
         */
-       ExecInitNode(plan, estate, NULL);
+       planstate = ExecInitNode(plan, estate, eflags);
 
        /*
         * Get the tuple descriptor describing the type of tuples to return.
-        * (this is especially important if we are creating a relation with
-        * "SELECT INTO")
         */
-       tupType = ExecGetTupType(plan);         /* tuple descriptor */
+       tupType = ExecGetResultType(planstate);
 
        /*
-        * Initialize the junk filter if needed. SELECT and INSERT queries
-        * need a filter if there are any junk attrs in the tlist.      UPDATE and
-        * DELETE always need one, since there's always a junk 'ctid'
-        * attribute present --- no need to look first.
+        * Initialize the junk filter if needed.  SELECT queries need a filter if
+        * there are any junk attrs in the top-level tlist.
         */
+       if (operation == CMD_SELECT)
        {
                bool            junk_filter_needed = false;
-               List       *tlist;
+               ListCell   *tlist;
 
-               switch (operation)
+               foreach(tlist, plan->targetlist)
                {
-                       case CMD_SELECT:
-                       case CMD_INSERT:
-                               foreach(tlist, plan->targetlist)
-                               {
-                                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
+                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
 
-                                       if (tle->resdom->resjunk)
-                                       {
-                                               junk_filter_needed = true;
-                                               break;
-                                       }
-                               }
-                               break;
-                       case CMD_UPDATE:
-                       case CMD_DELETE:
+                       if (tle->resjunk)
+                       {
                                junk_filter_needed = true;
                                break;
-                       default:
-                               break;
+                       }
                }
 
                if (junk_filter_needed)
                {
-                       /*
-                        * If there are multiple result relations, each one needs its
-                        * own junk filter.  Note this is only possible for
-                        * UPDATE/DELETE, so we can't be fooled by some needing a
-                        * filter and some not.
-                        */
-                       if (parseTree->resultRelations != NIL)
-                       {
-                               List       *subplans;
-                               ResultRelInfo *resultRelInfo;
-
-                               /* Top plan had better be an Append here. */
-                               Assert(IsA(plan, Append));
-                               Assert(((Append *) plan)->isTarget);
-                               subplans = ((Append *) plan)->appendplans;
-                               Assert(length(subplans) == estate->es_num_result_relations);
-                               resultRelInfo = estate->es_result_relations;
-                               while (subplans != NIL)
-                               {
-                                       Plan       *subplan = (Plan *) lfirst(subplans);
-                                       JunkFilter *j;
-
-                                       j = ExecInitJunkFilter(subplan->targetlist,
-                                                                                  ExecGetTupType(subplan),
-                                                         ExecAllocTableSlot(estate->es_tupleTable));
-                                       resultRelInfo->ri_junkFilter = j;
-                                       resultRelInfo++;
-                                       subplans = lnext(subplans);
-                               }
+                       JunkFilter *j;
 
-                               /*
-                                * Set active junkfilter too; at this point ExecInitAppend
-                                * has already selected an active result relation...
-                                */
-                               estate->es_junkFilter =
-                                       estate->es_result_relation_info->ri_junkFilter;
-                       }
-                       else
-                       {
-                               /* Normal case with just one JunkFilter */
-                               JunkFilter *j;
-
-                               j = ExecInitJunkFilter(plan->targetlist,
-                                                                          tupType,
-                                                         ExecAllocTableSlot(estate->es_tupleTable));
-                               estate->es_junkFilter = j;
-                               if (estate->es_result_relation_info)
-                                       estate->es_result_relation_info->ri_junkFilter = j;
-
-                               /* For SELECT, want to return the cleaned tuple type */
-                               if (operation == CMD_SELECT)
-                                       tupType = j->jf_cleanTupType;
-                       }
+                       j = ExecInitJunkFilter(planstate->plan->targetlist,
+                                                                  tupType->tdhasoid,
+                                                                  ExecInitExtraTupleSlot(estate));
+                       estate->es_junkFilter = j;
+
+                       /* Want to return the cleaned tuple type */
+                       tupType = j->jf_cleanTupType;
                }
-               else
-                       estate->es_junkFilter = NULL;
        }
 
-       /*
-        * initialize the "into" relation
-        */
-       intoRelationDesc = (Relation) NULL;
+       queryDesc->tupDesc = tupType;
+       queryDesc->planstate = planstate;
+}
 
-       if (operation == CMD_SELECT)
+/*
+ * Check that a proposed result relation is a legal target for the operation
+ *
+ * Generally the parser and/or planner should have noticed any such mistake
+ * already, but let's make sure.
+ *
+ * Note: when changing this function, you probably also need to look at
+ * CheckValidRowMarkRel.
+ */
+void
+CheckValidResultRel(Relation resultRel, CmdType operation)
+{
+       TriggerDesc *trigDesc = resultRel->trigdesc;
+       FdwRoutine *fdwroutine;
+
+       switch (resultRel->rd_rel->relkind)
        {
-               if (!parseTree->isPortal)
-               {
+               case RELKIND_RELATION:
+                       /* OK */
+                       break;
+               case RELKIND_SEQUENCE:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change sequence \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
+               case RELKIND_TOASTVALUE:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change TOAST relation \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
+               case RELKIND_VIEW:
+
                        /*
-                        * a select into table --- need to create the "into" table
+                        * Okay only if there's a suitable INSTEAD OF trigger.  Messages
+                        * here should match rewriteHandler.c's rewriteTargetView, except
+                        * that we omit errdetail because we haven't got the information
+                        * handy (and given that we really shouldn't get here anyway, it's
+                        * not worth great exertion to get).
                         */
-                       if (parseTree->into != NULL)
+                       switch (operation)
                        {
-                               char       *intoName;
-                               Oid                     namespaceId;
-                               Oid                     intoRelationId;
-                               TupleDesc       tupdesc;
-
-                               /*
-                                * find namespace to create in, check permissions
-                                */
-                               intoName = parseTree->into->relname;
-                               namespaceId = RangeVarGetCreationNamespace(parseTree->into);
-
-                               if (!isTempNamespace(namespaceId))
-                               {
-                                       AclResult       aclresult;
-
-                                       aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
-                                                                                                         ACL_CREATE);
-                                       if (aclresult != ACLCHECK_OK)
-                                               aclcheck_error(aclresult,
-                                                                          get_namespace_name(namespaceId));
-                               }
-
-                               /*
-                                * have to copy tupType to get rid of constraints
-                                */
-                               tupdesc = CreateTupleDescCopy(tupType);
-
-                               intoRelationId =
-                                       heap_create_with_catalog(intoName,
-                                                                                        namespaceId,
-                                                                                        tupdesc,
-                                                                                        RELKIND_RELATION,
-                                                                                        false,
-                                                                                        true,
-                                                                                        allowSystemTableMods);
-
-                               FreeTupleDesc(tupdesc);
-
-                               /*
-                                * Advance command counter so that the newly-created
-                                * relation's catalog tuples will be visible to heap_open.
-                                */
-                               CommandCounterIncrement();
-
-                               /*
-                                * If necessary, create a TOAST table for the into
-                                * relation. Note that AlterTableCreateToastTable ends
-                                * with CommandCounterIncrement(), so that the TOAST table
-                                * will be visible for insertion.
-                                */
-                               AlterTableCreateToastTable(intoRelationId, true);
-
-                               intoRelationDesc = heap_open(intoRelationId,
-                                                                                        AccessExclusiveLock);
+                               case CMD_INSERT:
+                                       if (!trigDesc || !trigDesc->trig_insert_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot insert into view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
+                                       break;
+                               case CMD_UPDATE:
+                                       if (!trigDesc || !trigDesc->trig_update_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot update view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
+                                       break;
+                               case CMD_DELETE:
+                                       if (!trigDesc || !trigDesc->trig_delete_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot delete from view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
+                                       break;
+                               default:
+                                       elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+                                       break;
                        }
-               }
+                       break;
+               case RELKIND_MATVIEW:
+                       if (!MatViewIncrementalMaintenanceIsEnabled())
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                                errmsg("cannot change materialized view \"%s\"",
+                                                               RelationGetRelationName(resultRel))));
+                       break;
+               case RELKIND_FOREIGN_TABLE:
+                       /* Okay only if the FDW supports it */
+                       fdwroutine = GetFdwRoutineForRelation(resultRel, false);
+                       switch (operation)
+                       {
+                               case CMD_INSERT:
+                                       if (fdwroutine->ExecForeignInsert == NULL)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                       errmsg("cannot insert into foreign table \"%s\"",
+                                                                  RelationGetRelationName(resultRel))));
+                                       if (fdwroutine->IsForeignRelUpdatable != NULL &&
+                                               (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                               errmsg("foreign table \"%s\" does not allow inserts",
+                                                          RelationGetRelationName(resultRel))));
+                                       break;
+                               case CMD_UPDATE:
+                                       if (fdwroutine->ExecForeignUpdate == NULL)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                                errmsg("cannot update foreign table \"%s\"",
+                                                                               RelationGetRelationName(resultRel))));
+                                       if (fdwroutine->IsForeignRelUpdatable != NULL &&
+                                               (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                               errmsg("foreign table \"%s\" does not allow updates",
+                                                          RelationGetRelationName(resultRel))));
+                                       break;
+                               case CMD_DELETE:
+                                       if (fdwroutine->ExecForeignDelete == NULL)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                       errmsg("cannot delete from foreign table \"%s\"",
+                                                                  RelationGetRelationName(resultRel))));
+                                       if (fdwroutine->IsForeignRelUpdatable != NULL &&
+                                               (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                               errmsg("foreign table \"%s\" does not allow deletes",
+                                                          RelationGetRelationName(resultRel))));
+                                       break;
+                               default:
+                                       elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+                                       break;
+                       }
+                       break;
+               default:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change relation \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
        }
-
-       estate->es_into_relation_descriptor = intoRelationDesc;
-
-       return tupType;
 }
 
 /*
- * Initialize ResultRelInfo data for one result relation
+ * Check that a proposed rowmark target relation is a legal target
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * they don't cover all cases.
  */
 static void
-initResultRelInfo(ResultRelInfo *resultRelInfo,
-                                 Index resultRelationIndex,
-                                 List *rangeTable,
-                                 CmdType operation)
+CheckValidRowMarkRel(Relation rel, RowMarkType markType)
 {
-       Oid                     resultRelationOid;
-       Relation        resultRelationDesc;
+       FdwRoutine *fdwroutine;
 
-       resultRelationOid = getrelid(resultRelationIndex, rangeTable);
-       resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
-
-       switch (resultRelationDesc->rd_rel->relkind)
+       switch (rel->rd_rel->relkind)
        {
+               case RELKIND_RELATION:
+                       /* OK */
+                       break;
                case RELKIND_SEQUENCE:
-                       elog(ERROR, "You can't change sequence relation %s",
-                                RelationGetRelationName(resultRelationDesc));
+                       /* Must disallow this because we don't vacuum sequences */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in sequence \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
                case RELKIND_TOASTVALUE:
-                       elog(ERROR, "You can't change toast relation %s",
-                                RelationGetRelationName(resultRelationDesc));
+                       /* We could allow this, but there seems no good reason to */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in TOAST relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
                case RELKIND_VIEW:
-                       elog(ERROR, "You can't change view relation %s",
-                                RelationGetRelationName(resultRelationDesc));
+                       /* Should not get here; planner should have expanded the view */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in view \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_MATVIEW:
+                       /* Allow referencing a matview, but not actual locking clauses */
+                       if (markType != ROW_MARK_REFERENCE)
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                          errmsg("cannot lock rows in materialized view \"%s\"",
+                                                         RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_FOREIGN_TABLE:
+                       /* Okay only if the FDW supports it */
+                       fdwroutine = GetFdwRoutineForRelation(rel, false);
+                       if (fdwroutine->RefetchForeignRow == NULL)
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                errmsg("cannot lock rows in foreign table \"%s\"",
+                                                               RelationGetRelationName(rel))));
+                       break;
+               default:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
        }
+}
 
+/*
+ * Initialize ResultRelInfo data for one result relation
+ *
+ * Caution: before Postgres 9.1, this function included the relkind checking
+ * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
+ * appropriate.  Be sure callers cover those needs.
+ */
+void
+InitResultRelInfo(ResultRelInfo *resultRelInfo,
+                                 Relation resultRelationDesc,
+                                 Index resultRelationIndex,
+                                 int instrument_options)
+{
        MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
        resultRelInfo->type = T_ResultRelInfo;
        resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
@@ -795,55 +1232,254 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
        resultRelInfo->ri_NumIndices = 0;
        resultRelInfo->ri_IndexRelationDescs = NULL;
        resultRelInfo->ri_IndexRelationInfo = NULL;
-       resultRelInfo->ri_TrigDesc = resultRelationDesc->trigdesc;
-       resultRelInfo->ri_TrigFunctions = NULL;
+       /* make a copy so as not to depend on relcache info not changing... */
+       resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
+       if (resultRelInfo->ri_TrigDesc)
+       {
+               int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
+
+               resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
+                       palloc0(n * sizeof(FmgrInfo));
+               resultRelInfo->ri_TrigWhenExprs = (List **)
+                       palloc0(n * sizeof(List *));
+               if (instrument_options)
+                       resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
+       }
+       else
+       {
+               resultRelInfo->ri_TrigFunctions = NULL;
+               resultRelInfo->ri_TrigWhenExprs = NULL;
+               resultRelInfo->ri_TrigInstrument = NULL;
+       }
+       if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+               resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
+       else
+               resultRelInfo->ri_FdwRoutine = NULL;
+       resultRelInfo->ri_FdwState = NULL;
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;
+       resultRelInfo->ri_projectReturning = NULL;
+}
+
+/*
+ *             ExecGetTriggerResultRel
+ *
+ * Get a ResultRelInfo for a trigger target relation.  Most of the time,
+ * triggers are fired on one of the result relations of the query, and so
+ * we can just return a member of the es_result_relations array.  (Note: in
+ * self-join situations there might be multiple members with the same OID;
+ * if so it doesn't matter which one we pick.)  However, it is sometimes
+ * necessary to fire triggers on other relations; this happens mainly when an
+ * RI update trigger queues additional triggers on other relations, which will
+ * be processed in the context of the outer query.  For efficiency's sake,
+ * we want to have a ResultRelInfo for those triggers too; that can avoid
+ * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
+ * ANALYZE to report the runtimes of such triggers.)  So we make additional
+ * ResultRelInfo's as needed, and save them in es_trig_target_relations.
+ */
+ResultRelInfo *
+ExecGetTriggerResultRel(EState *estate, Oid relid)
+{
+       ResultRelInfo *rInfo;
+       int                     nr;
+       ListCell   *l;
+       Relation        rel;
+       MemoryContext oldcontext;
+
+       /* First, search through the query result relations */
+       rInfo = estate->es_result_relations;
+       nr = estate->es_num_result_relations;
+       while (nr > 0)
+       {
+               if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
+                       return rInfo;
+               rInfo++;
+               nr--;
+       }
+       /* Nope, but maybe we already made an extra ResultRelInfo for it */
+       foreach(l, estate->es_trig_target_relations)
+       {
+               rInfo = (ResultRelInfo *) lfirst(l);
+               if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
+                       return rInfo;
+       }
+       /* Nope, so we need a new one */
+
+       /*
+        * Open the target relation's relcache entry.  We assume that an
+        * appropriate lock is still held by the backend from whenever the trigger
+        * event got queued, so we need take no new lock here.  Also, we need not
+        * recheck the relkind, so no need for CheckValidResultRel.
+        */
+       rel = heap_open(relid, NoLock);
+
+       /*
+        * Make the new entry in the right context.
+        */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+       rInfo = makeNode(ResultRelInfo);
+       InitResultRelInfo(rInfo,
+                                         rel,
+                                         0,            /* dummy rangetable index */
+                                         estate->es_instrument);
+       estate->es_trig_target_relations =
+               lappend(estate->es_trig_target_relations, rInfo);
+       MemoryContextSwitchTo(oldcontext);
+
+       /*
+        * Currently, we don't need any index information in ResultRelInfos used
+        * only for triggers, so no need to call ExecOpenIndices.
+        */
+
+       return rInfo;
+}
+
+/*
+ *             ExecContextForcesOids
+ *
+ * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
+ * we need to ensure that result tuples have space for an OID iff they are
+ * going to be stored into a relation that has OIDs.  In other contexts
+ * we are free to choose whether to leave space for OIDs in result tuples
+ * (we generally don't want to, but we do if a physical-tlist optimization
+ * is possible).  This routine checks the plan context and returns TRUE if the
+ * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
+ * *hasoids is set to the required value.
+ *
+ * One reason this is ugly is that all plan nodes in the plan tree will emit
+ * tuples with space for an OID, though we really only need the topmost node
+ * to do so.  However, node types like Sort don't project new tuples but just
+ * return their inputs, and in those cases the requirement propagates down
+ * to the input node.  Eventually we might make this code smart enough to
+ * recognize how far down the requirement really goes, but for now we just
+ * make all plan nodes do the same thing if the top level forces the choice.
+ *
+ * We assume that if we are generating tuples for INSERT or UPDATE,
+ * estate->es_result_relation_info is already set up to describe the target
+ * relation.  Note that in an UPDATE that spans an inheritance tree, some of
+ * the target relations may have OIDs and some not.  We have to make the
+ * decisions on a per-relation basis as we initialize each of the subplans of
+ * the ModifyTable node, so ModifyTable has to set es_result_relation_info
+ * while initializing each subplan.
+ *
+ * CREATE TABLE AS is even uglier, because we don't have the target relation's
+ * descriptor available when this code runs; we have to look aside at the
+ * flags passed to ExecutorStart().
+ */
+bool
+ExecContextForcesOids(PlanState *planstate, bool *hasoids)
+{
+       ResultRelInfo *ri = planstate->state->es_result_relation_info;
+
+       if (ri != NULL)
+       {
+               Relation        rel = ri->ri_RelationDesc;
+
+               if (rel != NULL)
+               {
+                       *hasoids = rel->rd_rel->relhasoids;
+                       return true;
+               }
+       }
+
+       if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
+       {
+               *hasoids = true;
+               return true;
+       }
+       if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
+       {
+               *hasoids = false;
+               return true;
+       }
+
+       return false;
+}
+
+/* ----------------------------------------------------------------
+ *             ExecPostprocessPlan
+ *
+ *             Give plan nodes a final chance to execute before shutdown
+ * ----------------------------------------------------------------
+ */
+static void
+ExecPostprocessPlan(EState *estate)
+{
+       ListCell   *lc;
+
+       /*
+        * Make sure nodes run forward.
+        */
+       estate->es_direction = ForwardScanDirection;
 
        /*
-        * If there are indices on the result relation, open them and save
-        * descriptors in the result relation info, so that we can add new
-        * index entries for the tuples we add/update.  We need not do this
-        * for a DELETE, however, since deletion doesn't affect indexes.
+        * Run any secondary ModifyTable nodes to completion, in case the main
+        * query did not fetch all rows from them.  (We do this to ensure that
+        * such nodes have predictable results.)
         */
-       if (resultRelationDesc->rd_rel->relhasindex &&
-               operation != CMD_DELETE)
-               ExecOpenIndices(resultRelInfo);
+       foreach(lc, estate->es_auxmodifytables)
+       {
+               PlanState  *ps = (PlanState *) lfirst(lc);
+
+               for (;;)
+               {
+                       TupleTableSlot *slot;
+
+                       /* Reset the per-output-tuple exprcontext each time */
+                       ResetPerTupleExprContext(estate);
+
+                       slot = ExecProcNode(ps);
+
+                       if (TupIsNull(slot))
+                               break;
+               }
+       }
 }
 
 /* ----------------------------------------------------------------
- *             EndPlan
+ *             ExecEndPlan
+ *
+ *             Cleans up the query plan -- closes files and frees up storage
  *
- *             Cleans up the query plan -- closes files and free up storages
+ * NOTE: we are no longer very worried about freeing storage per se
+ * in this code; FreeExecutorState should be guaranteed to release all
+ * memory that needs to be released.  What we are worried about doing
+ * is closing relations and dropping buffer pins.  Thus, for example,
+ * tuple tables must be cleared or dropped to ensure pins are released.
  * ----------------------------------------------------------------
  */
 static void
-EndPlan(Plan *plan, EState *estate)
+ExecEndPlan(PlanState *planstate, EState *estate)
 {
        ResultRelInfo *resultRelInfo;
        int                     i;
-       List       *l;
+       ListCell   *l;
 
        /*
-        * shut down any PlanQual processing we were doing
+        * shut down the node-type-specific query processing
         */
-       if (estate->es_evalPlanQual != NULL)
-               EndEvalPlanQual(estate);
+       ExecEndNode(planstate);
 
        /*
-        * shut down the node-type-specific query processing
+        * for subplans too
         */
-       ExecEndNode(plan, NULL);
+       foreach(l, estate->es_subplanstates)
+       {
+               PlanState  *subplanstate = (PlanState *) lfirst(l);
+
+               ExecEndNode(subplanstate);
+       }
 
        /*
-        * destroy the executor "tuple" table.
+        * destroy the executor's tuple table.  Actually we only care about
+        * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+        * the TupleTableSlots, since the containing memory context is about to go
+        * away anyway.
         */
-       ExecDropTupleTable(estate->es_tupleTable, true);
-       estate->es_tupleTable = NULL;
+       ExecResetTupleTable(estate->es_tupleTable, false);
 
        /*
-        * close the result relation(s) if any, but hold locks until xact
-        * commit.      Also clean up junkfilters if present.
+        * close the result relation(s) if any, but hold locks until xact commit.
         */
        resultRelInfo = estate->es_result_relations;
        for (i = estate->es_num_result_relations; i > 0; i--)
@@ -851,74 +1487,61 @@ EndPlan(Plan *plan, EState *estate)
                /* Close indices and then the relation itself */
                ExecCloseIndices(resultRelInfo);
                heap_close(resultRelInfo->ri_RelationDesc, NoLock);
-               /* Delete the junkfilter if any */
-               if (resultRelInfo->ri_junkFilter != NULL)
-                       ExecFreeJunkFilter(resultRelInfo->ri_junkFilter);
                resultRelInfo++;
        }
 
        /*
-        * close the "into" relation if necessary, again keeping lock
-        */
-       if (estate->es_into_relation_descriptor != NULL)
-               heap_close(estate->es_into_relation_descriptor, NoLock);
-
-       /*
-        * There might be a junkfilter without a result relation.
+        * likewise close any trigger target relations
         */
-       if (estate->es_num_result_relations == 0 &&
-               estate->es_junkFilter != NULL)
+       foreach(l, estate->es_trig_target_relations)
        {
-               ExecFreeJunkFilter(estate->es_junkFilter);
-               estate->es_junkFilter = NULL;
+               resultRelInfo = (ResultRelInfo *) lfirst(l);
+               /* Close indices and then the relation itself */
+               ExecCloseIndices(resultRelInfo);
+               heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        }
 
        /*
-        * close any relations selected FOR UPDATE, again keeping locks
+        * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
+        * locks
         */
-       foreach(l, estate->es_rowMark)
+       foreach(l, estate->es_rowMarks)
        {
-               execRowMark *erm = lfirst(l);
+               ExecRowMark *erm = (ExecRowMark *) lfirst(l);
 
-               heap_close(erm->relation, NoLock);
+               if (erm->relation)
+                       heap_close(erm->relation, NoLock);
        }
 }
 
 /* ----------------------------------------------------------------
  *             ExecutePlan
  *
- *             processes the query plan to retrieve 'numberTuples' tuples in the
- *             direction specified.
- *             Retrieves all tuples if numberTuples is 0
+ *             Processes the query plan until we have retrieved 'numberTuples' tuples,
+ *             moving in the specified direction.
  *
- *             result is either a slot containing the last tuple in the case
- *             of a SELECT or NULL otherwise.
+ *             Runs to completion if numberTuples is 0
  *
  * Note: the ctid attribute is a 'junk' attribute that is removed before the
  * user can see it
  * ----------------------------------------------------------------
  */
-static TupleTableSlot *
+static void
 ExecutePlan(EState *estate,
-                       Plan *plan,
+                       PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
-                       DestReceiver *destfunc)
+                       DestReceiver *dest)
 {
-       JunkFilter *junkfilter;
        TupleTableSlot *slot;
-       ItemPointer tupleid = NULL;
-       ItemPointerData tuple_ctid;
        long            current_tuple_count;
-       TupleTableSlot *result;
 
        /*
         * initialize local variables
         */
-       slot = NULL;
        current_tuple_count = 0;
-       result = NULL;
 
        /*
         * Set the direction.
@@ -926,10 +1549,8 @@ ExecutePlan(EState *estate,
        estate->es_direction = direction;
 
        /*
-        * Loop until we've processed the proper number of tuples from the
-        * plan.
+        * Loop until we've processed the proper number of tuples from the plan.
         */
-
        for (;;)
        {
                /* Reset the per-output-tuple exprcontext */
@@ -938,719 +1559,828 @@ ExecutePlan(EState *estate,
                /*
                 * Execute the plan and obtain a tuple
                 */
-lnext: ;
-               if (estate->es_useEvalPlan)
-               {
-                       slot = EvalPlanQualNext(estate);
-                       if (TupIsNull(slot))
-                               slot = ExecProcNode(plan, NULL);
-               }
-               else
-                       slot = ExecProcNode(plan, NULL);
+               slot = ExecProcNode(planstate);
 
                /*
                 * if the tuple is null, then we assume there is nothing more to
-                * process so we just return null...
+                * process so we just end the loop...
                 */
                if (TupIsNull(slot))
-               {
-                       result = NULL;
                        break;
-               }
 
                /*
-                * if we have a junk filter, then project a new tuple with the
-                * junk removed.
+                * If we have a junk filter, then project a new tuple with the junk
+                * removed.
                 *
                 * Store this new "clean" tuple in the junkfilter's resultSlot.
-                * (Formerly, we stored it back over the "dirty" tuple, which is
-                * WRONG because that tuple slot has the wrong descriptor.)
-                *
-                * Also, extract all the junk information we need.
+                * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
+                * because that tuple slot has the wrong descriptor.)
                 */
-               if ((junkfilter = estate->es_junkFilter) != (JunkFilter *) NULL)
-               {
-                       Datum           datum;
-                       HeapTuple       newTuple;
-                       bool            isNull;
-
-                       /*
-                        * extract the 'ctid' junk attribute.
-                        */
-                       if (operation == CMD_UPDATE || operation == CMD_DELETE)
-                       {
-                               if (!ExecGetJunkAttribute(junkfilter,
-                                                                                 slot,
-                                                                                 "ctid",
-                                                                                 &datum,
-                                                                                 &isNull))
-                                       elog(ERROR, "ExecutePlan: NO (junk) `ctid' was found!");
-
-                               /* shouldn't ever get a null result... */
-                               if (isNull)
-                                       elog(ERROR, "ExecutePlan: (junk) `ctid' is NULL!");
-
-                               tupleid = (ItemPointer) DatumGetPointer(datum);
-                               tuple_ctid = *tupleid;  /* make sure we don't free the
-                                                                                * ctid!! */
-                               tupleid = &tuple_ctid;
-                       }
-                       else if (estate->es_rowMark != NIL)
-                       {
-                               List       *l;
-
-               lmark:  ;
-                               foreach(l, estate->es_rowMark)
-                               {
-                                       execRowMark *erm = lfirst(l);
-                                       Buffer          buffer;
-                                       HeapTupleData tuple;
-                                       TupleTableSlot *newSlot;
-                                       int                     test;
-
-                                       if (!ExecGetJunkAttribute(junkfilter,
-                                                                                         slot,
-                                                                                         erm->resname,
-                                                                                         &datum,
-                                                                                         &isNull))
-                                               elog(ERROR, "ExecutePlan: NO (junk) `%s' was found!",
-                                                        erm->resname);
-
-                                       /* shouldn't ever get a null result... */
-                                       if (isNull)
-                                               elog(ERROR, "ExecutePlan: (junk) `%s' is NULL!",
-                                                        erm->resname);
-
-                                       tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
-                                       test = heap_mark4update(erm->relation, &tuple, &buffer,
-                                                                                       estate->es_snapshot->curcid);
-                                       ReleaseBuffer(buffer);
-                                       switch (test)
-                                       {
-                                               case HeapTupleSelfUpdated:
-                                               case HeapTupleMayBeUpdated:
-                                                       break;
-
-                                               case HeapTupleUpdated:
-                                                       if (XactIsoLevel == XACT_SERIALIZABLE)
-                                                               elog(ERROR, "Can't serialize access due to concurrent update");
-                                                       if (!(ItemPointerEquals(&(tuple.t_self),
-                                                                 (ItemPointer) DatumGetPointer(datum))))
-                                                       {
-                                                               newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
-                                                               if (!(TupIsNull(newSlot)))
-                                                               {
-                                                                       slot = newSlot;
-                                                                       estate->es_useEvalPlan = true;
-                                                                       goto lmark;
-                                                               }
-                                                       }
-
-                                                       /*
-                                                        * if tuple was deleted or PlanQual failed for
-                                                        * updated tuple - we must not return this
-                                                        * tuple!
-                                                        */
-                                                       goto lnext;
-
-                                               default:
-                                                       elog(ERROR, "Unknown status %u from heap_mark4update", test);
-                                                       return (NULL);
-                                       }
-                               }
-                       }
-
-                       /*
-                        * Finally create a new "clean" tuple with all junk attributes
-                        * removed
-                        */
-                       newTuple = ExecRemoveJunk(junkfilter, slot);
-
-                       slot = ExecStoreTuple(newTuple,         /* tuple to store */
-                                                                 junkfilter->jf_resultSlot,    /* dest slot */
-                                                                 InvalidBuffer,        /* this tuple has no buffer */
-                                                                 true);                /* tuple should be pfreed */
-               }
+               if (estate->es_junkFilter != NULL)
+                       slot = ExecFilterJunk(estate->es_junkFilter, slot);
 
                /*
-                * now that we have a tuple, do the appropriate thing with it..
-                * either return it to the user, add it to a relation someplace,
-                * delete it from a relation, or modify some of its attributes.
+                * If we are supposed to send the tuple somewhere, do so. (In
+                * practice, this is probably always the case at this point.)
                 */
-               switch (operation)
-               {
-                       case CMD_SELECT:
-                               ExecSelect(slot,                /* slot containing tuple */
-                                                  destfunc,    /* destination's tuple-receiver obj */
-                                                  estate);
-                               result = slot;
-                               break;
-
-                       case CMD_INSERT:
-                               ExecInsert(slot, tupleid, estate);
-                               result = NULL;
-                               break;
-
-                       case CMD_DELETE:
-                               ExecDelete(slot, tupleid, estate);
-                               result = NULL;
-                               break;
-
-                       case CMD_UPDATE:
-                               ExecUpdate(slot, tupleid, estate);
-                               result = NULL;
-                               break;
+               if (sendTuples)
+                       (*dest->receiveSlot) (slot, dest);
 
-                       default:
-                               elog(LOG, "ExecutePlan: unknown operation in queryDesc");
-                               result = NULL;
-                               break;
-               }
+               /*
+                * Count tuples processed, if this is a SELECT.  (For other operation
+                * types, the ModifyTable plan node must count the appropriate
+                * events.)
+                */
+               if (operation == CMD_SELECT)
+                       (estate->es_processed)++;
 
                /*
-                * check our tuple count.. if we've processed the proper number
-                * then quit, else loop again and process more tuples..
+                * check our tuple count.. if we've processed the proper number then
+                * quit, else loop again and process more tuples.  Zero numberTuples
+                * means no limit.
                 */
                current_tuple_count++;
-               if (numberTuples == current_tuple_count)
+               if (numberTuples && numberTuples == current_tuple_count)
                        break;
        }
-
-       /*
-        * here, result is either a slot containing a tuple in the case of a
-        * SELECT or NULL otherwise.
-        */
-       return result;
 }
 
-/* ----------------------------------------------------------------
- *             ExecSelect
+
+/*
+ * ExecRelCheck --- check that tuple meets constraints for result relation
  *
- *             SELECTs are easy.. we just pass the tuple to the appropriate
- *             print function.  The only complexity is when we do a
- *             "SELECT INTO", in which case we insert the tuple into
- *             the appropriate relation (note: this is a newly created relation
- *             so we don't need to worry about indices or locks.)
- * ----------------------------------------------------------------
+ * Returns NULL if OK, else name of failed check constraint
  */
-static void
-ExecSelect(TupleTableSlot *slot,
-                  DestReceiver *destfunc,
-                  EState *estate)
+static const char *
+ExecRelCheck(ResultRelInfo *resultRelInfo,
+                        TupleTableSlot *slot, EState *estate)
 {
-       HeapTuple       tuple;
-       TupleDesc       attrtype;
+       Relation        rel = resultRelInfo->ri_RelationDesc;
+       int                     ncheck = rel->rd_att->constr->num_check;
+       ConstrCheck *check = rel->rd_att->constr->check;
+       ExprContext *econtext;
+       MemoryContext oldContext;
+       List       *qual;
+       int                     i;
 
        /*
-        * get the heap tuple out of the tuple table slot
+        * If first time through for this result relation, build expression
+        * nodetrees for rel's constraint expressions.  Keep them in the per-query
+        * memory context so they'll survive throughout the query.
         */
-       tuple = slot->val;
-       attrtype = slot->ttc_tupleDescriptor;
-
-       /*
-        * insert the tuple into the "into relation"
-        */
-       if (estate->es_into_relation_descriptor != NULL)
+       if (resultRelInfo->ri_ConstraintExprs == NULL)
        {
-               heap_insert(estate->es_into_relation_descriptor, tuple,
-                                       estate->es_snapshot->curcid);
-               IncrAppended();
+               oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
+               resultRelInfo->ri_ConstraintExprs =
+                       (List **) palloc(ncheck * sizeof(List *));
+               for (i = 0; i < ncheck; i++)
+               {
+                       /* ExecQual wants implicit-AND form */
+                       qual = make_ands_implicit(stringToNode(check[i].ccbin));
+                       resultRelInfo->ri_ConstraintExprs[i] = (List *)
+                               ExecPrepareExpr((Expr *) qual, estate);
+               }
+               MemoryContextSwitchTo(oldContext);
        }
 
        /*
-        * send the tuple to the front end (or the screen)
+        * We will use the EState's per-tuple context for evaluating constraint
+        * expressions (creating it if it's not already there).
         */
-       (*destfunc->receiveTuple) (tuple, attrtype, destfunc);
-       IncrRetrieved();
-       (estate->es_processed)++;
-}
+       econtext = GetPerTupleExprContext(estate);
 
-/* ----------------------------------------------------------------
- *             ExecInsert
- *
- *             INSERTs are trickier.. we have to insert the tuple into
- *             the base relation and insert appropriate tuples into the
- *             index relations.
- * ----------------------------------------------------------------
- */
-static void
-ExecInsert(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       int                     numIndices;
-       Oid                     newId;
+       /* Arrange for econtext's scan tuple to be the tuple under test */
+       econtext->ecxt_scantuple = slot;
 
-       /*
-        * get the heap tuple out of the tuple table slot
-        */
-       tuple = slot->val;
+       /* And evaluate the constraints */
+       for (i = 0; i < ncheck; i++)
+       {
+               qual = resultRelInfo->ri_ConstraintExprs[i];
 
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
+               /*
+                * NOTE: SQL specifies that a NULL result from a constraint expression
+                * is not to be treated as a failure.  Therefore, tell ExecQual to
+                * return TRUE for NULL.
+                */
+               if (!ExecQual(qual, econtext, true))
+                       return check[i].ccname;
+       }
 
-       /* BEFORE ROW INSERT Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-         resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
-       {
-               HeapTuple       newtuple;
+       /* NULL result means no error */
+       return NULL;
+}
+
+void
+ExecConstraints(ResultRelInfo *resultRelInfo,
+                               TupleTableSlot *slot, EState *estate)
+{
+       Relation        rel = resultRelInfo->ri_RelationDesc;
+       TupleDesc       tupdesc = RelationGetDescr(rel);
+       TupleConstr *constr = tupdesc->constr;
+       Bitmapset  *modifiedCols;
+       Bitmapset  *insertedCols;
+       Bitmapset  *updatedCols;
 
-               newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
+       Assert(constr);
 
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
+       if (constr->has_not_null)
+       {
+               int                     natts = tupdesc->natts;
+               int                     attrChk;
 
-               if (newtuple != tuple)  /* modified by Trigger(s) */
+               for (attrChk = 1; attrChk <= natts; attrChk++)
                {
-                       /*
-                        * Insert modified tuple into tuple table slot, replacing the
-                        * original.  We assume that it was allocated in per-tuple
-                        * memory context, and therefore will go away by itself. The
-                        * tuple table slot should not try to clear it.
-                        */
-                       ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
-                       tuple = newtuple;
+                       if (tupdesc->attrs[attrChk - 1]->attnotnull &&
+                               slot_attisnull(slot, attrChk))
+                       {
+                               char       *val_desc;
+
+                               insertedCols = GetInsertedColumns(resultRelInfo, estate);
+                               updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+                               modifiedCols = bms_union(insertedCols, updatedCols);
+                               val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+                                                                                                                slot,
+                                                                                                                tupdesc,
+                                                                                                                modifiedCols,
+                                                                                                                64);
+
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_NOT_NULL_VIOLATION),
+                                                errmsg("null value in column \"%s\" violates not-null constraint",
+                                                         NameStr(tupdesc->attrs[attrChk - 1]->attname)),
+                                                val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
+                                                errtablecol(rel, attrChk)));
+                       }
                }
        }
 
-       /*
-        * Check the constraints of the tuple
-        */
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints("ExecInsert", resultRelInfo, slot, estate);
-
-       /*
-        * insert the tuple
-        */
-       newId = heap_insert(resultRelationDesc, tuple,
-                                               estate->es_snapshot->curcid);
-
-       IncrAppended();
-       (estate->es_processed)++;
-       estate->es_lastoid = newId;
-       setLastTid(&(tuple->t_self));
-
-       /*
-        * process indices
-        *
-        * Note: heap_insert adds a new tuple to a relation.  As a side effect,
-        * the tupleid of the new tuple is placed in the new tuple's t_ctid
-        * field.
-        */
-       numIndices = resultRelInfo->ri_NumIndices;
-       if (numIndices > 0)
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
+       if (constr->num_check > 0)
+       {
+               const char *failed;
 
-       /* AFTER ROW INSERT Triggers */
-       if (resultRelInfo->ri_TrigDesc)
-               ExecARInsertTriggers(estate, resultRelInfo, tuple);
+               if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
+               {
+                       char       *val_desc;
+
+                       insertedCols = GetInsertedColumns(resultRelInfo, estate);
+                       updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+                       modifiedCols = bms_union(insertedCols, updatedCols);
+                       val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+                                                                                                        slot,
+                                                                                                        tupdesc,
+                                                                                                        modifiedCols,
+                                                                                                        64);
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_CHECK_VIOLATION),
+                                        errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
+                                                       RelationGetRelationName(rel), failed),
+                         val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
+                                        errtableconstraint(rel, failed)));
+               }
+       }
 }
 
-/* ----------------------------------------------------------------
- *             ExecDelete
+/*
+ * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
+ * of the specified kind.
  *
- *             DELETE is like UPDATE, we delete the tuple and its
- *             index tuples.
- * ----------------------------------------------------------------
+ * Note that this needs to be called multiple times to ensure that all kinds of
+ * WITH CHECK OPTIONs are handled (both those from views which have the WITH
+ * CHECK OPTION set and from row level security policies).  See ExecInsert()
+ * and ExecUpdate().
  */
-static void
-ExecDelete(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  EState *estate)
+void
+ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
+                                        TupleTableSlot *slot, EState *estate)
 {
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       ItemPointerData ctid;
-       int                     result;
+       Relation        rel = resultRelInfo->ri_RelationDesc;
+       TupleDesc       tupdesc = RelationGetDescr(rel);
+       ExprContext *econtext;
+       ListCell   *l1,
+                          *l2;
 
        /*
-        * get information on the (current) result relation
+        * We will use the EState's per-tuple context for evaluating constraint
+        * expressions (creating it if it's not already there).
         */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW DELETE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-         resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
-       {
-               bool            dodelete;
-
-               dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);
+       econtext = GetPerTupleExprContext(estate);
 
-               if (!dodelete)                  /* "do nothing" */
-                       return;
-       }
+       /* Arrange for econtext's scan tuple to be the tuple under test */
+       econtext->ecxt_scantuple = slot;
 
-       /*
-        * delete the tuple
-        */
-ldelete:;
-       result = heap_delete(resultRelationDesc, tupleid,
-                                                &ctid,
-                                                estate->es_snapshot->curcid);
-       switch (result)
+       /* Check each of the constraints */
+       forboth(l1, resultRelInfo->ri_WithCheckOptions,
+                       l2, resultRelInfo->ri_WithCheckOptionExprs)
        {
-               case HeapTupleSelfUpdated:
-                       return;
+               WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
+               ExprState  *wcoExpr = (ExprState *) lfirst(l2);
 
-               case HeapTupleMayBeUpdated:
-                       break;
+               /*
+                * Skip any WCOs which are not the kind we are looking for at this
+                * time.
+                */
+               if (wco->kind != kind)
+                       continue;
 
-               case HeapTupleUpdated:
-                       if (XactIsoLevel == XACT_SERIALIZABLE)
-                               elog(ERROR, "Can't serialize access due to concurrent update");
-                       else if (!(ItemPointerEquals(tupleid, &ctid)))
-                       {
-                               TupleTableSlot *epqslot = EvalPlanQual(estate,
-                                                          resultRelInfo->ri_RangeTableIndex, &ctid);
+               /*
+                * WITH CHECK OPTION checks are intended to ensure that the new tuple
+                * is visible (in the case of a view) or that it passes the
+                * 'with-check' policy (in the case of row security). If the qual
+                * evaluates to NULL or FALSE, then the new tuple won't be included in
+                * the view or doesn't pass the 'with-check' policy for the table.  We
+                * need ExecQual to return FALSE for NULL to handle the view case (the
+                * opposite of what we do above for CHECK constraints).
+                */
+               if (!ExecQual((List *) wcoExpr, econtext, false))
+               {
+                       char       *val_desc;
+                       Bitmapset  *modifiedCols;
+                       Bitmapset  *insertedCols;
+                       Bitmapset  *updatedCols;
 
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = ctid;
-                                       goto ldelete;
-                               }
+                       switch (wco->kind)
+                       {
+                                       /*
+                                        * For WITH CHECK OPTIONs coming from views, we might be
+                                        * able to provide the details on the row, depending on
+                                        * the permissions on the relation (that is, if the user
+                                        * could view it directly anyway).  For RLS violations, we
+                                        * don't include the data since we don't know if the user
+                                        * should be able to view the tuple, as that depends on
+                                        * the USING policy.
+                                        */
+                               case WCO_VIEW_CHECK:
+                                       insertedCols = GetInsertedColumns(resultRelInfo, estate);
+                                       updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+                                       modifiedCols = bms_union(insertedCols, updatedCols);
+                                       val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+                                                                                                                        slot,
+                                                                                                                        tupdesc,
+                                                                                                                        modifiedCols,
+                                                                                                                        64);
+
+                                       ereport(ERROR,
+                                                       (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
+                                         errmsg("new row violates WITH CHECK OPTION for \"%s\"",
+                                                        wco->relname),
+                                                        val_desc ? errdetail("Failing row contains %s.",
+                                                                                                 val_desc) : 0));
+                                       break;
+                               case WCO_RLS_INSERT_CHECK:
+                               case WCO_RLS_UPDATE_CHECK:
+                                       if (wco->polname != NULL)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                                        errmsg("new row violates row level security policy \"%s\" for \"%s\"",
+                                                                       wco->polname, wco->relname)));
+                                       else
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                                        errmsg("new row violates row level security policy for \"%s\"",
+                                                                       wco->relname)));
+                                       break;
+                               case WCO_RLS_CONFLICT_CHECK:
+                                       if (wco->polname != NULL)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                                        errmsg("new row violates row level security policy \"%s\" (USING expression) for \"%s\"",
+                                                                       wco->polname, wco->relname)));
+                                       else
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                                        errmsg("new row violates row level security policy (USING expression) for \"%s\"",
+                                                                       wco->relname)));
+                                       break;
+                               default:
+                                       elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
+                                       break;
                        }
-                       /* tuple already deleted; nothing to do */
-                       return;
-
-               default:
-                       elog(ERROR, "Unknown status %u from heap_delete", result);
-                       return;
+               }
        }
+}
 
-       IncrDeleted();
-       (estate->es_processed)++;
+/*
+ * ExecBuildSlotValueDescription -- construct a string representing a tuple
+ *
+ * This is intentionally very similar to BuildIndexValueDescription, but
+ * unlike that function, we truncate long field values (to at most maxfieldlen
+ * bytes).  That seems necessary here since heap field values could be very
+ * long, whereas index entries typically aren't so wide.
+ *
+ * Also, unlike the case with index entries, we need to be prepared to ignore
+ * dropped columns.  We used to use the slot's tuple descriptor to decode the
+ * data, but the slot's descriptor doesn't identify dropped columns, so we
+ * now need to be passed the relation's descriptor.
+ *
+ * Note that, like BuildIndexValueDescription, if the user does not have
+ * permission to view any of the columns involved, a NULL is returned.  Unlike
+ * BuildIndexValueDescription, if the user has access to view a subset of the
+ * columns involved, that subset will be returned with a key identifying which
+ * columns they are.
+ */
+static char *
+ExecBuildSlotValueDescription(Oid reloid,
+                                                         TupleTableSlot *slot,
+                                                         TupleDesc tupdesc,
+                                                         Bitmapset *modifiedCols,
+                                                         int maxfieldlen)
+{
+       StringInfoData buf;
+       StringInfoData collist;
+       bool            write_comma = false;
+       bool            write_comma_collist = false;
+       int                     i;
+       AclResult       aclresult;
+       bool            table_perm = false;
+       bool            any_perm = false;
 
        /*
-        * Note: Normally one would think that we have to delete index tuples
-        * associated with the heap tuple now..
-        *
-        * ... but in POSTGRES, we have no need to do this because the vacuum
-        * daemon automatically opens an index scan and deletes index tuples
-        * when it finds deleted heap tuples. -cim 9/27/89
+        * Check if RLS is enabled and should be active for the relation; if so,
+        * then don't return anything.  Otherwise, go through normal permission
+        * checks.
         */
+       if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
+               return NULL;
 
-       /* AFTER ROW DELETE Triggers */
-       if (resultRelInfo->ri_TrigDesc)
-               ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
-}
+       initStringInfo(&buf);
 
-/* ----------------------------------------------------------------
- *             ExecUpdate
- *
- *             note: we can't run UPDATE queries with transactions
- *             off because UPDATEs are actually INSERTs and our
- *             scan will mistakenly loop forever, updating the tuple
- *             it just inserted..      This should be fixed but until it
- *             is, we don't want to get stuck in an infinite loop
- *             which corrupts your database..
- * ----------------------------------------------------------------
- */
-static void
-ExecUpdate(TupleTableSlot *slot,
-                       ItemPointer tupleid,
-                       EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       ItemPointerData ctid;
-       int                     result;
-       int                     numIndices;
+       appendStringInfoChar(&buf, '(');
 
        /*
-        * abort the operation if not running transactions
+        * Check if the user has permissions to see the row.  Table-level SELECT
+        * allows access to all columns.  If the user does not have table-level
+        * SELECT then we check each column and include those the user has SELECT
+        * rights on.  Additionally, we always include columns the user provided
+        * data for.
         */
-       if (IsBootstrapProcessingMode())
+       aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
+       if (aclresult != ACLCHECK_OK)
        {
-               elog(WARNING, "ExecReplace: replace can't run without transactions");
-               return;
+               /* Set up the buffer for the column list */
+               initStringInfo(&collist);
+               appendStringInfoChar(&collist, '(');
        }
+       else
+               table_perm = any_perm = true;
 
-       /*
-        * get the heap tuple out of the tuple table slot
-        */
-       tuple = slot->val;
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
+       /* Make sure the tuple is fully deconstructed */
+       slot_getallattrs(slot);
 
-       /* BEFORE ROW UPDATE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-         resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+       for (i = 0; i < tupdesc->natts; i++)
        {
-               HeapTuple       newtuple;
+               bool            column_perm = false;
+               char       *val;
+               int                     vallen;
 
-               newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
-                                                                               tupleid, tuple);
-
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
+               /* ignore dropped columns */
+               if (tupdesc->attrs[i]->attisdropped)
+                       continue;
 
-               if (newtuple != tuple)  /* modified by Trigger(s) */
+               if (!table_perm)
                {
                        /*
-                        * Insert modified tuple into tuple table slot, replacing the
-                        * original.  We assume that it was allocated in per-tuple
-                        * memory context, and therefore will go away by itself. The
-                        * tuple table slot should not try to clear it.
+                        * No table-level SELECT, so need to make sure they either have
+                        * SELECT rights on the column or that they have provided the data
+                        * for the column.  If not, omit this column from the error
+                        * message.
                         */
-                       ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
-                       tuple = newtuple;
-               }
-       }
-
-       /*
-        * Check the constraints of the tuple
-        *
-        * If we generate a new candidate tuple after EvalPlanQual testing, we
-        * must loop back here and recheck constraints.  (We don't need to
-        * redo triggers, however.      If there are any BEFORE triggers then
-        * trigger.c will have done mark4update to lock the correct tuple, so
-        * there's no need to do them again.)
-        */
-lreplace:;
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints("ExecReplace", resultRelInfo, slot, estate);
+                       aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
+                                                                                         GetUserId(), ACL_SELECT);
+                       if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
+                                                         modifiedCols) || aclresult == ACLCHECK_OK)
+                       {
+                               column_perm = any_perm = true;
 
-       /*
-        * replace the heap tuple
-        */
-       result = heap_update(resultRelationDesc, tupleid, tuple,
-                                                &ctid,
-                                                estate->es_snapshot->curcid);
-       switch (result)
-       {
-               case HeapTupleSelfUpdated:
-                       return;
+                               if (write_comma_collist)
+                                       appendStringInfoString(&collist, ", ");
+                               else
+                                       write_comma_collist = true;
 
-               case HeapTupleMayBeUpdated:
-                       break;
+                               appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
+                       }
+               }
 
-               case HeapTupleUpdated:
-                       if (XactIsoLevel == XACT_SERIALIZABLE)
-                               elog(ERROR, "Can't serialize access due to concurrent update");
-                       else if (!(ItemPointerEquals(tupleid, &ctid)))
+               if (table_perm || column_perm)
+               {
+                       if (slot->tts_isnull[i])
+                               val = "null";
+                       else
                        {
-                               TupleTableSlot *epqslot = EvalPlanQual(estate,
-                                                          resultRelInfo->ri_RangeTableIndex, &ctid);
+                               Oid                     foutoid;
+                               bool            typisvarlena;
 
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = ctid;
-                                       tuple = ExecRemoveJunk(estate->es_junkFilter, epqslot);
-                                       slot = ExecStoreTuple(tuple,
-                                                                       estate->es_junkFilter->jf_resultSlot,
-                                                                                 InvalidBuffer, true);
-                                       goto lreplace;
-                               }
+                               getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
+                                                                 &foutoid, &typisvarlena);
+                               val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
                        }
-                       /* tuple already deleted; nothing to do */
-                       return;
 
-               default:
-                       elog(ERROR, "Unknown status %u from heap_update", result);
-                       return;
+                       if (write_comma)
+                               appendStringInfoString(&buf, ", ");
+                       else
+                               write_comma = true;
+
+                       /* truncate if needed */
+                       vallen = strlen(val);
+                       if (vallen <= maxfieldlen)
+                               appendStringInfoString(&buf, val);
+                       else
+                       {
+                               vallen = pg_mbcliplen(val, vallen, maxfieldlen);
+                               appendBinaryStringInfo(&buf, val, vallen);
+                               appendStringInfoString(&buf, "...");
+                       }
+               }
        }
 
-       IncrReplaced();
-       (estate->es_processed)++;
+       /* If we end up with zero columns being returned, then return NULL. */
+       if (!any_perm)
+               return NULL;
 
-       /*
-        * Note: instead of having to update the old index tuples associated
-        * with the heap tuple, all we do is form and insert new index tuples.
-        * This is because UPDATEs are actually DELETEs and INSERTs and index
-        * tuple deletion is done automagically by the vacuum daemon. All we
-        * do is insert new index tuples.  -cim 9/27/89
-        */
+       appendStringInfoChar(&buf, ')');
 
-       /*
-        * process indices
-        *
-        * heap_update updates a tuple in the base relation by invalidating it
-        * and then inserting a new tuple to the relation.      As a side effect,
-        * the tupleid of the new tuple is placed in the new tuple's t_ctid
-        * field.  So we now insert index tuples using the new tupleid stored
-        * there.
-        */
+       if (!table_perm)
+       {
+               appendStringInfoString(&collist, ") = ");
+               appendStringInfoString(&collist, buf.data);
 
-       numIndices = resultRelInfo->ri_NumIndices;
-       if (numIndices > 0)
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
+               return collist.data;
+       }
 
-       /* AFTER ROW UPDATE Triggers */
-       if (resultRelInfo->ri_TrigDesc)
-               ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
+       return buf.data;
 }
 
-static char *
-ExecRelCheck(ResultRelInfo *resultRelInfo,
-                        TupleTableSlot *slot, EState *estate)
+
+/*
+ * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
+ * given ResultRelInfo
+ *
+ * Returns LockTupleExclusive if any key column is being updated, else the
+ * weaker LockTupleNoKeyExclusive.
+ */
+LockTupleMode
+ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
 {
-       Relation        rel = resultRelInfo->ri_RelationDesc;
-       int                     ncheck = rel->rd_att->constr->num_check;
-       ConstrCheck *check = rel->rd_att->constr->check;
-       ExprContext *econtext;
-       MemoryContext oldContext;
-       List       *qual;
-       int                     i;
+       Bitmapset  *keyCols;
+       Bitmapset  *updatedCols;
 
        /*
-        * If first time through for this result relation, build expression
-        * nodetrees for rel's constraint expressions.  Keep them in the
-        * per-query memory context so they'll survive throughout the query.
+        * Compute lock mode to use.  If columns that are part of the key have not
+        * been modified, then we can use a weaker lock, allowing for better
+        * concurrency.
         */
-       if (resultRelInfo->ri_ConstraintExprs == NULL)
-       {
-               oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
-               resultRelInfo->ri_ConstraintExprs =
-                       (List **) palloc(ncheck * sizeof(List *));
-               for (i = 0; i < ncheck; i++)
-               {
-                       qual = (List *) stringToNode(check[i].ccbin);
-                       resultRelInfo->ri_ConstraintExprs[i] = qual;
-               }
-               MemoryContextSwitchTo(oldContext);
-       }
+       updatedCols = GetUpdatedColumns(relinfo, estate);
+       keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
+                                                                                INDEX_ATTR_BITMAP_KEY);
 
-       /*
-        * We will use the EState's per-tuple context for evaluating
-        * constraint expressions (creating it if it's not already there).
-        */
-       econtext = GetPerTupleExprContext(estate);
+       if (bms_overlap(keyCols, updatedCols))
+               return LockTupleExclusive;
 
-       /* Arrange for econtext's scan tuple to be the tuple under test */
-       econtext->ecxt_scantuple = slot;
+       return LockTupleNoKeyExclusive;
+}
 
-       /* And evaluate the constraints */
-       for (i = 0; i < ncheck; i++)
+/*
+ * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ *
+ * If no such struct, either return NULL or throw error depending on missing_ok
+ *
+ * Note: this is a simple linear scan of estate->es_rowMarks.
+ */
+ExecRowMark *
+ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
+{
+       ListCell   *lc;
+
+       foreach(lc, estate->es_rowMarks)
        {
-               qual = resultRelInfo->ri_ConstraintExprs[i];
+               ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
 
-               /*
-                * NOTE: SQL92 specifies that a NULL result from a constraint
-                * expression is not to be treated as a failure.  Therefore, tell
-                * ExecQual to return TRUE for NULL.
-                */
-               if (!ExecQual(qual, econtext, true))
-                       return check[i].ccname;
+               if (erm->rti == rti)
+                       return erm;
        }
-
-       /* NULL result means no error */
-       return (char *) NULL;
+       if (!missing_ok)
+               elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+       return NULL;
 }
 
-void
-ExecConstraints(const char *caller, ResultRelInfo *resultRelInfo,
-                               TupleTableSlot *slot, EState *estate)
+/*
+ * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
+ *
+ * Inputs are the underlying ExecRowMark struct and the targetlist of the
+ * input plan node (not planstate node!).  We need the latter to find out
+ * the column numbers of the resjunk columns.
+ */
+ExecAuxRowMark *
+ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
 {
-       Relation        rel = resultRelInfo->ri_RelationDesc;
-       HeapTuple       tuple = slot->val;
-       TupleConstr *constr = rel->rd_att->constr;
+       /* palloc0 leaves any attno fields we don't fill in below as zero */
+       ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
+       char            resname[32];
 
-       Assert(constr);
+       aerm->rowmark = erm;
 
-       if (constr->has_not_null)
+       /* Look up the resjunk columns associated with this rowmark */
+       if (erm->markType != ROW_MARK_COPY)
        {
-               int                     natts = rel->rd_att->natts;
-               int                     attrChk;
-
-               for (attrChk = 1; attrChk <= natts; attrChk++)
-               {
-                       if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
-                               heap_attisnull(tuple, attrChk))
-                               elog(ERROR, "%s: Fail to add null value in not null attribute %s",
-                                        caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
-               }
+               /* need ctid for all methods other than COPY */
+               snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
+               aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                          resname);
+               if (!AttributeNumberIsValid(aerm->ctidAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
        }
-
-       if (constr->num_check > 0)
+       else
        {
-               char       *failed;
+               /* need wholerow if COPY */
+               snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
+               aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                               resname);
+               if (!AttributeNumberIsValid(aerm->wholeAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
+       }
 
-               if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
-                       elog(ERROR, "%s: rejected due to CHECK constraint %s",
-                                caller, failed);
+       /* if child rel, need tableoid */
+       if (erm->rti != erm->prti)
+       {
+               snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
+               aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                          resname);
+               if (!AttributeNumberIsValid(aerm->toidAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
        }
+
+       return aerm;
 }
 
+
+/*
+ * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
+ * process the updated version under READ COMMITTED rules.
+ *
+ * See backend/executor/README for some info about how this works.
+ */
+
+
 /*
  * Check a modified tuple to see if we want to process its updated version
  * under READ COMMITTED rules.
  *
- * See backend/executor/README for some info about how this works.
+ *     estate - outer executor state data
+ *     epqstate - state for EvalPlanQual rechecking
+ *     relation - table containing tuple
+ *     rti - rangetable index of table containing tuple
+ *     lockmode - requested tuple lock mode
+ *     *tid - t_ctid from the outdated tuple (ie, next updated version)
+ *     priorXmax - t_xmax from the outdated tuple
+ *
+ * *tid is also an output parameter: it's modified to hold the TID of the
+ * latest version of the tuple (note this may be changed even on failure)
+ *
+ * Returns a slot containing the new candidate update/delete tuple, or
+ * NULL if we determine we shouldn't process the row.
+ *
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
  */
 TupleTableSlot *
-EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
+EvalPlanQual(EState *estate, EPQState *epqstate,
+                        Relation relation, Index rti, int lockmode,
+                        ItemPointer tid, TransactionId priorXmax)
 {
-       evalPlanQual *epq;
-       EState     *epqstate;
-       Relation        relation;
-       HeapTupleData tuple;
-       HeapTuple       copyTuple = NULL;
-       int                     rtsize;
-       bool            endNode;
+       TupleTableSlot *slot;
+       HeapTuple       copyTuple;
 
-       Assert(rti != 0);
+       /* rangetable indexes are 1-based, so zero is never valid here */
+       Assert(rti > 0);
 
        /*
-        * find relation containing target tuple
+        * Get and lock the updated version of the row; if fail, return NULL.
         */
-       if (estate->es_result_relation_info != NULL &&
-               estate->es_result_relation_info->ri_RangeTableIndex == rti)
-               relation = estate->es_result_relation_info->ri_RelationDesc;
-       else
-       {
-               List       *l;
+       copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
+                                                                 tid, priorXmax);
 
-               relation = NULL;
-               foreach(l, estate->es_rowMark)
-               {
-                       if (((execRowMark *) lfirst(l))->rti == rti)
-                       {
-                               relation = ((execRowMark *) lfirst(l))->relation;
-                               break;
-                       }
-               }
-               if (relation == NULL)
-                       elog(ERROR, "EvalPlanQual: can't find RTE %d", (int) rti);
-       }
+       if (copyTuple == NULL)
+               return NULL;
+
+       /*
+        * For UPDATE/DELETE we have to return tid of actual row we're executing
+        * PQ for.
+        */
+       *tid = copyTuple->t_self;
+
+       /*
+        * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
+        */
+       EvalPlanQualBegin(epqstate, estate);
+
+       /*
+        * Free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
+        */
+       EvalPlanQualSetTuple(epqstate, rti, copyTuple);
+
+       /*
+        * Fetch any non-locked source rows
+        */
+       EvalPlanQualFetchRowMarks(epqstate);
+
+       /*
+        * Run the EPQ query.  We assume it will return at most one tuple.
+        */
+       slot = EvalPlanQualNext(epqstate);
+
+       /*
+        * If we got a tuple, force the slot to materialize the tuple so that it
+        * is not dependent on any local state in the EPQ query (in particular,
+        * it's highly likely that the slot contains references to any pass-by-ref
+        * datums that may be present in copyTuple).  As with the next step, this
+        * is to guard against early re-use of the EPQ query.
+        */
+       if (!TupIsNull(slot))
+               (void) ExecMaterializeSlot(slot);
+
+       /*
+        * Clear out the test tuple.  This is needed in case the EPQ query is
+        * re-used to test a tuple for a different relation.  (Not clear that can
+        * really happen, but let's be safe.)
+        */
+       EvalPlanQualSetTuple(epqstate, rti, NULL);
+
+       return slot;
+}
+
+/*
+ * Fetch a copy of the newest version of an outdated tuple
+ *
+ *     estate - executor state data
+ *     relation - table containing tuple
+ *     lockmode - requested tuple lock mode
+ *     wait_policy - requested lock wait policy
+ *     *tid - t_ctid from the outdated tuple (ie, next updated version)
+ *     priorXmax - t_xmax from the outdated tuple
+ *
+ * Returns a palloc'd copy of the newest tuple version, or NULL if we find
+ * that there is no newest version (ie, the row was deleted not updated).
+ * We also return NULL if the tuple is locked and the wait policy is to skip
+ * such tuples.
+ *
+ * If successful, we have locked the newest tuple version, so caller does not
+ * need to worry about it changing anymore.
+ *
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
+ */
+HeapTuple
+EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
+                                 LockWaitPolicy wait_policy,
+                                 ItemPointer tid, TransactionId priorXmax)
+{
+       HeapTuple       copyTuple = NULL;
+       HeapTupleData tuple;
+       SnapshotData SnapshotDirty;
 
        /*
-        * fetch tid tuple
+        * fetch target tuple
         *
         * Loop here to deal with updated or busy tuples
         */
+       InitDirtySnapshot(SnapshotDirty);
        tuple.t_self = *tid;
        for (;;)
        {
                Buffer          buffer;
 
-               if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
+               if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
                {
-                       TransactionId xwait = SnapshotDirty->xmax;
+                       HTSU_Result test;
+                       HeapUpdateFailureData hufd;
+
+                       /*
+                        * If xmin isn't what we're expecting, the slot must have been
+                        * recycled and reused for an unrelated tuple.  This implies that
+                        * the latest version of the row was deleted, so we need do
+                        * nothing.  (Should be safe to examine xmin without getting
+                        * buffer's content lock.  We assume reading a TransactionId to be
+                        * atomic, and Xmin never changes in an existing tuple, except to
+                        * invalid or frozen, and neither of those can match priorXmax.)
+                        */
+                       if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
+                                                                        priorXmax))
+                       {
+                               ReleaseBuffer(buffer);
+                               return NULL;
+                       }
 
-                       if (TransactionIdIsValid(SnapshotDirty->xmin))
-                               elog(ERROR, "EvalPlanQual: t_xmin is uncommitted ?!");
+                       /* otherwise xmin should not be dirty... */
+                       if (TransactionIdIsValid(SnapshotDirty.xmin))
+                               elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
 
                        /*
-                        * If tuple is being updated by other transaction then we have
-                        * to wait for its commit/abort.
+                        * If tuple is being updated by other transaction then we have to
+                        * wait for its commit/abort, or die trying.
                         */
-                       if (TransactionIdIsValid(xwait))
+                       if (TransactionIdIsValid(SnapshotDirty.xmax))
                        {
                                ReleaseBuffer(buffer);
-                               XactLockTableWait(xwait);
-                               continue;
+                               switch (wait_policy)
+                               {
+                                       case LockWaitBlock:
+                                               XactLockTableWait(SnapshotDirty.xmax,
+                                                                                 relation, &tuple.t_self,
+                                                                                 XLTW_FetchUpdated);
+                                               break;
+                                       case LockWaitSkip:
+                                               if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+                                                       return NULL;            /* skip instead of waiting */
+                                               break;
+                                       case LockWaitError:
+                                               if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+                                                       ereport(ERROR,
+                                                                       (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+                                                                        errmsg("could not obtain lock on row in relation \"%s\"",
+                                                                               RelationGetRelationName(relation))));
+                                               break;
+                               }
+                               continue;               /* loop back to repeat heap_fetch */
+                       }
+
+                       /*
+                        * If tuple was inserted by our own transaction, we have to check
+                        * cmin against es_output_cid: cmin >= current CID means our
+                        * command cannot see the tuple, so we should ignore it. Otherwise
+                        * heap_lock_tuple() will throw an error, and so would any later
+                        * attempt to update or delete the tuple.  (We need not check cmax
+                        * because HeapTupleSatisfiesDirty will consider a tuple deleted
+                        * by our transaction dead, regardless of cmax.) We just checked
+                        * that priorXmax == xmin, so we can test that variable instead of
+                        * doing HeapTupleHeaderGetXmin again.
+                        */
+                       if (TransactionIdIsCurrentTransactionId(priorXmax) &&
+                               HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
+                       {
+                               ReleaseBuffer(buffer);
+                               return NULL;
+                       }
+
+                       /*
+                        * This is a live tuple, so now try to lock it.
+                        */
+                       test = heap_lock_tuple(relation, &tuple,
+                                                                  estate->es_output_cid,
+                                                                  lockmode, wait_policy,
+                                                                  false, &buffer, &hufd);
+                       /* We now have two pins on the buffer, get rid of one */
+                       ReleaseBuffer(buffer);
+
+                       switch (test)
+                       {
+                               case HeapTupleSelfUpdated:
+
+                                       /*
+                                        * The target tuple was already updated or deleted by the
+                                        * current command, or by a later command in the current
+                                        * transaction.  We *must* ignore the tuple in the former
+                                        * case, so as to avoid the "Halloween problem" of
+                                        * repeated update attempts.  In the latter case it might
+                                        * be sensible to fetch the updated tuple instead, but
+                                        * doing so would require changing heap_update and
+                                        * heap_delete to not complain about updating "invisible"
+                                        * tuples, which seems pretty scary (heap_lock_tuple will
+                                        * not complain, but few callers expect
+                                        * HeapTupleInvisible, and we're not one of them).  So for
+                                        * now, treat the tuple as deleted and do not process.
+                                        */
+                                       ReleaseBuffer(buffer);
+                                       return NULL;
+
+                               case HeapTupleMayBeUpdated:
+                                       /* successfully locked */
+                                       break;
+
+                               case HeapTupleUpdated:
+                                       ReleaseBuffer(buffer);
+                                       if (IsolationUsesXactSnapshot())
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                                                                errmsg("could not serialize access due to concurrent update")));
+
+                                       /* Should not encounter speculative tuple on recheck */
+                                       Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
+                                       if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
+                                       {
+                                               /* it was updated, so look at the updated version */
+                                               tuple.t_self = hufd.ctid;
+                                               /* updated row should have xmin matching this xmax */
+                                               priorXmax = hufd.xmax;
+                                               continue;
+                                       }
+                                       /* tuple was deleted, so give up */
+                                       return NULL;
+
+                               case HeapTupleWouldBlock:
+                                       ReleaseBuffer(buffer);
+                                       return NULL;
+
+                               case HeapTupleInvisible:
+                                       elog(ERROR, "attempted to lock invisible tuple");
+
+                               default:
+                                       ReleaseBuffer(buffer);
+                                       elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+                                                test);
+                                       return NULL;    /* keep compiler quiet */
                        }
 
                        /*
@@ -1662,276 +2392,516 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
                }
 
                /*
-                * Oops! Invalid tuple. Have to check is it updated or deleted.
-                * Note that it's possible to get invalid SnapshotDirty->tid if
-                * tuple updated by this transaction. Have we to check this ?
+                * If the referenced slot was actually empty, the latest version of
+                * the row must have been deleted, so we need do nothing.
                 */
-               if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
-                       !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
+               if (tuple.t_data == NULL)
                {
-                       /* updated, so look at the updated copy */
-                       tuple.t_self = SnapshotDirty->tid;
-                       continue;
+                       ReleaseBuffer(buffer);
+                       return NULL;
                }
 
                /*
-                * Deleted or updated by this transaction; forget it.
+                * As above, if xmin isn't what we're expecting, do nothing.
                 */
-               return NULL;
+               if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
+                                                                priorXmax))
+               {
+                       ReleaseBuffer(buffer);
+                       return NULL;
+               }
+
+               /*
+                * If we get here, the tuple was found but failed SnapshotDirty.
+                * Assuming the xmin is either a committed xact or our own xact (as it
+                * certainly should be if we're trying to modify the tuple), this must
+                * mean that the row was updated or deleted by either a committed xact
+                * or our own xact.  If it was deleted, we can ignore it; if it was
+                * updated then chain up to the next version and repeat the whole
+                * process.
+                *
+                * As above, it should be safe to examine xmax and t_ctid without the
+                * buffer content lock, because they can't be changing.
+                */
+               if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
+               {
+                       /* deleted, so forget about it */
+                       ReleaseBuffer(buffer);
+                       return NULL;
+               }
+
+               /* updated, so look at the updated row */
+               tuple.t_self = tuple.t_data->t_ctid;
+               /* updated row should have xmin matching this xmax */
+               priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
+               ReleaseBuffer(buffer);
+               /* loop back to fetch next in chain */
        }
 
        /*
-        * For UPDATE/DELETE we have to return tid of actual row we're
-        * executing PQ for.
+        * Return the copied tuple
         */
-       *tid = tuple.t_self;
+       return copyTuple;
+}
 
-       /*
-        * Need to run a recheck subquery.      Find or create a PQ stack entry.
-        */
-       epq = (evalPlanQual *) estate->es_evalPlanQual;
-       rtsize = length(estate->es_range_table);
-       endNode = true;
+/*
+ * EvalPlanQualInit -- initialize during creation of a plan state node
+ * that might need to invoke EPQ processing.
+ *
+ * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
+ * with EvalPlanQualSetPlan.
+ */
+void
+EvalPlanQualInit(EPQState *epqstate, EState *estate,
+                                Plan *subplan, List *auxrowmarks, int epqParam)
+{
+       /* Mark the EPQ state inactive */
+       epqstate->estate = NULL;
+       epqstate->planstate = NULL;
+       epqstate->origslot = NULL;
+       /* ... and remember data that EvalPlanQualBegin will need */
+       epqstate->plan = subplan;
+       epqstate->arowMarks = auxrowmarks;
+       epqstate->epqParam = epqParam;
+}
 
-       if (epq != NULL && epq->rti == 0)
-       {
-               /* Top PQ stack entry is idle, so re-use it */
-               Assert(!(estate->es_useEvalPlan) &&
-                          epq->estate.es_evalPlanQual == NULL);
-               epq->rti = rti;
-               endNode = false;
-       }
+/*
+ * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
+ *
+ * We need this so that ModifyTable can deal with multiple subplans.
+ */
+void
+EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
+{
+       /* If we have a live EPQ query, shut it down */
+       EvalPlanQualEnd(epqstate);
+       /* And set/change the plan pointer */
+       epqstate->plan = subplan;
+       /* The rowmarks depend on the plan, too */
+       epqstate->arowMarks = auxrowmarks;
+}
+
+/*
+ * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
+ *
+ * NB: passed tuple must be palloc'd; it may get freed later
+ */
+void
+EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
+{
+       EState     *estate = epqstate->estate;
+
+       Assert(rti > 0);
 
        /*
-        * If this is request for another RTE - Ra, - then we have to check
-        * wasn't PlanQual requested for Ra already and if so then Ra' row was
-        * updated again and we have to re-start old execution for Ra and
-        * forget all what we done after Ra was suspended. Cool? -:))
+        * free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
         */
-       if (epq != NULL && epq->rti != rti &&
-               epq->estate.es_evTuple[rti - 1] != NULL)
+       if (estate->es_epqTuple[rti - 1] != NULL)
+               heap_freetuple(estate->es_epqTuple[rti - 1]);
+       estate->es_epqTuple[rti - 1] = tuple;
+       estate->es_epqTupleSet[rti - 1] = true;
+}
+
+/*
+ * Fetch back the current test tuple (if any) for the specified RTI
+ */
+HeapTuple
+EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
+{
+       EState     *estate = epqstate->estate;
+
+       Assert(rti > 0);
+
+       return estate->es_epqTuple[rti - 1];
+}
+
/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 *
 * Only non-locking marks are handled here (ROW_MARK_REFERENCE fetches the
 * row afresh by ctid or via the FDW; ROW_MARK_COPY extracts a whole-row
 * Var from origslot).  Locking marks are the caller's business and are
 * rejected with an error below.  Each fetched row is installed with
 * EvalPlanQualSetTuple so the EPQ scan nodes will return it.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
	ListCell   *l;

	Assert(epqstate->origslot != NULL);

	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
		ExecRowMark *erm = aerm->rowmark;
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;

		if (RowMarkRequiresRowShareLock(erm->markType))
			elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");

		/* clear any leftover test tuple for this rel */
		EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

		/* if child rel, must check whether it produced this row */
		if (erm->rti != erm->prti)
		{
			Oid			tableoid;

			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->toidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			tableoid = DatumGetObjectId(datum);

			Assert(OidIsValid(erm->relid));
			if (tableoid != erm->relid)
			{
				/* this child is inactive right now */
				continue;
			}
		}

		if (erm->markType == ROW_MARK_REFERENCE)
		{
			HeapTuple	copyTuple;

			Assert(erm->relation != NULL);

			/* fetch the tuple's ctid */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->ctidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;

			/* fetch requests on foreign tables must be passed to their FDW */
			if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
			{
				FdwRoutine *fdwroutine;
				bool		updated = false;

				fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
				/* this should have been checked already, but let's be safe */
				if (fdwroutine->RefetchForeignRow == NULL)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						   errmsg("cannot lock rows in foreign table \"%s\"",
								  RelationGetRelationName(erm->relation))));
				copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
														  erm,
														  datum,
														  &updated);
				if (copyTuple == NULL)
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				/*
				 * Ideally we'd insist on updated == false here, but that
				 * assumes that FDWs can track that exactly, which they might
				 * not be able to.  So just ignore the flag.
				 */
			}
			else
			{
				/* ordinary table, fetch the tuple */
				Buffer		buffer;

				/*
				 * SnapshotAny: the row version was already qualified when
				 * origslot was produced, so just refetch it by ctid.
				 */
				tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
				if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
								false, NULL))
					elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

				/* successful, copy tuple (then we can drop the buffer pin) */
				copyTuple = heap_copytuple(&tuple);
				ReleaseBuffer(buffer);
			}

			/* store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
		}
		else
		{
			HeapTupleHeader td;

			Assert(erm->markType == ROW_MARK_COPY);

			/* fetch the whole-row Var for the relation */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 aerm->wholeAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			td = DatumGetHeapTupleHeader(datum);

			/* build a temporary HeapTuple control structure */
			tuple.t_len = HeapTupleHeaderGetDatumLength(td);
			tuple.t_data = td;
			/* relation might be a foreign table, if so provide tableoid */
			tuple.t_tableOid = erm->relid;
			/* also copy t_ctid in case there's valid data there */
			tuple.t_self = td->t_ctid;

			/* copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
		}
	}
}
+
+/*
+ * Fetch the next row (if any) from EvalPlanQual testing
+ *
+ * (In practice, there should never be more than one row...)
+ */
+TupleTableSlot *
+EvalPlanQualNext(EPQState *epqstate)
+{
+       MemoryContext oldcontext;
+       TupleTableSlot *slot;
 
-       Assert(epq->rti == rti);
-       epqstate = &(epq->estate);
+       oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
+       slot = ExecProcNode(epqstate->planstate);
+       MemoryContextSwitchTo(oldcontext);
 
-       /*
-        * Ok - we're requested for the same RTE.  Unfortunately we still have
-        * to end and restart execution of the plan, because ExecReScan
-        * wouldn't ensure that upper plan nodes would reset themselves.  We
-        * could make that work if insertion of the target tuple were
-        * integrated with the Param mechanism somehow, so that the upper plan
-        * nodes know that their children's outputs have changed.
-        */
-       if (endNode)
+       return slot;
+}
+
/*
 * Initialize or reset an EvalPlanQual state tree
 *
 * On first use, builds a child EState/plan tree via EvalPlanQualStart;
 * on later uses, resets the existing child tree so it can be re-run
 * against a new test tuple.
 */
void
EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
	EState	   *estate = epqstate->estate;

	if (estate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		int			rtsize = list_length(parentestate->es_range_table);
		PlanState  *planstate = epqstate->planstate;

		/* Forget which scan nodes have already returned their EPQ tuple */
		MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->nParamExec > 0)
		{
			int			i = parentestate->es_plannedstmt->nParamExec;

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				estate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				estate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		planstate->chgParam = bms_add_member(planstate->chgParam,
											 epqstate->epqParam);
	}
}
 
-static TupleTableSlot *
-EvalPlanQualNext(EState *estate)
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
	EState	   *estate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(parentestate->es_range_table);

	epqstate->estate = estate = CreateExecutorState();

	/* all child-EState allocations below live in its own query context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 *
	 * The ResultRelInfo array management is trickier than it looks.  We
	 * create a fresh array for the child but copy all the content from the
	 * parent.  This is because it's okay for the child to share any
	 * per-relation state the parent has already created --- but if the child
	 * sets up any ResultRelInfo fields, such as its own junkfilter, that
	 * state must *not* propagate back to the parent.  (For one thing, the
	 * pointed-to data is in a memory context that won't last long enough.)
	 */
	estate->es_direction = ForwardScanDirection;
	estate->es_snapshot = parentestate->es_snapshot;
	estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	estate->es_range_table = parentestate->es_range_table;
	estate->es_plannedstmt = parentestate->es_plannedstmt;
	estate->es_junkFilter = parentestate->es_junkFilter;
	estate->es_output_cid = parentestate->es_output_cid;
	if (parentestate->es_num_result_relations > 0)
	{
		int			numResultRelations = parentestate->es_num_result_relations;
		ResultRelInfo *resultRelInfos;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		memcpy(resultRelInfos, parentestate->es_result_relations,
			   numResultRelations * sizeof(ResultRelInfo));
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
	}
	/* es_result_relation_info must NOT be copied */
	/* es_trig_target_relations must NOT be copied */
	estate->es_rowMarks = parentestate->es_rowMarks;
	estate->es_top_eflags = parentestate->es_top_eflags;
	estate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	estate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->nParamExec > 0)
	{
		int			i = parentestate->es_plannedstmt->nParamExec;

		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			estate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			estate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Each EState must have its own es_epqScanDone state, but if we have
	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
	 * sub-rechecks to inherit the values being examined by an outer recheck.
	 */
	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
	if (parentestate->es_epqTuple != NULL)
	{
		estate->es_epqTuple = parentestate->es_epqTuple;
		estate->es_epqTupleSet = parentestate->es_epqTupleSet;
	}
	else
	{
		estate->es_epqTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
		estate->es_epqTupleSet = (bool *)
			palloc0(rtsize * sizeof(bool));
	}

	/*
	 * Each estate also has its own tuple table.
	 */
	estate->es_tupleTable = NIL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries. Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(estate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, estate, 0);
		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->planstate = ExecInitNode(planTree, estate, 0);

	MemoryContextSwitchTo(oldcontext);
}
 
-static void
-EndEvalPlanQual(EState *estate)
/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 * (There probably shouldn't be any of the latter, but just in case...)
 *
 * Safe to call on an inactive EPQState (estate == NULL); it does nothing.
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	if (estate == NULL)
		return;					/* idle, so nothing to do */

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* shut down the main plan tree first, then any SubPlan trees */
	ExecEndNode(epqstate->planstate);

	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* close any trigger target relations attached to this EState */
	foreach(l, estate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	/* this releases everything allocated in the child query context */
	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->estate = NULL;
	epqstate->planstate = NULL;
	epqstate->origslot = NULL;
}