* INTERFACE ROUTINES
* ExecutorStart()
* ExecutorRun()
+ * ExecutorFinish()
* ExecutorEnd()
*
- * The old ExecutorMain() has been replaced by ExecutorStart(),
- * ExecutorRun() and ExecutorEnd()
- *
- * These three procedures are the external interfaces to the executor.
+ * These four procedures are the external interface to the executor.
* In each case, the query descriptor is required as an argument.
*
- * ExecutorStart() must be called at the beginning of execution of any
- * query plan and ExecutorEnd() should always be called at the end of
- * execution of a plan.
+ * ExecutorStart must be called at the beginning of execution of any
+ * query plan and ExecutorEnd must always be called at the end of
+ * execution of a plan (unless it is aborted due to error).
*
 * ExecutorRun accepts direction and count arguments that specify whether
 * the plan is to be executed forwards or backwards, and for how many tuples.
+ * In some cases ExecutorRun may be called multiple times to process all
+ * the tuples for a plan. It is also acceptable to stop short of executing
+ * the whole plan (but only if it is a SELECT).
+ *
+ * ExecutorFinish must be called after the final ExecutorRun call and
+ * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
+ * which should also omit ExecutorRun.
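+ *
+ * As an illustration only (not a definition), a typical caller that has
+ * already built a QueryDesc drives these entry points roughly as:
+ *
+ *		ExecutorStart(queryDesc, 0);
+ *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
+ *		ExecutorFinish(queryDesc);
+ *		ExecutorEnd(queryDesc);
+ *
+ * where a count of 0 means "fetch all tuples".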
*
- * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.333 2009/10/12 18:10:41 tgl Exp $
+ * src/backend/executor/execMain.c
*
*-------------------------------------------------------------------------
*/
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
-#include "executor/instrument.h"
+#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
+#include "storage/smgr.h"
+#include "tcop/utility.h"
#include "utils/acl.h"
+#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"
-/* Hooks for plugins to get control in ExecutorStart/Run/End() */
+/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
+ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
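+
+/*
+ * A loadable plugin would typically install such a hook from its _PG_init()
+ * function, saving any previous hook value so it can chain to it. A minimal
+ * hypothetical sketch (my_ExecutorFinish is not part of this file):
+ *
+ *		static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
+ *
+ *		static void
+ *		my_ExecutorFinish(QueryDesc *queryDesc)
+ *		{
+ *			if (prev_ExecutorFinish)
+ *				(*prev_ExecutorFinish) (queryDesc);
+ *			else
+ *				standard_ExecutorFinish(queryDesc);
+ *		}
+ *
+ *		void
+ *		_PG_init(void)
+ *		{
+ *			prev_ExecutorFinish = ExecutorFinish_hook;
+ *			ExecutorFinish_hook = my_ExecutorFinish;
+ *		}
+ */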
-typedef struct evalPlanQual
-{
- Index rti;
- EState *estate;
- PlanState *planstate;
- PlanState *origplanstate;
- TupleTableSlot *resultslot;
- struct evalPlanQual *next; /* stack of active PlanQual plans */
- struct evalPlanQual *free; /* list of free PlanQual plans */
-} evalPlanQual;
+/* Hook for plugin to get control in ExecCheckRTPerms() */
+ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
+static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
+static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
DestReceiver *dest);
-static void EndEvalPlanQual(EState *estate);
-static void ExecCheckRTPerms(List *rangeTable);
-static void ExecCheckRTEPerms(RangeTblEntry *rte);
+static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
-static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
- Plan *planTree, evalPlanQual *priorepq);
-static void EvalPlanQualStop(evalPlanQual *epq);
+static char *ExecBuildSlotValueDescription(TupleTableSlot *slot,
+ int maxfieldlen);
+static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
+ Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
* This routine must be called at the beginning of any execution of any
* query plan
*
- * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
+ * only because some places use QueryDescs for utility commands). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * Fill in parameters, if any, from queryDesc
+ * Fill in external parameters, if any, from queryDesc; and allocate
+ * workspace for internal parameters
*/
estate->es_param_list_info = queryDesc->params;
switch (queryDesc->operation)
{
case CMD_SELECT:
- /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
+
+ /*
+ * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
+ * mark tuples
+ */
if (queryDesc->plannedstmt->intoClause != NULL ||
- queryDesc->plannedstmt->rowMarks != NIL)
+ queryDesc->plannedstmt->rowMarks != NIL ||
+ queryDesc->plannedstmt->hasModifyingCTE)
estate->es_output_cid = GetCurrentCommandId(true);
+
+ /*
+ * A SELECT without modifying CTEs can't possibly queue triggers,
+ * so force skip-triggers mode. This is just a marginal efficiency
+ * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
+ * all that expensive, but we might as well do it.
+ */
+ if (!queryDesc->plannedstmt->hasModifyingCTE)
+ eflags |= EXEC_FLAG_SKIP_TRIGGERS;
break;
case CMD_INSERT:
*/
estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
- estate->es_instrument = queryDesc->doInstrument;
+ estate->es_top_eflags = eflags;
+ estate->es_instrument = queryDesc->instrument_options;
/*
* Initialize the plan state tree
*/
InitPlan(queryDesc, eflags);
+ /*
+ * Set up an AFTER-trigger statement context, unless told not to, or
+ * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
+ */
+ if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
+ AfterTriggerBeginQuery();
+
MemoryContextSwitchTo(oldcontext);
}
estate = queryDesc->estate;
Assert(estate != NULL);
+ Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
/*
* Switch into per-query memory context
*/
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
- /* Allow instrumentation of ExecutorRun overall runtime */
+ /* Allow instrumentation of Executor overall runtime */
if (queryDesc->totaltime)
InstrStartNode(queryDesc->totaltime);
if (sendTuples)
(*dest->rStartup) (dest, operation, queryDesc->tupDesc);
+ /*
+ * if it's CREATE TABLE AS ... WITH NO DATA, skip plan execution
+ */
+ if (estate->es_select_into &&
+ queryDesc->plannedstmt->intoClause->skipData)
+ direction = NoMovementScanDirection;
+
/*
* run plan
*/
MemoryContextSwitchTo(oldcontext);
}
+/* ----------------------------------------------------------------
+ * ExecutorFinish
+ *
+ * This routine must be called after the last ExecutorRun call.
+ * It performs cleanup such as firing AFTER triggers. It is
+ * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
+ * include these actions in the total runtime.
+ *
+ * We provide a function hook variable that lets loadable plugins
+ * get control when ExecutorFinish is called. Such a plugin would
+ * normally call standard_ExecutorFinish().
+ *
+ * ----------------------------------------------------------------
+ */
+void
+ExecutorFinish(QueryDesc *queryDesc)
+{
+ if (ExecutorFinish_hook)
+ (*ExecutorFinish_hook) (queryDesc);
+ else
+ standard_ExecutorFinish(queryDesc);
+}
+
+void
+standard_ExecutorFinish(QueryDesc *queryDesc)
+{
+ EState *estate;
+ MemoryContext oldcontext;
+
+ /* sanity checks */
+ Assert(queryDesc != NULL);
+
+ estate = queryDesc->estate;
+
+ Assert(estate != NULL);
+ Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
+ /* This should be run once and only once per Executor instance */
+ Assert(!estate->es_finished);
+
+ /* Switch into per-query memory context */
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+ /* Allow instrumentation of Executor overall runtime */
+ if (queryDesc->totaltime)
+ InstrStartNode(queryDesc->totaltime);
+
+ /* Run ModifyTable nodes to completion */
+ ExecPostprocessPlan(estate);
+
+ /* Execute queued AFTER triggers, unless told not to */
+ if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
+ AfterTriggerEndQuery(estate);
+
+ if (queryDesc->totaltime)
+ InstrStopNode(queryDesc->totaltime, 0);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ estate->es_finished = true;
+}
+
/* ----------------------------------------------------------------
* ExecutorEnd
*
Assert(estate != NULL);
+ /*
+ * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+ * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+ * might forget to call it.
+ */
+ Assert(estate->es_finished ||
+ (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
/*
* Switch into per-query memory context to run ExecEndPlan
*/
/*
* rescan plan
*/
- ExecReScan(queryDesc->planstate, NULL);
+ ExecReScan(queryDesc->planstate);
MemoryContextSwitchTo(oldcontext);
}
/*
* ExecCheckRTPerms
* Check access permissions for all relations listed in a range table.
+ *
+ * Returns true if permissions are adequate. Otherwise, throws an appropriate
+ * error if ereport_on_violation is true, or simply returns false.
*/
-static void
-ExecCheckRTPerms(List *rangeTable)
+bool
+ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
ListCell *l;
+ bool result = true;
foreach(l, rangeTable)
{
- ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+
+ result = ExecCheckRTEPerms(rte);
+ if (!result)
+ {
+ Assert(rte->rtekind == RTE_RELATION);
+ if (ereport_on_violation)
+ aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
+ get_rel_name(rte->relid));
+ return false;
+ }
}
+
+ if (ExecutorCheckPerms_hook)
+ result = (*ExecutorCheckPerms_hook) (rangeTable,
+ ereport_on_violation);
+ return result;
}
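+
+/*
+ * A security extension could add checks of its own by setting
+ * ExecutorCheckPerms_hook; a hypothetical sketch (my_check_perms is not
+ * part of this file):
+ *
+ *		static bool
+ *		my_check_perms(List *rangeTable, bool ereport_on_violation)
+ *		{
+ *			ListCell   *l;
+ *
+ *			foreach(l, rangeTable)
+ *			{
+ *				RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+ *
+ *				... apply extra policy to rte; ereport or return false ...
+ *			}
+ *			return true;
+ *		}
+ */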
/*
* ExecCheckRTEPerms
* Check access permissions for a single RTE.
*/
-static void
+static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
AclMode requiredPerms;
* Join, subquery, and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
- return;
+ return true;
/*
* No work if requiredPerms is empty.
*/
requiredPerms = rte->requiredPerms;
if (requiredPerms == 0)
- return;
+ return true;
relOid = rte->relid;
* we can fail straight away.
*/
if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ return false;
/*
* Check to see if we have the needed privileges at column level.
{
if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
ACLMASK_ANY) != ACLCHECK_OK)
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ return false;
}
tmpset = bms_copy(rte->selectedCols);
/* Whole-row reference, must have priv on all cols */
if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
ACLMASK_ALL) != ACLCHECK_OK)
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ return false;
}
else
{
- if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
- != ACLCHECK_OK)
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ if (pg_attribute_aclcheck(relOid, col, userid,
+ ACL_SELECT) != ACLCHECK_OK)
+ return false;
}
}
bms_free(tmpset);
{
if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
ACLMASK_ANY) != ACLCHECK_OK)
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ return false;
}
tmpset = bms_copy(rte->modifiedCols);
}
else
{
- if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
- != ACLCHECK_OK)
- aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
- get_rel_name(relOid));
+ if (pg_attribute_aclcheck(relOid, col, userid,
+ remainingPerms) != ACLCHECK_OK)
+ return false;
}
}
bms_free(tmpset);
}
}
+ return true;
}
/*
* Check that the query does not imply any writes to non-temp tables.
+ *
+ * Note: in a Hot Standby slave this would need to reject writes to temp
+ * tables as well; but an HS slave can't have created any temp tables
+ * in the first place, so no need to check that.
*/
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
/*
* CREATE TABLE AS or SELECT INTO?
*
- * XXX should we allow this if the destination is temp?
+ * XXX should we allow this if the destination is temp? Considering that
+ * it would still require catalog changes, probably not.
*/
if (plannedstmt->intoClause != NULL)
- goto fail;
+ PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
/* Fail if write permissions are requested on any non-temp table */
foreach(l, plannedstmt->rtable)
if (isTempNamespace(get_rel_namespace(rte->relid)))
continue;
- goto fail;
+ PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
}
-
- return;
-
-fail:
- ereport(ERROR,
- (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
- errmsg("transaction is read-only")));
}
/*
* Do permissions checks
*/
- ExecCheckRTPerms(rangeTable);
+ ExecCheckRTPerms(rangeTable, true);
/*
* initialize the node's execution state
*/
estate->es_range_table = rangeTable;
+ estate->es_plannedstmt = plannedstmt;
/*
* initialize result relation stuff, and open/lock the result rels.
*
- * We must do this before initializing the plan tree, else we might
- * try to do a lock upgrade if a result rel is also a source rel.
+ * We must do this before initializing the plan tree, else we might try to
+ * do a lock upgrade if a result rel is also a source rel.
*/
if (plannedstmt->resultRelations)
{
InitResultRelInfo(resultRelInfo,
resultRelation,
resultRelationIndex,
- operation,
estate->es_instrument);
resultRelInfo++;
}
/*
* Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
- * before we initialize the plan tree, else we'd be risking lock
- * upgrades. While we are at it, build the ExecRowMark list.
+ * before we initialize the plan tree, else we'd be risking lock upgrades.
+ * While we are at it, build the ExecRowMark list.
*/
estate->es_rowMarks = NIL;
foreach(l, plannedstmt->rowMarks)
{
- RowMarkClause *rc = (RowMarkClause *) lfirst(l);
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
Oid relid;
Relation relation;
ExecRowMark *erm;
if (rc->isParent)
continue;
- relid = getrelid(rc->rti, rangeTable);
- relation = heap_open(relid, RowShareLock);
+ switch (rc->markType)
+ {
+ case ROW_MARK_EXCLUSIVE:
+ case ROW_MARK_SHARE:
+ relid = getrelid(rc->rti, rangeTable);
+ relation = heap_open(relid, RowShareLock);
+ break;
+ case ROW_MARK_REFERENCE:
+ relid = getrelid(rc->rti, rangeTable);
+ relation = heap_open(relid, AccessShareLock);
+ break;
+ case ROW_MARK_COPY:
+ /* there's no real table here ... */
+ relation = NULL;
+ break;
+ default:
+ elog(ERROR, "unrecognized markType: %d", rc->markType);
+ relation = NULL; /* keep compiler quiet */
+ break;
+ }
+
+ /* Check that relation is a legal target for marking */
+ if (relation)
+ CheckValidRowMarkRel(relation, rc->markType);
+
erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
erm->relation = relation;
erm->rti = rc->rti;
erm->prti = rc->prti;
erm->rowmarkId = rc->rowmarkId;
- erm->forUpdate = rc->forUpdate;
+ erm->markType = rc->markType;
erm->noWait = rc->noWait;
- /* remaining fields are filled during LockRows plan node init */
- erm->ctidAttNo = InvalidAttrNumber;
- erm->toidAttNo = InvalidAttrNumber;
ItemPointerSetInvalid(&(erm->curCtid));
estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
}
*/
estate->es_tupleTable = NIL;
estate->es_trig_tuple_slot = NULL;
+ estate->es_trig_oldtup_slot = NULL;
+ estate->es_trig_newtup_slot = NULL;
/* mark EvalPlanQual not active */
- estate->es_plannedstmt = plannedstmt;
- estate->es_evalPlanQual = NULL;
- estate->es_evTupleNull = NULL;
- estate->es_evTuple = NULL;
+ estate->es_epqTuple = NULL;
+ estate->es_epqTupleSet = NULL;
+ estate->es_epqScanDone = NULL;
/*
* Initialize private state information for each SubPlan. We must do this
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT queries need a
- * filter if there are any junk attrs in the top-level tlist.
+ * Initialize the junk filter if needed. SELECT queries need a filter if
+ * there are any junk attrs in the top-level tlist.
*/
if (operation == CMD_SELECT)
{
}
/*
- * Initialize ResultRelInfo data for one result relation
+ * Check that a proposed result relation is a legal target for the operation
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * let's make sure. In the view case we do need a test here, because if the
+ * view wasn't rewritten by a rule, it had better have an INSTEAD trigger.
+ *
+ * Note: when changing this function, you probably also need to look at
+ * CheckValidRowMarkRel.
*/
void
-InitResultRelInfo(ResultRelInfo *resultRelInfo,
- Relation resultRelationDesc,
- Index resultRelationIndex,
- CmdType operation,
- bool doInstrument)
+CheckValidResultRel(Relation resultRel, CmdType operation)
{
- /*
- * Check valid relkind ... parser and/or planner should have noticed this
- * already, but let's make sure.
- */
- switch (resultRelationDesc->rd_rel->relkind)
+ TriggerDesc *trigDesc = resultRel->trigdesc;
+
+ switch (resultRel->rd_rel->relkind)
{
case RELKIND_RELATION:
/* OK */
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRel))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change TOAST relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRel))));
break;
case RELKIND_VIEW:
+ switch (operation)
+ {
+ case CMD_INSERT:
+ if (!trigDesc || !trigDesc->trig_insert_instead_row)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot insert into view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
+ break;
+ case CMD_UPDATE:
+ if (!trigDesc || !trigDesc->trig_update_instead_row)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot update view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
+ break;
+ case CMD_DELETE:
+ if (!trigDesc || !trigDesc->trig_delete_instead_row)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot delete from view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
+ break;
+ default:
+ elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+ break;
+ }
+ break;
+ case RELKIND_FOREIGN_TABLE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot change view \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ errmsg("cannot change foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRel))));
break;
}
+}
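+
+/*
+ * For example, a simple view becomes a legal INSERT target once an INSTEAD
+ * OF trigger is attached, e.g. (SQL sketch, names hypothetical):
+ *		CREATE TRIGGER v_ins INSTEAD OF INSERT ON v
+ *			FOR EACH ROW EXECUTE PROCEDURE v_ins_func();
+ * after which CheckValidResultRel accepts the view for CMD_INSERT.
+ */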
- /* OK, fill in the node */
+/*
+ * Check that a proposed rowmark target relation is a legal target
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * they don't cover all cases.
+ */
+static void
+CheckValidRowMarkRel(Relation rel, RowMarkType markType)
+{
+ switch (rel->rd_rel->relkind)
+ {
+ case RELKIND_RELATION:
+ /* OK */
+ break;
+ case RELKIND_SEQUENCE:
+ /* Must disallow this because we don't vacuum sequences */
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot lock rows in sequence \"%s\"",
+ RelationGetRelationName(rel))));
+ break;
+ case RELKIND_TOASTVALUE:
+ /* We could allow this, but there seems no good reason to */
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot lock rows in TOAST relation \"%s\"",
+ RelationGetRelationName(rel))));
+ break;
+ case RELKIND_VIEW:
+ /* Should not get here */
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot lock rows in view \"%s\"",
+ RelationGetRelationName(rel))));
+ break;
+ case RELKIND_FOREIGN_TABLE:
+ /* Perhaps we can support this someday, but not today */
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(rel))));
+ break;
+ default:
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot lock rows in relation \"%s\"",
+ RelationGetRelationName(rel))));
+ break;
+ }
+}
+
+/*
+ * Initialize ResultRelInfo data for one result relation
+ *
+ * Caution: before Postgres 9.1, this function included the relkind checking
+ * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
+ * appropriate. Be sure callers cover those needs.
+ */
+void
+InitResultRelInfo(ResultRelInfo *resultRelInfo,
+ Relation resultRelationDesc,
+ Index resultRelationIndex,
+ int instrument_options)
+{
MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
resultRelInfo->type = T_ResultRelInfo;
resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
palloc0(n * sizeof(FmgrInfo));
- if (doInstrument)
- resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
- else
- resultRelInfo->ri_TrigInstrument = NULL;
+ resultRelInfo->ri_TrigWhenExprs = (List **)
+ palloc0(n * sizeof(List *));
+ if (instrument_options)
+ resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
}
else
{
resultRelInfo->ri_TrigFunctions = NULL;
+ resultRelInfo->ri_TrigWhenExprs = NULL;
resultRelInfo->ri_TrigInstrument = NULL;
}
resultRelInfo->ri_ConstraintExprs = NULL;
resultRelInfo->ri_junkFilter = NULL;
resultRelInfo->ri_projectReturning = NULL;
-
- /*
- * If there are indices on the result relation, open them and save
- * descriptors in the result relation info, so that we can add new index
- * entries for the tuples we add/update. We need not do this for a
- * DELETE, however, since deletion doesn't affect indexes.
- */
- if (resultRelationDesc->rd_rel->relhasindex &&
- operation != CMD_DELETE)
- ExecOpenIndices(resultRelInfo);
}
/*
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
- * event got queued, so we need take no new lock here.
+ * event got queued, so we need take no new lock here. Also, we need not
+ * recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
/*
- * Make the new entry in the right context. Currently, we don't need any
- * index information in ResultRelInfos used only for triggers, so tell
- * InitResultRelInfo it's a DELETE.
+ * Make the new entry in the right context.
*/
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
rInfo = makeNode(ResultRelInfo);
InitResultRelInfo(rInfo,
rel,
0, /* dummy rangetable index */
- CMD_DELETE,
estate->es_instrument);
estate->es_trig_target_relations =
lappend(estate->es_trig_target_relations, rInfo);
MemoryContextSwitchTo(oldcontext);
+ /*
+ * Currently, we don't need any index information in ResultRelInfos used
+ * only for triggers, so no need to call ExecOpenIndices.
+ */
+
return rInfo;
}
return false;
}
+/* ----------------------------------------------------------------
+ * ExecPostprocessPlan
+ *
+ * Give plan nodes a final chance to execute before shutdown
+ * ----------------------------------------------------------------
+ */
+static void
+ExecPostprocessPlan(EState *estate)
+{
+ ListCell *lc;
+
+ /*
+ * Make sure nodes run forward.
+ */
+ estate->es_direction = ForwardScanDirection;
+
+ /*
+ * Run any secondary ModifyTable nodes to completion, in case the main
+ * query did not fetch all rows from them. (We do this to ensure that
+ * such nodes have predictable results.)
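+	 * For example, in
+	 *		WITH t AS (UPDATE foo SET ... RETURNING *)
+	 *		SELECT * FROM t LIMIT 1
+	 * the UPDATE must still be applied to every qualifying row even though
+	 * the outer query fetches only one.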
+ */
+ foreach(lc, estate->es_auxmodifytables)
+ {
+ PlanState *ps = (PlanState *) lfirst(lc);
+
+ for (;;)
+ {
+ TupleTableSlot *slot;
+
+ /* Reset the per-output-tuple exprcontext each time */
+ ResetPerTupleExprContext(estate);
+
+ slot = ExecProcNode(ps);
+
+ if (TupIsNull(slot))
+ break;
+ }
+ }
+}
+
/* ----------------------------------------------------------------
* ExecEndPlan
*
int i;
ListCell *l;
- /*
- * shut down any PlanQual processing we were doing
- */
- if (estate->es_evalPlanQual != NULL)
- EndEvalPlanQual(estate);
-
/*
* shut down the node-type-specific query processing
*/
/*
* destroy the executor's tuple table. Actually we only care about
- * releasing buffer pins and tupdesc refcounts; there's no need to
- * pfree the TupleTableSlots, since the containing memory context
- * is about to go away anyway.
+ * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+ * the TupleTableSlots, since the containing memory context is about to go
+ * away anyway.
*/
ExecResetTupleTable(estate->es_tupleTable, false);
*/
foreach(l, estate->es_rowMarks)
{
- ExecRowMark *erm = lfirst(l);
+ ExecRowMark *erm = (ExecRowMark *) lfirst(l);
- heap_close(erm->relation, NoLock);
+ if (erm->relation)
+ heap_close(erm->relation, NoLock);
}
}
slot = ExecFilterJunk(estate->es_junkFilter, slot);
/*
- * If we are supposed to send the tuple somewhere, do so.
- * (In practice, this is probably always the case at this point.)
+ * If we are supposed to send the tuple somewhere, do so. (In
+ * practice, this is probably always the case at this point.)
*/
if (sendTuples)
(*dest->receiveSlot) (slot, dest);
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
+ errdetail("Failing row contains %s.",
+ ExecBuildSlotValueDescription(slot, 64))));
}
}
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
- RelationGetRelationName(rel), failed)));
+ RelationGetRelationName(rel), failed),
+ errdetail("Failing row contains %s.",
+ ExecBuildSlotValueDescription(slot, 64))));
}
}
/*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * ExecBuildSlotValueDescription -- construct a string representing a tuple
+ *
+ * This is intentionally very similar to BuildIndexValueDescription, but
+ * unlike that function, we truncate long field values. That seems necessary
+ * here since heap field values could be very long, whereas index entries
+ * typically aren't so wide.
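+ *
+ * For example, a three-column row containing (42, NULL, 'foo') is rendered
+ * as the string "(42, null, foo)".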
+ */
+static char *
+ExecBuildSlotValueDescription(TupleTableSlot *slot, int maxfieldlen)
+{
+ StringInfoData buf;
+ TupleDesc tupdesc = slot->tts_tupleDescriptor;
+ int i;
+
+ /* Make sure the tuple is fully deconstructed */
+ slot_getallattrs(slot);
+
+ initStringInfo(&buf);
+
+ appendStringInfoChar(&buf, '(');
+
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ char *val;
+ int vallen;
+
+ if (slot->tts_isnull[i])
+ val = "null";
+ else
+ {
+ Oid foutoid;
+ bool typisvarlena;
+
+ getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
+ &foutoid, &typisvarlena);
+ val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
+ }
+
+ if (i > 0)
+ appendStringInfoString(&buf, ", ");
+
+ /* truncate if needed */
+ vallen = strlen(val);
+ if (vallen <= maxfieldlen)
+ appendStringInfoString(&buf, val);
+ else
+ {
+ vallen = pg_mbcliplen(val, vallen, maxfieldlen);
+ appendBinaryStringInfo(&buf, val, vallen);
+ appendStringInfoString(&buf, "...");
+ }
+ }
+
+ appendStringInfoChar(&buf, ')');
+
+ return buf.data;
+}
+
+
+/*
+ * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ */
+ExecRowMark *
+ExecFindRowMark(EState *estate, Index rti)
+{
+ ListCell *lc;
+
+ foreach(lc, estate->es_rowMarks)
+ {
+ ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
+
+ if (erm->rti == rti)
+ return erm;
+ }
+ elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+ return NULL; /* keep compiler quiet */
+}
+
+/*
+ * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
+ *
+ * Inputs are the underlying ExecRowMark struct and the targetlist of the
+ * input plan node (not planstate node!). We need the latter to find out
+ * the column numbers of the resjunk columns.
+ */
+ExecAuxRowMark *
+ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
+{
+ ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
+ char resname[32];
+
+ aerm->rowmark = erm;
+
+ /* Look up the resjunk columns associated with this rowmark */
+ if (erm->relation)
+ {
+ Assert(erm->markType != ROW_MARK_COPY);
+
+ /* if child rel, need tableoid */
+ if (erm->rti != erm->prti)
+ {
+ snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
+ aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+ resname);
+ if (!AttributeNumberIsValid(aerm->toidAttNo))
+ elog(ERROR, "could not find junk %s column", resname);
+ }
+
+ /* always need ctid for real relations */
+ snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
+ aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+ resname);
+ if (!AttributeNumberIsValid(aerm->ctidAttNo))
+ elog(ERROR, "could not find junk %s column", resname);
+ }
+ else
+ {
+ Assert(erm->markType == ROW_MARK_COPY);
+
+ snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
+ aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
+ resname);
+ if (!AttributeNumberIsValid(aerm->wholeAttNo))
+ elog(ERROR, "could not find junk %s column", resname);
+ }
+
+ return aerm;
+}
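+
+/*
+ * Typical usage (sketch, cf. ExecInitLockRows in nodeLockRows.c): during
+ * node initialization, look up each rowmark and build its auxiliary info
+ * from the input plan's targetlist:
+ *
+ *		erm = ExecFindRowMark(estate, rc->rti);
+ *		aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
+ */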
+
+
+/*
+ * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
+ * process the updated version under READ COMMITTED rules.
*
* See backend/executor/README for some info about how this works.
+ */
+
+
+/*
+ * Check a modified tuple to see if we want to process its updated version
+ * under READ COMMITTED rules.
*
- * estate - executor state data
+ * estate - outer executor state data
+ * epqstate - state for EvalPlanQual rechecking
+ * relation - table containing tuple
* rti - rangetable index of table containing tuple
- * subplanstate - portion of plan tree that needs to be re-evaluated
* *tid - t_ctid from the outdated tuple (ie, next updated version)
* priorXmax - t_xmax from the outdated tuple
*
* NULL if we determine we shouldn't process the row.
*/
TupleTableSlot *
-EvalPlanQual(EState *estate, Index rti,
- PlanState *subplanstate,
+EvalPlanQual(EState *estate, EPQState *epqstate,
+ Relation relation, Index rti,
ItemPointer tid, TransactionId priorXmax)
{
TupleTableSlot *slot;
HeapTuple copyTuple;
- Assert(rti != 0);
+ Assert(rti > 0);
/*
- * Get the updated version of the row; if fail, return NULL.
+ * Get and lock the updated version of the row; if fail, return NULL.
*/
- copyTuple = EvalPlanQualFetch(estate, rti, tid, priorXmax);
+ copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
+ tid, priorXmax);
if (copyTuple == NULL)
return NULL;
*tid = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Find or create a PQ stack entry.
+ * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
*/
- EvalPlanQualPush(estate, rti, subplanstate);
+ EvalPlanQualBegin(epqstate, estate);
/*
- * free old RTE' tuple, if any, and store target tuple where relation's
- * scan node will see it
+ * Free old test tuple, if any, and store new tuple where relation's scan
+ * node will see it
*/
- EvalPlanQualSetTuple(estate, rti, copyTuple);
+ EvalPlanQualSetTuple(epqstate, rti, copyTuple);
/*
- * Run the EPQ query, but just for one tuple.
+ * Fetch any non-locked source rows
*/
- slot = EvalPlanQualNext(estate);
+ EvalPlanQualFetchRowMarks(epqstate);
/*
- * If we got a result, we must copy it out of the EPQ query's local
- * context before we shut down the EPQ query.
+ * Run the EPQ query. We assume it will return at most one tuple.
*/
- if (TupIsNull(slot))
- slot = NULL; /* in case we got back an empty slot */
- else
- {
- TupleDesc tupdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
- evalPlanQual *epq = estate->es_evalPlanQual;
-
- if (epq->resultslot == NULL)
- {
- epq->resultslot = ExecInitExtraTupleSlot(estate);
- ExecSetSlotDescriptor(epq->resultslot, tupdesc);
- }
- else
- {
- TupleDesc oldtupdesc = epq->resultslot->tts_tupleDescriptor;
-
- ExecSetSlotDescriptor(epq->resultslot, tupdesc);
- FreeTupleDesc(oldtupdesc);
- }
+ slot = EvalPlanQualNext(epqstate);
- slot = ExecCopySlot(epq->resultslot, slot);
- }
+ /*
+ * If we got a tuple, force the slot to materialize the tuple so that it
+ * is not dependent on any local state in the EPQ query (in particular,
+ * it's highly likely that the slot contains references to any pass-by-ref
+ * datums that may be present in copyTuple). As with the next step, this
+ * is to guard against early re-use of the EPQ query.
+ */
+ if (!TupIsNull(slot))
+ (void) ExecMaterializeSlot(slot);
/*
- * Shut it down ...
+ * Clear out the test tuple. This is needed in case the EPQ query is
+ * re-used to test a tuple for a different relation. (Not clear that can
+ * really happen, but let's be safe.)
*/
- EvalPlanQualPop(estate, subplanstate);
+ EvalPlanQualSetTuple(epqstate, rti, NULL);
return slot;
}
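+
+/*
+ * For illustration, a caller such as ExecUpdate reacts to a concurrent
+ * update roughly like this (simplified sketch of nodeModifyTable.c code,
+ * not part of this file):
+ *
+ *		case HeapTupleUpdated:
+ *			if (!ItemPointerEquals(tupleid, &update_ctid))
+ *			{
+ *				TupleTableSlot *epqslot;
+ *
+ *				epqslot = EvalPlanQual(estate, epqstate,
+ *									   resultRelationDesc,
+ *									   resultRelInfo->ri_RangeTableIndex,
+ *									   &update_ctid, update_xmax);
+ *				if (!TupIsNull(epqslot))
+ *				{
+ *					*tupleid = update_ctid;
+ *					(project the returned row and retry the update)
+ *				}
+ *			}
+ *			(otherwise the row is gone; skip the update)
+ */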
* Fetch a copy of the newest version of an outdated tuple
*
* estate - executor state data
- * rti - rangetable index of table containing tuple
+ * relation - table containing tuple
+ * lockmode - requested tuple lock mode
* *tid - t_ctid from the outdated tuple (ie, next updated version)
* priorXmax - t_xmax from the outdated tuple
*
* Returns a palloc'd copy of the newest tuple version, or NULL if we find
* that there is no newest version (ie, the row was deleted not updated).
+ * If successful, we have locked the newest tuple version, so caller does not
+ * need to worry about it changing anymore.
*
- * XXX this does not lock the new row version ... wouldn't it be better if
- * it did? As-is, caller might have to repeat all its work.
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
*/
HeapTuple
-EvalPlanQualFetch(EState *estate, Index rti,
+EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
ItemPointer tid, TransactionId priorXmax)
{
HeapTuple copyTuple = NULL;
- Relation relation;
HeapTupleData tuple;
SnapshotData SnapshotDirty;
- Assert(rti != 0);
-
- /*
- * Find relation containing target tuple --- must be either a result
- * relation of the query, or a SELECT FOR UPDATE target
- */
- if (estate->es_result_relation_info != NULL &&
- estate->es_result_relation_info->ri_RangeTableIndex == rti)
- relation = estate->es_result_relation_info->ri_RelationDesc;
- else
- {
- ListCell *l;
-
- relation = NULL;
- foreach(l, estate->es_rowMarks)
- {
- ExecRowMark *erm = lfirst(l);
-
- if (erm->rti == rti)
- {
- relation = erm->relation;
- break;
- }
- }
- if (relation == NULL)
- elog(ERROR, "could not find RowMark for RT index %u", rti);
- }
-
/*
- * fetch tid tuple
+ * fetch target tuple
*
* Loop here to deal with updated or busy tuples
*/
if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
{
+ HTSU_Result test;
+ ItemPointerData update_ctid;
+ TransactionId update_xmax;
+
/*
* If xmin isn't what we're expecting, the slot must have been
* recycled and reused for an unrelated tuple. This implies that
return NULL;
}
+ /*
+ * This is a live tuple, so now try to lock it.
+ */
+ test = heap_lock_tuple(relation, &tuple, &buffer,
+ &update_ctid, &update_xmax,
+ estate->es_output_cid,
+ lockmode, false);
+ /* We now have two pins on the buffer, get rid of one */
+ ReleaseBuffer(buffer);
+
+ switch (test)
+ {
+ case HeapTupleSelfUpdated:
+ /* treat it as deleted; do not process */
+ ReleaseBuffer(buffer);
+ return NULL;
+
+ case HeapTupleMayBeUpdated:
+ /* successfully locked */
+ break;
+
+ case HeapTupleUpdated:
+ ReleaseBuffer(buffer);
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
+ {
+ /* it was updated, so look at the updated version */
+ tuple.t_self = update_ctid;
+ /* updated row should have xmin matching this xmax */
+ priorXmax = update_xmax;
+ continue;
+ }
+ /* tuple was deleted, so give up */
+ return NULL;
+
+ default:
+ ReleaseBuffer(buffer);
+ elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+ test);
+ return NULL; /* keep compiler quiet */
+ }
+
/*
* We got tuple - now copy it for use by recheck query.
*/
* mean that the row was updated or deleted by either a committed xact
* or our own xact. If it was deleted, we can ignore it; if it was
* updated then chain up to the next version and repeat the whole
- * test.
+ * process.
*
* As above, it should be safe to examine xmax and t_ctid without the
* buffer content lock, because they can't be changing.
}
/*
- * Push a new level of EPQ state, and prepare to execute the given subplan
+ * EvalPlanQualInit -- initialize during creation of a plan state node
+ * that might need to invoke EPQ processing.
+ *
+ * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
+ * with EvalPlanQualSetPlan.
*/
void
-EvalPlanQualPush(EState *estate, Index rti, PlanState *subplanstate)
+EvalPlanQualInit(EPQState *epqstate, EState *estate,
+ Plan *subplan, List *auxrowmarks, int epqParam)
{
- evalPlanQual *epq;
- bool endNode;
-
- Assert(rti != 0);
+ /* Mark the EPQ state inactive */
+ epqstate->estate = NULL;
+ epqstate->planstate = NULL;
+ epqstate->origslot = NULL;
+ /* ... and remember data that EvalPlanQualBegin will need */
+ epqstate->plan = subplan;
+ epqstate->arowMarks = auxrowmarks;
+ epqstate->epqParam = epqParam;
+}
- epq = estate->es_evalPlanQual;
- endNode = true;
+/*
+ * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
+ *
+ * We need this so that ModifyTable can deal with multiple subplans.
+ */
+void
+EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
+{
+ /* If we have a live EPQ query, shut it down */
+ EvalPlanQualEnd(epqstate);
+ /* And set/change the plan pointer */
+ epqstate->plan = subplan;
+ /* The rowmarks depend on the plan, too */
+ epqstate->arowMarks = auxrowmarks;
+}
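+
+/*
+ * For example (sketch, cf. nodeModifyTable.c): ExecInitModifyTable starts
+ * with no subplan,
+ *		EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
+ *						 node->epqParam);
+ * and each time execution switches to another subplan it does
+ *		EvalPlanQualSetPlan(&node->mt_epqstate, subplan, auxrowmarks);
+ */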
- if (epq != NULL && epq->rti == 0)
- {
- /* Top PQ stack entry is idle, so re-use it */
- Assert(epq->next == NULL);
- epq->rti = rti;
- endNode = false;
- }
+/*
+ * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
+ *
+ * NB: passed tuple must be palloc'd; it may get freed later
+ */
+void
+EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
+{
+ EState *estate = epqstate->estate;
- /*
- * If this is request for another RTE - Ra, - then we have to check wasn't
- * PlanQual requested for Ra already and if so then Ra' row was updated
- * again and we have to re-start old execution for Ra and forget all what
- * we done after Ra was suspended. Cool? -:))
- */
- if (epq != NULL && epq->rti != rti &&
- epq->estate->es_evTuple[rti - 1] != NULL)
- {
- do
- {
- evalPlanQual *oldepq;
-
- /* stop execution */
- EvalPlanQualStop(epq);
- /* pop previous PlanQual from the stack */
- oldepq = epq->next;
- Assert(oldepq && oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
- } while (epq->rti != rti);
- }
+ Assert(rti > 0);
/*
- * If we are requested for another RTE then we have to suspend execution
- * of current PlanQual and start execution for new one.
+ * free old test tuple, if any, and store new tuple where relation's scan
+ * node will see it
*/
- if (epq == NULL || epq->rti != rti)
- {
- /* try to reuse plan used previously */
- evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
-
- if (newepq == NULL) /* first call or freePQ stack is empty */
- {
- newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
- newepq->free = NULL;
- newepq->estate = NULL;
- newepq->planstate = NULL;
- newepq->origplanstate = NULL;
- newepq->resultslot = NULL;
- }
- else
- {
- /* recycle previously used PlanQual */
- Assert(newepq->estate == NULL);
- epq->free = NULL;
- }
- /* push current PQ to the stack */
- newepq->next = epq;
- epq = newepq;
- estate->es_evalPlanQual = epq;
- epq->rti = rti;
- endNode = false;
- }
+ if (estate->es_epqTuple[rti - 1] != NULL)
+ heap_freetuple(estate->es_epqTuple[rti - 1]);
+ estate->es_epqTuple[rti - 1] = tuple;
+ estate->es_epqTupleSet[rti - 1] = true;
+}
- Assert(epq->rti == rti);
- Assert(estate->es_evalPlanQual == epq);
+/*
+ * Fetch back the current test tuple (if any) for the specified RTI
+ */
+HeapTuple
+EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
+{
+ EState *estate = epqstate->estate;
- /*
- * Ok - we're requested for the same RTE. Unfortunately we still have to
- * end and restart execution of the plan, because ExecReScan wouldn't
- * ensure that upper plan nodes would reset themselves. We could make
- * that work if insertion of the target tuple were integrated with the
- * Param mechanism somehow, so that the upper plan nodes know that their
- * children's outputs have changed.
- *
- * Note that the stack of free evalPlanQual nodes is quite useless at the
- * moment, since it only saves us from pallocing/releasing the
- * evalPlanQual nodes themselves. But it will be useful once we implement
- * ReScan instead of end/restart for re-using PlanQual nodes.
- */
- if (endNode)
- {
- /* stop execution */
- EvalPlanQualStop(epq);
- }
+ Assert(rti > 0);
- /*
- * Initialize new recheck query.
- *
- * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
- * instead copy down changeable state from the top plan (including
- * es_result_relation_info) and reset locally changeable
- * state in the epq (including es_param_exec_vals, es_evTupleNull).
- */
- epq->origplanstate = subplanstate;
- EvalPlanQualStart(epq, estate, subplanstate->plan, epq->next);
+ return estate->es_epqTuple[rti - 1];
}
/*
- * Install one test tuple into current EPQ level
+ * Fetch the current row values for any non-locked relations that need
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to contain the current result row (top-level row) that we need to recheck.
*/
void
-EvalPlanQualSetTuple(EState *estate, Index rti, HeapTuple tuple)
+EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
- EState *epqstate;
+ ListCell *l;
- Assert(rti != 0);
+ Assert(epqstate->origslot != NULL);
- /*
- * free old RTE' tuple, if any, and store target tuple where relation's
- * scan node will see it
- */
- epqstate = epq->estate;
- if (epqstate->es_evTuple[rti - 1] != NULL)
- heap_freetuple(epqstate->es_evTuple[rti - 1]);
- epqstate->es_evTuple[rti - 1] = tuple;
+ foreach(l, epqstate->arowMarks)
+ {
+ ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
+ ExecRowMark *erm = aerm->rowmark;
+ Datum datum;
+ bool isNull;
+ HeapTupleData tuple;
+
+ if (RowMarkRequiresRowShareLock(erm->markType))
+ elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
+
+ /* clear any leftover test tuple for this rel */
+ EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
+
+ if (erm->relation)
+ {
+ Buffer buffer;
+
+ Assert(erm->markType == ROW_MARK_REFERENCE);
+
+ /* if child rel, must check whether it produced this row */
+ if (erm->rti != erm->prti)
+ {
+ Oid tableoid;
+
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ aerm->toidAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ tableoid = DatumGetObjectId(datum);
+
+ if (tableoid != RelationGetRelid(erm->relation))
+ {
+ /* this child is inactive right now */
+ continue;
+ }
+ }
+
+ /* fetch the tuple's ctid */
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ aerm->ctidAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+
+ /* okay, fetch the tuple */
+ if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+ false, NULL))
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /* successful, copy and store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti,
+ heap_copytuple(&tuple));
+ ReleaseBuffer(buffer);
+ }
+ else
+ {
+ HeapTupleHeader td;
+
+ Assert(erm->markType == ROW_MARK_COPY);
+
+ /* fetch the whole-row Var for the relation */
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ aerm->wholeAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ td = DatumGetHeapTupleHeader(datum);
+
+ /* build a temporary HeapTuple control structure */
+ tuple.t_len = HeapTupleHeaderGetDatumLength(td);
+ ItemPointerSetInvalid(&(tuple.t_self));
+ tuple.t_tableOid = InvalidOid;
+ tuple.t_data = td;
+
+ /* copy and store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti,
+ heap_copytuple(&tuple));
+ }
+ }
}
/*
* Fetch the next row (if any) from EvalPlanQual testing
+ *
+ * (In practice, there should never be more than one row...)
*/
TupleTableSlot *
-EvalPlanQualNext(EState *estate)
+EvalPlanQualNext(EPQState *epqstate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
MemoryContext oldcontext;
TupleTableSlot *slot;
- Assert(epq->rti != 0);
-
- oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
- slot = ExecProcNode(epq->planstate);
+ oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
+ slot = ExecProcNode(epqstate->planstate);
MemoryContextSwitchTo(oldcontext);
return slot;
}
/*
- * Shut down and pop the specified level of EvalPlanQual machinery,
- * plus any levels nested within it
+ * Initialize or reset an EvalPlanQual state tree
*/
void
-EvalPlanQualPop(EState *estate, PlanState *subplanstate)
+EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
+ EState *estate = epqstate->estate;
- for (;;)
+ if (estate == NULL)
{
- PlanState *epqplanstate = epq->origplanstate;
- evalPlanQual *oldepq;
-
- Assert(epq->rti != 0);
-
- /* stop execution */
- EvalPlanQualStop(epq);
- epq->origplanstate = NULL;
- /* pop old PQ from the stack */
- oldepq = epq->next;
- if (oldepq == NULL)
- {
- /* this is the first (oldest) PQ - mark as free */
- epq->rti = 0;
- break;
- }
- Assert(oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
- if (epqplanstate == subplanstate)
- break;
+ /* First time through, so create a child EState */
+ EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
}
-}
-
-static void
-EndEvalPlanQual(EState *estate)
-{
- evalPlanQual *epq = estate->es_evalPlanQual;
-
- if (epq->rti == 0) /* plans already shutdowned */
+ else
{
- Assert(epq->next == NULL);
- return;
- }
+ /*
+ * We already have a suitable child EPQ tree, so just reset it.
+ */
+ int rtsize = list_length(parentestate->es_range_table);
+ PlanState *planstate = epqstate->planstate;
- for (;;)
- {
- evalPlanQual *oldepq;
-
- /* stop execution */
- EvalPlanQualStop(epq);
- epq->origplanstate = NULL;
- /* pop old PQ from the stack */
- oldepq = epq->next;
- if (oldepq == NULL)
+ MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
+
+ /* Recopy current values of parent parameters */
+ if (parentestate->es_plannedstmt->nParamExec > 0)
{
- /* this is the first (oldest) PQ - mark as free */
- epq->rti = 0;
- break;
+ int i = parentestate->es_plannedstmt->nParamExec;
+
+ while (--i >= 0)
+ {
+ /* copy value if any, but not execPlan link */
+ estate->es_param_exec_vals[i].value =
+ parentestate->es_param_exec_vals[i].value;
+ estate->es_param_exec_vals[i].isnull =
+ parentestate->es_param_exec_vals[i].isnull;
+ }
}
- Assert(oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
+
+ /*
+ * Mark child plan tree as needing rescan at all scan nodes. The
+ * first ExecProcNode will take care of actually doing the rescan.
+ */
+ planstate->chgParam = bms_add_member(planstate->chgParam,
+ epqstate->epqParam);
}
}
/*
- * Start execution of one level of PlanQual.
+ * Start execution of an EvalPlanQual plan tree.
*
* This is a cut-down version of ExecutorStart(): we copy some state from
* the top-level estate rather than initializing it fresh.
*/
static void
-EvalPlanQualStart(evalPlanQual *epq, EState *estate, Plan *planTree,
- evalPlanQual *priorepq)
+EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
- EState *epqstate;
+ EState *estate;
int rtsize;
MemoryContext oldcontext;
ListCell *l;
- rtsize = list_length(estate->es_range_table);
+ rtsize = list_length(parentestate->es_range_table);
- epq->estate = epqstate = CreateExecutorState();
+ epqstate->estate = estate = CreateExecutorState();
- oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * The epqstates share the top query's copy of unchanging state such as
+ * Child EPQ EStates share the parent's copy of unchanging state such as
* the snapshot, rangetable, result-rel info, and external Param info.
* They need their own copies of local state, including a tuple table,
* es_param_exec_vals, etc.
*/
- epqstate->es_direction = ForwardScanDirection;
- epqstate->es_snapshot = estate->es_snapshot;
- epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
- epqstate->es_range_table = estate->es_range_table;
- epqstate->es_junkFilter = estate->es_junkFilter;
- epqstate->es_output_cid = estate->es_output_cid;
- epqstate->es_result_relations = estate->es_result_relations;
- epqstate->es_num_result_relations = estate->es_num_result_relations;
- epqstate->es_result_relation_info = estate->es_result_relation_info;
+ estate->es_direction = ForwardScanDirection;
+ estate->es_snapshot = parentestate->es_snapshot;
+ estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
+ estate->es_range_table = parentestate->es_range_table;
+ estate->es_plannedstmt = parentestate->es_plannedstmt;
+ estate->es_junkFilter = parentestate->es_junkFilter;
+ estate->es_output_cid = parentestate->es_output_cid;
+ estate->es_result_relations = parentestate->es_result_relations;
+ estate->es_num_result_relations = parentestate->es_num_result_relations;
+ estate->es_result_relation_info = parentestate->es_result_relation_info;
/* es_trig_target_relations must NOT be copied */
- epqstate->es_param_list_info = estate->es_param_list_info;
- if (estate->es_plannedstmt->nParamExec > 0)
- epqstate->es_param_exec_vals = (ParamExecData *)
- palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
- epqstate->es_rowMarks = estate->es_rowMarks;
- epqstate->es_instrument = estate->es_instrument;
- epqstate->es_select_into = estate->es_select_into;
- epqstate->es_into_oids = estate->es_into_oids;
- epqstate->es_plannedstmt = estate->es_plannedstmt;
-
- /*
- * Each epqstate must have its own es_evTupleNull state, but all the stack
- * entries share es_evTuple state. This allows sub-rechecks to inherit
- * the value being examined by an outer recheck.
- */
- epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
- if (priorepq == NULL)
- /* first PQ stack entry */
- epqstate->es_evTuple = (HeapTuple *)
- palloc0(rtsize * sizeof(HeapTuple));
+ estate->es_rowMarks = parentestate->es_rowMarks;
+ estate->es_top_eflags = parentestate->es_top_eflags;
+ estate->es_instrument = parentestate->es_instrument;
+ estate->es_select_into = parentestate->es_select_into;
+ estate->es_into_oids = parentestate->es_into_oids;
+ /* es_auxmodifytables must NOT be copied */
+
+ /*
+ * The external param list is simply shared from parent. The internal
+ * param workspace has to be local state, but we copy the initial values
+ * from the parent, so as to have access to any param values that were
+ * already set from other parts of the parent's plan tree.
+ */
+ estate->es_param_list_info = parentestate->es_param_list_info;
+ if (parentestate->es_plannedstmt->nParamExec > 0)
+ {
+ int i = parentestate->es_plannedstmt->nParamExec;
+
+ estate->es_param_exec_vals = (ParamExecData *)
+ palloc0(i * sizeof(ParamExecData));
+ while (--i >= 0)
+ {
+ /* copy value if any, but not execPlan link */
+ estate->es_param_exec_vals[i].value =
+ parentestate->es_param_exec_vals[i].value;
+ estate->es_param_exec_vals[i].isnull =
+ parentestate->es_param_exec_vals[i].isnull;
+ }
+ }
+
+ /*
+ * Each EState must have its own es_epqScanDone state, but if we have
+ * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * sub-rechecks to inherit the values being examined by an outer recheck.
+ */
+ estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
+ if (parentestate->es_epqTuple != NULL)
+ {
+ estate->es_epqTuple = parentestate->es_epqTuple;
+ estate->es_epqTupleSet = parentestate->es_epqTupleSet;
+ }
else
- /* later stack entries share the same storage */
- epqstate->es_evTuple = priorepq->estate->es_evTuple;
+ {
+ estate->es_epqTuple = (HeapTuple *)
+ palloc0(rtsize * sizeof(HeapTuple));
+ estate->es_epqTupleSet = (bool *)
+ palloc0(rtsize * sizeof(bool));
+ }
/*
- * Each epqstate also has its own tuple table.
+ * Each estate also has its own tuple table.
*/
- epqstate->es_tupleTable = NIL;
+ estate->es_tupleTable = NIL;
/*
* Initialize private state information for each SubPlan. We must do this
* before running ExecInitNode on the main query tree, since
- * ExecInitSubPlan expects to be able to find these entries.
- * Some of the SubPlans might not be used in the part of the plan tree
- * we intend to run, but since it's not easy to tell which, we just
- * initialize them all.
+ * ExecInitSubPlan expects to be able to find these entries. Some of the
+ * SubPlans might not be used in the part of the plan tree we intend to
+ * run, but since it's not easy to tell which, we just initialize them
+ * all. (However, if the subplan is headed by a ModifyTable node, then it
+ * must be a data-modifying CTE, which we will certainly not need to
+ * re-run, so we can skip initializing it. This is just an efficiency
+ * hack; it won't skip data-modifying CTEs for which the ModifyTable node
+ * is not at the top.)
*/
- Assert(epqstate->es_subplanstates == NIL);
- foreach(l, estate->es_plannedstmt->subplans)
+ Assert(estate->es_subplanstates == NIL);
+ foreach(l, parentestate->es_plannedstmt->subplans)
{
Plan *subplan = (Plan *) lfirst(l);
PlanState *subplanstate;
- subplanstate = ExecInitNode(subplan, epqstate, 0);
+ /* Don't initialize ModifyTable subplans, per comment above */
+ if (IsA(subplan, ModifyTable))
+ subplanstate = NULL;
+ else
+ subplanstate = ExecInitNode(subplan, estate, 0);
- epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
- subplanstate);
+ estate->es_subplanstates = lappend(estate->es_subplanstates,
+ subplanstate);
}
/*
- * Initialize the private state information for all the nodes in the
- * part of the plan tree we need to run. This opens files, allocates
- * storage and leaves us ready to start processing tuples.
+ * Initialize the private state information for all the nodes in the part
+ * of the plan tree we need to run. This opens files, allocates storage
+ * and leaves us ready to start processing tuples.
*/
- epq->planstate = ExecInitNode(planTree, epqstate, 0);
+ epqstate->planstate = ExecInitNode(planTree, estate, 0);
MemoryContextSwitchTo(oldcontext);
}
/*
- * End execution of one level of PlanQual.
+ * EvalPlanQualEnd -- shut down at termination of parent plan state node,
+ * or if we are done with the current EPQ child.
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
* just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
+ * (There probably shouldn't be any of the latter, but just in case...)
*/
-static void
-EvalPlanQualStop(evalPlanQual *epq)
+void
+EvalPlanQualEnd(EPQState *epqstate)
{
- EState *epqstate = epq->estate;
+ EState *estate = epqstate->estate;
MemoryContext oldcontext;
ListCell *l;
- oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+ if (estate == NULL)
+ return; /* idle, so nothing to do */
+
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
- ExecEndNode(epq->planstate);
+ ExecEndNode(epqstate->planstate);
- foreach(l, epqstate->es_subplanstates)
+ foreach(l, estate->es_subplanstates)
{
PlanState *subplanstate = (PlanState *) lfirst(l);
ExecEndNode(subplanstate);
}
- /* throw away the per-epqstate tuple table completely */
- ExecResetTupleTable(epqstate->es_tupleTable, true);
- epqstate->es_tupleTable = NIL;
-
- if (epqstate->es_evTuple[epq->rti - 1] != NULL)
- {
- heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
- epqstate->es_evTuple[epq->rti - 1] = NULL;
- }
+ /* throw away the per-estate tuple table */
+ ExecResetTupleTable(estate->es_tupleTable, false);
- foreach(l, epqstate->es_trig_target_relations)
+ /* close any trigger target relations attached to this EState */
+ foreach(l, estate->es_trig_target_relations)
{
ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
/* Close indices and then the relation itself */
ExecCloseIndices(resultRelInfo);
heap_close(resultRelInfo->ri_RelationDesc, NoLock);
}
MemoryContextSwitchTo(oldcontext);
- FreeExecutorState(epqstate);
+ FreeExecutorState(estate);
- epq->estate = NULL;
- epq->planstate = NULL;
+ /* Mark EPQState idle */
+ epqstate->estate = NULL;
+ epqstate->planstate = NULL;
+ epqstate->origslot = NULL;
}
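/*
 * XXX Illustrative sketch, not part of this patch: a caller such as
 * ExecLockRows drives the EPQ machinery roughly like this (the epqstate,
 * erm, and copyTuple variables are hypothetical):
 *
 *		EvalPlanQualBegin(&epqstate, estate);
 *		EvalPlanQualSetTuple(&epqstate, erm->rti, copyTuple);
 *		slot = EvalPlanQualNext(&epqstate);
 *		if (!TupIsNull(slot))
 *			... the updated row still satisfies the quals, return it ...
 *		EvalPlanQualEnd(&epqstate);		(at plan node shutdown)
 */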
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
EState *estate; /* EState we are working with */
+ DestReceiver *origdest; /* QueryDesc's original receiver */
Relation rel; /* Relation to write to */
int hi_options; /* heap_insert performance options */
BulkInsertState bistate; /* bulk insert state */
} DR_intorel;
/*
 * OpenIntoRel --- actually create the SELECT INTO target table
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
IntoClause *into = queryDesc->plannedstmt->intoClause;
EState *estate = queryDesc->estate;
+ TupleDesc intoTupDesc = queryDesc->tupDesc;
Relation intoRelationDesc;
char *intoName;
Oid namespaceId;
Oid tablespaceId;
Datum reloptions;
- AclResult aclresult;
Oid intoRelationId;
- TupleDesc tupdesc;
DR_intorel *myState;
+ RangeTblEntry *rte;
+ AttrNumber attnum;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
Assert(into);
+ /*
+ * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
+ * should try to use that function instead.
+ */
+
/*
* Check consistency of arguments
*/
- if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
+ if (into->onCommit != ONCOMMIT_NOOP
+ && into->rel->relpersistence != RELPERSISTENCE_TEMP)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("ON COMMIT can only be used on temporary tables")));
+ {
+ AclResult aclresult;
+ int i;
+
+ for (i = 0; i < intoTupDesc->natts; i++)
+ {
+ Oid atttypid = intoTupDesc->attrs[i]->atttypid;
+
+ aclresult = pg_type_aclcheck(atttypid, GetUserId(), ACL_USAGE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_TYPE,
+ format_type_be(atttypid));
+ }
+ }
+
/*
- * Find namespace to create in, check its permissions
+ * If a column name list was specified in CREATE TABLE AS, override the
+ * column names derived from the query. (Too few column names are OK, too
+ * many are not.) It would probably be all right to scribble directly on
+ * the query's result tupdesc, but let's be safe and make a copy.
+ */
+ if (into->colNames)
+ {
+ ListCell *lc;
+
+ intoTupDesc = CreateTupleDescCopy(intoTupDesc);
+ attnum = 1;
+ foreach(lc, into->colNames)
+ {
+ char *colname = strVal(lfirst(lc));
+
+ if (attnum > intoTupDesc->natts)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CREATE TABLE AS specifies too many column names")));
+ namestrcpy(&(intoTupDesc->attrs[attnum - 1]->attname), colname);
+ attnum++;
+ }
+ }
+
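/*
 * XXX Illustrative example, not part of this patch: given
 *
 *		CREATE TABLE t (x, y) AS SELECT 1, 2;
 *
 * the loop above renames the query's result columns (which would
 * otherwise both be called "?column?") to x and y.
 */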
+ /*
+ * Find namespace to create in, check its permissions, lock it against
+ * concurrent drop, and mark into->rel as RELPERSISTENCE_TEMP if the
+ * selected namespace is temporary.
*/
intoName = into->rel->relname;
- namespaceId = RangeVarGetCreationNamespace(into->rel);
+ namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel, NoLock,
+ NULL);
- aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
+ /*
+ * Security check: disallow creating temp tables from security-restricted
+ * code. This is needed because calling code might not expect untrusted
+ * tables to appear in pg_temp at the front of its search path.
+ */
+ if (into->rel->relpersistence == RELPERSISTENCE_TEMP
+ && InSecurityRestrictedOperation())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("cannot create temporary table within security-restricted operation")));
/*
* Select tablespace to use. If not specified, use default tablespace
*/
if (into->tableSpaceName)
{
- tablespaceId = get_tablespace_oid(into->tableSpaceName);
- if (!OidIsValid(tablespaceId))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist",
- into->tableSpaceName)));
+ tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
}
else
{
- tablespaceId = GetDefaultTablespace(into->rel->istemp);
+ tablespaceId = GetDefaultTablespace(into->rel->relpersistence);
/* note InvalidOid is OK in this case */
}
/* Parse and validate any reloptions */
reloptions = transformRelOptions((Datum) 0,
into->options,
NULL,
validnsps,
true,
false);
(void) heap_reloptions(RELKIND_RELATION, reloptions, true);
- /* Copy the tupdesc because heap_create_with_catalog modifies it */
- tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
-
/* Now we can actually create the new relation */
intoRelationId = heap_create_with_catalog(intoName,
namespaceId,
tablespaceId,
InvalidOid,
InvalidOid,
+ InvalidOid,
GetUserId(),
- tupdesc,
+ intoTupDesc,
NIL,
RELKIND_RELATION,
+ into->rel->relpersistence,
+ false,
false,
true,
0,
reloptions,
true,
allowSystemTableMods);
-
- FreeTupleDesc(tupdesc);
+ Assert(intoRelationId != InvalidOid);
/*
 * Advance command counter so that the newly-created relation's catalog
 * tuples will be visible to heap_open.
 */
CommandCounterIncrement();
(void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
- AlterTableCreateToastTable(intoRelationId, InvalidOid, reloptions, false);
+ AlterTableCreateToastTable(intoRelationId, reloptions);
/*
* And open the constructed table for writing.
*/
intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
+ /*
+ * Check INSERT permission on the constructed table.
+ */
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RELATION;
+ rte->relid = intoRelationId;
+ rte->relkind = RELKIND_RELATION;
+ rte->requiredPerms = ACL_INSERT;
+
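+ /*
+ * Attribute numbers in modifiedCols are offset by
+ * FirstLowInvalidHeapAttributeNumber, the usual convention that lets
+ * system attributes (which have negative attnums) be stored in a
+ * bitmapset as well.
+ */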
+ for (attnum = 1; attnum <= intoTupDesc->natts; attnum++)
+ rte->modifiedCols = bms_add_member(rte->modifiedCols,
+ attnum - FirstLowInvalidHeapAttributeNumber);
+
+ ExecCheckRTPerms(list_make1(rte), true);
+
/*
* Now replace the query's DestReceiver with one for SELECT INTO
*/
- queryDesc->dest = CreateDestReceiver(DestIntoRel);
- myState = (DR_intorel *) queryDesc->dest;
+ myState = (DR_intorel *) CreateDestReceiver(DestIntoRel);
Assert(myState->pub.mydest == DestIntoRel);
myState->estate = estate;
+ myState->origdest = queryDesc->dest;
myState->rel = intoRelationDesc;
+ queryDesc->dest = (DestReceiver *) myState;
+
/*
- * We can skip WAL-logging the insertions, unless PITR is in use. We can
- * skip the FSM in any case.
+ * We can skip WAL-logging the insertions, unless PITR or streaming
+ * replication is in use. We can skip the FSM in any case.
*/
myState->hi_options = HEAP_INSERT_SKIP_FSM |
- (XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
+ (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
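+ /* (XLogIsNeeded() is true when wal_level is high enough that the WAL must be kept for archiving or streaming replication.) */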
myState->bistate = GetBulkInsertState();
- /* Not using WAL requires rd_targblock be initially invalid */
- Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
+ /* Not using WAL requires smgr_targblock be initially invalid */
+ Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
}
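/*
 * XXX Illustrative sketch, not part of this patch: the receiver's receive
 * callback consumes the fields set up above roughly as follows:
 *
 *		tuple = ExecMaterializeSlot(slot);
 *		heap_insert(myState->rel, tuple,
 *					myState->estate->es_output_cid,
 *					myState->hi_options, myState->bistate);
 */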
/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
DR_intorel *myState = (DR_intorel *) queryDesc->dest;
- /* OpenIntoRel might never have gotten called */
- if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
+ /*
+ * OpenIntoRel might never have gotten called, and we also want to guard
+ * against double destruction.
+ */
+ if (myState && myState->pub.mydest == DestIntoRel)
{
FreeBulkInsertState(myState->bistate);
/* close rel, but keep lock until commit */
heap_close(myState->rel, NoLock);
- myState->rel = NULL;
+ /* restore the receiver belonging to executor's caller */
+ queryDesc->dest = myState->origdest;
+
+ /* might as well invoke my destructor */
+ intorel_destroy((DestReceiver *) myState);
}
}
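/*
 * XXX For reference, not part of this patch: intorel_destroy simply
 * pfree's the receiver, which is why queryDesc->dest must be restored
 * to the caller's receiver before the destructor is invoked.
 */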