* trigger.c
* PostgreSQL TRIGGERs support code.
*
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
#include "access/genam.h"
#include "access/heapam.h"
#include "access/sysattr.h"
+#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/objectaccess.h"
#include "catalog/pg_constraint.h"
+#include "catalog/pg_constraint_fn.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "commands/trigger.h"
#include "executor/executor.h"
-#include "executor/instrument.h"
#include "miscadmin.h"
#include "nodes/bitmapset.h"
#include "nodes/makefuncs.h"
#include "pgstat.h"
#include "rewrite/rewriteManip.h"
#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
+#include "utils/tuplestore.h"
/* GUC variables */
int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
+/* How many levels deep into trigger execution are we? */
+static int MyTriggerDepth = 0;
-#define GetModifiedColumns(relinfo, estate) \
- (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->modifiedCols)
+/*
+ * Note that similar macros also exist in executor/execMain.c. There does not
+ * appear to be any good header to put them into, given the structures that
+ * they use, so we let them be duplicated. Be sure to update all if one needs
+ * to be changed, however.
+ */
+#define GetUpdatedColumns(relinfo, estate) \
+ (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
/* Local function prototypes */
static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
EPQState *epqstate,
ResultRelInfo *relinfo,
ItemPointer tid,
+ LockTupleMode lockmode,
TupleTableSlot **newSlot);
static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
Trigger *trigger, TriggerEvent event,
int event, bool row_trigger,
HeapTuple oldtup, HeapTuple newtup,
List *recheckIndexes, Bitmapset *modifiedCols);
+static void AfterTriggerEnlargeQueryState(void);
/*
- * Create a trigger. Returns the OID of the created trigger.
+ * Create a trigger. Returns the address of the created trigger.
*
* queryString is the source text of the CREATE TRIGGER command.
* This must be supplied if a whenClause is specified, else it can be NULL.
*
+ * relOid, if nonzero, is the relation on which the trigger should be
+ * created. If zero, the name provided in the statement will be looked up.
+ *
+ * refRelOid, if nonzero, is the relation to which the constraint trigger
+ * refers. If zero, the constraint relation name provided in the statement
+ * will be looked up as needed.
+ *
* constraintOid, if nonzero, says that this trigger is being created
* internally to implement that constraint. A suitable pg_depend entry will
- * be made to link the trigger to that constraint. constraintOid is zero when
+ * be made to link the trigger to that constraint. constraintOid is zero when
* executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
* TRIGGER, we build a pg_constraint entry internally.)
*
* if TRUE causes us to modify the given trigger name to ensure uniqueness.
*
* When isInternal is not true we require ACL_TRIGGER permissions on the
- * relation. For internal triggers the caller must apply any required
- * permission checks.
+ * relation, as well as ACL_EXECUTE on the trigger function. For internal
+ * triggers the caller must apply any required permission checks.
*
- * Note: can return InvalidOid if we decided to not create a trigger at all,
- * but a foreign-key constraint. This is a kluge for backwards compatibility.
+ * Note: can return InvalidObjectAddress if we decided to not create a trigger
+ * at all, but a foreign-key constraint. This is a kluge for backwards
+ * compatibility.
*/
-Oid
+ObjectAddress
CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
- Oid constraintOid, Oid indexOid,
+ Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
bool isInternal)
{
int16 tgtype;
int ncolumns;
- int2 *columns;
+ int16 *columns;
int2vector *tgattr;
Node *whenClause;
List *whenRtable;
Oid constrrelid = InvalidOid;
ObjectAddress myself,
referenced;
+ char *oldtablename = NULL;
+ char *newtablename = NULL;
- rel = heap_openrv(stmt->relation, AccessExclusiveLock);
+ if (OidIsValid(relOid))
+ rel = heap_open(relOid, ShareRowExclusiveLock);
+ else
+ rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
/*
* Triggers must be on tables or views, and there are additional
* relation-type-specific restrictions.
*/
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ if (rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
/* Tables can't have INSTEAD OF triggers */
if (stmt->timing != TRIGGER_TYPE_BEFORE &&
errmsg("\"%s\" is a table",
RelationGetRelationName(rel)),
errdetail("Tables cannot have INSTEAD OF triggers.")));
+ /* Disallow ROW triggers on partitioned tables */
+ if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a partitioned table",
+ RelationGetRelationName(rel)),
+ errdetail("Partitioned tables cannot have ROW triggers.")));
}
else if (rel->rd_rel->relkind == RELKIND_VIEW)
{
RelationGetRelationName(rel)),
errdetail("Views cannot have TRUNCATE triggers.")));
}
+ else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ if (stmt->timing != TRIGGER_TYPE_BEFORE &&
+ stmt->timing != TRIGGER_TYPE_AFTER)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a foreign table",
+ RelationGetRelationName(rel)),
+ errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
+
+ if (TRIGGER_FOR_TRUNCATE(stmt->events))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a foreign table",
+ RelationGetRelationName(rel)),
+ errdetail("Foreign tables cannot have TRUNCATE triggers.")));
+
+ if (stmt->isconstraint)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a foreign table",
+ RelationGetRelationName(rel)),
+ errdetail("Foreign tables cannot have constraint triggers.")));
+ }
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("permission denied: \"%s\" is a system catalog",
RelationGetRelationName(rel))));
- if (stmt->isconstraint && stmt->constrrel != NULL)
- constrrelid = RangeVarGetRelid(stmt->constrrel, false);
+ if (stmt->isconstraint)
+ {
+ /*
+ * We must take a lock on the target relation to protect against
+ * concurrent drop. It's not clear that AccessShareLock is strong
+ * enough, but we certainly need at least that much... otherwise, we
+ * might end up creating a pg_constraint entry referencing a
+ * nonexistent table.
+ */
+ if (OidIsValid(refRelOid))
+ {
+ LockRelationOid(refRelOid, AccessShareLock);
+ constrrelid = refRelOid;
+ }
+ else if (stmt->constrrel != NULL)
+ constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
+ false);
+ }
/* permission checks */
if (!isInternal)
errmsg("INSTEAD OF triggers cannot have column lists")));
}
+ /*
+ * We don't yet support naming ROW transition variables, but the parser
+ * recognizes the syntax so we can give a nicer message here.
+ *
+ * Per standard, REFERENCING TABLE names are only allowed on AFTER
+ * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
+ * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
+ * only allowed once. Per standard, OLD may not be specified when
+ * creating a trigger only for INSERT, and NEW may not be specified when
+ * creating a trigger only for DELETE.
+ *
+ * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
+ * reference both ROW and TABLE transition data.
+ */
+ if (stmt->transitionRels != NIL)
+ {
+ List *varList = stmt->transitionRels;
+ ListCell *lc;
+
+ foreach(lc, varList)
+ {
+ TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
+
+ if (!(tt->isTable))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ROW variable naming in the REFERENCING clause is not supported"),
+ errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
+
+ /*
+ * Because of the above test, we omit further ROW-related testing
+ * below. If we later allow naming OLD and NEW ROW variables,
+ * adjustments will be needed below.
+ */
+
+ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a partitioned table",
+ RelationGetRelationName(rel)),
+ errdetail("Triggers on partitioned tables cannot have transition tables.")));
+
+ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a foreign table",
+ RelationGetRelationName(rel)),
+ errdetail("Triggers on foreign tables cannot have transition tables.")));
+
+ if (rel->rd_rel->relkind == RELKIND_VIEW)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a view",
+ RelationGetRelationName(rel)),
+ errdetail("Triggers on views cannot have transition tables.")));
+
+ if (stmt->timing != TRIGGER_TYPE_AFTER)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("transition table name can only be specified for an AFTER trigger")));
+
+ if (TRIGGER_FOR_TRUNCATE(tgtype))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("TRUNCATE triggers with transition tables are not supported")));
+
+ if (tt->isNew)
+ {
+ if (!(TRIGGER_FOR_INSERT(tgtype) ||
+ TRIGGER_FOR_UPDATE(tgtype)))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
+
+ if (newtablename != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("NEW TABLE cannot be specified multiple times")));
+
+ newtablename = tt->name;
+ }
+ else
+ {
+ if (!(TRIGGER_FOR_DELETE(tgtype) ||
+ TRIGGER_FOR_UPDATE(tgtype)))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
+
+ if (oldtablename != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("OLD TABLE cannot be specified multiple times")));
+
+ oldtablename = tt->name;
+ }
+ }
+
+ if (newtablename != NULL && oldtablename != NULL &&
+ strcmp(newtablename, oldtablename) == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
+ }
+
/*
* Parse the WHEN clause, if any
*/
/* Transform expression. Copy to be sure we don't modify original */
whenClause = transformWhereClause(pstate,
copyObject(stmt->whenClause),
+ EXPR_KIND_TRIGGER_WHEN,
"WHEN");
/* we have to fix its collations too */
assign_expr_collations(pstate, whenClause);
- /*
- * No subplans or aggregates, please
- */
- if (pstate->p_hasSubLinks)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in trigger WHEN condition")));
- if (pstate->p_hasAggs)
- ereport(ERROR,
- (errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in trigger WHEN condition")));
- if (pstate->p_hasWindowFuncs)
- ereport(ERROR,
- (errcode(ERRCODE_WINDOWING_ERROR),
- errmsg("cannot use window function in trigger WHEN condition")));
-
/*
* Check for disallowed references to OLD/NEW.
*
* subselects in WHEN clauses; it would fail to examine the contents
* of subselects.
*/
- varList = pull_var_clause(whenClause, PVC_REJECT_PLACEHOLDERS);
+ varList = pull_var_clause(whenClause, 0);
foreach(lc, varList)
{
Var *var = (Var *) lfirst(lc);
* Find and validate the trigger function.
*/
funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
+ if (!isInternal)
+ {
+ aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_PROC,
+ NameListToString(stmt->funcname));
+ }
funcrettype = get_func_rettype(funcoid);
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we see a
+ * We allow OPAQUE just so we can load old dump files. When we see a
* trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
{
ereport(WARNING,
- (errmsg("changing return type of function %s from \"opaque\" to \"trigger\"",
- NameListToString(stmt->funcname))));
+ (errmsg("changing return type of function %s from %s to %s",
+ NameListToString(stmt->funcname),
+ "opaque", "trigger")));
SetFunctionReturnType(funcoid, TRIGGEROID);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("function %s must return type \"trigger\"",
- NameListToString(stmt->funcname))));
+ errmsg("function %s must return type %s",
+ NameListToString(stmt->funcname), "trigger")));
}
/*
* references one of the built-in RI_FKey trigger functions, assume it is
* from a dump of a pre-7.3 foreign key constraint, and take steps to
* convert this legacy representation into a regular foreign key
- * constraint. Ugly, but necessary for loading old dump files.
+ * constraint. Ugly, but necessary for loading old dump files.
*/
if (stmt->isconstraint && !isInternal &&
list_length(stmt->args) >= 6 &&
ConvertTriggerToFK(stmt, funcoid);
- return InvalidOid;
+ return InvalidObjectAddress;
}
/*
stmt->initdeferred,
true,
RelationGetRelid(rel),
- NULL, /* no conkey */
+ NULL, /* no conkey */
0,
- InvalidOid, /* no domain */
- InvalidOid, /* no index */
- InvalidOid, /* no foreign key */
+ InvalidOid, /* no domain */
+ InvalidOid, /* no index */
+ InvalidOid, /* no foreign key */
NULL,
NULL,
NULL,
' ',
' ',
' ',
- NULL, /* no exclusion */
- NULL, /* no check constraint */
+ NULL, /* no exclusion */
+ NULL, /* no check constraint */
NULL,
NULL,
- true, /* islocal */
- 0); /* inhcount */
+ true, /* islocal */
+ 0, /* inhcount */
+ true, /* isnoinherit */
+ isInternal); /* is_internal */
}
/*
/*
* If trigger is internally generated, modify the provided trigger name to
- * ensure uniqueness by appending the trigger OID. (Callers will usually
+ * ensure uniqueness by appending the trigger OID. (Callers will usually
* supply a simple constant trigger name in these cases.)
*/
if (isInternal)
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
- * NOTE that this is cool only because we have AccessExclusiveLock on
+ * NOTE that this is cool only because we have ShareRowExclusiveLock on
* the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(RelationGetRelid(rel)));
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, 1, &key);
+ NULL, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ trigname, RelationGetRelationName(rel))));
}
systable_endscan(tgscan);
}
ListCell *cell;
int i = 0;
- columns = (int2 *) palloc(ncolumns * sizeof(int2));
+ columns = (int16 *) palloc(ncolumns * sizeof(int16));
foreach(cell, stmt->columns)
{
char *name = strVal(lfirst(cell));
- int2 attnum;
+ int16 attnum;
int j;
- /* Lookup column name. System columns are not allowed */
+ /* Lookup column name. System columns are not allowed */
attnum = attnameAttNum(rel, name, false);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
else
nulls[Anum_pg_trigger_tgqual - 1] = true;
+ if (oldtablename)
+ values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
+ CStringGetDatum(oldtablename));
+ else
+ nulls[Anum_pg_trigger_tgoldtable - 1] = true;
+ if (newtablename)
+ values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
+ CStringGetDatum(newtablename));
+ else
+ nulls[Anum_pg_trigger_tgnewtable - 1] = true;
+
tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
/* force tuple to have the desired OID */
/*
* Insert tuple into pg_trigger.
*/
- simple_heap_insert(tgrel, tuple);
-
- CatalogUpdateIndexes(tgrel, tuple);
+ CatalogTupleInsert(tgrel, tuple);
heap_freetuple(tuple);
heap_close(tgrel, RowExclusiveLock);
pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
+ if (oldtablename)
+ pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
+ if (newtablename)
+ pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
/*
* Update relation's pg_class entry. Crucial side-effect: other backends
((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
- simple_heap_update(pgrel, &tuple->t_self, tuple);
-
- CatalogUpdateIndexes(pgrel, tuple);
+ CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
heap_freetuple(tuple);
heap_close(pgrel, RowExclusiveLock);
else
{
/*
- * User CREATE TRIGGER, so place dependencies. We make trigger be
+ * User CREATE TRIGGER, so place dependencies. We make trigger be
* auto-dropped if its relation is dropped or if the FK relation is
* dropped. (Auto drop is compatible with our pre-7.3 behavior.)
*/
DEPENDENCY_NORMAL);
/* Post creation hook for new trigger */
- InvokeObjectAccessHook(OAT_POST_CREATE,
- TriggerRelationId, trigoid, 0);
+ InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
+ isInternal);
/* Keep lock on target rel until end of xact */
heap_close(rel, NoLock);
- return trigoid;
+ return myself;
}
* full-fledged foreign key constraints.
*
* The conversion is complex because a pre-7.3 foreign key involved three
- * separate triggers, which were reported separately in dumps. While the
+ * separate triggers, which were reported separately in dumps. While the
* single trigger on the referencing table adds no new information, we need
* to know the trigger functions of both of the triggers on the referenced
* table to build the constraint declaration. Also, due to lack of proper
char *constr_name;
char *fk_table_name;
char *pk_table_name;
- char fk_matchtype = FKCONSTR_MATCH_UNSPECIFIED;
+ char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
List *fk_attrs = NIL;
List *pk_attrs = NIL;
StringInfoData buf;
if (strcmp(strVal(arg), "FULL") == 0)
fk_matchtype = FKCONSTR_MATCH_FULL;
else
- fk_matchtype = FKCONSTR_MATCH_UNSPECIFIED;
+ fk_matchtype = FKCONSTR_MATCH_SIMPLE;
continue;
}
if (i % 2)
ereport(NOTICE,
(errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
constr_name, buf.data),
- errdetail("%s", _(funcdescr[funcnum]))));
+ errdetail_internal("%s", _(funcdescr[funcnum]))));
oldContext = MemoryContextSwitchTo(TopMemoryContext);
info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
info->args = copyObject(stmt->args);
ereport(NOTICE,
(errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
constr_name, buf.data),
- errdetail("%s", _(funcdescr[funcnum]))));
+ errdetail_internal("%s", _(funcdescr[funcnum]))));
}
else
{
AlterTableStmt *atstmt = makeNode(AlterTableStmt);
AlterTableCmd *atcmd = makeNode(AlterTableCmd);
Constraint *fkcon = makeNode(Constraint);
+ PlannedStmt *wrapper = makeNode(PlannedStmt);
ereport(NOTICE,
(errmsg("converting trigger group into constraint \"%s\" %s",
constr_name, buf.data),
- errdetail("%s", _(funcdescr[funcnum]))));
+ errdetail_internal("%s", _(funcdescr[funcnum]))));
fkcon->contype = CONSTR_FOREIGN;
fkcon->location = -1;
if (funcnum == 2)
fkcon->skip_validation = false;
fkcon->initially_valid = true;
+ /* finally, wrap it in a dummy PlannedStmt */
+ wrapper->commandType = CMD_UTILITY;
+ wrapper->canSetTag = false;
+ wrapper->utilityStmt = (Node *) atstmt;
+ wrapper->stmt_location = -1;
+ wrapper->stmt_len = -1;
+
/* ... and execute it */
- ProcessUtility((Node *) atstmt,
+ ProcessUtility(wrapper,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
- NULL, false, None_Receiver, NULL);
+ PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
+ None_Receiver, NULL);
/* Remove the matched item from the list */
info_list = list_delete_ptr(info_list, info);
}
}
-
-/*
- * DropTrigger - drop an individual trigger by name
- */
-void
-DropTrigger(Oid relid, const char *trigname, DropBehavior behavior,
- bool missing_ok)
-{
- ObjectAddress object;
-
- object.classId = TriggerRelationId;
- object.objectId = get_trigger_oid(relid, trigname, missing_ok);
- object.objectSubId = 0;
-
- if (!OidIsValid(object.objectId))
- {
- ereport(NOTICE,
- (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping",
- trigname, get_rel_name(relid))));
- return;
- }
-
- if (!pg_class_ownercheck(relid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
- get_rel_name(relid));
-
- /*
- * Do the deletion
- */
- performDeletion(&object, behavior);
-}
-
/*
* Guts of trigger deletion.
*/
ObjectIdGetDatum(trigOid));
tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
- SnapshotNow, 1, skey);
+ NULL, 1, skey);
tup = systable_getnext(tgscan);
if (!HeapTupleIsValid(tup))
rel = heap_open(relid, AccessExclusiveLock);
if (rel->rd_rel->relkind != RELKIND_RELATION &&
- rel->rd_rel->relkind != RELKIND_VIEW)
+ rel->rd_rel->relkind != RELKIND_VIEW &&
+ rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
+ rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table or view",
+ errmsg("\"%s\" is not a table, view, or foreign table",
RelationGetRelationName(rel))));
if (!allowSystemTableMods && IsSystemRelation(rel))
/*
* Delete the pg_trigger tuple.
*/
- simple_heap_delete(tgrel, &tup->t_self);
+ CatalogTupleDelete(tgrel, &tup->t_self);
systable_endscan(tgscan);
heap_close(tgrel, RowExclusiveLock);
CStringGetDatum(trigname));
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, 2, skey);
+ NULL, 2, skey);
tup = systable_getnext(tgscan);
return oid;
}
+/*
+ * Perform permissions and integrity checks before acquiring a relation lock.
+ *
+ * Installed as the RangeVarGetRelidExtended() callback for ALTER TRIGGER
+ * ... RENAME (see renametrig below), so it runs before the relation lock
+ * is taken and may be invoked again if the name lookup is retried.  It
+ * must therefore tolerate the relation vanishing concurrently.  The
+ * oldrelid and arg parameters are part of the callback signature but are
+ * unused here.
+ */
+static void
+RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
+								 void *arg)
+{
+	HeapTuple	tuple;
+	Form_pg_class form;
+
+	/* Fetch the pg_class row; if it's gone, let the caller's retry logic cope */
+	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+	if (!HeapTupleIsValid(tuple))
+		return;					/* concurrently dropped */
+	form = (Form_pg_class) GETSTRUCT(tuple);
+
+	/* only tables and views can have triggers */
+	if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
+		form->relkind != RELKIND_FOREIGN_TABLE &&
+		form->relkind != RELKIND_PARTITIONED_TABLE)
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not a table, view, or foreign table",
+						rv->relname)));
+
+	/* you must own the table to rename one of its triggers */
+	if (!pg_class_ownercheck(relid, GetUserId()))
+		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
+	/* system catalogs are off-limits unless allow_system_table_mods is set */
+	if (!allowSystemTableMods && IsSystemClass(relid, form))
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 errmsg("permission denied: \"%s\" is a system catalog",
+						rv->relname)));
+
+	/* The ereport(ERROR) exits above leak the refcount, but error recovery
+	 * releases syscache pins anyway, per usual backend convention. */
+	ReleaseSysCache(tuple);
+}
+
/*
* renametrig - changes the name of a trigger on a relation
*
* modify tgname in trigger tuple
* update row in catalog
*/
-void
-renametrig(Oid relid,
- const char *oldname,
- const char *newname)
+ObjectAddress
+renametrig(RenameStmt *stmt)
{
+ Oid tgoid;
Relation targetrel;
Relation tgrel;
HeapTuple tuple;
SysScanDesc tgscan;
ScanKeyData key[2];
+ Oid relid;
+ ObjectAddress address;
/*
- * Grab an exclusive lock on the target table, which we will NOT release
- * until end of transaction.
+ * Look up name, check permissions, and acquire lock (which we will NOT
+ * release until end of transaction).
*/
- targetrel = heap_open(relid, AccessExclusiveLock);
+ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
+ false, false,
+ RangeVarCallbackForRenameTrigger,
+ NULL);
+
+ /* Have lock already, so just need to build relcache entry. */
+ targetrel = relation_open(relid, NoLock);
/*
* Scan pg_trigger twice for existing triggers on relation. We do this in
ScanKeyInit(&key[1],
Anum_pg_trigger_tgname,
BTEqualStrategyNumber, F_NAMEEQ,
- PointerGetDatum(newname));
+ PointerGetDatum(stmt->newname));
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, 2, key);
+ NULL, 2, key);
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ stmt->newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
ScanKeyInit(&key[1],
Anum_pg_trigger_tgname,
BTEqualStrategyNumber, F_NAMEEQ,
- PointerGetDatum(oldname));
+ PointerGetDatum(stmt->subname));
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, 2, key);
+ NULL, 2, key);
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
+ tgoid = HeapTupleGetOid(tuple);
+
/*
* Update pg_trigger tuple with new tgname.
*/
tuple = heap_copytuple(tuple); /* need a modifiable copy */
- namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname, newname);
+ namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
+ stmt->newname);
- simple_heap_update(tgrel, &tuple->t_self, tuple);
+ CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
- /* keep system catalog indexes current */
- CatalogUpdateIndexes(tgrel, tuple);
+ InvokeObjectPostAlterHook(TriggerRelationId,
+ HeapTupleGetOid(tuple), 0);
/*
* Invalidate relation's relcache entry so that other backends (and
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for table \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ stmt->subname, RelationGetRelationName(targetrel))));
}
+ ObjectAddressSet(address, TriggerRelationId, tgoid);
+
systable_endscan(tgscan);
heap_close(tgrel, RowExclusiveLock);
/*
* Close rel, but keep exclusive lock!
*/
- heap_close(targetrel, NoLock);
+ relation_close(targetrel, NoLock);
+
+ return address;
}
nkeys = 1;
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, nkeys, keys);
+ NULL, nkeys, keys);
found = changed = false;
newtrig->tgenabled = fires_when;
- simple_heap_update(tgrel, &newtup->t_self, newtup);
-
- /* Keep catalog indexes current */
- CatalogUpdateIndexes(tgrel, newtup);
+ CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
heap_freetuple(newtup);
changed = true;
}
+
+ InvokeObjectPostAlterHook(TriggerRelationId,
+ HeapTupleGetOid(tuple), 0);
}
systable_endscan(tgscan);
tgrel = heap_open(TriggerRelationId, AccessShareLock);
tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
- SnapshotNow, 1, &skey);
+ NULL, 1, &skey);
while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
{
build->tgnattr = pg_trigger->tgattr.dim1;
if (build->tgnattr > 0)
{
- build->tgattr = (int2 *) palloc(build->tgnattr * sizeof(int2));
+ build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
memcpy(build->tgattr, &(pg_trigger->tgattr.values),
- build->tgnattr * sizeof(int2));
+ build->tgnattr * sizeof(int16));
}
else
build->tgattr = NULL;
bytea *val;
char *p;
- val = DatumGetByteaP(fastgetattr(htup,
- Anum_pg_trigger_tgargs,
- tgrel->rd_att, &isnull));
+ val = DatumGetByteaPP(fastgetattr(htup,
+ Anum_pg_trigger_tgargs,
+ tgrel->rd_att, &isnull));
if (isnull)
elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
RelationGetRelationName(relation));
- p = (char *) VARDATA(val);
+ p = (char *) VARDATA_ANY(val);
build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
for (i = 0; i < build->tgnargs; i++)
{
}
else
build->tgargs = NULL;
+
+ datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
+ tgrel->rd_att, &isnull);
+ if (!isnull)
+ build->tgoldtable =
+ DatumGetCString(DirectFunctionCall1(nameout, datum));
+ else
+ build->tgoldtable = NULL;
+
+ datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
+ tgrel->rd_att, &isnull);
+ if (!isnull)
+ build->tgnewtable =
+ DatumGetCString(DirectFunctionCall1(nameout, datum));
+ else
+ build->tgnewtable = NULL;
+
datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
tgrel->rd_att, &isnull);
if (!isnull)
trigdesc->trig_truncate_after_statement |=
TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
+
+ trigdesc->trig_insert_new_table |=
+ (TRIGGER_FOR_INSERT(tgtype) &&
+ TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
+ trigdesc->trig_update_old_table |=
+ (TRIGGER_FOR_UPDATE(tgtype) &&
+ TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
+ trigdesc->trig_update_new_table |=
+ (TRIGGER_FOR_UPDATE(tgtype) &&
+ TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
+ trigdesc->trig_delete_old_table |=
+ (TRIGGER_FOR_DELETE(tgtype) &&
+ TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
}
/*
trigger->tgname = pstrdup(trigger->tgname);
if (trigger->tgnattr > 0)
{
- int2 *newattr;
+ int16 *newattr;
- newattr = (int2 *) palloc(trigger->tgnattr * sizeof(int2));
+ newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
memcpy(newattr, trigger->tgattr,
- trigger->tgnattr * sizeof(int2));
+ trigger->tgnattr * sizeof(int16));
trigger->tgattr = newattr;
}
if (trigger->tgnargs > 0)
}
if (trigger->tgqual)
trigger->tgqual = pstrdup(trigger->tgqual);
+ if (trigger->tgoldtable)
+ trigger->tgoldtable = pstrdup(trigger->tgoldtable);
+ if (trigger->tgnewtable)
+ trigger->tgnewtable = pstrdup(trigger->tgnewtable);
trigger++;
}
}
if (trigger->tgqual)
pfree(trigger->tgqual);
+ if (trigger->tgoldtable)
+ pfree(trigger->tgoldtable);
+ if (trigger->tgnewtable)
+ pfree(trigger->tgnewtable);
trigger++;
}
pfree(trigdesc->triggers);
return false;
if (trig1->tgnattr > 0 &&
memcmp(trig1->tgattr, trig2->tgattr,
- trig1->tgnattr * sizeof(int2)) != 0)
+ trig1->tgnattr * sizeof(int16)) != 0)
return false;
for (j = 0; j < trig1->tgnargs; j++)
if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
return false;
else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
return false;
+ if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
+ /* ok */ ;
+ else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
+ return false;
+ else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
+ return false;
+ if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
+ /* ok */ ;
+ else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
+ return false;
+ else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
+ return false;
}
}
else if (trigdesc2 != NULL)
return false;
return true;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Call a trigger function.
Datum result;
MemoryContext oldContext;
+ /*
+ * Protect against code paths that may fail to initialize transition table
+ * info.
+ */
+ Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
+ TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
+ TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
+ TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
+ !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
+ !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
+ (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
+
finfo += tgindx;
/*
pgstat_init_function_usage(&fcinfo, &fcusage);
- result = FunctionCallInvoke(&fcinfo);
+ MyTriggerDepth++;
+ PG_TRY();
+ {
+ result = FunctionCallInvoke(&fcinfo);
+ }
+ PG_CATCH();
+ {
+ MyTriggerDepth--;
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+ MyTriggerDepth--;
pgstat_end_function_usage(&fcusage, true);
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_trigtuple = NULL;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
{
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- if (trigdesc && trigdesc->trig_insert_after_row)
+ if (trigdesc &&
+ (trigdesc->trig_insert_after_row || trigdesc->trig_insert_new_table))
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
true, NULL, trigtuple, recheckIndexes, NULL);
}
TRIGGER_EVENT_INSTEAD;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
{
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_trigtuple = NULL;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
bool
ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
ResultRelInfo *relinfo,
- ItemPointer tupleid)
+ ItemPointer tupleid,
+ HeapTuple fdw_trigtuple)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
bool result = true;
TupleTableSlot *newSlot;
int i;
- trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- &newSlot);
- if (trigtuple == NULL)
- return false;
+ Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
+ if (fdw_trigtuple == NULL)
+ {
+ trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
+ LockTupleExclusive, &newSlot);
+ if (trigtuple == NULL)
+ return false;
+ }
+ else
+ trigtuple = fdw_trigtuple;
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
{
if (newtuple != trigtuple)
heap_freetuple(newtuple);
}
- heap_freetuple(trigtuple);
+ if (trigtuple != fdw_trigtuple)
+ heap_freetuple(trigtuple);
return result;
}
/*
 * ExecARDeleteTriggers()
 *
 * Queue AFTER ROW DELETE trigger events once a row has actually been
 * deleted.  Exactly one of tupleid (regular table: fetch old tuple by TID)
 * or fdw_trigtuple (foreign table: tuple supplied by the FDW) must be
 * valid; the XOR Assert below enforces that contract.
 */
void
ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid)
+ ItemPointer tupleid,
+ HeapTuple fdw_trigtuple)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- if (trigdesc && trigdesc->trig_delete_after_row)
+ /* Fire only if there are after-row triggers or an OLD transition table */
+ if (trigdesc &&
+ (trigdesc->trig_delete_after_row || trigdesc->trig_delete_old_table))
{
- HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, NULL);
+ HeapTuple trigtuple;
+
+ Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
+ if (fdw_trigtuple == NULL)
+ trigtuple = GetTupleForTrigger(estate,
+ NULL,
+ relinfo,
+ tupleid,
+ LockTupleExclusive,
+ NULL);
+ else
+ trigtuple = fdw_trigtuple;
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
true, trigtuple, NULL, NIL, NULL);
+ /* Free only tuples we fetched ourselves; the FDW owns fdw_trigtuple */
- heap_freetuple(trigtuple);
+ if (trigtuple != fdw_trigtuple)
+ heap_freetuple(trigtuple);
}
}
TRIGGER_EVENT_INSTEAD;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
{
TriggerDesc *trigdesc;
int i;
TriggerData LocTriggerData;
- Bitmapset *modifiedCols;
+ Bitmapset *updatedCols;
trigdesc = relinfo->ri_TrigDesc;
if (!trigdesc->trig_update_before_statement)
return;
- modifiedCols = GetModifiedColumns(relinfo, estate);
+ updatedCols = GetUpdatedColumns(relinfo, estate);
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_trigtuple = NULL;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
TRIGGER_TYPE_UPDATE))
continue;
if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
- modifiedCols, NULL, NULL))
+ updatedCols, NULL, NULL))
continue;
LocTriggerData.tg_trigger = trigger;
if (trigdesc && trigdesc->trig_update_after_statement)
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
false, NULL, NULL, NIL,
- GetModifiedColumns(relinfo, estate));
+ GetUpdatedColumns(relinfo, estate));
}
TupleTableSlot *
ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
ResultRelInfo *relinfo,
- ItemPointer tupleid, TupleTableSlot *slot)
+ ItemPointer tupleid,
+ HeapTuple fdw_trigtuple,
+ TupleTableSlot *slot)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
HeapTuple slottuple = ExecMaterializeSlot(slot);
HeapTuple oldtuple;
TupleTableSlot *newSlot;
int i;
- Bitmapset *modifiedCols;
+ Bitmapset *updatedCols;
+ LockTupleMode lockmode;
+
+ /* Determine lock mode to use */
+ lockmode = ExecUpdateLockMode(estate, relinfo);
- /* get a copy of the on-disk tuple we are planning to update */
- trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- &newSlot);
- if (trigtuple == NULL)
- return NULL; /* cancel the update action */
+ Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
+ if (fdw_trigtuple == NULL)
+ {
+ /* get a copy of the on-disk tuple we are planning to update */
+ trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
+ lockmode, &newSlot);
+ if (trigtuple == NULL)
+ return NULL; /* cancel the update action */
+ }
+ else
+ {
+ trigtuple = fdw_trigtuple;
+ newSlot = NULL;
+ }
/*
* In READ COMMITTED isolation level it's possible that target tuple was
newtuple = slottuple;
}
- modifiedCols = GetModifiedColumns(relinfo, estate);
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
TRIGGER_EVENT_ROW |
TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
+ updatedCols = GetUpdatedColumns(relinfo, estate);
for (i = 0; i < trigdesc->numtriggers; i++)
{
Trigger *trigger = &trigdesc->triggers[i];
TRIGGER_TYPE_UPDATE))
continue;
if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
- modifiedCols, trigtuple, newtuple))
+ updatedCols, trigtuple, newtuple))
continue;
LocTriggerData.tg_trigtuple = trigtuple;
heap_freetuple(oldtuple);
if (newtuple == NULL)
{
- heap_freetuple(trigtuple);
+ if (trigtuple != fdw_trigtuple)
+ heap_freetuple(trigtuple);
return NULL; /* "do nothing" */
}
}
- heap_freetuple(trigtuple);
+ if (trigtuple != fdw_trigtuple)
+ heap_freetuple(trigtuple);
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
/*
 * ExecARUpdateTriggers()
 *
 * Queue AFTER ROW UPDATE trigger events once a row has been updated.
 * Exactly one of tupleid (regular table) or fdw_trigtuple (foreign table)
 * must be valid, giving the source of the OLD tuple; newtuple is the
 * already-stored replacement row.  recheckIndexes lists indexes needing
 * deferred uniqueness/exclusion rechecks.
 */
void
ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid, HeapTuple newtuple,
+ ItemPointer tupleid,
+ HeapTuple fdw_trigtuple,
+ HeapTuple newtuple,
List *recheckIndexes)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ /* Fire if there are after-row triggers or OLD/NEW transition tables */
- if (trigdesc && trigdesc->trig_update_after_row)
+ if (trigdesc && (trigdesc->trig_update_after_row ||
+ trigdesc->trig_update_old_table || trigdesc->trig_update_new_table))
{
- HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, NULL);
+ HeapTuple trigtuple;
+
+ Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
+ if (fdw_trigtuple == NULL)
+ trigtuple = GetTupleForTrigger(estate,
+ NULL,
+ relinfo,
+ tupleid,
+ LockTupleExclusive,
+ NULL);
+ else
+ trigtuple = fdw_trigtuple;
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
true, trigtuple, newtuple, recheckIndexes,
- GetModifiedColumns(relinfo, estate));
- heap_freetuple(trigtuple);
+ GetUpdatedColumns(relinfo, estate));
+ /* Free only tuples we fetched ourselves; the FDW owns fdw_trigtuple */
+ if (trigtuple != fdw_trigtuple)
+ heap_freetuple(trigtuple);
}
}
TRIGGER_EVENT_ROW |
TRIGGER_EVENT_INSTEAD;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
for (i = 0; i < trigdesc->numtriggers; i++)
{
Trigger *trigger = &trigdesc->triggers[i];
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_trigtuple = NULL;
LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_oldtable = NULL;
+ LocTriggerData.tg_newtable = NULL;
LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
LocTriggerData.tg_newtuplebuf = InvalidBuffer;
for (i = 0; i < trigdesc->numtriggers; i++)
EPQState *epqstate,
ResultRelInfo *relinfo,
ItemPointer tid,
+ LockTupleMode lockmode,
TupleTableSlot **newSlot)
{
Relation relation = relinfo->ri_RelationDesc;
if (newSlot != NULL)
{
HTSU_Result test;
- ItemPointerData update_ctid;
- TransactionId update_xmax;
+ HeapUpdateFailureData hufd;
*newSlot = NULL;
*/
ltrmark:;
tuple.t_self = *tid;
- test = heap_lock_tuple(relation, &tuple, &buffer,
- &update_ctid, &update_xmax,
+ test = heap_lock_tuple(relation, &tuple,
estate->es_output_cid,
- LockTupleExclusive, false);
+ lockmode, LockWaitBlock,
+ false, &buffer, &hufd);
switch (test)
{
case HeapTupleSelfUpdated:
+
+ /*
+ * The target tuple was already updated or deleted by the
+ * current command, or by a later command in the current
+ * transaction. We ignore the tuple in the former case, and
+ * throw error in the latter case, for the same reasons
+ * enumerated in ExecUpdate and ExecDelete in
+ * nodeModifyTable.c.
+ */
+ if (hufd.cmax != estate->es_output_cid)
+ ereport(ERROR,
+ (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+ errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
+ errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
+
/* treat it as deleted; do not process */
ReleaseBuffer(buffer);
return NULL;
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
+ if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
{
/* it was updated, so look at the updated version */
TupleTableSlot *epqslot;
epqstate,
relation,
relinfo->ri_RangeTableIndex,
- &update_ctid,
- update_xmax);
+ lockmode,
+ &hufd.ctid,
+ hufd.xmax);
if (!TupIsNull(epqslot))
{
- *tid = update_ctid;
+ *tid = hufd.ctid;
*newSlot = epqslot;
/*
*/
return NULL;
+ case HeapTupleInvisible:
+ elog(ERROR, "attempted to lock invisible tuple");
+
default:
ReleaseBuffer(buffer);
elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ /*
+ * Although we already know this tuple is valid, we must lock the
+ * buffer to ensure that no one has a buffer cleanup lock; otherwise
+ * they might move the tuple while we try to copy it. But we can
+ * release the lock before actually doing the heap_copytuple call,
+ * since holding pin is sufficient to prevent anyone from getting a
+ * cleanup lock they don't already hold.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
page = BufferGetPage(buffer);
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
tuple.t_len = ItemIdGetLength(lp);
tuple.t_self = *tid;
tuple.t_tableOid = RelationGetRelid(relation);
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}
result = heap_copytuple(&tuple);
trigger->tgenabled == TRIGGER_DISABLED)
return false;
}
- else /* ORIGIN or LOCAL role */
+ else /* ORIGIN or LOCAL role */
{
if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
trigger->tgenabled == TRIGGER_DISABLED)
if (trigger->tgqual)
{
TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
- List **predicate;
+ ExprState **predicate;
ExprContext *econtext;
TupleTableSlot *oldslot = NULL;
TupleTableSlot *newslot = NULL;
* nodetrees for it. Keep them in the per-query memory context so
* they'll survive throughout the query.
*/
- if (*predicate == NIL)
+ if (*predicate == NULL)
{
Node *tgqual;
oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
tgqual = stringToNode(trigger->tgqual);
- /* Change references to OLD and NEW to INNER and OUTER */
- ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER, 0);
- ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER, 0);
- /* ExecQual wants implicit-AND form */
+ /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
+ ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
+ ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
+ /* ExecPrepareQual wants implicit-AND form */
tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
- *predicate = (List *) ExecPrepareExpr((Expr *) tgqual, estate);
+ *predicate = ExecPrepareQual((List *) tgqual, estate);
MemoryContextSwitchTo(oldContext);
}
}
if (HeapTupleIsValid(newtup))
{
- if (estate->es_trig_tuple_slot == NULL)
+ if (estate->es_trig_newtup_slot == NULL)
{
oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
- estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
+ estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
MemoryContextSwitchTo(oldContext);
}
- newslot = estate->es_trig_tuple_slot;
+ newslot = estate->es_trig_newtup_slot;
if (newslot->tts_tupleDescriptor != tupdesc)
ExecSetSlotDescriptor(newslot, tupdesc);
ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
/*
* Finally evaluate the expression, making the old and/or new tuples
- * available as INNER/OUTER respectively.
+ * available as INNER_VAR/OUTER_VAR respectively.
*/
econtext->ecxt_innertuple = oldslot;
econtext->ecxt_outertuple = newslot;
- if (!ExecQual(*predicate, econtext, false))
+ if (!ExecQual(*predicate, econtext))
return false;
}
bool all_isdeferred;
int numstates; /* number of trigstates[] entries in use */
int numalloc; /* allocated size of trigstates[] */
- SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */
+ SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
} SetConstraintStateData;
typedef SetConstraintStateData *SetConstraintState;
* Per-trigger-event data
*
* The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
- * status bits and one or two tuple CTIDs. Each event record also has an
- * associated AfterTriggerSharedData that is shared across all instances
- * of similar events within a "chunk".
+ * status bits and up to two tuple CTIDs. Each event record also has an
+ * associated AfterTriggerSharedData that is shared across all instances of
+ * similar events within a "chunk".
*
- * We arrange not to waste storage on ate_ctid2 for non-update events.
- * We could go further and not store either ctid for statement-level triggers,
- * but that seems unlikely to be worth the trouble.
+ * For row-level triggers, we arrange not to waste storage on unneeded ctid
+ * fields. Updates of regular tables use two; inserts and deletes of regular
+ * tables use one; foreign tables always use zero and save the tuple(s) to a
+ * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
+ * retrieve a fresh tuple or pair of tuples from that tuplestore, while
+ * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
+ * tuple(s). This permits storing tuples once regardless of the number of
+ * row-level triggers on a foreign table.
+ *
+ * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
+ * require no ctid field. We lack the flag bit space to neatly represent that
+ * distinct case, and it seems unlikely to be worth much trouble.
*
* Note: ats_firing_id is initially zero and is set to something else when
* AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
* Although this is mutable state, we can keep it in AfterTriggerSharedData
* because all instances of the same type of event in a given event list will
* be fired at the same time, if they were queued between the same firing
- * cycles. So we need only ensure that ats_firing_id is zero when attaching
+ * cycles. So we need only ensure that ats_firing_id is zero when attaching
* a new event to an existing AfterTriggerSharedData record.
*/
typedef uint32 TriggerFlags;
-#define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order
- * bits */
-#define AFTER_TRIGGER_2CTIDS 0x10000000
-#define AFTER_TRIGGER_DONE 0x20000000
-#define AFTER_TRIGGER_IN_PROGRESS 0x40000000
+#define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
+#define AFTER_TRIGGER_DONE 0x10000000
+#define AFTER_TRIGGER_IN_PROGRESS 0x20000000
+/* bits describing the size and tuple sources of this event */
+#define AFTER_TRIGGER_FDW_REUSE 0x00000000
+#define AFTER_TRIGGER_FDW_FETCH 0x80000000
+#define AFTER_TRIGGER_1CTID 0x40000000
+#define AFTER_TRIGGER_2CTID 0xC0000000
+#define AFTER_TRIGGER_TUP_BITS 0xC0000000
typedef struct AfterTriggerSharedData *AfterTriggerShared;
ItemPointerData ate_ctid2; /* new updated tuple */
} AfterTriggerEventData;
-/* This struct must exactly match the one above except for not having ctid2 */
+/*
+ * AfterTriggerEventData, minus ate_ctid2.
+ *
+ * Layout must be a strict prefix of AfterTriggerEventData so that events
+ * of different sizes can share one chunked list and be decoded via the
+ * AFTER_TRIGGER_TUP_BITS flags.
+ */
typedef struct AfterTriggerEventDataOneCtid
{
TriggerFlags ate_flags; /* status bits and offset to shared data */
ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
-} AfterTriggerEventDataOneCtid;
+} AfterTriggerEventDataOneCtid;
+
+/* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 (FDW events) */
+typedef struct AfterTriggerEventDataZeroCtids
+{
+ TriggerFlags ate_flags; /* status bits and offset to shared data */
+} AfterTriggerEventDataZeroCtids;
/*
 * Actual storage size of an event record, decoded from its
 * AFTER_TRIGGER_TUP_BITS flags: 2-ctid, 1-ctid, or zero-ctid (FDW) form.
 */
#define SizeofTriggerEvent(evt) \
- (((evt)->ate_flags & AFTER_TRIGGER_2CTIDS) ? \
- sizeof(AfterTriggerEventData) : sizeof(AfterTriggerEventDataOneCtid))
+ (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
+ sizeof(AfterTriggerEventData) : \
+ ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
+ sizeof(AfterTriggerEventDataOneCtid) : \
+ sizeof(AfterTriggerEventDataZeroCtids))
#define GetTriggerSharedData(evt) \
((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
/*
* To avoid palloc overhead, we keep trigger events in arrays in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). The space between CHUNK_DATA_START and freeptr is occupied by
+ * array). The space between CHUNK_DATA_START and freeptr is occupied by
* AfterTriggerEventData records; the space between endfree and endptr is
* occupied by AfterTriggerSharedData records.
*/
typedef struct AfterTriggerEventChunk
{
- struct AfterTriggerEventChunk *next; /* list link */
+ struct AfterTriggerEventChunk *next; /* list link */
char *freeptr; /* start of free space in chunk */
char *endfree; /* end of free space in chunk */
char *endptr; /* end of chunk */
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them. The event
* list chunks themselves are stored in event_cxt.
* immediate-mode triggers, and append any deferred events to the main events
* list.
*
- * maxquerydepth is just the allocated length of query_stack.
+ * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
+ * needed for the current query.
+ *
+ * old_tuplestores[query_depth] and new_tuplestores[query_depth] hold the
+ * transition relations for the current query.
+ *
+ * maxquerydepth is just the allocated length of query_stack and the
+ * tuplestores.
*
* state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
* state data; each subtransaction level that modifies that state first
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
{
CommandId firing_counter; /* next firing ID to assign */
SetConstraintState state; /* the active S C state */
- AfterTriggerEventList events; /* deferred-event list */
+ AfterTriggerEventList events; /* deferred-event list */
int query_depth; /* current query list index */
AfterTriggerEventList *query_stack; /* events pending from each query */
+ Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
+ * each query */
+ Tuplestorestate **old_tuplestores; /* all old tuples from each query */
+ Tuplestorestate **new_tuplestores; /* all new tuples from each query */
int maxquerydepth; /* allocated len of above array */
MemoryContext event_cxt; /* memory context for events, if any */
/* these fields are just for resetting at subtrans abort: */
SetConstraintState *state_stack; /* stacked S C states */
- AfterTriggerEventList *events_stack; /* stacked list pointers */
+ AfterTriggerEventList *events_stack; /* stacked list pointers */
int *depth_stack; /* stacked query_depths */
CommandId *firing_stack; /* stacked firing_counters */
int maxtransdepth; /* allocated len of above arrays */
} AfterTriggersData;
-typedef AfterTriggersData *AfterTriggers;
-
-static AfterTriggers afterTriggers;
-
+static AfterTriggersData afterTriggers;
static void AfterTriggerExecute(AfterTriggerEvent event,
Relation rel, TriggerDesc *trigdesc,
FmgrInfo *finfo,
Instrumentation *instr,
- MemoryContext per_tuple_context);
+ MemoryContext per_tuple_context,
+ TupleTableSlot *trig_tuple_slot1,
+ TupleTableSlot *trig_tuple_slot2);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
Oid tgoid, bool tgisdeferred);
+/*
+ * Gets a current query transition tuplestore and initializes it if necessary.
+ * This can be holding a single transition row tuple (in the case of an FDW)
+ * or a transition table (for an AFTER trigger).
+ *
+ * tss is one of the per-query-depth arrays in afterTriggers
+ * (fdw_tuplestores, old_tuplestores, new_tuplestores).  Caller must ensure
+ * afterTriggers.query_depth indexes a valid slot of tss.
+ */
+static Tuplestorestate *
+GetTriggerTransitionTuplestore(Tuplestorestate **tss)
+{
+ Tuplestorestate *ret;
+
+ /* Lazily create the tuplestore on first use within this query level */
+ ret = tss[afterTriggers.query_depth];
+ if (ret == NULL)
+ {
+ MemoryContext oldcxt;
+ ResourceOwner saveResourceOwner;
+
+ /*
+ * Make the tuplestore valid until end of transaction. This is the
+ * allocation lifespan of the associated events list, but we really
+ * only need it until AfterTriggerEndQuery().
+ */
+ oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+ saveResourceOwner = CurrentResourceOwner;
+ PG_TRY();
+ {
+ /* Charge the tuplestore's resources to the transaction owner */
+ CurrentResourceOwner = TopTransactionResourceOwner;
+ ret = tuplestore_begin_heap(false, false, work_mem);
+ }
+ PG_CATCH();
+ {
+ /* Restore the resource owner even if creation fails */
+ CurrentResourceOwner = saveResourceOwner;
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+ CurrentResourceOwner = saveResourceOwner;
+ MemoryContextSwitchTo(oldcxt);
+
+ tss[afterTriggers.query_depth] = ret;
+ }
+
+ return ret;
+}
+
/* ----------
* afterTriggerCheckState()
*
afterTriggerCheckState(AfterTriggerShared evtshared)
{
Oid tgoid = evtshared->ats_tgoid;
- SetConstraintState state = afterTriggers->state;
+ SetConstraintState state = afterTriggers.state;
int i;
/*
return false;
/*
- * Check if SET CONSTRAINTS has been executed for this specific trigger.
+ * If constraint state exists, SET CONSTRAINTS might have been executed
+ * either for this trigger or for all triggers.
*/
- for (i = 0; i < state->numstates; i++)
+ if (state != NULL)
{
- if (state->trigstates[i].sct_tgoid == tgoid)
- return state->trigstates[i].sct_tgisdeferred;
- }
+ /* Check for SET CONSTRAINTS for this specific trigger. */
+ for (i = 0; i < state->numstates; i++)
+ {
+ if (state->trigstates[i].sct_tgoid == tgoid)
+ return state->trigstates[i].sct_tgisdeferred;
+ }
- /*
- * Check if SET CONSTRAINTS ALL has been executed; if so use that.
- */
- if (state->all_isset)
- return state->all_isdeferred;
+ /* Check for SET CONSTRAINTS ALL. */
+ if (state->all_isset)
+ return state->all_isdeferred;
+ }
/*
* Otherwise return the default state for the trigger.
Size chunksize;
/* Create event context if we didn't already */
- if (afterTriggers->event_cxt == NULL)
- afterTriggers->event_cxt =
+ if (afterTriggers.event_cxt == NULL)
+ afterTriggers.event_cxt =
AllocSetContextCreate(TopTransactionContext,
"AfterTriggerEvents",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_SIZES);
/*
* Chunk size starts at 1KB and is allowed to increase up to 1MB.
chunksize /= 2; /* too many shared records */
chunksize = Min(chunksize, MAX_CHUNK_SIZE);
}
- chunk = MemoryContextAlloc(afterTriggers->event_cxt, chunksize);
+ chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
chunk->next = NULL;
chunk->freeptr = CHUNK_DATA_START(chunk);
chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
* single trigger function.
*
* Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
+ * a single relation. Therefore, we cache the open relation and provide
* fmgr lookup cache space at the caller level. (For triggers fired at
* the end of a query, we can even piggyback on the executor's state.)
*
* instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
* or NULL if no instrumentation is wanted.
* per_tuple_context: memory context to call trigger function in.
+ * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
+ * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
* ----------
*/
static void
AfterTriggerExecute(AfterTriggerEvent event,
Relation rel, TriggerDesc *trigdesc,
FmgrInfo *finfo, Instrumentation *instr,
- MemoryContext per_tuple_context)
+ MemoryContext per_tuple_context,
+ TupleTableSlot *trig_tuple_slot1,
+ TupleTableSlot *trig_tuple_slot2)
{
AfterTriggerShared evtshared = GetTriggerSharedData(event);
Oid tgoid = evtshared->ats_tgoid;
/*
* Fetch the required tuple(s).
*/
- if (ItemPointerIsValid(&(event->ate_ctid1)))
- {
- ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
- elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
- LocTriggerData.tg_trigtuple = &tuple1;
- LocTriggerData.tg_trigtuplebuf = buffer1;
- }
- else
+ switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
{
- LocTriggerData.tg_trigtuple = NULL;
- LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
- }
+ case AFTER_TRIGGER_FDW_FETCH:
+ {
+ Tuplestorestate *fdw_tuplestore =
+ GetTriggerTransitionTuplestore
+ (afterTriggers.fdw_tuplestores);
+
+ if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
+ trig_tuple_slot1))
+ elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
+
+ if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
+ TRIGGER_EVENT_UPDATE &&
+ !tuplestore_gettupleslot(fdw_tuplestore, true, false,
+ trig_tuple_slot2))
+ elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
+ }
+ /* fall through */
+ case AFTER_TRIGGER_FDW_REUSE:
- /* don't touch ctid2 if not there */
- if ((event->ate_flags & AFTER_TRIGGER_2CTIDS) &&
- ItemPointerIsValid(&(event->ate_ctid2)))
- {
- ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
- if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
- elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
- LocTriggerData.tg_newtuple = &tuple2;
- LocTriggerData.tg_newtuplebuf = buffer2;
+ /*
+ * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
+ * ensures that tg_trigtuple does not reference tuplestore memory.
+ * (It is formally possible for the trigger function to queue
+ * trigger events that add to the same tuplestore, which can push
+ * other tuples out of memory.) The distinction is academic,
+ * because we start with a minimal tuple that ExecFetchSlotTuple()
+ * must materialize anyway.
+ */
+ LocTriggerData.tg_trigtuple =
+ ExecMaterializeSlot(trig_tuple_slot1);
+ LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
+
+ LocTriggerData.tg_newtuple =
+ ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
+ TRIGGER_EVENT_UPDATE) ?
+ ExecMaterializeSlot(trig_tuple_slot2) : NULL;
+ LocTriggerData.tg_newtuplebuf = InvalidBuffer;
+
+ break;
+
+ default:
+ if (ItemPointerIsValid(&(event->ate_ctid1)))
+ {
+ ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
+ if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
+ elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
+ LocTriggerData.tg_trigtuple = &tuple1;
+ LocTriggerData.tg_trigtuplebuf = buffer1;
+ }
+ else
+ {
+ LocTriggerData.tg_trigtuple = NULL;
+ LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
+ }
+
+ /* don't touch ctid2 if not there */
+ if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
+ AFTER_TRIGGER_2CTID &&
+ ItemPointerIsValid(&(event->ate_ctid2)))
+ {
+ ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
+ if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
+ elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
+ LocTriggerData.tg_newtuple = &tuple2;
+ LocTriggerData.tg_newtuplebuf = buffer2;
+ }
+ else
+ {
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_newtuplebuf = InvalidBuffer;
+ }
}
+
+ /*
+ * Set up the tuplestore information.
+ */
+ if (LocTriggerData.tg_trigger->tgoldtable)
+ LocTriggerData.tg_oldtable =
+ GetTriggerTransitionTuplestore(afterTriggers.old_tuplestores);
else
- {
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_newtuplebuf = InvalidBuffer;
- }
+ LocTriggerData.tg_oldtable = NULL;
+ if (LocTriggerData.tg_trigger->tgnewtable)
+ LocTriggerData.tg_newtable =
+ GetTriggerTransitionTuplestore(afterTriggers.new_tuplestores);
+ else
+ LocTriggerData.tg_newtable = NULL;
/*
* Setup the remaining trigger information
finfo,
NULL,
per_tuple_context);
- if (rettuple != NULL && rettuple != &tuple1 && rettuple != &tuple2)
+ if (rettuple != NULL &&
+ rettuple != LocTriggerData.tg_trigtuple &&
+ rettuple != LocTriggerData.tg_newtuple)
heap_freetuple(rettuple);
/*
/*
* Mark it as to be fired in this firing cycle.
*/
- evtshared->ats_firing_id = afterTriggers->firing_counter;
+ evtshared->ats_firing_id = afterTriggers.firing_counter;
event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
found = true;
}
TriggerDesc *trigdesc = NULL;
FmgrInfo *finfo = NULL;
Instrumentation *instr = NULL;
+ TupleTableSlot *slot1 = NULL,
+ *slot2 = NULL;
/* Make a local EState if need be */
if (estate == NULL)
per_tuple_context =
AllocSetContextCreate(CurrentMemoryContext,
"AfterTriggerTupleContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_SIZES);
for_each_chunk(chunk, *events)
{
trigdesc = rInfo->ri_TrigDesc;
finfo = rInfo->ri_TrigFunctions;
instr = rInfo->ri_TrigInstrument;
- if (trigdesc == NULL) /* should not happen */
+ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ if (slot1 != NULL)
+ {
+ ExecDropSingleTupleTableSlot(slot1);
+ ExecDropSingleTupleTableSlot(slot2);
+ }
+ slot1 = MakeSingleTupleTableSlot(rel->rd_att);
+ slot2 = MakeSingleTupleTableSlot(rel->rd_att);
+ }
+ if (trigdesc == NULL) /* should not happen */
elog(ERROR, "relation %u has no triggers",
evtshared->ats_relid);
}
* won't try to re-fire it.
*/
AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
- per_tuple_context);
+ per_tuple_context, slot1, slot2);
/*
* Mark the event as done.
events->tailfree = chunk->freeptr;
}
}
+ if (slot1 != NULL)
+ {
+ ExecDropSingleTupleTableSlot(slot1);
+ ExecDropSingleTupleTableSlot(slot2);
+ }
/* Release working resources */
MemoryContextDelete(per_tuple_context);
if (local_estate)
{
- ListCell *l;
-
- foreach(l, estate->es_trig_target_relations)
- {
- ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
-
- /* Close indices and then the relation itself */
- ExecCloseIndices(resultRelInfo);
- heap_close(resultRelInfo->ri_RelationDesc, NoLock);
- }
+ ExecCleanUpTriggerState(estate);
FreeExecutorState(estate);
}
void
AfterTriggerBeginXact(void)
{
-	Assert(afterTriggers == NULL);
+	/*
+	 * Initialize after-trigger state structure to empty
+	 */
+	afterTriggers.firing_counter = (CommandId) 1;	/* mustn't be 0 */
+	/* -1 means we are not inside any query (see AfterTriggerBeginQuery) */
+	afterTriggers.query_depth = -1;
	/*
-	 * Build empty after-trigger state structure
+	 * Verify that there is no leftover state remaining.  If these assertions
+	 * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
+	 * up properly.
	 */
-	afterTriggers = (AfterTriggers)
-		MemoryContextAlloc(TopTransactionContext,
-						   sizeof(AfterTriggersData));
-
-	afterTriggers->firing_counter = (CommandId) 1;	/* mustn't be 0 */
-	afterTriggers->state = SetConstraintStateCreate(8);
-	afterTriggers->events.head = NULL;
-	afterTriggers->events.tail = NULL;
-	afterTriggers->events.tailfree = NULL;
-	afterTriggers->query_depth = -1;
-
-	/* We initialize the query stack to a reasonable size */
-	afterTriggers->query_stack = (AfterTriggerEventList *)
-		MemoryContextAlloc(TopTransactionContext,
-						   8 * sizeof(AfterTriggerEventList));
-	afterTriggers->maxquerydepth = 8;
-
-	/* Context for events is created only when needed */
-	afterTriggers->event_cxt = NULL;
-
-	/* Subtransaction stack is empty until/unless needed */
-	afterTriggers->state_stack = NULL;
-	afterTriggers->events_stack = NULL;
-	afterTriggers->depth_stack = NULL;
-	afterTriggers->firing_stack = NULL;
-	afterTriggers->maxtransdepth = 0;
+	Assert(afterTriggers.state == NULL);
+	Assert(afterTriggers.query_stack == NULL);
+	Assert(afterTriggers.fdw_tuplestores == NULL);
+	Assert(afterTriggers.old_tuplestores == NULL);
+	Assert(afterTriggers.new_tuplestores == NULL);
+	Assert(afterTriggers.maxquerydepth == 0);
+	Assert(afterTriggers.event_cxt == NULL);
+	Assert(afterTriggers.events.head == NULL);
+	Assert(afterTriggers.state_stack == NULL);
+	Assert(afterTriggers.events_stack == NULL);
+	Assert(afterTriggers.depth_stack == NULL);
+	Assert(afterTriggers.firing_stack == NULL);
+	Assert(afterTriggers.maxtransdepth == 0);
}
* AfterTriggerBeginQuery()
*
* Called just before we start processing a single query within a
- * transaction (or subtransaction). Set up to record AFTER trigger
- * events queued by the query. Note that it is allowed to have
- * nested queries within a (sub)transaction.
+ * transaction (or subtransaction). Most of the real work gets deferred
+ * until somebody actually tries to queue a trigger event.
* ----------
*/
void
AfterTriggerBeginQuery(void)
{
-	AfterTriggerEventList *events;
-
-	/* Must be inside a transaction */
-	Assert(afterTriggers != NULL);
-
	/* Increase the query stack depth */
-	afterTriggers->query_depth++;
-
-	/*
-	 * Allocate more space in the query stack if needed.
-	 */
-	if (afterTriggers->query_depth >= afterTriggers->maxquerydepth)
-	{
-		/* repalloc will keep the stack in the same context */
-		int			new_alloc = afterTriggers->maxquerydepth * 2;
-
-		afterTriggers->query_stack = (AfterTriggerEventList *)
-			repalloc(afterTriggers->query_stack,
-					 new_alloc * sizeof(AfterTriggerEventList));
-		afterTriggers->maxquerydepth = new_alloc;
-	}
-
-	/* Initialize this query's list to empty */
-	events = &afterTriggers->query_stack[afterTriggers->query_depth];
-	events->head = NULL;
-	events->tail = NULL;
-	events->tailfree = NULL;
+	/* Stack space for this depth is allocated lazily, by
+	 * AfterTriggerEnlargeQueryState(), only if an event is actually queued. */
+	afterTriggers.query_depth++;
}
AfterTriggerEndQuery(EState *estate)
{
AfterTriggerEventList *events;
-
- /* Must be inside a transaction */
- Assert(afterTriggers != NULL);
+ Tuplestorestate *fdw_tuplestore;
+ Tuplestorestate *old_tuplestore;
+ Tuplestorestate *new_tuplestore;
/* Must be inside a query, too */
- Assert(afterTriggers->query_depth >= 0);
+ Assert(afterTriggers.query_depth >= 0);
+
+ /*
+ * If we never even got as far as initializing the event stack, there
+ * certainly won't be any events, so exit quickly.
+ */
+ if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
+ {
+ afterTriggers.query_depth--;
+ return;
+ }
/*
* Process all immediate-mode triggers queued by the query, and move the
* IMMEDIATE: all events we have decided to defer will be available for it
* to fire.
*
- * We loop in case a trigger queues more events at the same query level
- * (is that even possible?). Be careful here: firing a trigger could
- * result in query_stack being repalloc'd, so we can't save its address
- * across afterTriggerInvokeEvents calls.
+ * We loop in case a trigger queues more events at the same query level.
+ * Ordinary trigger functions, including all PL/pgSQL trigger functions,
+ * will instead fire any triggers in a dedicated query level. Foreign key
+ * enforcement triggers do add to the current query level, thanks to their
+ * passing fire_triggers = false to SPI_execute_snapshot(). Other
+ * C-language triggers might do likewise. Be careful here: firing a
+ * trigger could result in query_stack being repalloc'd, so we can't save
+ * its address across afterTriggerInvokeEvents calls.
*
* If we find no firable events, we don't have to increment
* firing_counter.
*/
for (;;)
{
- events = &afterTriggers->query_stack[afterTriggers->query_depth];
- if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
+ events = &afterTriggers.query_stack[afterTriggers.query_depth];
+ if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers.firing_counter++;
/* OK to delete the immediate events after processing them */
if (afterTriggerInvokeEvents(events, firing_id, estate, true))
break;
}
- /* Release query-local storage for events */
- afterTriggerFreeEventList(&afterTriggers->query_stack[afterTriggers->query_depth]);
+ /* Release query-local storage for events, including tuplestore if any */
+ fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
+ if (fdw_tuplestore)
+ {
+ tuplestore_end(fdw_tuplestore);
+ afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+ old_tuplestore = afterTriggers.old_tuplestores[afterTriggers.query_depth];
+ if (old_tuplestore)
+ {
+ tuplestore_end(old_tuplestore);
+ afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+ new_tuplestore = afterTriggers.new_tuplestores[afterTriggers.query_depth];
+ if (new_tuplestore)
+ {
+ tuplestore_end(new_tuplestore);
+ afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+ afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
- afterTriggers->query_depth--;
+ afterTriggers.query_depth--;
}
AfterTriggerEventList *events;
bool snap_pushed = false;
- /* Must be inside a transaction */
- Assert(afterTriggers != NULL);
-
- /* ... but not inside a query */
- Assert(afterTriggers->query_depth == -1);
+ /* Must not be inside a query */
+ Assert(afterTriggers.query_depth == -1);
/*
* If there are any triggers to fire, make sure we have set a snapshot for
* them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
* can't assume ActiveSnapshot is valid on entry.)
*/
- events = &afterTriggers->events;
+ events = &afterTriggers.events;
if (events->head != NULL)
{
PushActiveSnapshot(GetTransactionSnapshot());
}
/*
- * Run all the remaining triggers. Loop until they are all gone, in case
+ * Run all the remaining triggers. Loop until they are all gone, in case
* some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers.firing_counter++;
if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
break; /* all fired */
AfterTriggerEndXact(bool isCommit)
{
/*
- * Forget everything we know about AFTER triggers.
+ * Forget the pending-events list.
*
* Since all the info is in TopTransactionContext or children thereof, we
* don't really need to do anything to reclaim memory. However, the
* soon as possible --- especially if we are aborting because we ran out
* of memory for the list!
*/
- if (afterTriggers && afterTriggers->event_cxt)
- MemoryContextDelete(afterTriggers->event_cxt);
+ if (afterTriggers.event_cxt)
+ {
+ MemoryContextDelete(afterTriggers.event_cxt);
+ afterTriggers.event_cxt = NULL;
+ afterTriggers.events.head = NULL;
+ afterTriggers.events.tail = NULL;
+ afterTriggers.events.tailfree = NULL;
+ }
+
+ /*
+ * Forget any subtransaction state as well. Since this can't be very
+ * large, we let the eventual reset of TopTransactionContext free the
+ * memory instead of doing it here.
+ */
+ afterTriggers.state_stack = NULL;
+ afterTriggers.events_stack = NULL;
+ afterTriggers.depth_stack = NULL;
+ afterTriggers.firing_stack = NULL;
+ afterTriggers.maxtransdepth = 0;
+
- afterTriggers = NULL;
+ /*
+ * Forget the query stack and constraint-related state information. As
+ * with the subtransaction state information, we don't bother freeing the
+ * memory here.
+ */
+ afterTriggers.query_stack = NULL;
+ afterTriggers.fdw_tuplestores = NULL;
+ afterTriggers.old_tuplestores = NULL;
+ afterTriggers.new_tuplestores = NULL;
+ afterTriggers.maxquerydepth = 0;
+ afterTriggers.state = NULL;
+
+ /* No more afterTriggers manipulation until next transaction starts. */
+ afterTriggers.query_depth = -1;
}
/*
{
int my_level = GetCurrentTransactionNestLevel();
- /*
- * Ignore call if the transaction is in aborted state. (Probably
- * shouldn't happen?)
- */
- if (afterTriggers == NULL)
- return;
-
/*
* Allocate more space in the stacks if needed. (Note: because the
* minimum nest level of a subtransaction is 2, we waste the first couple
* entries of each array; not worth the notational effort to avoid it.)
*/
- while (my_level >= afterTriggers->maxtransdepth)
+ while (my_level >= afterTriggers.maxtransdepth)
{
- if (afterTriggers->maxtransdepth == 0)
+ if (afterTriggers.maxtransdepth == 0)
{
MemoryContext old_cxt;
old_cxt = MemoryContextSwitchTo(TopTransactionContext);
#define DEFTRIG_INITALLOC 8
- afterTriggers->state_stack = (SetConstraintState *)
+ afterTriggers.state_stack = (SetConstraintState *)
palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
- afterTriggers->events_stack = (AfterTriggerEventList *)
+ afterTriggers.events_stack = (AfterTriggerEventList *)
palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList));
- afterTriggers->depth_stack = (int *)
+ afterTriggers.depth_stack = (int *)
palloc(DEFTRIG_INITALLOC * sizeof(int));
- afterTriggers->firing_stack = (CommandId *)
+ afterTriggers.firing_stack = (CommandId *)
palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
- afterTriggers->maxtransdepth = DEFTRIG_INITALLOC;
+ afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
MemoryContextSwitchTo(old_cxt);
}
else
{
/* repalloc will keep the stacks in the same context */
- int new_alloc = afterTriggers->maxtransdepth * 2;
+ int new_alloc = afterTriggers.maxtransdepth * 2;
- afterTriggers->state_stack = (SetConstraintState *)
- repalloc(afterTriggers->state_stack,
+ afterTriggers.state_stack = (SetConstraintState *)
+ repalloc(afterTriggers.state_stack,
new_alloc * sizeof(SetConstraintState));
- afterTriggers->events_stack = (AfterTriggerEventList *)
- repalloc(afterTriggers->events_stack,
+ afterTriggers.events_stack = (AfterTriggerEventList *)
+ repalloc(afterTriggers.events_stack,
new_alloc * sizeof(AfterTriggerEventList));
- afterTriggers->depth_stack = (int *)
- repalloc(afterTriggers->depth_stack,
+ afterTriggers.depth_stack = (int *)
+ repalloc(afterTriggers.depth_stack,
new_alloc * sizeof(int));
- afterTriggers->firing_stack = (CommandId *)
- repalloc(afterTriggers->firing_stack,
+ afterTriggers.firing_stack = (CommandId *)
+ repalloc(afterTriggers.firing_stack,
new_alloc * sizeof(CommandId));
- afterTriggers->maxtransdepth = new_alloc;
+ afterTriggers.maxtransdepth = new_alloc;
}
}
* is not saved until/unless changed. Likewise, we don't make a
* per-subtransaction event context until needed.
*/
- afterTriggers->state_stack[my_level] = NULL;
- afterTriggers->events_stack[my_level] = afterTriggers->events;
- afterTriggers->depth_stack[my_level] = afterTriggers->query_depth;
- afterTriggers->firing_stack[my_level] = afterTriggers->firing_counter;
+ afterTriggers.state_stack[my_level] = NULL;
+ afterTriggers.events_stack[my_level] = afterTriggers.events;
+ afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
+ afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
}
/*
AfterTriggerEventChunk *chunk;
CommandId subxact_firing_id;
- /*
- * Ignore call if the transaction is in aborted state. (Probably
- * unneeded)
- */
- if (afterTriggers == NULL)
- return;
-
/*
* Pop the prior state if needed.
*/
if (isCommit)
{
- Assert(my_level < afterTriggers->maxtransdepth);
+ Assert(my_level < afterTriggers.maxtransdepth);
/* If we saved a prior state, we don't need it anymore */
- state = afterTriggers->state_stack[my_level];
+ state = afterTriggers.state_stack[my_level];
if (state != NULL)
pfree(state);
/* this avoids double pfree if error later: */
- afterTriggers->state_stack[my_level] = NULL;
- Assert(afterTriggers->query_depth ==
- afterTriggers->depth_stack[my_level]);
+ afterTriggers.state_stack[my_level] = NULL;
+ Assert(afterTriggers.query_depth ==
+ afterTriggers.depth_stack[my_level]);
}
else
{
* AfterTriggerBeginSubXact, in which case we mustn't risk touching
* stack levels that aren't there.
*/
- if (my_level >= afterTriggers->maxtransdepth)
+ if (my_level >= afterTriggers.maxtransdepth)
return;
/*
* Release any event lists from queries being aborted, and restore
- * query_depth to its pre-subxact value.
+ * query_depth to its pre-subxact value. This assumes that a
+		 * query_depth to its pre-subxact value.  This assumes that a
+		 * subtransaction will not add events to query levels started in an
+		 * earlier transaction state.
*/
- while (afterTriggers->query_depth > afterTriggers->depth_stack[my_level])
+ while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
{
- afterTriggerFreeEventList(&afterTriggers->query_stack[afterTriggers->query_depth]);
- afterTriggers->query_depth--;
+ if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
+ {
+ Tuplestorestate *ts;
+
+ ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
+ if (ts)
+ {
+ tuplestore_end(ts);
+ afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+ ts = afterTriggers.old_tuplestores[afterTriggers.query_depth];
+ if (ts)
+ {
+ tuplestore_end(ts);
+ afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+ ts = afterTriggers.new_tuplestores[afterTriggers.query_depth];
+ if (ts)
+ {
+ tuplestore_end(ts);
+ afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
+ }
+
+ afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
+ }
+
+ afterTriggers.query_depth--;
}
- Assert(afterTriggers->query_depth ==
- afterTriggers->depth_stack[my_level]);
+ Assert(afterTriggers.query_depth ==
+ afterTriggers.depth_stack[my_level]);
/*
* Restore the global deferred-event list to its former length,
* discarding any events queued by the subxact.
*/
- afterTriggerRestoreEventList(&afterTriggers->events,
- &afterTriggers->events_stack[my_level]);
+ afterTriggerRestoreEventList(&afterTriggers.events,
+ &afterTriggers.events_stack[my_level]);
/*
* Restore the trigger state. If the saved state is NULL, then this
* subxact didn't save it, so it doesn't need restoring.
*/
- state = afterTriggers->state_stack[my_level];
+ state = afterTriggers.state_stack[my_level];
if (state != NULL)
{
- pfree(afterTriggers->state);
- afterTriggers->state = state;
+ pfree(afterTriggers.state);
+ afterTriggers.state = state;
}
/* this avoids double pfree if error later: */
- afterTriggers->state_stack[my_level] = NULL;
+ afterTriggers.state_stack[my_level] = NULL;
/*
* Scan for any remaining deferred events that were marked DONE or IN
* (This essentially assumes that the current subxact includes all
* subxacts started after it.)
*/
- subxact_firing_id = afterTriggers->firing_stack[my_level];
- for_each_event_chunk(event, chunk, afterTriggers->events)
+ subxact_firing_id = afterTriggers.firing_stack[my_level];
+ for_each_event_chunk(event, chunk, afterTriggers.events)
{
AfterTriggerShared evtshared = GetTriggerSharedData(event);
}
}
+/* ----------
+ * AfterTriggerEnlargeQueryState()
+ *
+ *	Prepare the necessary state so that we can record AFTER trigger events
+ *	queued by a query.  It is allowed to have nested queries within a
+ *	(sub)transaction, so we need to have separate state for each query
+ *	nesting level.
+ * ----------
+ */
+static void
+AfterTriggerEnlargeQueryState(void)
+{
+	int			init_depth = afterTriggers.maxquerydepth;
+
+	Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
+
+	if (afterTriggers.maxquerydepth == 0)
+	{
+		/* First allocation: at least 8 entries, or enough for current depth */
+		int			new_alloc = Max(afterTriggers.query_depth + 1, 8);
+
+		afterTriggers.query_stack = (AfterTriggerEventList *)
+			MemoryContextAlloc(TopTransactionContext,
+							   new_alloc * sizeof(AfterTriggerEventList));
+		afterTriggers.fdw_tuplestores = (Tuplestorestate **)
+			MemoryContextAllocZero(TopTransactionContext,
+								   new_alloc * sizeof(Tuplestorestate *));
+		afterTriggers.old_tuplestores = (Tuplestorestate **)
+			MemoryContextAllocZero(TopTransactionContext,
+								   new_alloc * sizeof(Tuplestorestate *));
+		afterTriggers.new_tuplestores = (Tuplestorestate **)
+			MemoryContextAllocZero(TopTransactionContext,
+								   new_alloc * sizeof(Tuplestorestate *));
+		afterTriggers.maxquerydepth = new_alloc;
+	}
+	else
+	{
+		/* repalloc will keep the stack in the same context */
+		int			old_alloc = afterTriggers.maxquerydepth;
+		int			new_alloc = Max(afterTriggers.query_depth + 1,
+									old_alloc * 2);
+
+		afterTriggers.query_stack = (AfterTriggerEventList *)
+			repalloc(afterTriggers.query_stack,
+					 new_alloc * sizeof(AfterTriggerEventList));
+		afterTriggers.fdw_tuplestores = (Tuplestorestate **)
+			repalloc(afterTriggers.fdw_tuplestores,
+					 new_alloc * sizeof(Tuplestorestate *));
+		afterTriggers.old_tuplestores = (Tuplestorestate **)
+			repalloc(afterTriggers.old_tuplestores,
+					 new_alloc * sizeof(Tuplestorestate *));
+		afterTriggers.new_tuplestores = (Tuplestorestate **)
+			repalloc(afterTriggers.new_tuplestores,
+					 new_alloc * sizeof(Tuplestorestate *));
+		/* Clear newly-allocated slots for subsequent lazy initialization. */
+		memset(afterTriggers.fdw_tuplestores + old_alloc,
+			   0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
+		memset(afterTriggers.old_tuplestores + old_alloc,
+			   0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
+		memset(afterTriggers.new_tuplestores + old_alloc,
+			   0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
+		afterTriggers.maxquerydepth = new_alloc;
+	}
+
+	/* Initialize new query lists to empty */
+	while (init_depth < afterTriggers.maxquerydepth)
+	{
+		AfterTriggerEventList *events;
+
+		events = &afterTriggers.query_stack[init_depth];
+		events->head = NULL;
+		events->tail = NULL;
+		events->tailfree = NULL;
+
+		++init_depth;
+	}
+}
+
+
/*
* Create an empty SetConstraintState with room for numalloc trigstates
*/
*/
state = (SetConstraintState)
MemoryContextAllocZero(TopTransactionContext,
- sizeof(SetConstraintStateData) +
- (numalloc - 1) *sizeof(SetConstraintTriggerData));
+ offsetof(SetConstraintStateData, trigstates) +
+ numalloc * sizeof(SetConstraintTriggerData));
state->numalloc = numalloc;
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
newalloc = Max(newalloc, 8); /* in case original has size 0 */
state = (SetConstraintState)
repalloc(state,
- sizeof(SetConstraintStateData) +
- (newalloc - 1) *sizeof(SetConstraintTriggerData));
+ offsetof(SetConstraintStateData, trigstates) +
+ newalloc * sizeof(SetConstraintTriggerData));
state->numalloc = newalloc;
Assert(state->numstates < state->numalloc);
}
{
int my_level = GetCurrentTransactionNestLevel();
- /*
- * Ignore call if we aren't in a transaction. (Shouldn't happen?)
- */
- if (afterTriggers == NULL)
- return;
+ /* If we haven't already done so, initialize our state. */
+ if (afterTriggers.state == NULL)
+ afterTriggers.state = SetConstraintStateCreate(8);
/*
* If in a subtransaction, and we didn't save the current state already,
* save it so it can be restored if the subtransaction aborts.
*/
if (my_level > 1 &&
- afterTriggers->state_stack[my_level] == NULL)
+ afterTriggers.state_stack[my_level] == NULL)
{
- afterTriggers->state_stack[my_level] =
- SetConstraintStateCopy(afterTriggers->state);
+ afterTriggers.state_stack[my_level] =
+ SetConstraintStateCopy(afterTriggers.state);
}
/*
/*
* Forget any previous SET CONSTRAINTS commands in this transaction.
*/
- afterTriggers->state->numstates = 0;
+ afterTriggers.state->numstates = 0;
/*
* Set the per-transaction ALL state to known.
*/
- afterTriggers->state->all_isset = true;
- afterTriggers->state->all_isdeferred = stmt->deferred;
+ afterTriggers.state->all_isset = true;
+ afterTriggers.state->all_isdeferred = stmt->deferred;
}
else
{
* First, identify all the named constraints and make a list of their
* OIDs. Since, unlike the SQL spec, we allow multiple constraints of
* the same name within a schema, the specifications are not
- * necessarily unique. Our strategy is to target all matching
+ * necessarily unique. Our strategy is to target all matching
* constraints within the first search-path schema that has any
* matches, but disregard matches in schemas beyond the first match.
* (This is a bit odd but it's the historical behavior.)
/*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
if (constraint->schemaname)
{
- Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+ Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
+ false);
namespacelist = list_make1_oid(namespaceId);
}
ObjectIdGetDatum(namespaceId));
conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
- true, SnapshotNow, 2, skey);
+ true, NULL, 2, skey);
while (HeapTupleIsValid(tup = systable_getnext(conscan)))
{
ObjectIdGetDatum(conoid));
tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
- SnapshotNow, 1, &skey);
+ NULL, 1, &skey);
while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
{
/*
* Silently skip triggers that are marked as non-deferrable in
- * pg_trigger. This is not an error condition, since a
+ * pg_trigger. This is not an error condition, since a
* deferrable RI constraint may have some non-deferrable
* actions.
*/
foreach(lc, tgoidlist)
{
Oid tgoid = lfirst_oid(lc);
- SetConstraintState state = afterTriggers->state;
+ SetConstraintState state = afterTriggers.state;
bool found = false;
int i;
}
if (!found)
{
- afterTriggers->state =
+ afterTriggers.state =
SetConstraintStateAddItem(state, tgoid, stmt->deferred);
}
}
*/
if (!stmt->deferred)
{
- AfterTriggerEventList *events = &afterTriggers->events;
+ AfterTriggerEventList *events = &afterTriggers.events;
bool snapshot_set = false;
while (afterTriggerMarkEvents(events, NULL, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers.firing_counter++;
/*
* Make sure a snapshot has been established in case trigger
- * functions need one. Note that we avoid setting a snapshot if
+ * functions need one. Note that we avoid setting a snapshot if
* we don't find at least one trigger that has to be fired now.
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
AfterTriggerEventChunk *chunk;
int depth;
- /* No-op if we aren't in a transaction. (Shouldn't happen?) */
- if (afterTriggers == NULL)
- return false;
-
/* Scan queued events */
- for_each_event_chunk(event, chunk, afterTriggers->events)
+ for_each_event_chunk(event, chunk, afterTriggers.events)
{
AfterTriggerShared evtshared = GetTriggerSharedData(event);
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
+ * We can ignore completed events. (Even if a DONE flag is rolled
* back by subxact abort, it's OK because the effects of the TRUNCATE
* or whatever must get rolled back too.)
*/
* if TRUNCATE/etc is executed by a function or trigger within an updating
* query on the same relation, which is pretty perverse, but let's check.
*/
- for (depth = 0; depth <= afterTriggers->query_depth; depth++)
+ for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
{
- for_each_event_chunk(event, chunk, afterTriggers->query_stack[depth])
+ for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
{
AfterTriggerShared evtshared = GetTriggerSharedData(event);
* be fired for an event.
*
* NOTE: this is called whenever there are any triggers associated with
- * the event (even if they are disabled). This function decides which
- * triggers actually need to be queued.
+ * the event (even if they are disabled). This function decides which
+ * triggers actually need to be queued. It is also called after each row,
+ * even if there are no triggers for that event, if there are any AFTER
+ * STATEMENT triggers for the statement which use transition tables, so that
+ * the transition tuplestores can be built.
+ *
+ * Transition tuplestores are built now, rather than when events are pulled
+ * off of the queue because AFTER ROW triggers are allowed to select from the
+ * transition tables for the statement.
* ----------
*/
static void
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
AfterTriggerEventData new_event;
AfterTriggerSharedData new_shared;
+ char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
int tgtype_event;
int tgtype_level;
int i;
+ Tuplestorestate *fdw_tuplestore = NULL;
/*
- * Check state. We use normal tests not Asserts because it is possible to
+ * Check state. We use a normal test not Assert because it is possible to
* reach here in the wrong state given misconfigured RI triggers, in
* particular deferring a cascade action trigger.
*/
- if (afterTriggers == NULL)
- elog(ERROR, "AfterTriggerSaveEvent() called outside of transaction");
- if (afterTriggers->query_depth < 0)
+ if (afterTriggers.query_depth < 0)
elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
+ /* Be sure we have enough space to record events at this query depth. */
+ if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
+ AfterTriggerEnlargeQueryState();
+
+ /*
+ * If the relation has AFTER ... FOR EACH ROW triggers, capture rows into
+ * transition tuplestores for this depth.
+ */
+ if (row_trigger)
+ {
+ if ((event == TRIGGER_EVENT_DELETE &&
+ trigdesc->trig_delete_old_table) ||
+ (event == TRIGGER_EVENT_UPDATE &&
+ trigdesc->trig_update_old_table))
+ {
+ Tuplestorestate *old_tuplestore;
+
+ Assert(oldtup != NULL);
+ old_tuplestore =
+ GetTriggerTransitionTuplestore
+ (afterTriggers.old_tuplestores);
+ tuplestore_puttuple(old_tuplestore, oldtup);
+ }
+ if ((event == TRIGGER_EVENT_INSERT &&
+ trigdesc->trig_insert_new_table) ||
+ (event == TRIGGER_EVENT_UPDATE &&
+ trigdesc->trig_update_new_table))
+ {
+ Tuplestorestate *new_tuplestore;
+
+ Assert(newtup != NULL);
+ new_tuplestore =
+ GetTriggerTransitionTuplestore
+ (afterTriggers.new_tuplestores);
+ tuplestore_puttuple(new_tuplestore, newtup);
+ }
+
+ /* If transition tables are the only reason we're here, return. */
+ if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
+ (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
+ (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
+ return;
+ }
+
/*
* Validate the event code and collect the associated tuple CTIDs.
*
* validation is important to make sure we don't walk off the edge of our
* arrays.
*/
- new_event.ate_flags = 0;
switch (event)
{
case TRIGGER_EVENT_INSERT:
Assert(newtup != NULL);
ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
- new_event.ate_flags |= AFTER_TRIGGER_2CTIDS;
}
else
{
break;
}
+ if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
+ new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
+ AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
+ /* else, we'll initialize ate_flags for each trigger */
+
tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
for (i = 0; i < trigdesc->numtriggers; i++)
modifiedCols, oldtup, newtup))
continue;
+ if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
+ {
+ if (fdw_tuplestore == NULL)
+ {
+ fdw_tuplestore =
+ GetTriggerTransitionTuplestore
+ (afterTriggers.fdw_tuplestores);
+ new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
+ }
+ else
+ /* subsequent event for the same tuple */
+ new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
+ }
+
/*
- * If this is an UPDATE of a PK table or FK table that does not change
- * the PK or FK respectively, we can skip queuing the event: there is
- * no need to fire the trigger.
+ * If the trigger is a foreign key enforcement trigger, there are
+ * certain cases where we can skip queueing the event because we can
+ * tell by inspection that the FK constraint will still pass.
*/
if (TRIGGER_FIRED_BY_UPDATE(event))
{
switch (RI_FKey_trigger_type(trigger->tgfoid))
{
case RI_TRIGGER_PK:
- /* Update on PK table */
- if (RI_FKey_keyequal_upd_pk(trigger, rel, oldtup, newtup))
+ /* Update on trigger's PK table */
+ if (!RI_FKey_pk_upd_check_required(trigger, rel,
+ oldtup, newtup))
{
- /* key unchanged, so skip queuing this event */
+ /* skip queuing this event */
continue;
}
break;
case RI_TRIGGER_FK:
-
- /*
- * Update on FK table
- *
- * There is one exception when updating FK tables: if the
- * updated row was inserted by our own transaction and the
- * FK is deferred, we still need to fire the trigger. This
- * is because our UPDATE will invalidate the INSERT so the
- * end-of-transaction INSERT RI trigger will not do
- * anything, so we have to do the check for the UPDATE
- * anyway.
- */
- if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(oldtup->t_data)) &&
- RI_FKey_keyequal_upd_fk(trigger, rel, oldtup, newtup))
+ /* Update on trigger's FK table */
+ if (!RI_FKey_fk_upd_check_required(trigger, rel,
+ oldtup, newtup))
{
+ /* skip queuing this event */
continue;
}
break;
new_shared.ats_relid = RelationGetRelid(rel);
new_shared.ats_firing_id = 0;
- afterTriggerAddEvent(&afterTriggers->query_stack[afterTriggers->query_depth],
+ afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
&new_event, &new_shared);
}
+
+ /*
+ * Finally, spool any foreign tuple(s). The tuplestore squashes them to
+ * minimal tuples, so this loses any system columns. The executor lost
+ * those columns before us, for an unrelated reason, so this is fine.
+ */
+ if (fdw_tuplestore)
+ {
+ if (oldtup != NULL)
+ tuplestore_puttuple(fdw_tuplestore, oldtup);
+ if (newtup != NULL)
+ tuplestore_puttuple(fdw_tuplestore, newtup);
+ }
+}
+
+/*
+ * SQL-callable function: report the current trigger nesting depth
+ * (the value of MyTriggerDepth, "how many levels deep into trigger
+ * execution are we").
+ */
+Datum
+pg_trigger_depth(PG_FUNCTION_ARGS)
+{
+	PG_RETURN_INT32(MyTriggerDepth);
+}