1 /*-------------------------------------------------------------------------
4 * routines for inserting index tuples and enforcing unique and
5 * exclusion constraints.
7 * ExecInsertIndexTuples() is the main entry point. It's called after
8 * inserting a tuple to the heap, and it inserts corresponding index tuples
9 * into all indexes. At the same time, it enforces any unique and
10 * exclusion constraints:
15 * Enforcing a unique constraint is straightforward. When the index AM
16 * inserts the tuple to the index, it also checks that there are no
17 * conflicting tuples in the index already. It does so atomically, so that
18 * even if two backends try to insert the same key concurrently, only one
19 * of them will succeed. All the logic to ensure atomicity, and to wait
20 * for in-progress transactions to finish, is handled by the index AM.
22 * If a unique constraint is deferred, we request the index AM to not
23 * throw an error if a conflict is found. Instead, we make note that there
24 * was a conflict and return the list of indexes with conflicts to the
25 * caller. The caller must re-check them later, by calling index_insert()
26 * with the UNIQUE_CHECK_EXISTING option.
28 * Exclusion Constraints
29 * ---------------------
31 * Exclusion constraints are different from unique indexes in that when the
32 * tuple is inserted to the index, the index AM does not check for
33 * duplicate keys at the same time. After the insertion, we perform a
34 * separate scan on the index to check for conflicting tuples, and if one
35 * is found, we throw an error and the transaction is aborted. If the
36 * conflicting tuple's inserter or deleter is in-progress, we wait for it to complete first.
39 * There is a chance of deadlock, if two backends insert a tuple at the
40 * same time, and then perform the scan to check for conflicts. They will
41 * find each other's tuple, and both try to wait for each other. The
42 * deadlock detector will detect that, and abort one of the transactions.
43 * That's fairly harmless, as one of them was bound to abort with a
44 * "duplicate key error" anyway, although you get a different error message.
47 * If an exclusion constraint is deferred, we still perform the conflict
48 * checking scan immediately after inserting the index tuple. But instead
49 * of throwing an error if a conflict is found, we return that information
50 * to the caller. The caller must re-check them later by calling
51 * check_exclusion_constraint().
53 * Speculative insertion
54 * ---------------------
56 * Speculative insertion is a two-phase mechanism used to implement
57 * INSERT ... ON CONFLICT DO UPDATE/NOTHING. The tuple is first inserted
58 * into the heap and the indexes are updated as usual, but if a constraint is
59 * violated, we can still back out the insertion without aborting the whole
60 * transaction. In an INSERT ... ON CONFLICT statement, if a conflict is
61 * detected, the inserted tuple is backed out and the ON CONFLICT action is executed instead.
64 * Insertion to a unique index works as usual: the index AM checks for
65 * duplicate keys atomically with the insertion. But instead of throwing
66 * an error on a conflict, the speculatively inserted heap tuple is backed
69 * Exclusion constraints are slightly more complicated. As mentioned
70 * earlier, there is a risk of deadlock when two backends insert the same
71 * key concurrently. That was not a problem for regular insertions, when
72 * one of the transactions has to be aborted anyway, but with a speculative
73 * insertion we cannot let a deadlock happen, because we only want to back
74 * out the speculatively inserted tuple on conflict, not abort the whole transaction.
77 * When a backend detects that the speculative insertion conflicts with
78 * another in-progress tuple, it has two options:
80 * 1. back out the speculatively inserted tuple, then wait for the other
81 * transaction, and retry. Or,
82 * 2. wait for the other transaction, with the speculatively inserted tuple
85 * If two backends insert at the same time, and both try to wait for each
86 * other, they will deadlock. So option 2 is not acceptable. Option 1
87 * avoids the deadlock, but it is prone to a livelock instead. Both
88 * transactions will wake up immediately as the other transaction backs
89 * out. Then they both retry, and conflict with each other again, lather,
92 * To avoid the livelock, one of the backends must back out first, and then
93 * wait, while the other one waits without backing out. It doesn't matter
94 * which one backs out, so we employ an arbitrary rule that the transaction
95 * with the higher XID backs out.
98 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
99 * Portions Copyright (c) 1994, Regents of the University of California
103 * src/backend/executor/execIndexing.c
105 *-------------------------------------------------------------------------
107 #include "postgres.h"
109 #include "access/genam.h"
110 #include "access/relscan.h"
111 #include "access/xact.h"
112 #include "catalog/index.h"
113 #include "executor/executor.h"
114 #include "nodes/nodeFuncs.h"
115 #include "storage/lmgr.h"
116 #include "utils/snapmgr.h"
118 /* waitMode argument to check_exclusion_or_unique_constraint() */
123 CEOUC_LIVELOCK_PREVENTING_WAIT
126 static bool check_exclusion_or_unique_constraint(Relation heap, Relation index,
127 IndexInfo *indexInfo,
129 Datum *values, bool *isnull,
130 EState *estate, bool newIndex,
131 CEOUC_WAIT_MODE waitMode,
133 ItemPointer conflictTid);
135 static bool index_recheck_constraint(Relation index, Oid *constr_procs,
136 Datum *existing_values, bool *existing_isnull,
139 /* ----------------------------------------------------------------
142 * Find the indices associated with a result relation, open them,
143 * and save information about them in the result ResultRelInfo.
145 * At entry, caller has already opened and locked
146 * resultRelInfo->ri_RelationDesc.
147 * ----------------------------------------------------------------
150 ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
152 Relation resultRelation = resultRelInfo->ri_RelationDesc;
157 RelationPtr relationDescs;
158 IndexInfo **indexInfoArray;
160 resultRelInfo->ri_NumIndices = 0;
162 /* fast path if no indexes */
163 if (!RelationGetForm(resultRelation)->relhasindex)
167 * Get cached list of index OIDs
169 indexoidlist = RelationGetIndexList(resultRelation);
170 len = list_length(indexoidlist);
175 * allocate space for result arrays
177 relationDescs = (RelationPtr) palloc(len * sizeof(Relation));
178 indexInfoArray = (IndexInfo **) palloc(len * sizeof(IndexInfo *));
180 resultRelInfo->ri_NumIndices = len;
181 resultRelInfo->ri_IndexRelationDescs = relationDescs;
182 resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
185 * For each index, open the index relation and save pg_index info. We
186 * acquire RowExclusiveLock, signifying we will update the index.
188 * Note: we do this even if the index is not indisready; it's not worth
189 * the trouble to optimize for the case where it isn't.
192 foreach(l, indexoidlist)
194 Oid indexOid = lfirst_oid(l);
198 indexDesc = index_open(indexOid, RowExclusiveLock);
200 /* extract index key information from the index's pg_index info */
201 ii = BuildIndexInfo(indexDesc);
204 * If the indexes are to be used for speculative insertion, add extra
205 * information required by unique index entries.
207 if (speculative && ii->ii_Unique)
208 BuildSpeculativeIndexInfo(indexDesc, ii);
210 relationDescs[i] = indexDesc;
211 indexInfoArray[i] = ii;
215 list_free(indexoidlist);
218 /* ----------------------------------------------------------------
221 * Close the index relations stored in resultRelInfo
222 * ----------------------------------------------------------------
225 ExecCloseIndices(ResultRelInfo *resultRelInfo)
229 RelationPtr indexDescs;
231 numIndices = resultRelInfo->ri_NumIndices;
232 indexDescs = resultRelInfo->ri_IndexRelationDescs;
234 for (i = 0; i < numIndices; i++)
236 if (indexDescs[i] == NULL)
237 continue; /* shouldn't happen? */
239 /* Drop lock acquired by ExecOpenIndices */
240 index_close(indexDescs[i], RowExclusiveLock);
244 * XXX should free indexInfo array here too? Currently we assume that
245 * such stuff will be cleaned up automatically in FreeExecutorState.
249 /* ----------------------------------------------------------------
250 * ExecInsertIndexTuples
252 * This routine takes care of inserting index tuples
253 * into all the relations indexing the result relation
254 * when a heap tuple is inserted into the result relation.
256 * Unique and exclusion constraints are enforced at the same
257 * time. This returns a list of index OIDs for any unique or
258 * exclusion constraints that are deferred and that had
259 * potential (unconfirmed) conflicts. (if noDupErr == true,
260 * the same is done for non-deferred constraints, but report
261 * if conflict was speculative or deferred conflict to caller)
263 * If 'arbiterIndexes' is nonempty, noDupErr applies only to
264 * those indexes. NIL means noDupErr applies to all indexes.
266 * CAUTION: this must not be called for a HOT update.
267 * We can't defend against that here for lack of info.
268 * Should we change the API to make it safer?
269 * ----------------------------------------------------------------
272 ExecInsertIndexTuples(TupleTableSlot *slot,
277 List *arbiterIndexes)
280 ResultRelInfo *resultRelInfo;
283 RelationPtr relationDescs;
284 Relation heapRelation;
285 IndexInfo **indexInfoArray;
286 ExprContext *econtext;
287 Datum values[INDEX_MAX_KEYS];
288 bool isnull[INDEX_MAX_KEYS];
291 * Get information from the result relation info structure.
293 resultRelInfo = estate->es_result_relation_info;
294 numIndices = resultRelInfo->ri_NumIndices;
295 relationDescs = resultRelInfo->ri_IndexRelationDescs;
296 indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
297 heapRelation = resultRelInfo->ri_RelationDesc;
300 * We will use the EState's per-tuple context for evaluating predicates
301 * and index expressions (creating it if it's not already there).
303 econtext = GetPerTupleExprContext(estate);
305 /* Arrange for econtext's scan tuple to be the tuple under test */
306 econtext->ecxt_scantuple = slot;
309 * for each index, form and insert the index tuple
311 for (i = 0; i < numIndices; i++)
313 Relation indexRelation = relationDescs[i];
314 IndexInfo *indexInfo;
316 IndexUniqueCheck checkUnique;
317 bool satisfiesConstraint;
319 if (indexRelation == NULL)
322 indexInfo = indexInfoArray[i];
324 /* If the index is marked as read-only, ignore it */
325 if (!indexInfo->ii_ReadyForInserts)
328 /* Check for partial index */
329 if (indexInfo->ii_Predicate != NIL)
331 ExprState *predicate;
334 * If predicate state not set up yet, create it (in the estate's
337 predicate = indexInfo->ii_PredicateState;
338 if (predicate == NULL)
340 predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
341 indexInfo->ii_PredicateState = predicate;
344 /* Skip this index-update if the predicate isn't satisfied */
345 if (!ExecQual(predicate, econtext))
350 * FormIndexDatum fills in its values and isnull parameters with the
351 * appropriate values for the column(s) of the index.
353 FormIndexDatum(indexInfo,
359 /* Check whether to apply noDupErr to this index */
360 applyNoDupErr = noDupErr &&
361 (arbiterIndexes == NIL ||
362 list_member_oid(arbiterIndexes,
363 indexRelation->rd_index->indexrelid));
366 * The index AM does the actual insertion, plus uniqueness checking.
368 * For an immediate-mode unique index, we just tell the index AM to
369 * throw error if not unique.
371 * For a deferrable unique index, we tell the index AM to just detect
372 * possible non-uniqueness, and we add the index OID to the result
373 * list if further checking is needed.
375 * For a speculative insertion (used by INSERT ... ON CONFLICT), do
376 * the same as for a deferrable unique index.
378 if (!indexRelation->rd_index->indisunique)
379 checkUnique = UNIQUE_CHECK_NO;
380 else if (applyNoDupErr)
381 checkUnique = UNIQUE_CHECK_PARTIAL;
382 else if (indexRelation->rd_index->indimmediate)
383 checkUnique = UNIQUE_CHECK_YES;
385 checkUnique = UNIQUE_CHECK_PARTIAL;
387 satisfiesConstraint =
388 index_insert(indexRelation, /* index relation */
389 values, /* array of index Datums */
390 isnull, /* null flags */
391 tupleid, /* tid of heap tuple */
392 heapRelation, /* heap relation */
393 checkUnique, /* type of uniqueness check to do */
394 indexInfo); /* index AM may need this */
397 * If the index has an associated exclusion constraint, check that.
398 * This is simpler than the process for uniqueness checks since we
399 * always insert first and then check. If the constraint is deferred,
400 * we check now anyway, but don't throw error on violation or wait for
401 * a conclusive outcome from a concurrent insertion; instead we'll
402 * queue a recheck event. Similarly, noDupErr callers (speculative
403 * inserters) will recheck later, and wait for a conclusive outcome
406 * An index for an exclusion constraint can't also be UNIQUE (not an
407 * essential property, we just don't allow it in the grammar), so no
408 * need to preserve the prior state of satisfiesConstraint.
410 if (indexInfo->ii_ExclusionOps != NULL)
413 CEOUC_WAIT_MODE waitMode;
418 waitMode = CEOUC_LIVELOCK_PREVENTING_WAIT;
420 else if (!indexRelation->rd_index->indimmediate)
423 waitMode = CEOUC_NOWAIT;
428 waitMode = CEOUC_WAIT;
431 satisfiesConstraint =
432 check_exclusion_or_unique_constraint(heapRelation,
433 indexRelation, indexInfo,
434 tupleid, values, isnull,
436 waitMode, violationOK, NULL);
439 if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
440 indexInfo->ii_ExclusionOps != NULL) &&
441 !satisfiesConstraint)
444 * The tuple potentially violates the uniqueness or exclusion
445 * constraint, so make a note of the index so that we can re-check
446 * it later. Speculative inserters are told if there was a
447 * speculative conflict, since that always requires a restart.
449 result = lappend_oid(result, RelationGetRelid(indexRelation));
450 if (indexRelation->rd_index->indimmediate && specConflict)
451 *specConflict = true;
458 /* ----------------------------------------------------------------
459 * ExecCheckIndexConstraints
461 * This routine checks if a tuple violates any unique or
462 * exclusion constraints. Returns true if there is no conflict.
463 * Otherwise returns false, and the TID of the conflicting
464 * tuple is returned in *conflictTid.
466 * If 'arbiterIndexes' is given, only those indexes are checked.
467 * NIL means all indexes.
469 * Note that this doesn't lock the values in any way, so it's
470 * possible that a conflicting tuple is inserted immediately
471 * after this returns. But this can be used for a pre-check
473 * ----------------------------------------------------------------
476 ExecCheckIndexConstraints(TupleTableSlot *slot,
477 EState *estate, ItemPointer conflictTid,
478 List *arbiterIndexes)
480 ResultRelInfo *resultRelInfo;
483 RelationPtr relationDescs;
484 Relation heapRelation;
485 IndexInfo **indexInfoArray;
486 ExprContext *econtext;
487 Datum values[INDEX_MAX_KEYS];
488 bool isnull[INDEX_MAX_KEYS];
489 ItemPointerData invalidItemPtr;
490 bool checkedIndex = false;
492 ItemPointerSetInvalid(conflictTid);
493 ItemPointerSetInvalid(&invalidItemPtr);
496 * Get information from the result relation info structure.
498 resultRelInfo = estate->es_result_relation_info;
499 numIndices = resultRelInfo->ri_NumIndices;
500 relationDescs = resultRelInfo->ri_IndexRelationDescs;
501 indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
502 heapRelation = resultRelInfo->ri_RelationDesc;
505 * We will use the EState's per-tuple context for evaluating predicates
506 * and index expressions (creating it if it's not already there).
508 econtext = GetPerTupleExprContext(estate);
510 /* Arrange for econtext's scan tuple to be the tuple under test */
511 econtext->ecxt_scantuple = slot;
514 * For each index, form index tuple and check if it satisfies the
517 for (i = 0; i < numIndices; i++)
519 Relation indexRelation = relationDescs[i];
520 IndexInfo *indexInfo;
521 bool satisfiesConstraint;
523 if (indexRelation == NULL)
526 indexInfo = indexInfoArray[i];
528 if (!indexInfo->ii_Unique && !indexInfo->ii_ExclusionOps)
531 /* If the index is marked as read-only, ignore it */
532 if (!indexInfo->ii_ReadyForInserts)
535 /* When specific arbiter indexes requested, only examine them */
536 if (arbiterIndexes != NIL &&
537 !list_member_oid(arbiterIndexes,
538 indexRelation->rd_index->indexrelid))
541 if (!indexRelation->rd_index->indimmediate)
543 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
544 errmsg("ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters"),
545 errtableconstraint(heapRelation,
546 RelationGetRelationName(indexRelation))));
550 /* Check for partial index */
551 if (indexInfo->ii_Predicate != NIL)
553 ExprState *predicate;
556 * If predicate state not set up yet, create it (in the estate's
559 predicate = indexInfo->ii_PredicateState;
560 if (predicate == NULL)
562 predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
563 indexInfo->ii_PredicateState = predicate;
566 /* Skip this index-update if the predicate isn't satisfied */
567 if (!ExecQual(predicate, econtext))
572 * FormIndexDatum fills in its values and isnull parameters with the
573 * appropriate values for the column(s) of the index.
575 FormIndexDatum(indexInfo,
581 satisfiesConstraint =
582 check_exclusion_or_unique_constraint(heapRelation, indexRelation,
583 indexInfo, &invalidItemPtr,
584 values, isnull, estate, false,
587 if (!satisfiesConstraint)
591 if (arbiterIndexes != NIL && !checkedIndex)
592 elog(ERROR, "unexpected failure to find arbiter index");
598 * Check for violation of an exclusion or unique constraint
600 * heap: the table containing the new tuple
601 * index: the index supporting the constraint
602 * indexInfo: info about the index, including the exclusion properties
603 * tupleid: heap TID of the new tuple we have just inserted (invalid if we
604 * haven't inserted a new tuple yet)
605 * values, isnull: the *index* column values computed for the new tuple
606 * estate: an EState we can do evaluation in
607 * newIndex: if true, we are trying to build a new index (this affects
608 * only the wording of error messages)
609 * waitMode: whether to wait for concurrent inserters/deleters
610 * violationOK: if true, don't throw error for violation
611 * conflictTid: if not-NULL, the TID of the conflicting tuple is returned here
613 * Returns true if OK, false if actual or potential violation
615 * 'waitMode' determines what happens if a conflict is detected with a tuple
616 * that was inserted or deleted by a transaction that's still running.
617 * CEOUC_WAIT means that we wait for the transaction to commit, before
618 * throwing an error or returning. CEOUC_NOWAIT means that we report the
619 * violation immediately; so the violation is only potential, and the caller
620 * must recheck sometime later. This behavior is convenient for deferred
621 * exclusion checks; we need not bother queuing a deferred event if there is
622 * definitely no conflict at insertion time.
624 * CEOUC_LIVELOCK_PREVENTING_WAIT is like CEOUC_NOWAIT, but we will sometimes
625 * wait anyway, to prevent livelocking if two transactions try inserting at
626 * the same time. This is used with speculative insertions, for INSERT ON
627 * CONFLICT statements. (See notes in file header)
629 * If violationOK is true, we just report the potential or actual violation to
630 * the caller by returning 'false'. Otherwise we throw a descriptive error
631 * message here. When violationOK is false, a false result is impossible.
633 * Note: The indexam is normally responsible for checking unique constraints,
634 * so this normally only needs to be used for exclusion constraints. But this
635 * function is also called when doing a "pre-check" for conflicts on a unique
636 * constraint, when doing speculative insertion. Caller may use the returned
637 * conflict TID to take further steps.
640 check_exclusion_or_unique_constraint(Relation heap, Relation index,
641 IndexInfo *indexInfo,
643 Datum *values, bool *isnull,
644 EState *estate, bool newIndex,
645 CEOUC_WAIT_MODE waitMode,
647 ItemPointer conflictTid)
650 uint16 *constr_strats;
651 Oid *index_collations = index->rd_indcollation;
652 int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index);
653 IndexScanDesc index_scan;
655 ScanKeyData scankeys[INDEX_MAX_KEYS];
656 SnapshotData DirtySnapshot;
660 ExprContext *econtext;
661 TupleTableSlot *existing_slot;
662 TupleTableSlot *save_scantuple;
664 if (indexInfo->ii_ExclusionOps)
666 constr_procs = indexInfo->ii_ExclusionProcs;
667 constr_strats = indexInfo->ii_ExclusionStrats;
671 constr_procs = indexInfo->ii_UniqueProcs;
672 constr_strats = indexInfo->ii_UniqueStrats;
676 * If any of the input values are NULL, the constraint check is assumed to
677 * pass (i.e., we assume the operators are strict).
679 for (i = 0; i < indnkeyatts; i++)
686 * Search the tuples that are in the index for any violations, including
687 * tuples that aren't visible yet.
689 InitDirtySnapshot(DirtySnapshot);
691 for (i = 0; i < indnkeyatts; i++)
693 ScanKeyEntryInitialize(&scankeys[i],
704 * Need a TupleTableSlot to put existing tuples in.
706 * To use FormIndexDatum, we have to make the econtext's scantuple point
707 * to this slot. Be sure to save and restore caller's value for
710 existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap),
713 econtext = GetPerTupleExprContext(estate);
714 save_scantuple = econtext->ecxt_scantuple;
715 econtext->ecxt_scantuple = existing_slot;
718 * May have to restart scan from this point if a potential conflict is
724 index_scan = index_beginscan(heap, index, &DirtySnapshot, indnkeyatts, 0);
725 index_rescan(index_scan, scankeys, indnkeyatts, NULL, 0);
727 while ((tup = index_getnext(index_scan,
728 ForwardScanDirection)) != NULL)
731 ItemPointerData ctid_wait;
732 XLTW_Oper reason_wait;
733 Datum existing_values[INDEX_MAX_KEYS];
734 bool existing_isnull[INDEX_MAX_KEYS];
736 char *error_existing;
739 * Ignore the entry for the tuple we're trying to check.
741 if (ItemPointerIsValid(tupleid) &&
742 ItemPointerEquals(tupleid, &tup->t_self))
744 if (found_self) /* should not happen */
745 elog(ERROR, "found self tuple multiple times in index \"%s\"",
746 RelationGetRelationName(index));
752 * Extract the index column values and isnull flags from the existing
755 ExecStoreHeapTuple(tup, existing_slot, false);
756 FormIndexDatum(indexInfo, existing_slot, estate,
757 existing_values, existing_isnull);
759 /* If lossy indexscan, must recheck the condition */
760 if (index_scan->xs_recheck)
762 if (!index_recheck_constraint(index,
767 continue; /* tuple doesn't actually match, so no
772 * At this point we have either a conflict or a potential conflict.
774 * If an in-progress transaction is affecting the visibility of this
775 * tuple, we need to wait for it to complete and then recheck (unless
776 * the caller requested not to). For simplicity we do rechecking by
777 * just restarting the whole scan --- this case probably doesn't
778 * happen often enough to be worth trying harder, and anyway we don't
779 * want to hold any index internal locks while waiting.
781 xwait = TransactionIdIsValid(DirtySnapshot.xmin) ?
782 DirtySnapshot.xmin : DirtySnapshot.xmax;
784 if (TransactionIdIsValid(xwait) &&
785 (waitMode == CEOUC_WAIT ||
786 (waitMode == CEOUC_LIVELOCK_PREVENTING_WAIT &&
787 DirtySnapshot.speculativeToken &&
788 TransactionIdPrecedes(GetCurrentTransactionId(), xwait))))
790 ctid_wait = tup->t_data->t_ctid;
791 reason_wait = indexInfo->ii_ExclusionOps ?
792 XLTW_RecheckExclusionConstr : XLTW_InsertIndex;
793 index_endscan(index_scan);
794 if (DirtySnapshot.speculativeToken)
795 SpeculativeInsertionWait(DirtySnapshot.xmin,
796 DirtySnapshot.speculativeToken);
798 XactLockTableWait(xwait, heap, &ctid_wait, reason_wait);
803 * We have a definite conflict (or a potential one, but the caller
804 * didn't want to wait). Return it to caller, or report it.
810 *conflictTid = tup->t_self;
814 error_new = BuildIndexValueDescription(index, values, isnull);
815 error_existing = BuildIndexValueDescription(index, existing_values,
819 (errcode(ERRCODE_EXCLUSION_VIOLATION),
820 errmsg("could not create exclusion constraint \"%s\"",
821 RelationGetRelationName(index)),
822 error_new && error_existing ?
823 errdetail("Key %s conflicts with key %s.",
824 error_new, error_existing) :
825 errdetail("Key conflicts exist."),
826 errtableconstraint(heap,
827 RelationGetRelationName(index))));
830 (errcode(ERRCODE_EXCLUSION_VIOLATION),
831 errmsg("conflicting key value violates exclusion constraint \"%s\"",
832 RelationGetRelationName(index)),
833 error_new && error_existing ?
834 errdetail("Key %s conflicts with existing key %s.",
835 error_new, error_existing) :
836 errdetail("Key conflicts with existing key."),
837 errtableconstraint(heap,
838 RelationGetRelationName(index))));
841 index_endscan(index_scan);
844 * Ordinarily, at this point the search should have found the originally
845 * inserted tuple (if any), unless we exited the loop early because of
846 * conflict. However, it is possible to define exclusion constraints for
847 * which that wouldn't be true --- for instance, if the operator is <>. So
848 * we no longer complain if found_self is still false.
851 econtext->ecxt_scantuple = save_scantuple;
853 ExecDropSingleTupleTableSlot(existing_slot);
859 * Check for violation of an exclusion constraint
861 * This is a dumbed down version of check_exclusion_or_unique_constraint
862 * for external callers. They don't need all the special modes.
865 check_exclusion_constraint(Relation heap, Relation index,
866 IndexInfo *indexInfo,
868 Datum *values, bool *isnull,
869 EState *estate, bool newIndex)
871 (void) check_exclusion_or_unique_constraint(heap, index, indexInfo, tupleid,
874 CEOUC_WAIT, false, NULL);
878 * Check existing tuple's index values to see if it really matches the
879 * exclusion condition against the new_values. Returns true if conflict.
882 index_recheck_constraint(Relation index, Oid *constr_procs,
883 Datum *existing_values, bool *existing_isnull,
886 int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index);
889 for (i = 0; i < indnkeyatts; i++)
891 /* Assume the exclusion operators are strict */
892 if (existing_isnull[i])
895 if (!DatumGetBool(OidFunctionCall2Coll(constr_procs[i],
896 index->rd_indcollation[i],