Fix inconsistencies in the code
src/backend/storage/lmgr/predicate.c
index 870cf4277cf991aca1c588b70330d398843a8faf..565c3ac4397592de9c73be864c0bb866f0215e41 100644
@@ -10,9 +10,9 @@
  *
  *     Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
  *     Serializable isolation for snapshot databases.
- *     In SIGMOD 08: Proceedings of the 2008 ACM SIGMOD
+ *     In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
  *     international conference on Management of data,
- *     pages 729738, New York, NY, USA. ACM.
+ *     pages 729-738, New York, NY, USA. ACM.
  *     http://doi.acm.org/10.1145/1376616.1376690
  *
  * and further elaborated in Cahill's doctoral thesis:
  * examining the MVCC data.)
  *
  * (1) Besides tuples actually read, they must cover ranges of tuples
- *             which would have been read based on the predicate.      This will
+ *             which would have been read based on the predicate.  This will
  *             require modelling the predicates through locks against database
  *             objects such as pages, index ranges, or entire tables.
  *
- * (2) They must be kept in RAM for quick access.      Because of this, it
+ * (2) They must be kept in RAM for quick access.  Because of this, it
  *             isn't possible to always maintain tuple-level granularity -- when
  *             the space allocated to store these approaches exhaustion, a
  *             request for a lock may need to scan for situations where a single
@@ -49,7 +49,7 @@
  *
  * (4) While they are associated with a transaction, they must survive
  *             a successful COMMIT of that transaction, and remain until all
- *             overlapping transactions complete.      This even means that they
+ *             overlapping transactions complete.  This even means that they
  *             must survive termination of the transaction's process.  If a
  *             top level transaction is rolled back, however, it is immediately
  *             flagged so that it can be ignored, and its SIREAD locks can be
@@ -62,7 +62,7 @@
  *             an existing SIREAD lock for the same transaction, the SIREAD lock
  *             can be deleted.
  *
- * (7) A write from a serializable transaction must ensure that a xact
+ * (7) A write from a serializable transaction must ensure that an xact
  *             record exists for the transaction, with the same lifespan (until
 *             all concurrent transactions complete or the transaction is rolled
  *             back) so that rw-dependencies to that transaction can be
  *                     may yet matter because they overlap still-active transactions.
  *
  *     SerializablePredicateLockListLock
- *             - Protects the linked list of locks held by a transaction.      Note
+ *             - Protects the linked list of locks held by a transaction.  Note
  *                     that the locks themselves are also covered by the partition
  *                     locks of their respective lock targets; this lock only affects
  *                     the linked list connecting the locks related to a transaction.
  *             - All transactions share this single lock (with no partitioning).
  *             - There is never a need for a process other than the one running
  *                     an active transaction to walk the list of locks held by that
- *                     transaction.
+ *                     transaction, except parallel query workers sharing the leader's
+ *                     transaction.  In the parallel case, an extra per-sxact lock is
+ *                     taken; see below.
  *             - It is relatively infrequent that another process needs to
  *                     modify the list for a transaction, but it does happen for such
  *                     things as index page splits for pages with predicate locks and
- *                     freeing of predicate locked pages by a vacuum process.  When
+ *                     freeing of predicate locked pages by a vacuum process.  When
  *                     removing a lock in such cases, the lock itself contains the
  *                     pointers needed to remove it from the list.  When adding a
  *                     lock in such cases, the lock can be added using the anchor in
- *                     the transaction structure.      Neither requires walking the list.
+ *                     the transaction structure.  Neither requires walking the list.
  *             - Cleaning up the list for a terminated transaction is sometimes
  *                     not done on a retail basis, in which case no lock is required.
  *             - Due to the above, a process accessing its active transaction's
  *                     than its own active transaction must acquire an exclusive
  *                     lock.
  *
- *     FirstPredicateLockMgrLock based partition locks
+ *     SERIALIZABLEXACT's member 'predicateLockListLock'
+ *             - Protects the linked list of locks held by a transaction.  Only
+ *                     needed for parallel mode, where multiple backends share the
+ *                     same SERIALIZABLEXACT object.  Not needed if
+ *                     SerializablePredicateLockListLock is held exclusively.
+ *
+ *     PredicateLockHashPartitionLock(hashcode)
  *             - The same lock protects a target, all locks on that target, and
- *                     the linked list of locks on the target..
- *             - When more than one is needed, acquire in ascending order.
+ *                     the linked list of locks on the target.
+ *             - When more than one is needed, acquire in ascending address order.
+ *             - When all are needed (rare), acquire in ascending index order with
+ *                     PredicateLockHashPartitionLockByIndex(index).
  *
  *     SerializableXactHashLock
  *             - Protects both PredXact and SerializableXidHash.
  *
  *
- * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  *             PageIsPredicateLocked(Relation relation, BlockNumber blkno)
  *
  * predicate lock maintenance
- *             RegisterSerializableTransaction(Snapshot snapshot)
+ *             GetSerializableTransactionSnapshot(Snapshot snapshot)
+ *             SetSerializableTransactionSnapshot(Snapshot snapshot,
+ *                                                                                VirtualTransactionId *sourcevxid)
  *             RegisterPredicateLockingXid(void)
- *             PredicateLockRelation(Relation relation)
- *             PredicateLockPage(Relation relation, BlockNumber blkno)
- *             PredicateLockTuple(Relation relation, HeapTuple tuple)
+ *             PredicateLockRelation(Relation relation, Snapshot snapshot)
+ *             PredicateLockPage(Relation relation, BlockNumber blkno,
+ *                                             Snapshot snapshot)
+ *             PredicateLockTuple(Relation relation, HeapTuple tuple,
+ *                                             Snapshot snapshot)
  *             PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
- *                                                        BlockNumber newblkno);
+ *                                                        BlockNumber newblkno)
  *             PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
- *                                                              BlockNumber newblkno);
- *             PredicateLockTupleRowVersionLink(const Relation relation,
- *                                                                              const HeapTuple oldTuple,
- *                                                                              const HeapTuple newTuple)
- *             ReleasePredicateLocks(bool isCommit)
+ *                                                              BlockNumber newblkno)
+ *             TransferPredicateLocksToHeapRelation(Relation relation)
+ *             ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
  *
  * conflict detection (may also trigger rollback)
  *             CheckForSerializableConflictOut(bool visible, Relation relation,
- *                                                                             HeapTupleData *tup, Buffer buffer)
+ *                                                                             HeapTupleData *tup, Buffer buffer,
+ *                                                                             Snapshot snapshot)
  *             CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
  *                                                                        Buffer buffer)
+ *             CheckTableForSerializableConflictIn(Relation relation)
  *
  * final rollback checking
  *             PreCommit_CheckForSerializationFailure(void)
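
As an orientation aid (annotation only, not part of the patch): the typical call sequence for one serializable transaction, using only the entry points listed above.

	/*
	 * GetSerializableTransactionSnapshot(snapshot);    -- when the first snapshot is taken
	 *   ... reads call PredicateLockRelation/Page/Tuple() and
	 *       CheckForSerializableConflictOut() ...
	 *   ... writes call CheckForSerializableConflictIn() ...
	 * PreCommit_CheckForSerializationFailure();         -- just before commit
	 * ReleasePredicateLocks(isCommit, false);           -- at transaction end
	 */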
 
 #include "postgres.h"
 
+#include "access/heapam.h"
+#include "access/htup_details.h"
+#include "access/parallel.h"
 #include "access/slru.h"
 #include "access/subtrans.h"
 #include "access/transam.h"
 #include "access/twophase.h"
 #include "access/twophase_rmgr.h"
 #include "access/xact.h"
+#include "access/xlog.h"
 #include "miscadmin.h"
+#include "pgstat.h"
 #include "storage/bufmgr.h"
 #include "storage/predicate.h"
 #include "storage/predicate_internals.h"
+#include "storage/proc.h"
 #include "storage/procarray.h"
 #include "utils/rel.h"
 #include "utils/snapmgr.h"
-#include "utils/tqual.h"
 
 /* Uncomment the next line to test the graceful degradation code. */
 /* #define TEST_OLDSERXID */
 #define PredicateLockHashPartition(hashcode) \
        ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
 #define PredicateLockHashPartitionLock(hashcode) \
-       ((LWLockId) (FirstPredicateLockMgrLock + PredicateLockHashPartition(hashcode)))
+       (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
+               PredicateLockHashPartition(hashcode)].lock)
+#define PredicateLockHashPartitionLockByIndex(i) \
+       (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
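
As an orientation aid (not part of the patch): a minimal sketch of how these partition-lock macros combine with the tag-hash macro defined further down. The function name is invented for the illustration.

	static void
	example_with_partition_lock(const PREDICATELOCKTARGETTAG *targettag)
	{
		uint32		targettaghash = PredicateLockTargetTagHashCode(targettag);
		LWLock	   *partitionLock = PredicateLockHashPartitionLock(targettaghash);

		LWLockAcquire(partitionLock, LW_SHARED);
		/* ... look up the target and walk its lock list here ... */
		LWLockRelease(partitionLock);
	}

When every partition is needed, the by-index form is taken in ascending index order, as GetPredicateLockStatusData() does later in this file.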
 
 #define NPREDICATELOCKTARGETENTS() \
        mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
 
-#define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
+/*
+ * Note that a sxact is marked "prepared" once it has passed
+ * PreCommit_CheckForSerializationFailure, even if it isn't using
+ * 2PC. This is the point at which it can no longer be aborted.
+ *
+ * The PREPARED flag remains set after commit, so SxactIsCommitted
+ * implies SxactIsPrepared.
+ */
 #define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
+#define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
 #define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
+#define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
 #define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
 #define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
 #define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
+/*
+ * The following macro actually means that the specified transaction has a
+ * conflict out *to a transaction which committed ahead of it*.  It's hard
+ * to get that into a name of a reasonable length.
+ */
 #define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
 #define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
 #define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
 #define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
-#define SxactIsMarkedForDeath(sxact) (((sxact)->flags & SXACT_FLAG_MARKED_FOR_DEATH) != 0)
-
-/*
- * When a public interface method is called for a split on an index relation,
- * this is the test to see if we should do a quick return.
- */
-#define SkipSplitTracking(relation) \
-       (((relation)->rd_id < FirstBootstrapObjectId) \
-       || RelationUsesLocalBuffers(relation))
-
-/*
- * When a public interface method is called for serializing a relation within
- * the current transaction, this is the test to see if we should do a quick
- * return.
- */
-#define SkipSerialization(relation) \
-       ((!IsolationIsSerializable()) \
-       || ((MySerializableXact == InvalidSerializableXact)) \
-       || ReleasePredicateLocksIfROSafe() \
-       || SkipSplitTracking(relation))
-
+#define SxactIsPartiallyReleased(sxact) (((sxact)->flags & SXACT_FLAG_PARTIALLY_RELEASED) != 0)
 
 /*
  * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
  * the lock partition number from the hashcode.
  */
 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
-       (tag_hash((predicatelocktargettag), sizeof(PREDICATELOCKTARGETTAG)))
+       get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
 
 /*
  * Given a predicate lock tag, and the hash for its target,
@@ -310,7 +326,11 @@ static SlruCtlData OldSerXidSlruCtlData;
 #define OLDSERXID_PAGESIZE                     BLCKSZ
 #define OLDSERXID_ENTRYSIZE                    sizeof(SerCommitSeqNo)
 #define OLDSERXID_ENTRIESPERPAGE       (OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)
-#define OLDSERXID_MAX_PAGE                     (SLRU_PAGES_PER_SEGMENT * 0x10000 - 1)
+
+/*
+ * Set maximum pages based on the number needed to track all transactions.
+ */
+#define OLDSERXID_MAX_PAGE                     (MaxTransactionId / OLDSERXID_ENTRIESPERPAGE)
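
A quick worked example of the new bound, assuming the default BLCKSZ of 8192 and an 8-byte SerCommitSeqNo (assumptions of the illustration, not requirements of the patch):

	/*
	 * OLDSERXID_ENTRIESPERPAGE = 8192 / 8          = 1024
	 * OLDSERXID_MAX_PAGE       = 0xFFFFFFFF / 1024 = 4194303
	 *
	 * so pg_serial can now cover every possible XID: roughly 4 million
	 * pages, or about 32GB, in the worst case.
	 */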
 
 #define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)
 
@@ -318,16 +338,14 @@ static SlruCtlData OldSerXidSlruCtlData;
        (OldSerXidSlruCtl->shared->page_buffer[slotno] + \
        ((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))
 
-#define OldSerXidPage(xid)     ((((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE) % (OLDSERXID_MAX_PAGE + 1))
-#define OldSerXidSegment(page) ((page) / SLRU_PAGES_PER_SEGMENT)
+#define OldSerXidPage(xid)     (((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE)
 
 typedef struct OldSerXidControlData
 {
        int                     headPage;               /* newest initialized page */
        TransactionId headXid;          /* newest valid Xid in the SLRU */
        TransactionId tailXid;          /* oldest xmin we might be interested in */
-       bool            warningIssued;
-} OldSerXidControlData;
+}                      OldSerXidControlData;
 
 typedef struct OldSerXidControlData *OldSerXidControl;
 
@@ -342,12 +360,19 @@ static OldSerXidControl oldSerXidControl;
 static SERIALIZABLEXACT *OldCommittedSxact;
 
 
-/* This configuration variable is used to set the predicate lock table size */
-int                    max_predicate_locks_per_xact;           /* set by guc.c */
+/*
+ * These configuration variables are used to set the predicate lock table size
+ * and to control promotion of predicate locks to coarser granularity in an
+ * attempt to degrade performance gracefully (mostly as false positive
+ * serialization failures) in the face of memory pressure.
+ */
+int                    max_predicate_locks_per_xact;   /* set by guc.c */
+int                    max_predicate_locks_per_relation;       /* set by guc.c */
+int                    max_predicate_locks_per_page;   /* set by guc.c */
 
 /*
  * This provides a list of objects in order to track transactions
- * participating in predicate locking. Entries in the list are fixed size,
+ * participating in predicate locking.  Entries in the list are fixed size,
  * and reside in shared memory.  The memory address of an entry must remain
  * fixed during its lifetime.  The list will be protected from concurrent
  * update externally; no provision is made in this code to manage that.  The
@@ -372,11 +397,13 @@ static HTAB *PredicateLockHash;
 static SHM_QUEUE *FinishedSerializableTransactions;
 
 /*
- * Tag for a reserved entry in PredicateLockTargetHash; used to ensure
- * there's an element available for scratch space if we need it,
- * e.g. in PredicateLockPageSplit. This is an otherwise-invalid tag.
+ * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
+ * this entry, you can ensure that there's enough scratch space available for
+ * inserting one entry in the hash table. This is an otherwise-invalid tag.
  */
-static const PREDICATELOCKTARGETTAG ReservedTargetTag = {0, 0, 0, 0, 0};
+static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
+static uint32 ScratchTargetTagHash;
+static LWLock *ScratchPartitionLock;
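
A hedged sketch of the intended usage pattern for the scratch entry (the real callers are the lock-transfer routines later in this file, such as TransferPredicateLocksToNewTarget):

	/* Free the reserved slot so the next insertion cannot fail for space. */
	RemoveScratchTarget(false);

	/* ... hash_search_with_hash_value(PredicateLockTargetHash, ...,
	 *     HASH_ENTER_NULL, ...) is now guaranteed a free element ... */

	/* Put the dummy entry back once the critical insertion is done. */
	RestoreScratchTarget(false);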
 
 /*
  * The local hash table used to determine when to combine multiple fine-
@@ -386,10 +413,20 @@ static HTAB *LocalPredicateLockHash = NULL;
 
 /*
  * Keep a pointer to the currently-running serializable transaction (if any)
- * for quick reference.
- * TODO SSI: Remove volatile qualifier and the then-unnecessary casts?
+ * for quick reference. Also, remember if we have written anything that could
+ * cause a rw-conflict.
  */
-static volatile SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
+static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
+static bool MyXactDidWrite = false;
+
+/*
+ * The SXACT_FLAG_RO_UNSAFE optimization might lead us to release
+ * MySerializableXact early.  If that happens in a parallel query, the leader
+ * needs to defer the destruction of the SERIALIZABLEXACT until end of
+ * transaction, because the workers still have a reference to it.  In that
+ * case, the leader stores it here.
+ */
+static SERIALIZABLEXACT *SavedSerializableXact = InvalidSerializableXact;
 
 /* local functions */
 
@@ -413,41 +450,133 @@ static void OldSerXidSetActiveSerXmin(TransactionId xid);
 static uint32 predicatelock_hash(const void *key, Size keysize);
 static void SummarizeOldestCommittedSxact(void);
 static Snapshot GetSafeSnapshot(Snapshot snapshot);
-static Snapshot RegisterSerializableTransactionInt(Snapshot snapshot);
+static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
+                                                                                                         VirtualTransactionId *sourcevxid,
+                                                                                                         int sourcepid);
 static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
 static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
-                                                 PREDICATELOCKTARGETTAG *parent);
+                                                                         PREDICATELOCKTARGETTAG *parent);
 static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
+static void RemoveScratchTarget(bool lockheld);
+static void RestoreScratchTarget(bool lockheld);
 static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
-                                                  uint32 targettaghash);
+                                                                          uint32 targettaghash);
 static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
-static int     PredicateLockPromotionThreshold(const PREDICATELOCKTARGETTAG *tag);
+static int     MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag);
 static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag);
 static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
 static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
-                                       uint32 targettaghash,
-                                       SERIALIZABLEXACT *sxact);
+                                                               uint32 targettaghash,
+                                                               SERIALIZABLEXACT *sxact);
 static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
-static bool TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
-                                                                 const PREDICATELOCKTARGETTAG newtargettag,
-                                                                 bool removeOld);
+static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
+                                                                                         PREDICATELOCKTARGETTAG newtargettag,
+                                                                                         bool removeOld);
 static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
+static void DropAllPredicateLocksFromTable(Relation relation,
+                                                                                  bool transfer);
 static void SetNewSxactGlobalXmin(void);
-static bool ReleasePredicateLocksIfROSafe(void);
 static void ClearOldPredicateLocks(void);
 static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
-                                                  bool summarize);
+                                                                          bool summarize);
 static bool XidIsConcurrent(TransactionId xid);
 static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
 static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
 static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
-                                                                               SERIALIZABLEXACT *writer);
+                                                                                                       SERIALIZABLEXACT *writer);
+static void CreateLocalPredicateLockHash(void);
+static void ReleasePredicateLocksLocal(void);
+
+
+/*------------------------------------------------------------------------*/
+
+/*
+ * Does this relation participate in predicate locking? Temporary and system
+ * relations are exempt, as are materialized views.
+ */
+static inline bool
+PredicateLockingNeededForRelation(Relation relation)
+{
+       return !(relation->rd_id < FirstBootstrapObjectId ||
+                        RelationUsesLocalBuffers(relation) ||
+                        relation->rd_rel->relkind == RELKIND_MATVIEW);
+}
+
+/*
+ * When a public interface method is called for a read, this is the test to
+ * see if we should do a quick return.
+ *
+ * Note: this function has side-effects! If this transaction has been flagged
+ * as RO-safe since the last call, we release all predicate locks and reset
+ * MySerializableXact. That makes subsequent calls return quickly.
+ *
+ * This is marked as 'inline' to eliminate the function call overhead in the
+ * common case that serialization is not needed.
+ */
+static inline bool
+SerializationNeededForRead(Relation relation, Snapshot snapshot)
+{
+       /* Nothing to do if this is not a serializable transaction */
+       if (MySerializableXact == InvalidSerializableXact)
+               return false;
+
+       /*
+        * Don't acquire locks or check for conflicts when scanning with a special
+        * snapshot.
+        * This excludes things like CLUSTER and REINDEX. They use the wholesale
+        * functions TransferPredicateLocksToHeapRelation() and
+        * CheckTableForSerializableConflictIn() to participate in serialization,
+        * but the scans involved don't need serialization.
+        */
+       if (!IsMVCCSnapshot(snapshot))
+               return false;
+
+       /*
+        * Check if we have just become "RO-safe". If we have, immediately release
+        * all locks as they're not needed anymore. This also resets
+        * MySerializableXact, so that subsequent calls to this function can exit
+        * quickly.
+        *
+        * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
+        * commit without having conflicts out to an earlier snapshot, thus
+        * ensuring that no conflicts are possible for this transaction.
+        */
+       if (SxactIsROSafe(MySerializableXact))
+       {
+               ReleasePredicateLocks(false, true);
+               return false;
+       }
+
+       /* Check if the relation doesn't participate in predicate locking */
+       if (!PredicateLockingNeededForRelation(relation))
+               return false;
+
+       return true;                            /* no excuse to skip predicate locking */
+}
+
+/*
+ * Like SerializationNeededForRead(), but called on writes.
+ * The logic is the same, but there is no snapshot and we can't be RO-safe.
+ */
+static inline bool
+SerializationNeededForWrite(Relation relation)
+{
+       /* Nothing to do if this is not a serializable transaction */
+       if (MySerializableXact == InvalidSerializableXact)
+               return false;
+
+       /* Check if the relation doesn't participate in predicate locking */
+       if (!PredicateLockingNeededForRelation(relation))
+               return false;
+
+       return true;                            /* no excuse to skip predicate locking */
+}
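
Sketch only: how the public entry points are expected to use these two tests for their quick-return paths (the actual callers are functions such as PredicateLockTuple and CheckForSerializableConflictIn; the function below is invented for the illustration).

	static void
	example_read_entry_point(Relation relation, HeapTuple tuple, Snapshot snapshot)
	{
		if (!SerializationNeededForRead(relation, snapshot))
			return;				/* cheap common-case exit */

		/* ... build a PREDICATELOCKTARGETTAG for the tuple and call
		 *     PredicateLockAcquire() on it ... */
	}

Write-side callers do the same with SerializationNeededForWrite(relation) before performing conflict-in checks.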
+
 
 /*------------------------------------------------------------------------*/
 
 /*
  * These functions are a simple implementation of a list for this specific
- * type of struct.     If there is ever a generalized shared memory list, we
+ * type of struct.  If there is ever a generalized shared memory list, we
  * should probably switch to that.
  */
 static SERIALIZABLEXACT *
@@ -477,7 +606,7 @@ ReleasePredXact(SERIALIZABLEXACT *sxact)
        ptle = (PredXactListElement)
                (((char *) sxact)
                 - offsetof(PredXactListElementData, sxact)
-                +offsetof(PredXactListElementData, link));
+                + offsetof(PredXactListElementData, link));
        SHMQueueDelete(&ptle->link);
        SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
 }
@@ -507,7 +636,7 @@ NextPredXact(SERIALIZABLEXACT *sxact)
        ptle = (PredXactListElement)
                (((char *) sxact)
                 - offsetof(PredXactListElementData, sxact)
-                +offsetof(PredXactListElementData, link));
+                + offsetof(PredXactListElementData, link));
        ptle = (PredXactListElement)
                SHMQueueNext(&PredXact->activeList,
                                         &ptle->link,
@@ -531,8 +660,8 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
        Assert(reader != writer);
 
        /* Check the ends of the purported conflict first. */
-       if (SxactIsRolledBack(reader)
-               || SxactIsRolledBack(writer)
+       if (SxactIsDoomed(reader)
+               || SxactIsDoomed(writer)
                || SHMQueueEmpty(&reader->outConflicts)
                || SHMQueueEmpty(&writer->inConflicts))
                return false;
@@ -571,7 +700,7 @@ SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
        if (!conflict)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                errmsg("not enough elements in RWConflictPool to record a rw-conflict"),
+                                errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
                                 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
 
        SHMQueueDelete(&conflict->outLink);
@@ -599,7 +728,7 @@ SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
        if (!conflict)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                errmsg("not enough elements in RWConflictPool to record a potential rw-conflict"),
+                                errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
                                 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
 
        SHMQueueDelete(&conflict->outLink);
@@ -667,7 +796,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
        int                     diff;
 
        /*
-        * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2.  Both inputs should
+        * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2.  Both inputs should
         * be in the range 0..OLDSERXID_MAX_PAGE.
         */
        Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
@@ -676,7 +805,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
        diff = p - q;
        if (diff >= ((OLDSERXID_MAX_PAGE + 1) / 2))
                diff -= OLDSERXID_MAX_PAGE + 1;
-       else if (diff < -((OLDSERXID_MAX_PAGE + 1) / 2))
+       else if (diff < -((int) (OLDSERXID_MAX_PAGE + 1) / 2))
                diff += OLDSERXID_MAX_PAGE + 1;
        return diff < 0;
 }
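
To see the modular comparison at the wraparound boundary (illustrative values only), take p = OLDSERXID_MAX_PAGE and q = 0, i.e. the moment just after the page counter wraps:

	/*
	 * diff = p - q = OLDSERXID_MAX_PAGE, which is >= (OLDSERXID_MAX_PAGE + 1) / 2,
	 * so OLDSERXID_MAX_PAGE + 1 is subtracted and diff becomes -1; the last
	 * physical page is therefore treated as logically preceding page 0, as
	 * intended once the counter has wrapped.
	 */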
@@ -693,8 +822,9 @@ OldSerXidInit(void)
         * Set up SLRU management of the pg_serial data.
         */
        OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
-       SimpleLruInit(OldSerXidSlruCtl, "OldSerXid SLRU Ctl", NUM_OLDSERXID_BUFFERS, 0,
-                                 OldSerXidLock, "pg_serial");
+       SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
+                                 NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial",
+                                 LWTRANCHE_OLDSERXID_BUFFERS);
        /* Override default assumption that writes should be fsync'd */
        OldSerXidSlruCtl->do_fsync = false;
 
@@ -704,6 +834,7 @@ OldSerXidInit(void)
        oldSerXidControl = (OldSerXidControl)
                ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);
 
+       Assert(found == IsUnderPostmaster);
        if (!found)
        {
                /*
@@ -712,14 +843,13 @@ OldSerXidInit(void)
                oldSerXidControl->headPage = -1;
                oldSerXidControl->headXid = InvalidTransactionId;
                oldSerXidControl->tailXid = InvalidTransactionId;
-               oldSerXidControl->warningIssued = false;
        }
 }
 
 /*
  * Record a committed read write serializable xid and the minimum
  * commitSeqNo of any transactions to which this xid had a rw-conflict out.
- * A zero seqNo means that there were no conflicts out from xid.
+ * An invalid seqNo means that there were no conflicts out from xid.
  */
 static void
 OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
@@ -728,7 +858,6 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
        int                     targetPage;
        int                     slotno;
        int                     firstZeroPage;
-       int                     xidSpread;
        bool            isNewPage;
 
        Assert(TransactionIdIsValid(xid));
@@ -746,10 +875,10 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
        Assert(TransactionIdIsValid(tailXid));
 
        /*
-        * If the SLRU is currently unused, zero out the whole active region
-        * from tailXid to headXid before taking it into use. Otherwise zero
-        * out only any new pages that enter the tailXid-headXid range as we
-        * advance headXid.
+        * If the SLRU is currently unused, zero out the whole active region from
+        * tailXid to headXid before taking it into use. Otherwise zero out only
+        * any new pages that enter the tailXid-headXid range as we advance
+        * headXid.
         */
        if (oldSerXidControl->headPage < 0)
        {
@@ -769,20 +898,6 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
        if (isNewPage)
                oldSerXidControl->headPage = targetPage;
 
-       xidSpread = (((uint32) xid) - ((uint32) tailXid));
-       if (oldSerXidControl->warningIssued)
-       {
-               if (xidSpread < 800000000)
-                       oldSerXidControl->warningIssued = false;
-       }
-       else if (xidSpread >= 1000000000)
-       {
-               oldSerXidControl->warningIssued = true;
-               ereport(WARNING,
-                               (errmsg("memory for serializable conflict tracking is nearly exhausted"),
-                                errhint("There may be an idle transaction or a forgotten prepared transaction causing this.")));
-       }
-
        if (isNewPage)
        {
                /* Initialize intervening pages. */
@@ -797,12 +912,13 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
                slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);
 
        OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
+       OldSerXidSlruCtl->shared->page_dirty[slotno] = true;
 
        LWLockRelease(OldSerXidLock);
 }
 
 /*
- * Get the minimum commitSeqNo for any conflict out for the given xid. For
+ * Get the minimum commitSeqNo for any conflict out for the given xid.  For
  * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
  * will be returned.
  */
@@ -855,8 +971,8 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
        /*
         * When no sxacts are active, nothing overlaps, set the xid values to
         * invalid to show that there are no valid entries.  Don't clear headPage,
-        * though.  A new xmin might still land on that page, and we don't want
-        * to repeatedly zero out the same page.
+        * though.  A new xmin might still land on that page, and we don't want to
+        * repeatedly zero out the same page.
         */
        if (!TransactionIdIsValid(xid))
        {
@@ -901,7 +1017,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
 void
 CheckPointPredicate(void)
 {
-       int tailPage;
+       int                     tailPage;
 
        LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
 
@@ -920,9 +1036,14 @@ CheckPointPredicate(void)
        else
        {
                /*
-                * The SLRU is no longer needed. Truncate everything but the last
-                * page. We don't dare to touch the last page in case the SLRU is
-                * taken back to use, and the new tail falls on the same page.
+                * The SLRU is no longer needed. Truncate to head before we set head
+                * invalid.
+                *
+                * XXX: It's possible that the SLRU is not needed again until XID
+                * wrap-around has happened, so that the segment containing headPage
+                * that we leave behind will appear to be new again. In that case it
+                * won't be removed until XID horizon advances enough to make it
+                * current again.
                 */
                tailPage = oldSerXidControl->headPage;
                oldSerXidControl->headPage = -1;
@@ -930,16 +1051,20 @@ CheckPointPredicate(void)
 
        LWLockRelease(OldSerXidLock);
 
+       /* Truncate away pages that are no longer required */
+       SimpleLruTruncate(OldSerXidSlruCtl, tailPage);
+
        /*
         * Flush dirty SLRU pages to disk
         *
         * This is not actually necessary from a correctness point of view. We do
         * it merely as a debugging aid.
+        *
+        * We're doing this after the truncation to avoid writing pages right
+        * before deleting the file in which they sit, which would be completely
+        * pointless.
         */
        SimpleLruFlush(OldSerXidSlruCtl, true);
-
-       /* Truncate away pages that are no longer required */
-       SimpleLruTruncate(OldSerXidSlruCtl, tailPage);
 }
 
 /*------------------------------------------------------------------------*/
@@ -958,18 +1083,19 @@ void
 InitPredicateLocks(void)
 {
        HASHCTL         info;
-       int                     hash_flags;
-       long            init_table_size,
-                               max_table_size;
+       long            max_table_size;
        Size            requestSize;
        bool            found;
 
+#ifndef EXEC_BACKEND
+       Assert(!IsUnderPostmaster);
+#endif
+
        /*
-        * Compute init/max size to request for predicate lock target hashtable.
-        * Note these calculations must agree with PredicateLockShmemSize!
+        * Compute size of predicate lock target hashtable. Note these
+        * calculations must agree with PredicateLockShmemSize!
         */
        max_table_size = NPREDICATELOCKTARGETENTS();
-       init_table_size = max_table_size / 2;
 
        /*
         * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
@@ -978,29 +1104,31 @@ InitPredicateLocks(void)
        MemSet(&info, 0, sizeof(info));
        info.keysize = sizeof(PREDICATELOCKTARGETTAG);
        info.entrysize = sizeof(PREDICATELOCKTARGET);
-       info.hash = tag_hash;
        info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-       hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
 
        PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
-                                                                                       init_table_size,
+                                                                                       max_table_size,
                                                                                        max_table_size,
                                                                                        &info,
-                                                                                       hash_flags);
-
-       /* Assume an average of 2 xacts per target */
-       max_table_size *= 2;
-       init_table_size *= 2;
+                                                                                       HASH_ELEM | HASH_BLOBS |
+                                                                                       HASH_PARTITION | HASH_FIXED_SIZE);
 
        /*
-        * Reserve an entry in the hash table; we use it to make sure there's
+        * Reserve a dummy entry in the hash table; we use it to make sure there's
         * always one entry available when we need to split or combine a page,
         * because running out of space there could mean aborting a
         * non-serializable transaction.
         */
-       hash_search(PredicateLockTargetHash, &ReservedTargetTag,
-                               HASH_ENTER, NULL);
+       if (!IsUnderPostmaster)
+       {
+               (void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
+                                                  HASH_ENTER, &found);
+               Assert(!found);
+       }
 
+       /* Pre-calculate the hash and partition lock of the scratch entry */
+       ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
+       ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
 
        /*
         * Allocate hash table for PREDICATELOCK structs.  This stores per
@@ -1011,18 +1139,20 @@ InitPredicateLocks(void)
        info.entrysize = sizeof(PREDICATELOCK);
        info.hash = predicatelock_hash;
        info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-       hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
+
+       /* Assume an average of 2 xacts per target */
+       max_table_size *= 2;
 
        PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
-                                                                         init_table_size,
+                                                                         max_table_size,
                                                                          max_table_size,
                                                                          &info,
-                                                                         hash_flags);
+                                                                         HASH_ELEM | HASH_FUNCTION |
+                                                                         HASH_PARTITION | HASH_FIXED_SIZE);
 
        /*
-        * Compute init/max size to request for serializable transaction
-        * hashtable. Note these calculations must agree with
-        * PredicateLockShmemSize!
+        * Compute size for serializable transaction hashtable. Note these
+        * calculations must agree with PredicateLockShmemSize!
         */
        max_table_size = (MaxBackends + max_prepared_xacts);
 
@@ -1039,6 +1169,7 @@ InitPredicateLocks(void)
        PredXact = ShmemInitStruct("PredXactList",
                                                           PredXactListDataSize,
                                                           &found);
+       Assert(found == IsUnderPostmaster);
        if (!found)
        {
                int                     i;
@@ -1054,21 +1185,18 @@ InitPredicateLocks(void)
                requestSize = mul_size((Size) max_table_size,
                                                           PredXactListElementDataSize);
                PredXact->element = ShmemAlloc(requestSize);
-               if (PredXact->element == NULL)
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_OUT_OF_MEMORY),
-                        errmsg("not enough shared memory for elements of data structure"
-                                       " \"%s\" (%lu bytes requested)",
-                                       "PredXactList", (unsigned long) requestSize)));
                /* Add all elements to available list, clean. */
                memset(PredXact->element, 0, requestSize);
                for (i = 0; i < max_table_size; i++)
                {
+                       LWLockInitialize(&PredXact->element[i].sxact.predicateLockListLock,
+                                                        LWTRANCHE_SXACT);
                        SHMQueueInsertBefore(&(PredXact->availableList),
                                                                 &(PredXact->element[i].link));
                }
                PredXact->OldCommittedSxact = CreatePredXact();
                SetInvalidVirtualTransactionId(PredXact->OldCommittedSxact->vxid);
+               PredXact->OldCommittedSxact->prepareSeqNo = 0;
                PredXact->OldCommittedSxact->commitSeqNo = 0;
                PredXact->OldCommittedSxact->SeqNo.lastCommitBeforeSnapshot = 0;
                SHMQueueInit(&PredXact->OldCommittedSxact->outConflicts);
@@ -1092,14 +1220,13 @@ InitPredicateLocks(void)
        MemSet(&info, 0, sizeof(info));
        info.keysize = sizeof(SERIALIZABLEXIDTAG);
        info.entrysize = sizeof(SERIALIZABLEXID);
-       info.hash = tag_hash;
-       hash_flags = (HASH_ELEM | HASH_FUNCTION);
 
        SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
                                                                                max_table_size,
                                                                                max_table_size,
                                                                                &info,
-                                                                               hash_flags);
+                                                                               HASH_ELEM | HASH_BLOBS |
+                                                                               HASH_FIXED_SIZE);
 
        /*
         * Allocate space for tracking rw-conflicts in lists attached to the
@@ -1109,7 +1236,7 @@ InitPredicateLocks(void)
         * that this will prevent resource exhaustion in even the most pessimal
         * loads up to max_connections = 200 with all 200 connections pounding the
         * database with serializable transactions.  Beyond that, there may be
-        * occassional transactions canceled when trying to flag conflicts. That's
+        * occasional transactions canceled when trying to flag conflicts. That's
         * probably OK.
         */
        max_table_size *= 5;
@@ -1117,6 +1244,7 @@ InitPredicateLocks(void)
        RWConflictPool = ShmemInitStruct("RWConflictPool",
                                                                         RWConflictPoolHeaderDataSize,
                                                                         &found);
+       Assert(found == IsUnderPostmaster);
        if (!found)
        {
                int                     i;
@@ -1125,12 +1253,6 @@ InitPredicateLocks(void)
                requestSize = mul_size((Size) max_table_size,
                                                           RWConflictDataSize);
                RWConflictPool->element = ShmemAlloc(requestSize);
-               if (RWConflictPool->element == NULL)
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_OUT_OF_MEMORY),
-                        errmsg("not enough shared memory for elements of data structure"
-                                       " \"%s\" (%lu bytes requested)",
-                                       "RWConflictPool", (unsigned long) requestSize)));
                /* Add all elements to available list, clean. */
                memset(RWConflictPool->element, 0, requestSize);
                for (i = 0; i < max_table_size; i++)
@@ -1148,6 +1270,7 @@ InitPredicateLocks(void)
                ShmemInitStruct("FinishedSerializableTransactions",
                                                sizeof(SHM_QUEUE),
                                                &found);
+       Assert(found == IsUnderPostmaster);
        if (!found)
                SHMQueueInit(FinishedSerializableTransactions);
 
@@ -1266,7 +1389,7 @@ GetPredicateLockStatusData(void)
         * in ascending order, then SerializableXactHashLock.
         */
        for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
-               LWLockAcquire(FirstPredicateLockMgrLock + i, LW_SHARED);
+               LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
        LWLockAcquire(SerializableXactHashLock, LW_SHARED);
 
        /* Get number of locks and allocate appropriately-sized arrays. */
@@ -1295,7 +1418,7 @@ GetPredicateLockStatusData(void)
        /* Release locks in reverse order */
        LWLockRelease(SerializableXactHashLock);
        for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
-               LWLockRelease(FirstPredicateLockMgrLock + i);
+               LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
 
        return data;
 }
@@ -1317,12 +1440,12 @@ SummarizeOldestCommittedSxact(void)
        /*
         * This function is only called if there are no sxact slots available.
         * Some of them must belong to old, already-finished transactions, so
-        * there should be something in FinishedSerializableTransactions list
-        * that we can summarize. However, there's a race condition: while we
-        * were not holding any locks, a transaction might have ended and cleaned
-        * up all the finished sxact entries already, freeing up their sxact
-        * slots. In that case, we have nothing to do here. The caller will find
-        * one of the slots released by the other backend when it retries.
+        * there should be something in FinishedSerializableTransactions list that
+        * we can summarize. However, there's a race condition: while we were not
+        * holding any locks, a transaction might have ended and cleaned up all
+        * the finished sxact entries already, freeing up their sxact slots. In
+        * that case, we have nothing to do here. The caller will find one of the
+        * slots released by the other backend when it retries.
         */
        if (SHMQueueEmpty(FinishedSerializableTransactions))
        {
@@ -1332,7 +1455,7 @@ SummarizeOldestCommittedSxact(void)
 
        /*
         * Grab the first sxact off the finished list -- this will be the earliest
-        * commit.      Remove it from the list.
+        * commit.  Remove it from the list.
         */
        sxact = (SERIALIZABLEXACT *)
                SHMQueueNext(FinishedSerializableTransactions,
@@ -1343,7 +1466,7 @@ SummarizeOldestCommittedSxact(void)
        /* Add to SLRU summary information. */
        if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
                OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
-                  ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
+                                        ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
 
        /* Summarize and release the detail. */
        ReleaseOneSerializableXact(sxact, false, true);
@@ -1359,6 +1482,10 @@ SummarizeOldestCommittedSxact(void)
  *             without further checks. This requires waiting for concurrent
  *             transactions to complete, and retrying with a new snapshot if
  *             one of them could possibly create a conflict.
+ *
+ *             As with GetSerializableTransactionSnapshot (which this is a subroutine
+ *             for), the passed-in Snapshot pointer should reference a static data
+ *             area that can safely be passed to GetSnapshotData.
  */
 static Snapshot
 GetSafeSnapshot(Snapshot origSnapshot)
@@ -1370,84 +1497,223 @@ GetSafeSnapshot(Snapshot origSnapshot)
        while (true)
        {
                /*
-                * RegisterSerializableTransactionInt is going to call
-                * GetSnapshotData, so we need to provide it the static snapshot our
-                * caller passed to us. It returns a copy of that snapshot and
-                * registers it on TopTransactionResourceOwner.
+                * GetSerializableTransactionSnapshotInt is going to call
+                * GetSnapshotData, so we need to provide it the static snapshot area
+                * our caller passed to us.  The pointer returned is actually the same
+                * one passed to it, but we avoid assuming that here.
                 */
-               snapshot = RegisterSerializableTransactionInt(origSnapshot);
+               snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
+                                                                                                                NULL, InvalidPid);
 
                if (MySerializableXact == InvalidSerializableXact)
                        return snapshot;        /* no concurrent r/w xacts; it's safe */
 
-               MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
+               LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
                /*
                 * Wait for concurrent transactions to finish. Stop early if one of
                 * them marked us as conflicted.
                 */
-               while (!(SHMQueueEmpty((SHM_QUEUE *)
-                                                        &MySerializableXact->possibleUnsafeConflicts) ||
+               MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
+               while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
                                 SxactIsROUnsafe(MySerializableXact)))
-                       ProcWaitForSignal();
-
+               {
+                       LWLockRelease(SerializableXactHashLock);
+                       ProcWaitForSignal(WAIT_EVENT_SAFE_SNAPSHOT);
+                       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+               }
                MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
+
                if (!SxactIsROUnsafe(MySerializableXact))
+               {
+                       LWLockRelease(SerializableXactHashLock);
                        break;                          /* success */
+               }
+
+               LWLockRelease(SerializableXactHashLock);
 
                /* else, need to retry... */
                ereport(DEBUG2,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("deferrable snapshot was unsafe; trying a new one")));
-               ReleasePredicateLocks(false);
-               UnregisterSnapshotFromOwner(snapshot,
-                                                                       TopTransactionResourceOwner);
+               ReleasePredicateLocks(false, false);
        }
 
        /*
         * Now we have a safe snapshot, so we don't need to do any further checks.
         */
        Assert(SxactIsROSafe(MySerializableXact));
-       ReleasePredicateLocks(false);
+       ReleasePredicateLocks(false, true);
 
        return snapshot;
 }
 
 /*
- * Acquire and register a snapshot which can be used for this transaction..
+ * GetSafeSnapshotBlockingPids
+ *             If the specified process is currently blocked in GetSafeSnapshot,
+ *             write the process IDs of all processes that it is blocked by
+ *             into the caller-supplied buffer output[].  The list is truncated at
+ *             output_size, and the number of PIDs written into the buffer is
+ *             returned.  Returns zero if the given PID is not currently blocked
+ *             in GetSafeSnapshot.
+ */
+int
+GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
+{
+       int                     num_written = 0;
+       SERIALIZABLEXACT *sxact;
+
+       LWLockAcquire(SerializableXactHashLock, LW_SHARED);
+
+       /* Find blocked_pid's SERIALIZABLEXACT by linear search. */
+       for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
+       {
+               if (sxact->pid == blocked_pid)
+                       break;
+       }
+
+       /* Did we find it, and is it currently waiting in GetSafeSnapshot? */
+       if (sxact != NULL && SxactIsDeferrableWaiting(sxact))
+       {
+               RWConflict      possibleUnsafeConflict;
+
+               /* Traverse the list of possible unsafe conflicts collecting PIDs. */
+               possibleUnsafeConflict = (RWConflict)
+                       SHMQueueNext(&sxact->possibleUnsafeConflicts,
+                                                &sxact->possibleUnsafeConflicts,
+                                                offsetof(RWConflictData, inLink));
+
+               while (possibleUnsafeConflict != NULL && num_written < output_size)
+               {
+                       output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
+                       possibleUnsafeConflict = (RWConflict)
+                               SHMQueueNext(&sxact->possibleUnsafeConflicts,
+                                                        &possibleUnsafeConflict->inLink,
+                                                        offsetof(RWConflictData, inLink));
+               }
+       }
+
+       LWLockRelease(SerializableXactHashLock);
+
+       return num_written;
+}
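
For example, a monitoring caller with a fixed-size buffer might use it like this (sketch only; the buffer size and function name are invented for the illustration):

	#define EXAMPLE_MAX_BLOCKERS	64	/* arbitrary cap for the illustration */

	static void
	example_report_safe_snapshot_blockers(int blocked_pid)
	{
		int			blockers[EXAMPLE_MAX_BLOCKERS];
		int			n_blockers;
		int			i;

		n_blockers = GetSafeSnapshotBlockingPids(blocked_pid, blockers,
												 EXAMPLE_MAX_BLOCKERS);
		for (i = 0; i < n_blockers; i++)
			elog(DEBUG1, "pid %d waits for a safe snapshot behind pid %d",
				 blocked_pid, blockers[i]);
	}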
+
+/*
+ * Acquire a snapshot that can be used for the current transaction.
+ *
  * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
  * It should be current for this process and be contained in PredXact.
+ *
+ * The passed-in Snapshot pointer should reference a static data area that
+ * can safely be passed to GetSnapshotData.  The return value is actually
+ * always this same pointer; no new snapshot data structure is allocated
+ * within this function.
  */
 Snapshot
-RegisterSerializableTransaction(Snapshot snapshot)
+GetSerializableTransactionSnapshot(Snapshot snapshot)
 {
        Assert(IsolationIsSerializable());
 
+       /*
+        * Can't use serializable mode while recovery is still active, as it is,
+        * for example, on a hot standby.  We could get here despite the check in
+        * check_XactIsoLevel() if default_transaction_isolation is set to
+        * serializable, so phrase the hint accordingly.
+        */
+       if (RecoveryInProgress())
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("cannot use serializable mode in a hot standby"),
+                                errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
+                                errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
+
        /*
         * A special optimization is available for SERIALIZABLE READ ONLY
         * DEFERRABLE transactions -- we can wait for a suitable snapshot and
-        * thereby avoid all SSI overhead once it's running..
+        * thereby avoid all SSI overhead once it's running.
         */
        if (XactReadOnly && XactDeferrable)
                return GetSafeSnapshot(snapshot);
 
-       return RegisterSerializableTransactionInt(snapshot);
+       return GetSerializableTransactionSnapshotInt(snapshot,
+                                                                                                NULL, InvalidPid);
+}
+
+/*
+ * Import a snapshot to be used for the current transaction.
+ *
+ * This is nearly the same as GetSerializableTransactionSnapshot, except that
+ * we don't take a new snapshot, but rather use the data we're handed.
+ *
+ * The caller must have verified that the snapshot came from a serializable
+ * transaction; and if we're read-write, the source transaction must not be
+ * read-only.
+ */
+void
+SetSerializableTransactionSnapshot(Snapshot snapshot,
+                                                                  VirtualTransactionId *sourcevxid,
+                                                                  int sourcepid)
+{
+       Assert(IsolationIsSerializable());
+
+       /*
+        * If this is called by parallel.c in a parallel worker, we don't want to
+        * create a SERIALIZABLEXACT just yet because the leader's
+        * SERIALIZABLEXACT will be installed with AttachSerializableXact().  We
+        * also don't want to reject SERIALIZABLE READ ONLY DEFERRABLE in this
+        * case, because the leader has already determined that the snapshot it
+        * has passed us is safe.  So there is nothing for us to do.
+        */
+       if (IsParallelWorker())
+               return;
+
+       /*
+        * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
+        * import snapshots, since there's no way to wait for a safe snapshot when
+        * we're using the snap we're told to.  (XXX instead of throwing an error,
+        * we could just ignore the XactDeferrable flag?)
+        */
+       if (XactReadOnly && XactDeferrable)
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
+
+       (void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
+                                                                                                sourcepid);
 }
 
+/*
+ * Guts of GetSerializableTransactionSnapshot
+ *
+ * If sourcevxid is valid, this is actually an import operation and we should
+ * skip calling GetSnapshotData, because the snapshot contents are already
+ * loaded up.  HOWEVER: to avoid race conditions, we must check that the
+ * source xact is still running after we acquire SerializableXactHashLock.
+ * We do that by calling ProcArrayInstallImportedXmin.
+ */
 static Snapshot
-RegisterSerializableTransactionInt(Snapshot snapshot)
+GetSerializableTransactionSnapshotInt(Snapshot snapshot,
+                                                                         VirtualTransactionId *sourcevxid,
+                                                                         int sourcepid)
 {
        PGPROC     *proc;
        VirtualTransactionId vxid;
        SERIALIZABLEXACT *sxact,
                           *othersxact;
-       HASHCTL         hash_ctl;
 
        /* We only do this for serializable transactions.  Once. */
        Assert(MySerializableXact == InvalidSerializableXact);
 
        Assert(!RecoveryInProgress());
 
+       /*
+        * Since all parts of a serializable transaction must use the same
+        * snapshot, it is too late to establish one after a parallel operation
+        * has begun.
+        */
+       if (IsInParallelMode())
+               elog(ERROR, "cannot establish serializable snapshot during a parallel operation");
+
        proc = MyProc;
        Assert(proc != NULL);
        GET_VXID_FROM_PGPROC(vxid, *proc);
@@ -1455,6 +1721,14 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
        /*
         * First we get the sxact structure, which may involve looping and access
         * to the "finished" list to free a structure for use.
+        *
+        * We must hold SerializableXactHashLock when taking/checking the snapshot
+        * to avoid race conditions, for much the same reasons that
+        * GetSnapshotData takes the ProcArrayLock.  Since we might have to
+        * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
+        * this means we have to create the sxact first, which is a bit annoying
+        * (in particular, an elog(ERROR) in procarray.c would cause us to leak
+        * the sxact).  Consider refactoring to avoid this.
         */
 #ifdef TEST_OLDSERXID
        SummarizeOldestCommittedSxact();
@@ -1472,9 +1746,19 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
                }
        } while (!sxact);
 
-       /* Get and register a snapshot */
-       snapshot = GetSnapshotData(snapshot);
-       snapshot = RegisterSnapshotOnOwner(snapshot, TopTransactionResourceOwner);
+       /* Get the snapshot, or check that it's safe to use */
+       if (!sourcevxid)
+               snapshot = GetSnapshotData(snapshot);
+       else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
+       {
+               ReleasePredXact(sxact);
+               LWLockRelease(SerializableXactHashLock);
+               ereport(ERROR,
+                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                errmsg("could not import the requested snapshot"),
+                                errdetail("The source process with PID %d is not running anymore.",
+                                                  sourcepid)));
+       }
 
        /*
         * If there are no serializable transactions which are not read-only, we
@@ -1516,6 +1800,7 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
        /* Initialize the structure. */
        sxact->vxid = vxid;
        sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
+       sxact->prepareSeqNo = InvalidSerCommitSeqNo;
        sxact->commitSeqNo = InvalidSerCommitSeqNo;
        SHMQueueInit(&(sxact->outConflicts));
        SHMQueueInit(&(sxact->inConflicts));
@@ -1541,8 +1826,9 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
                         othersxact != NULL;
                         othersxact = NextPredXact(othersxact))
                {
-                       if (!SxactIsOnFinishedList(othersxact) &&
-                               !SxactIsReadOnly(othersxact))
+                       if (!SxactIsCommitted(othersxact)
+                               && !SxactIsDoomed(othersxact)
+                               && !SxactIsReadOnly(othersxact))
                        {
                                SetPossibleUnsafeConflict(sxact, othersxact);
                        }
@@ -1556,21 +1842,29 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
        }
 
        MySerializableXact = sxact;
+       MyXactDidWrite = false;         /* haven't written anything yet */
 
        LWLockRelease(SerializableXactHashLock);
 
+       CreateLocalPredicateLockHash();
+
+       return snapshot;
+}
+
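+/*
+ * Create the backend-local hash table used to track this backend's predicate
+ * locks and, for each coarser-grained target, the number of finer-grained
+ * locks held under it; CheckAndPromotePredicateLockRequest consults those
+ * counts when deciding whether to promote to a coarser lock.
+ */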
+static void
+CreateLocalPredicateLockHash(void)
+{
+       HASHCTL         hash_ctl;
+
        /* Initialize the backend-local hash table of parent locks */
        Assert(LocalPredicateLockHash == NULL);
        MemSet(&hash_ctl, 0, sizeof(hash_ctl));
        hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
        hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
-       hash_ctl.hash = tag_hash;
        LocalPredicateLockHash = hash_create("Local predicate lock",
                                                                                 max_predicate_locks_per_xact,
                                                                                 &hash_ctl,
-                                                                                HASH_ELEM | HASH_FUNCTION);
-
-       return snapshot;
+                                                                                HASH_ELEM | HASH_BLOBS);
 }
 
 /*
@@ -1578,7 +1872,7 @@ RegisterSerializableTransactionInt(Snapshot snapshot)
  * Also store it for easy reference in MySerializableXact.
  */
 void
-RegisterPredicateLockingXid(const TransactionId xid)
+RegisterPredicateLockingXid(TransactionId xid)
 {
        SERIALIZABLEXIDTAG sxidtag;
        SERIALIZABLEXID *sxid;
@@ -1591,29 +1885,24 @@ RegisterPredicateLockingXid(const TransactionId xid)
        if (MySerializableXact == InvalidSerializableXact)
                return;
 
-       /* This should only be done once per transaction. */
-       Assert(MySerializableXact->topXid == InvalidTransactionId);
-
        /* We should have a valid XID and be at the top level. */
        Assert(TransactionIdIsValid(xid));
 
+       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+       /* This should only be done once per transaction. */
+       Assert(MySerializableXact->topXid == InvalidTransactionId);
+
        MySerializableXact->topXid = xid;
 
        sxidtag.xid = xid;
-       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
        sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
                                                                                   &sxidtag,
                                                                                   HASH_ENTER, &found);
-       if (!sxid)
-               /* This should not be possible, based on allocation. */
-               ereport(ERROR,
-                               (errcode(ERRCODE_OUT_OF_MEMORY),
-                                errmsg("out of shared memory")));
-
        Assert(!found);
 
        /* Initialize the structure. */
-       sxid->myXact = (SERIALIZABLEXACT *) MySerializableXact;
+       sxid->myXact = MySerializableXact;
        LWLockRelease(SerializableXactHashLock);
 }
 
@@ -1632,11 +1921,11 @@ RegisterPredicateLockingXid(const TransactionId xid)
  * One use is to support proper behavior during GiST index vacuum.
  */
 bool
-PageIsPredicateLocked(const Relation relation, const BlockNumber blkno)
+PageIsPredicateLocked(Relation relation, BlockNumber blkno)
 {
        PREDICATELOCKTARGETTAG targettag;
        uint32          targettaghash;
-       LWLockId        partitionLock;
+       LWLock     *partitionLock;
        PREDICATELOCKTARGET *target;
 
        SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
@@ -1708,17 +1997,17 @@ GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
                case PREDLOCKTAG_PAGE:
                        /* parent lock is relation lock */
                        SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
-                                                                                GET_PREDICATELOCKTARGETTAG_DB(*tag),
-                                                                 GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
+                                                                                               GET_PREDICATELOCKTARGETTAG_DB(*tag),
+                                                                                               GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
 
                        return true;
 
                case PREDLOCKTAG_TUPLE:
                        /* parent lock is page lock */
                        SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
-                                                                                GET_PREDICATELOCKTARGETTAG_DB(*tag),
-                                                                  GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
-                                                                         GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
+                                                                                       GET_PREDICATELOCKTARGETTAG_DB(*tag),
+                                                                                       GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
+                                                                                       GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
                        return true;
        }
 
@@ -1755,14 +2044,61 @@ CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
 }
 
 /*
- * Check whether both the list of related predicate locks and the pointer to
- * a prior version of the row (if this is a tuple lock target) are empty for
- * a predicate lock target, and remove the target if they are.
+ * Remove the dummy entry from the predicate lock target hash, to free up some
+ * scratch space. The caller must be holding SerializablePredicateLockListLock,
+ * and must restore the entry with RestoreScratchTarget() before releasing the
+ * lock.
+ *
+ * If lockheld is true, the caller is already holding the partition lock
+ * of the partition containing the scratch entry.
+ */
+static void
+RemoveScratchTarget(bool lockheld)
+{
+       bool            found;
+
+       Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+
+       if (!lockheld)
+               LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
+       hash_search_with_hash_value(PredicateLockTargetHash,
+                                                               &ScratchTargetTag,
+                                                               ScratchTargetTagHash,
+                                                               HASH_REMOVE, &found);
+       Assert(found);
+       if (!lockheld)
+               LWLockRelease(ScratchPartitionLock);
+}
+
+/*
+ * Re-insert the dummy entry in the predicate lock target hash.
+ */
+static void
+RestoreScratchTarget(bool lockheld)
+{
+       bool            found;
+
+       Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+
+       if (!lockheld)
+               LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
+       hash_search_with_hash_value(PredicateLockTargetHash,
+                                                               &ScratchTargetTag,
+                                                               ScratchTargetTagHash,
+                                                               HASH_ENTER, &found);
+       Assert(!found);
+       if (!lockheld)
+               LWLockRelease(ScratchPartitionLock);
+}
+
+/*
+ * Check whether the list of related predicate locks is empty for a
+ * predicate lock target, and remove the target if it is.
  */
 static void
 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
 {
-       PREDICATELOCKTARGET *rmtarget;
+       PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
 
        Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
 
@@ -1781,9 +2117,11 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
 /*
  * Delete child target locks owned by this process.
  * This implementation is assuming that the usage of each target tag field
- * is uniform. No need to make this hard if we don't have to.
+ * is uniform.  No need to make this hard if we don't have to.
  *
- * We aren't acquiring lightweight locks for the predicate lock or lock
+ * We acquire an LWLock in the case of parallel mode, because worker
+ * backends have access to the leader's SERIALIZABLEXACT.  Otherwise,
+ * we aren't acquiring LWLocks for the predicate lock or lock
  * target structures associated with this transaction unless we're going
  * to modify them, because no other process is permitted to modify our
  * locks.
@@ -1795,7 +2133,9 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
        PREDICATELOCK *predlock;
 
        LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
-       sxact = (SERIALIZABLEXACT *) MySerializableXact;
+       sxact = MySerializableXact;
+       if (IsInParallelMode())
+               LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
        predlock = (PREDICATELOCK *)
                SHMQueueNext(&(sxact->predicateLocks),
                                         &(sxact->predicateLocks),
@@ -1822,8 +2162,8 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
                if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
                {
                        uint32          oldtargettaghash;
-                       LWLockId        partitionLock;
-                       PREDICATELOCK *rmpredlock;
+                       LWLock     *partitionLock;
+                       PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
 
                        oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
                        partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
@@ -1849,32 +2189,41 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
 
                predlock = nextpredlock;
        }
+       if (IsInParallelMode())
+               LWLockRelease(&sxact->predicateLockListLock);
        LWLockRelease(SerializablePredicateLockListLock);
 }
 
 /*
- * Returns the promotion threshold for a given predicate lock
- * target. This is the number of descendant locks required to promote
- * to the specified tag. Note that the threshold includes non-direct
- * descendants, e.g. both tuples and pages for a relation lock.
+ * Returns the promotion limit for a given predicate lock target.  This is the
+ * max number of descendant locks allowed before promoting to the specified
+ * tag. Note that the limit includes non-direct descendants (e.g., both tuples
+ * and pages for a relation lock).
+ *
+ * Currently the default limit is 2 for a page lock, and half the value of
+ * max_pred_locks_per_transaction, minus 1, for a relation lock -- chosen to
+ * match the promotion behavior of earlier releases when upgrading.
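+ *
+ * For example, with the stock defaults (max_pred_locks_per_transaction = 64,
+ * max_pred_locks_per_relation = -2, max_pred_locks_per_page = 2), up to
+ * 64/2 - 1 = 31 finer-grained locks are allowed under a relation target and
+ * 2 under a page target; acquiring one more than that promotes to the
+ * coarser lock.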
  *
- * TODO SSI: We should do something more intelligent about what the
- * thresholds are, either making it proportional to the number of
- * tuples in a page & pages in a relation, or at least making it a
- * GUC. Currently the threshold is 3 for a page lock, and
- * max_pred_locks_per_transaction/2 for a relation lock, chosen
- * entirely arbitrarily (and without benchmarking).
+ * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
+ * of page and tuple locks based on the pages in a relation, and the maximum
+ * ratio of tuple locks to tuples in a page.  This would provide more
+ * generally "balanced" allocation of locks to where they are most useful,
+ * while still allowing the absolute numbers to prevent one relation from
+ * tying up all predicate lock resources.
  */
 static int
-PredicateLockPromotionThreshold(const PREDICATELOCKTARGETTAG *tag)
+MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
 {
        switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
        {
                case PREDLOCKTAG_RELATION:
-                       return max_predicate_locks_per_xact / 2;
+                       return max_predicate_locks_per_relation < 0
+                               ? (max_predicate_locks_per_xact
+                                  / (-max_predicate_locks_per_relation)) - 1
+                               : max_predicate_locks_per_relation;
 
                case PREDLOCKTAG_PAGE:
-                       return 3;
+                       return max_predicate_locks_per_page;
 
                case PREDLOCKTAG_TUPLE:
 
@@ -1929,8 +2278,8 @@ CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
                else
                        parentlock->childLocks++;
 
-               if (parentlock->childLocks >=
-                       PredicateLockPromotionThreshold(&targettag))
+               if (parentlock->childLocks >
+                       MaxPredicateChildLocks(&targettag))
                {
                        /*
                         * We should promote to this parent lock. Continue to check its
@@ -1976,7 +2325,7 @@ DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
        {
                uint32          targettaghash;
                LOCALPREDICATELOCK *parentlock,
-                                  *rmlock;
+                                  *rmlock PG_USED_FOR_ASSERTS_ONLY;
 
                parenttag = nexttag;
                targettaghash = PredicateLockTargetTagHashCode(&parenttag);
@@ -2034,19 +2383,21 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
        PREDICATELOCKTARGET *target;
        PREDICATELOCKTAG locktag;
        PREDICATELOCK *lock;
-       LWLockId        partitionLock;
+       LWLock     *partitionLock;
        bool            found;
 
        partitionLock = PredicateLockHashPartitionLock(targettaghash);
 
        LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
+       if (IsInParallelMode())
+               LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
        LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 
        /* Make sure that the target is represented. */
        target = (PREDICATELOCKTARGET *)
                hash_search_with_hash_value(PredicateLockTargetHash,
                                                                        targettag, targettaghash,
-                                                                       HASH_ENTER, &found);
+                                                                       HASH_ENTER_NULL, &found);
        if (!target)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
@@ -2060,8 +2411,8 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
        locktag.myXact = sxact;
        lock = (PREDICATELOCK *)
                hash_search_with_hash_value(PredicateLockHash, &locktag,
-                       PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
-                                                                       HASH_ENTER, &found);
+                                                                       PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
+                                                                       HASH_ENTER_NULL, &found);
        if (!lock)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
@@ -2073,10 +2424,12 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
                SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
                SHMQueueInsertBefore(&(sxact->predicateLocks),
                                                         &(lock->xactLink));
-               lock->commitSeqNo = 0;
+               lock->commitSeqNo = InvalidSerCommitSeqNo;
        }
 
        LWLockRelease(partitionLock);
+       if (IsInParallelMode())
+               LWLockRelease(&sxact->predicateLockListLock);
        LWLockRelease(SerializablePredicateLockListLock);
 }
 
@@ -2114,8 +2467,7 @@ PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
                locallock->childLocks = 0;
 
        /* Actually create the lock */
-       CreatePredicateLock(targettag, targettaghash,
-                                               (SERIALIZABLEXACT *) MySerializableXact);
+       CreatePredicateLock(targettag, targettaghash, MySerializableXact);
 
        /*
         * Lock has been acquired. Check whether it should be promoted to a
@@ -2148,11 +2500,11 @@ PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
  * Clear any finer-grained predicate locks this session has on the relation.
  */
 void
-PredicateLockRelation(const Relation relation)
+PredicateLockRelation(Relation relation, Snapshot snapshot)
 {
        PREDICATELOCKTARGETTAG tag;
 
-       if (SkipSerialization(relation))
+       if (!SerializationNeededForRead(relation, snapshot))
                return;
 
        SET_PREDICATELOCKTARGETTAG_RELATION(tag,
@@ -2171,11 +2523,11 @@ PredicateLockRelation(const Relation relation)
  * Clear any finer-grained predicate locks this session has on the relation.
  */
 void
-PredicateLockPage(const Relation relation, const BlockNumber blkno)
+PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
 {
        PREDICATELOCKTARGETTAG tag;
 
-       if (SkipSerialization(relation))
+       if (!SerializationNeededForRead(relation, snapshot))
                return;
 
        SET_PREDICATELOCKTARGETTAG_PAGE(tag,
@@ -2193,13 +2545,13 @@ PredicateLockPage(const Relation relation, const BlockNumber blkno)
  * Skip if this is a temporary table.
  */
 void
-PredicateLockTuple(const Relation relation, const HeapTuple tuple)
+PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
 {
        PREDICATELOCKTARGETTAG tag;
        ItemPointer tid;
        TransactionId targetxmin;
 
-       if (SkipSerialization(relation))
+       if (!SerializationNeededForRead(relation, snapshot))
                return;
 
        /*
@@ -2207,7 +2559,7 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
         */
        if (relation->rd_index == NULL)
        {
-               TransactionId   myxid;
+               TransactionId myxid;
 
                targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
 
@@ -2217,6 +2569,7 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
                        if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
                        {
                                TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
+
                                if (TransactionIdEquals(xid, myxid))
                                {
                                        /* We wrote it; we already have a write lock. */
@@ -2225,11 +2578,9 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
                        }
                }
        }
-       else
-               targetxmin = InvalidTransactionId;
 
        /*
-        * Do quick-but-not-definitive test for a relation lock first.  This will
+        * Do quick-but-not-definitive test for a relation lock first.  This will
         * never cause a return when the relation is *not* locked, but will
         * occasionally let the check continue when there really *is* a relation
         * level lock.
@@ -2245,80 +2596,10 @@ PredicateLockTuple(const Relation relation, const HeapTuple tuple)
                                                                         relation->rd_node.dbNode,
                                                                         relation->rd_id,
                                                                         ItemPointerGetBlockNumber(tid),
-                                                                        ItemPointerGetOffsetNumber(tid),
-                                                                        targetxmin);
+                                                                        ItemPointerGetOffsetNumber(tid));
        PredicateLockAcquire(&tag);
 }
 
-/*
- * If the old tuple has any predicate locks, copy them to the new target.
- *
- * This is called at an UPDATE, where any predicate locks held on the old
- * tuple need to be copied to the new tuple, because logically they both
- * represent the same row. A lock taken before the update must conflict
- * with anyone locking the same row after the update.
- */
-void
-PredicateLockTupleRowVersionLink(const Relation relation,
-                                                                const HeapTuple oldTuple,
-                                                                const HeapTuple newTuple)
-{
-       PREDICATELOCKTARGETTAG oldtupletag;
-       PREDICATELOCKTARGETTAG oldpagetag;
-       PREDICATELOCKTARGETTAG newtupletag;
-       BlockNumber     oldblk,
-                               newblk;
-       OffsetNumber oldoff,
-                               newoff;
-       TransactionId oldxmin,
-                               newxmin;
-
-       oldblk = ItemPointerGetBlockNumber(&(oldTuple->t_self));
-       oldoff = ItemPointerGetOffsetNumber(&(oldTuple->t_self));
-       oldxmin = HeapTupleHeaderGetXmin(oldTuple->t_data);
-
-       newblk = ItemPointerGetBlockNumber(&(newTuple->t_self));
-       newoff = ItemPointerGetOffsetNumber(&(newTuple->t_self));
-       newxmin = HeapTupleHeaderGetXmin(newTuple->t_data);
-
-       SET_PREDICATELOCKTARGETTAG_TUPLE(oldtupletag,
-                                                                        relation->rd_node.dbNode,
-                                                                        relation->rd_id,
-                                                                        oldblk,
-                                                                        oldoff,
-                                                                        oldxmin);
-
-       SET_PREDICATELOCKTARGETTAG_PAGE(oldpagetag,
-                                                                       relation->rd_node.dbNode,
-                                                                       relation->rd_id,
-                                                                       oldblk);
-
-       SET_PREDICATELOCKTARGETTAG_TUPLE(newtupletag,
-                                                                        relation->rd_node.dbNode,
-                                                                        relation->rd_id,
-                                                                        newblk,
-                                                                        newoff,
-                                                                        newxmin);
-
-       /*
-        * A page-level lock on the page containing the old tuple counts too.
-        * Anyone holding a lock on the page is logically holding a lock on
-        * the old tuple, so we need to acquire a lock on his behalf on the
-        * new tuple too. However, if the new tuple is on the same page as the
-        * old one, the old page-level lock already covers the new tuple.
-        *
-        * A relation-level lock always covers both tuple versions, so we don't
-        * need to worry about those here.
-        */
-       LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
-
-       TransferPredicateLocksToNewTarget(oldtupletag, newtupletag, false);
-       if (newblk != oldblk)
-               TransferPredicateLocksToNewTarget(oldpagetag, newtupletag, false);
-
-       LWLockRelease(SerializablePredicateLockListLock);
-}
-
 
 /*
  *             DeleteLockTarget
@@ -2336,7 +2617,8 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
        PREDICATELOCK *nextpredlock;
        bool            found;
 
-       Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+       Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
+                                                               LW_EXCLUSIVE));
        Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
 
        predlock = (PREDICATELOCK *)
@@ -2382,8 +2664,8 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
  *
  * Returns true on success, or false if we ran out of shared memory to
  * allocate the new target or locks. Guaranteed to always succeed if
- * removeOld is set (by using the reserved entry in
- * PredicateLockTargetHash for scratch space).
+ * removeOld is set (by using the scratch entry in PredicateLockTargetHash
+ * for scratch space).
  *
  * Warning: the "removeOld" option should be used only with care,
  * because this function does not (indeed, can not) update other
@@ -2396,49 +2678,36 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
  * covers it, or if we are absolutely certain that no one will need to
  * refer to that lock in the future.
  *
- * Caller must hold SerializablePredicateLockListLock.
+ * Caller must hold SerializablePredicateLockListLock exclusively.
  */
 static bool
-TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
-                                                                 const PREDICATELOCKTARGETTAG newtargettag,
+TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
+                                                                 PREDICATELOCKTARGETTAG newtargettag,
                                                                  bool removeOld)
 {
        uint32          oldtargettaghash;
-       LWLockId        oldpartitionLock;
+       LWLock     *oldpartitionLock;
        PREDICATELOCKTARGET *oldtarget;
        uint32          newtargettaghash;
-       LWLockId        newpartitionLock;
+       LWLock     *newpartitionLock;
        bool            found;
        bool            outOfShmem = false;
-       uint32          reservedtargettaghash;
-       LWLockId        reservedpartitionLock;
 
-
-       Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+       Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
+                                                               LW_EXCLUSIVE));
 
        oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
        newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
        oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
        newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
 
-       reservedtargettaghash = 0;      /* Quiet compiler warnings. */
-       reservedpartitionLock = 0;      /* Quiet compiler warnings. */
-
        if (removeOld)
        {
                /*
-                * Remove the reserved entry to give us scratch space, so we know
-                * we'll be able to create the new lock target.
+                * Remove the dummy entry to give us scratch space, so we know we'll
+                * be able to create the new lock target.
                 */
-               reservedtargettaghash = PredicateLockTargetTagHashCode(&ReservedTargetTag);
-               reservedpartitionLock = PredicateLockHashPartitionLock(reservedtargettaghash);
-               LWLockAcquire(reservedpartitionLock, LW_EXCLUSIVE);
-               hash_search_with_hash_value(PredicateLockTargetHash,
-                                                                       &ReservedTargetTag,
-                                                                       reservedtargettaghash,
-                                                                       HASH_REMOVE, &found);
-               Assert(found);
-               LWLockRelease(reservedpartitionLock);
+               RemoveScratchTarget(false);
        }
 
        /*
@@ -2496,6 +2765,10 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
 
                newpredlocktag.myTarget = newtarget;
 
+               /*
+                * Loop through all the locks on the old target, replacing them with
+                * locks on the new target.
+                */
                oldpredlock = (PREDICATELOCK *)
                        SHMQueueNext(&(oldtarget->predicateLocks),
                                                 &(oldtarget->predicateLocks),
@@ -2506,6 +2779,7 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
                        SHM_QUEUE  *predlocktargetlink;
                        PREDICATELOCK *nextpredlock;
                        PREDICATELOCK *newpredlock;
+                       SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
 
                        predlocktargetlink = &(oldpredlock->targetLink);
                        nextpredlock = (PREDICATELOCK *)
@@ -2522,20 +2796,19 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
                                hash_search_with_hash_value
                                        (PredicateLockHash,
                                         &oldpredlock->tag,
-                                  PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
-                                                                                                                  oldtargettaghash),
+                                        PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
+                                                                                                                        oldtargettaghash),
                                         HASH_REMOVE, &found);
                                Assert(found);
                        }
 
-
                        newpredlock = (PREDICATELOCK *)
-                               hash_search_with_hash_value
-                               (PredicateLockHash,
-                                &newpredlocktag,
-                                PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
-                                                                                                                newtargettaghash),
-                                HASH_ENTER_NULL, &found);
+                               hash_search_with_hash_value(PredicateLockHash,
+                                                                                       &newpredlocktag,
+                                                                                       PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
+                                                                                                                                                                       newtargettaghash),
+                                                                                       HASH_ENTER_NULL,
+                                                                                       &found);
                        if (!newpredlock)
                        {
                                /* Out of shared memory. Undo what we've done so far. */
@@ -2550,9 +2823,18 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
                                                                         &(newpredlock->targetLink));
                                SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
                                                                         &(newpredlock->xactLink));
-                               newpredlock->commitSeqNo = InvalidSerCommitSeqNo;
+                               newpredlock->commitSeqNo = oldCommitSeqNo;
+                       }
+                       else
+                       {
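+                               /*
+                                * This transaction already holds a lock on the new target;
+                                * keep whichever commitSeqNo is later so that the surviving
+                                * entry covers both locks.
+                                */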
+                               if (newpredlock->commitSeqNo < oldCommitSeqNo)
+                                       newpredlock->commitSeqNo = oldCommitSeqNo;
                        }
 
+                       Assert(newpredlock->commitSeqNo != 0);
+                       Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
+                                  || (newpredlock->tag.myXact == OldCommittedSxact));
+
                        oldpredlock = nextpredlock;
                }
                LWLockRelease(SerializableXactHashLock);
@@ -2585,19 +2867,238 @@ exit:
                /* We shouldn't run out of memory if we're moving locks */
                Assert(!outOfShmem);
 
-               /* Put the reserved entry back */
-               LWLockAcquire(reservedpartitionLock, LW_EXCLUSIVE);
-               hash_search_with_hash_value(PredicateLockTargetHash,
-                                                                       &ReservedTargetTag,
-                                                                       reservedtargettaghash,
-                                                                       HASH_ENTER, &found);
-               Assert(!found);
-               LWLockRelease(reservedpartitionLock);
+               /* Put the scratch entry back */
+               RestoreScratchTarget(false);
        }
 
        return !outOfShmem;
 }
 
+/*
+ * Drop all predicate locks of any granularity from the specified relation,
+ * which can be a heap relation or an index relation.  If 'transfer' is true,
+ * acquire a relation lock on the heap for any transactions with any lock(s)
+ * on the specified relation.
+ *
+ * This requires grabbing a lot of LW locks and scanning the entire lock
+ * target table for matches.  That makes this more expensive than most
+ * predicate lock management functions, but it will only be called for DDL
+ * type commands that are expensive anyway, and there are fast returns when
+ * no serializable transactions are active or the relation is temporary.
+ *
+ * We don't use the TransferPredicateLocksToNewTarget function because it
+ * acquires its own locks on the partitions of the two targets involved,
+ * and we'll already be holding all partition locks.
+ *
+ * We can't throw an error from here, because the call could be from a
+ * transaction which is not serializable.
+ *
+ * NOTE: This is currently only called with transfer set to true, but that may
+ * change.  If we decide to clean up the locks from a table on commit of a
+ * transaction which executed DROP TABLE, the false condition will be useful.
+ */
+static void
+DropAllPredicateLocksFromTable(Relation relation, bool transfer)
+{
+       HASH_SEQ_STATUS seqstat;
+       PREDICATELOCKTARGET *oldtarget;
+       PREDICATELOCKTARGET *heaptarget;
+       Oid                     dbId;
+       Oid                     relId;
+       Oid                     heapId;
+       int                     i;
+       bool            isIndex;
+       bool            found;
+       uint32          heaptargettaghash;
+
+       /*
+        * Bail out quickly if there are no serializable transactions running.
+        * It's safe to check this without taking locks because the caller is
+        * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
+        * would matter here can be acquired while that is held.
+        */
+       if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+               return;
+
+       if (!PredicateLockingNeededForRelation(relation))
+               return;
+
+       dbId = relation->rd_node.dbNode;
+       relId = relation->rd_id;
+       if (relation->rd_index == NULL)
+       {
+               isIndex = false;
+               heapId = relId;
+       }
+       else
+       {
+               isIndex = true;
+               heapId = relation->rd_index->indrelid;
+       }
+       Assert(heapId != InvalidOid);
+       Assert(transfer || !isIndex);   /* index OID only makes sense with
+                                                                        * transfer */
+
+       /* Filled in the first time they are needed, then kept. */
+       heaptargettaghash = 0;
+       heaptarget = NULL;
+
+       /* Acquire locks on all lock partitions */
+       LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
+       for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
+               LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
+       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+       /*
+        * Remove the dummy entry to give us scratch space, so we know we'll be
+        * able to create the new lock target.
+        */
+       if (transfer)
+               RemoveScratchTarget(true);
+
+       /* Scan through target map */
+       hash_seq_init(&seqstat, PredicateLockTargetHash);
+
+       while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+       {
+               PREDICATELOCK *oldpredlock;
+
+               /*
+                * Check whether this is a target which needs attention.
+                */
+               if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
+                       continue;                       /* wrong relation id */
+               if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
+                       continue;                       /* wrong database id */
+               if (transfer && !isIndex
+                       && GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
+                       continue;                       /* already the right lock */
+
+               /*
+                * If we made it here, we have work to do.  We make sure the heap
+                * relation lock exists, then we walk the list of predicate locks for
+                * the old target we found, moving all locks to the heap relation lock
+                * -- unless they already hold that.
+                */
+
+               /*
+                * First make sure we have the heap relation target.  We only need to
+                * do this once.
+                */
+               if (transfer && heaptarget == NULL)
+               {
+                       PREDICATELOCKTARGETTAG heaptargettag;
+
+                       SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
+                       heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
+                       heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
+                                                                                                        &heaptargettag,
+                                                                                                        heaptargettaghash,
+                                                                                                        HASH_ENTER, &found);
+                       if (!found)
+                               SHMQueueInit(&heaptarget->predicateLocks);
+               }
+
+               /*
+                * Loop through all the locks on the old target, replacing them with
+                * locks on the new target.
+                */
+               oldpredlock = (PREDICATELOCK *)
+                       SHMQueueNext(&(oldtarget->predicateLocks),
+                                                &(oldtarget->predicateLocks),
+                                                offsetof(PREDICATELOCK, targetLink));
+               while (oldpredlock)
+               {
+                       PREDICATELOCK *nextpredlock;
+                       PREDICATELOCK *newpredlock;
+                       SerCommitSeqNo oldCommitSeqNo;
+                       SERIALIZABLEXACT *oldXact;
+
+                       nextpredlock = (PREDICATELOCK *)
+                               SHMQueueNext(&(oldtarget->predicateLocks),
+                                                        &(oldpredlock->targetLink),
+                                                        offsetof(PREDICATELOCK, targetLink));
+
+                       /*
+                        * Remove the old lock first. This avoids the chance of running
+                        * out of lock structure entries for the hash table.
+                        */
+                       oldCommitSeqNo = oldpredlock->commitSeqNo;
+                       oldXact = oldpredlock->tag.myXact;
+
+                       SHMQueueDelete(&(oldpredlock->xactLink));
+
+                       /*
+                        * No need for retail delete from oldtarget list, we're removing
+                        * the whole target anyway.
+                        */
+                       hash_search(PredicateLockHash,
+                                               &oldpredlock->tag,
+                                               HASH_REMOVE, &found);
+                       Assert(found);
+
+                       if (transfer)
+                       {
+                               PREDICATELOCKTAG newpredlocktag;
+
+                               newpredlocktag.myTarget = heaptarget;
+                               newpredlocktag.myXact = oldXact;
+                               newpredlock = (PREDICATELOCK *)
+                                       hash_search_with_hash_value(PredicateLockHash,
+                                                                                               &newpredlocktag,
+                                                                                               PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
+                                                                                                                                                                               heaptargettaghash),
+                                                                                               HASH_ENTER,
+                                                                                               &found);
+                               if (!found)
+                               {
+                                       SHMQueueInsertBefore(&(heaptarget->predicateLocks),
+                                                                                &(newpredlock->targetLink));
+                                       SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
+                                                                                &(newpredlock->xactLink));
+                                       newpredlock->commitSeqNo = oldCommitSeqNo;
+                               }
+                               else
+                               {
+                                       if (newpredlock->commitSeqNo < oldCommitSeqNo)
+                                               newpredlock->commitSeqNo = oldCommitSeqNo;
+                               }
+
+                               Assert(newpredlock->commitSeqNo != 0);
+                               Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
+                                          || (newpredlock->tag.myXact == OldCommittedSxact));
+                       }
+
+                       oldpredlock = nextpredlock;
+               }
+
+               hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
+                                       &found);
+               Assert(found);
+       }
+
+       /* Put the scratch entry back */
+       if (transfer)
+               RestoreScratchTarget(true);
+
+       /* Release locks in reverse order */
+       LWLockRelease(SerializableXactHashLock);
+       for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
+               LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
+       LWLockRelease(SerializablePredicateLockListLock);
+}
+
+/*
+ * TransferPredicateLocksToHeapRelation
+ *             For all transactions, transfer all predicate locks for the given
+ *             relation to a single relation lock on the heap.
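+ *
+ *             This is used, for example, when an index is dropped: SIREAD locks
+ *             held on the index's pages are re-pointed at the heap relation, so
+ *             conflicts with those reads are still detected afterwards.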
+ */
+void
+TransferPredicateLocksToHeapRelation(Relation relation)
+{
+       DropAllPredicateLocksFromTable(relation, true);
+}
+
 
 /*
  *             PredicateLockPageSplit
@@ -2614,14 +3115,27 @@ exit:
  * which hold the locks getting in and noticing.
  */
 void
-PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno,
-                                          const BlockNumber newblkno)
+PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
+                                          BlockNumber newblkno)
 {
        PREDICATELOCKTARGETTAG oldtargettag;
        PREDICATELOCKTARGETTAG newtargettag;
        bool            success;
 
-       if (SkipSplitTracking(relation))
+       /*
+        * Bail out quickly if there are no serializable transactions running.
+        *
+        * It's safe to do this check without taking any additional locks. Even if
+        * a serializable transaction starts concurrently, we know it can't take
+        * any SIREAD locks on the page being split because the caller is holding
+        * the associated buffer page lock. Memory reordering isn't an issue; the
+        * memory barrier in the LWLock acquisition guarantees that this read
+        * occurs while the buffer page lock is held.
+        */
+       if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+               return;
+
+       if (!PredicateLockingNeededForRelation(relation))
                return;
 
        Assert(oldblkno != newblkno);
@@ -2662,10 +3176,10 @@ PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno,
                /*
                 * Move the locks to the parent. This shouldn't fail.
                 *
-                * Note that here we are removing locks held by other
-                * backends, leading to a possible inconsistency in their
-                * local lock hash table. This is OK because we're replacing
-                * it with a lock that covers the old one.
+                * Note that here we are removing locks held by other backends,
+                * leading to a possible inconsistency in their local lock hash table.
+                * This is OK because we're replacing it with a lock that covers the
+                * old one.
                 */
                success = TransferPredicateLocksToNewTarget(oldtargettag,
                                                                                                        newtargettag,
@@ -2686,26 +3200,26 @@ PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno,
  * occurs in the context of another transaction isolation level.
  */
 void
-PredicateLockPageCombine(const Relation relation, const BlockNumber oldblkno,
-                                                const BlockNumber newblkno)
+PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
+                                                BlockNumber newblkno)
 {
        /*
-        * Page combines differ from page splits in that we ought to be
-        * able to remove the locks on the old page after transferring
-        * them to the new page, instead of duplicating them. However,
-        * because we can't edit other backends' local lock tables,
-        * removing the old lock would leave them with an entry in their
-        * LocalPredicateLockHash for a lock they're not holding, which
-        * isn't acceptable. So we wind up having to do the same work as a
-        * page split, acquiring a lock on the new page and keeping the old
-        * page locked too. That can lead to some false positives, but
-        * should be rare in practice.
+        * Page combines differ from page splits in that we ought to be able to
+        * remove the locks on the old page after transferring them to the new
+        * page, instead of duplicating them. However, because we can't edit other
+        * backends' local lock tables, removing the old lock would leave them
+        * with an entry in their LocalPredicateLockHash for a lock they're not
+        * holding, which isn't acceptable. So we wind up having to do the same
+        * work as a page split, acquiring a lock on the new page and keeping the
+        * old page locked too. That can lead to some false positives, but should
+        * be rare in practice.
         */
        PredicateLockPageSplit(relation, oldblkno, newblkno);
 }
 
 /*
- * Walk the hash table and find the new xmin.
+ * Walk the list of in-progress serializable transactions and find the new
+ * xmin.
  */
 static void
 SetNewSxactGlobalXmin(void)
@@ -2754,11 +3268,19 @@ SetNewSxactGlobalXmin(void)
  * up in some relatively timely fashion.
  *
  * If this transaction is committing and is holding any predicate locks,
- * it must be added to a list of completed serializable transaction still
+ * it must be added to a list of completed serializable transactions still
  * holding locks.
+ *
+ * If isReadOnlySafe is true, then predicate locks are being released before
+ * the end of the transaction because MySerializableXact has been determined
+ * to be RO_SAFE.  In non-parallel mode we can release it completely, but in
+ * parallel mode we partially release the SERIALIZABLEXACT and keep it
+ * around until the end of the transaction, allowing each backend to clear its
+ * MySerializableXact variable and benefit from the optimization in its own
+ * time.
  */
 void
-ReleasePredicateLocks(const bool isCommit)
+ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
 {
        bool            needToClear;
        RWConflict      conflict,
@@ -2769,7 +3291,7 @@ ReleasePredicateLocks(const bool isCommit)
        /*
         * We can't trust XactReadOnly here, because a transaction which started
         * as READ WRITE can show as READ ONLY later, e.g., within
-        * substransactions.  We want to flag a transaction as READ ONLY if it
+        * subtransactions.  We want to flag a transaction as READ ONLY if it
         * commits without writing so that de facto READ ONLY transactions get the
         * benefit of some RO optimizations, so we will use this local variable to
         * get some cleanup logic right which is based on whether the transaction
@@ -2777,54 +3299,155 @@ ReleasePredicateLocks(const bool isCommit)
         */
        bool            topLevelIsDeclaredReadOnly;
 
+       /* We can't be both committing and releasing early due to RO_SAFE. */
+       Assert(!(isCommit && isReadOnlySafe));
+
+       /* Are we at the end of a transaction, that is, a commit or abort? */
+       if (!isReadOnlySafe)
+       {
+               /*
+                * Parallel workers mustn't release predicate locks at the end of
+                * their transaction.  The leader will do that at the end of its
+                * transaction.
+                */
+               if (IsParallelWorker())
+               {
+                       ReleasePredicateLocksLocal();
+                       return;
+               }
+
+               /*
+                * By the time the leader in a parallel query reaches end of
+                * transaction, it has waited for all workers to exit.
+                */
+               Assert(!ParallelContextActive());
+
+               /*
+                * If the leader in a parallel query earlier stashed a partially
+                * released SERIALIZABLEXACT for final clean-up at end of transaction
+                * (because workers might still have been accessing it), then it's
+                * time to restore it.
+                */
+               if (SavedSerializableXact != InvalidSerializableXact)
+               {
+                       Assert(MySerializableXact == InvalidSerializableXact);
+                       MySerializableXact = SavedSerializableXact;
+                       SavedSerializableXact = InvalidSerializableXact;
+                       Assert(SxactIsPartiallyReleased(MySerializableXact));
+               }
+       }
+
        if (MySerializableXact == InvalidSerializableXact)
        {
                Assert(LocalPredicateLockHash == NULL);
                return;
        }
 
+       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+       /*
+        * If the transaction is committing, but it has already been partially
+        * released, then treat this as a rollback; it was already marked as
+        * rolled back when the partial release happened.
+        */
+       if (isCommit && SxactIsPartiallyReleased(MySerializableXact))
+               isCommit = false;
+
+       /*
+        * If we're called in the middle of a transaction because we discovered
+        * that the SXACT_FLAG_RO_SAFE flag was set, then we'll partially release
+        * it (that is, release the predicate locks and conflicts, but not the
+        * SERIALIZABLEXACT itself) if we're the first backend to have noticed.
+        */
+       if (isReadOnlySafe && IsInParallelMode())
+       {
+               /*
+                * The leader needs to stash a pointer to it, so that it can
+                * completely release it at end-of-transaction.
+                */
+               if (!IsParallelWorker())
+                       SavedSerializableXact = MySerializableXact;
+
+               /*
+                * The first backend to reach this condition will partially release
+                * the SERIALIZABLEXACT.  All others will just clear their
+                * backend-local state so that they stop doing SSI checks for the rest
+                * of the transaction.
+                */
+               if (SxactIsPartiallyReleased(MySerializableXact))
+               {
+                       LWLockRelease(SerializableXactHashLock);
+                       ReleasePredicateLocksLocal();
+                       return;
+               }
+               else
+               {
+                       MySerializableXact->flags |= SXACT_FLAG_PARTIALLY_RELEASED;
+                       /* ... and proceed to perform the partial release below. */
+               }
+       }
        Assert(!isCommit || SxactIsPrepared(MySerializableXact));
-       Assert(!SxactIsRolledBack(MySerializableXact));
+       Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
        Assert(!SxactIsCommitted(MySerializableXact));
+       Assert(SxactIsPartiallyReleased(MySerializableXact)
+                  || !SxactIsRolledBack(MySerializableXact));
 
        /* may not be serializable during COMMIT/ROLLBACK PREPARED */
-       if (MySerializableXact->pid != 0)
-               Assert(IsolationIsSerializable());
+       Assert(MySerializableXact->pid == 0 || IsolationIsSerializable());
 
        /* We'd better not already be on the cleanup list. */
-       Assert(!SxactIsOnFinishedList((SERIALIZABLEXACT *) MySerializableXact));
+       Assert(!SxactIsOnFinishedList(MySerializableXact));
 
        topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
 
-       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
-
        /*
-        * We don't hold a lock here, assuming that TransactionId is atomic!
+        * We don't hold XidGenLock lock here, assuming that TransactionId is
+        * atomic!
         *
         * If this value is changing, we don't care that much whether we get the
         * old or new value -- it is just used to determine how far
-        * GlobalSerizableXmin must advance before this transaction can be cleaned
-        * fully cleaned up.  The worst that could happen is we wait for ome more
+        * GlobalSerializableXmin must advance before this transaction can be
+        * fully cleaned up.  The worst that could happen is we wait for one more
         * transaction to complete before freeing some RAM; correctness of visible
         * behavior is not affected.
         */
-       MySerializableXact->finishedBefore = ShmemVariableCache->nextXid;
+       MySerializableXact->finishedBefore = XidFromFullTransactionId(ShmemVariableCache->nextFullXid);
 
        /*
-        * If it's not a commit it's a rollback, and we can clear our locks
-        * immediately.
+        * If it's not a commit it's either a rollback or a read-only transaction
+        * flagged SXACT_FLAG_RO_SAFE, and we can clear our locks immediately.
         */
        if (isCommit)
        {
                MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
                MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
                /* Recognize implicit read-only transaction (commit without write). */
-               if (!(MySerializableXact->flags & SXACT_FLAG_DID_WRITE))
+               if (!MyXactDidWrite)
                        MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
        }
        else
        {
+               /*
+                * The DOOMED flag indicates that we intend to roll back this
+                * transaction and so it should not cause serialization failures for
+                * other transactions that conflict with it. Note that this flag might
+                * already be set, if another backend marked this transaction for
+                * abort.
+                *
+                * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
+                * has been called, and so the SerializableXact is eligible for
+                * cleanup. This means it should not be considered when calculating
+                * SxactGlobalXmin.
+                */
+               MySerializableXact->flags |= SXACT_FLAG_DOOMED;
                MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
+
+               /*
+                * If the transaction was previously prepared, but is now failing due
+                * to a ROLLBACK PREPARED or (hopefully very rare) error after the
+                * prepare, clear the prepared flag.  This simplifies conflict
+                * checking.
+                */
+               MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
        }
 
        if (!topLevelIsDeclaredReadOnly)
@@ -2852,13 +3475,13 @@ ReleasePredicateLocks(const bool isCommit)
                 * opposed to 'outLink' for the r/w xacts.
                 */
                possibleUnsafeConflict = (RWConflict)
-                       SHMQueueNext((SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
-                                 (SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
+                       SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
+                                                &MySerializableXact->possibleUnsafeConflicts,
                                                 offsetof(RWConflictData, inLink));
                while (possibleUnsafeConflict)
                {
                        nextConflict = (RWConflict)
-                               SHMQueueNext((SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
+                               SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
                                                         &possibleUnsafeConflict->inLink,
                                                         offsetof(RWConflictData, inLink));
 
@@ -2876,24 +3499,28 @@ ReleasePredicateLocks(const bool isCommit)
                && !SxactIsReadOnly(MySerializableXact)
                && SxactHasSummaryConflictOut(MySerializableXact))
        {
+               /*
+                * we don't know which old committed transaction we conflicted with,
+                * so be conservative and use FirstNormalSerCommitSeqNo here
+                */
                MySerializableXact->SeqNo.earliestOutConflictCommit =
                        FirstNormalSerCommitSeqNo;
                MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
        }
 
        /*
-        * Release all outConflicts to committed transactions.  If we're rolling
+        * Release all outConflicts to committed transactions.  If we're rolling
         * back, clear them all.  Set SXACT_FLAG_CONFLICT_OUT if any point to
         * previously committed transactions.
         */
        conflict = (RWConflict)
-               SHMQueueNext((SHM_QUEUE *) &MySerializableXact->outConflicts,
-                                        (SHM_QUEUE *) &MySerializableXact->outConflicts,
+               SHMQueueNext(&MySerializableXact->outConflicts,
+                                        &MySerializableXact->outConflicts,
                                         offsetof(RWConflictData, outLink));
        while (conflict)
        {
                nextConflict = (RWConflict)
-                       SHMQueueNext((SHM_QUEUE *) &MySerializableXact->outConflicts,
+                       SHMQueueNext(&MySerializableXact->outConflicts,
                                                 &conflict->outLink,
                                                 offsetof(RWConflictData, outLink));
 
@@ -2902,8 +3529,8 @@ ReleasePredicateLocks(const bool isCommit)
                        && SxactIsCommitted(conflict->sxactIn))
                {
                        if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
-                               || conflict->sxactIn->commitSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
-                               MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->commitSeqNo;
+                               || conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
+                               MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
                        MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
                }
 
@@ -2920,13 +3547,13 @@ ReleasePredicateLocks(const bool isCommit)
         * we're rolling back, clear them all.
         */
        conflict = (RWConflict)
-               SHMQueueNext((SHM_QUEUE *) &MySerializableXact->inConflicts,
-                                        (SHM_QUEUE *) &MySerializableXact->inConflicts,
+               SHMQueueNext(&MySerializableXact->inConflicts,
+                                        &MySerializableXact->inConflicts,
                                         offsetof(RWConflictData, inLink));
        while (conflict)
        {
                nextConflict = (RWConflict)
-                       SHMQueueNext((SHM_QUEUE *) &MySerializableXact->inConflicts,
+                       SHMQueueNext(&MySerializableXact->inConflicts,
                                                 &conflict->inLink,
                                                 offsetof(RWConflictData, inLink));
 
@@ -2947,13 +3574,13 @@ ReleasePredicateLocks(const bool isCommit)
                 * up if they are known safe or known unsafe.
                 */
                possibleUnsafeConflict = (RWConflict)
-                       SHMQueueNext((SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
-                                 (SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
+                       SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
+                                                &MySerializableXact->possibleUnsafeConflicts,
                                                 offsetof(RWConflictData, outLink));
                while (possibleUnsafeConflict)
                {
                        nextConflict = (RWConflict)
-                               SHMQueueNext((SHM_QUEUE *) &MySerializableXact->possibleUnsafeConflicts,
+                               SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
                                                         &possibleUnsafeConflict->outLink,
                                                         offsetof(RWConflictData, outLink));
 
@@ -2963,7 +3590,7 @@ ReleasePredicateLocks(const bool isCommit)
 
                        /* Mark conflicted if necessary. */
                        if (isCommit
-                               && (MySerializableXact->flags & SXACT_FLAG_DID_WRITE)
+                               && MyXactDidWrite
                                && SxactHasConflictOut(MySerializableXact)
                                && (MySerializableXact->SeqNo.earliestOutConflictCommit
                                        <= roXact->SeqNo.lastCommitBeforeSnapshot))
@@ -3024,18 +3651,32 @@ ReleasePredicateLocks(const bool isCommit)
        /* Add this to the list of transactions to check for later cleanup. */
        if (isCommit)
                SHMQueueInsertBefore(FinishedSerializableTransactions,
-                                                 (SHM_QUEUE *) &(MySerializableXact->finishedLink));
+                                                        &MySerializableXact->finishedLink);
 
+       /*
+        * If we're releasing a RO_SAFE transaction in parallel mode, we'll only
+        * partially release it.  That's necessary because other backends may have
+        * a reference to it.  The leader will release the SERIALIZABLEXACT itself
+        * at the end of the transaction after workers have stopped running.
+        */
        if (!isCommit)
-               ReleaseOneSerializableXact((SERIALIZABLEXACT *) MySerializableXact,
-                                                                  false, false);
+               ReleaseOneSerializableXact(MySerializableXact,
+                                                                  isReadOnlySafe && IsInParallelMode(),
+                                                                  false);
 
        LWLockRelease(SerializableFinishedListLock);
 
        if (needToClear)
                ClearOldPredicateLocks();
 
+       ReleasePredicateLocksLocal();
+}
+
+static void
+ReleasePredicateLocksLocal(void)
+{
        MySerializableXact = InvalidSerializableXact;
+       MyXactDidWrite = false;
 
        /* Delete per-transaction lock table */
        if (LocalPredicateLockHash != NULL)
@@ -3046,31 +3687,8 @@ ReleasePredicateLocks(const bool isCommit)
 }
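
As a reading aid, the new two-flag contract described in the header comment above can be restated as a sketch. The wrapper below is hypothetical and not part of the patch; only the ReleasePredicateLocks() signature and the assertion it documents are taken from the code above:

    /* Hypothetical wrapper illustrating the new calling convention. */
    static void
    example_release(bool committing, bool noticed_ro_safe)
    {
        /* The function itself asserts that these are mutually exclusive. */
        Assert(!(committing && noticed_ro_safe));

        if (noticed_ro_safe)
            ReleasePredicateLocks(false, true);       /* early; partial in parallel mode */
        else
            ReleasePredicateLocks(committing, false); /* end of transaction: commit or abort */
    }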
 
 /*
- * ReleasePredicateLocksIfROSafe
- *             Check if the current transaction is read only and operating on
- *             a safe snapshot. If so, release predicate locks and return
- *             true.
- *
- * A transaction is flagged as RO_SAFE if all concurrent R/W
- * transactions commit without having conflicts out to an earlier
- * snapshot, thus ensuring that no conflicts are possible for this
- * transaction. Thus, we call this function as part of the
- * SkipSerialization check on all public interface methods.
- */
-static bool
-ReleasePredicateLocksIfROSafe(void)
-{
-       if (SxactIsROSafe(MySerializableXact))
-       {
-               ReleasePredicateLocks(false);
-               return true;
-       }
-       else
-               return false;
-}
-
-/*
- * Clear old predicate locks.
+ * Clear old predicate locks belonging to committed transactions that are no
+ * longer interesting to any in-progress transaction.
  */
 static void
 ClearOldPredicateLocks(void)
@@ -3078,6 +3696,10 @@ ClearOldPredicateLocks(void)
        SERIALIZABLEXACT *finishedSxact;
        PREDICATELOCK *predlock;
 
+       /*
+        * Loop through finished transactions. They are in commit order, so we can
+        * stop as soon as we find one that's still interesting.
+        */
        LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
        finishedSxact = (SERIALIZABLEXACT *)
                SHMQueueNext(FinishedSerializableTransactions,
@@ -3096,23 +3718,49 @@ ClearOldPredicateLocks(void)
                        || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
                                                                                         PredXact->SxactGlobalXmin))
                {
+                       /*
+                        * This transaction committed before any in-progress transaction
+                        * took its snapshot. It's no longer interesting.
+                        */
                        LWLockRelease(SerializableXactHashLock);
                        SHMQueueDelete(&(finishedSxact->finishedLink));
                        ReleaseOneSerializableXact(finishedSxact, false, false);
                        LWLockAcquire(SerializableXactHashLock, LW_SHARED);
                }
                else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
-                  && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
+                                && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
                {
+                       /*
+                        * Any active transactions that took their snapshot before this
+                        * transaction committed are read-only, so we can clear part of
+                        * its state.
+                        */
                        LWLockRelease(SerializableXactHashLock);
-                       ReleaseOneSerializableXact(finishedSxact,
-                                                                          !SxactIsReadOnly(finishedSxact),
-                                                                          false);
+
+                       if (SxactIsReadOnly(finishedSxact))
+                       {
+                               /* A read-only transaction can be removed entirely */
+                               SHMQueueDelete(&(finishedSxact->finishedLink));
+                               ReleaseOneSerializableXact(finishedSxact, false, false);
+                       }
+                       else
+                       {
+                               /*
+                                * A read-write transaction can only be partially cleared. We
+                                * need to keep the SERIALIZABLEXACT but can release the
+                                * SIREAD locks and conflicts in.
+                                */
+                               ReleaseOneSerializableXact(finishedSxact, true, false);
+                       }
+
                        PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
                        LWLockAcquire(SerializableXactHashLock, LW_SHARED);
                }
                else
+               {
+                       /* Still interesting. */
                        break;
+               }
                finishedSxact = nextSxact;
        }
        LWLockRelease(SerializableXactHashLock);
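
The three-way decision made for each finished transaction is easier to see outside diff form. The following is an illustrative restatement only, with plain integer comparisons standing in for the wraparound-aware TransactionId macros and with invented names; it is not PostgreSQL code:

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { CLEANUP_FULL, CLEANUP_PARTIAL, CLEANUP_STOP } CleanupAction;

    /* Sketch of the loop body above: decide how far to clean up one sxact. */
    static CleanupAction
    example_cleanup_action(uint32_t finishedBefore, uint32_t sxactGlobalXmin,
                           uint64_t commitSeqNo, uint64_t canPartialClearThrough,
                           uint64_t havePartialClearedThrough, bool isReadOnly)
    {
        if (sxactGlobalXmin == 0 || finishedBefore <= sxactGlobalXmin)
            return CLEANUP_FULL;        /* finished before any live snapshot */

        if (commitSeqNo > havePartialClearedThrough &&
            commitSeqNo <= canPartialClearThrough)
            return isReadOnly ? CLEANUP_FULL : CLEANUP_PARTIAL;

        return CLEANUP_STOP;            /* still interesting; stop scanning */
    }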
@@ -3120,11 +3768,11 @@ ClearOldPredicateLocks(void)
        /*
         * Loop through predicate locks on dummy transaction for summarized data.
         */
+       LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
        predlock = (PREDICATELOCK *)
                SHMQueueNext(&OldCommittedSxact->predicateLocks,
                                         &OldCommittedSxact->predicateLocks,
                                         offsetof(PREDICATELOCK, xactLink));
-       LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
        while (predlock)
        {
                PREDICATELOCK *nextpredlock;
@@ -3136,20 +3784,24 @@ ClearOldPredicateLocks(void)
                                                 offsetof(PREDICATELOCK, xactLink));
 
                LWLockAcquire(SerializableXactHashLock, LW_SHARED);
+               Assert(predlock->commitSeqNo != 0);
+               Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
                canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
                LWLockRelease(SerializableXactHashLock);
 
+               /*
+                * If this lock originally belonged to an old enough transaction, we
+                * can release it.
+                */
                if (canDoPartialCleanup)
                {
                        PREDICATELOCKTAG tag;
-                       SHM_QUEUE  *targetLink;
                        PREDICATELOCKTARGET *target;
                        PREDICATELOCKTARGETTAG targettag;
                        uint32          targettaghash;
-                       LWLockId        partitionLock;
+                       LWLock     *partitionLock;
 
                        tag = predlock->tag;
-                       targetLink = &(predlock->targetLink);
                        target = tag.myTarget;
                        targettag = target->tag;
                        targettaghash = PredicateLockTargetTagHashCode(&targettag);
@@ -3157,12 +3809,12 @@ ClearOldPredicateLocks(void)
 
                        LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 
-                       SHMQueueDelete(targetLink);
+                       SHMQueueDelete(&(predlock->targetLink));
                        SHMQueueDelete(&(predlock->xactLink));
 
                        hash_search_with_hash_value(PredicateLockHash, &tag,
-                                                               PredicateLockHashCodeFromTargetHashCode(&tag,
-                                                                                                                         targettaghash),
+                                                                               PredicateLockHashCodeFromTargetHashCode(&tag,
+                                                                                                                                                               targettaghash),
                                                                                HASH_REMOVE, NULL);
                        RemoveTargetIfNoLongerUsed(target, targettaghash);
 
@@ -3185,12 +3837,12 @@ ClearOldPredicateLocks(void)
  * delete the transaction.
  *
  * When the partial flag is set, we can release all predicate locks and
- * out-conflict information -- we've established that there are no longer
+ * in-conflict information -- we've established that there are no longer
  * any overlapping read write transactions for which this transaction could
- * matter.
+ * matter -- but keep the transaction entry itself and any outConflicts.
  *
  * When the summarize flag is set, we've run short of room for sxact data
- * and must summarize to the SLRU.     Predicate locks are transferred to a
+ * and must summarize to the SLRU.  Predicate locks are transferred to a
  * dummy "old" transaction, with duplicate locks on a single target
  * collapsing to a single lock with the "latest" commitSeqNo from among
  * the conflicting locks.
@@ -3206,9 +3858,16 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
 
        Assert(sxact != NULL);
        Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
+       Assert(partial || !SxactIsOnFinishedList(sxact));
        Assert(LWLockHeldByMe(SerializableFinishedListLock));
 
+       /*
+        * First release all the predicate locks held by this xact (or transfer
+        * them to OldCommittedSxact if summarize is true)
+        */
        LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
+       if (IsInParallelMode())
+               LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
        predlock = (PREDICATELOCK *)
                SHMQueueNext(&(sxact->predicateLocks),
                                         &(sxact->predicateLocks),
@@ -3221,7 +3880,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                PREDICATELOCKTARGET *target;
                PREDICATELOCKTARGETTAG targettag;
                uint32          targettaghash;
-               LWLockId        partitionLock;
+               LWLock     *partitionLock;
 
                nextpredlock = (PREDICATELOCK *)
                        SHMQueueNext(&(sxact->predicateLocks),
@@ -3240,8 +3899,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                SHMQueueDelete(targetLink);
 
                hash_search_with_hash_value(PredicateLockHash, &tag,
-                                                               PredicateLockHashCodeFromTargetHashCode(&tag,
-                                                                                                                         targettaghash),
+                                                                       PredicateLockHashCodeFromTargetHashCode(&tag,
+                                                                                                                                                       targettaghash),
                                                                        HASH_REMOVE, NULL);
                if (summarize)
                {
@@ -3250,9 +3909,9 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                        /* Fold into dummy transaction list. */
                        tag.myXact = OldCommittedSxact;
                        predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
-                                                               PredicateLockHashCodeFromTargetHashCode(&tag,
-                                                                                                                         targettaghash),
-                                                                                                  HASH_ENTER, &found);
+                                                                                                  PredicateLockHashCodeFromTargetHashCode(&tag,
+                                                                                                                                                                                  targettaghash),
+                                                                                                  HASH_ENTER_NULL, &found);
                        if (!predlock)
                                ereport(ERROR,
                                                (errcode(ERRCODE_OUT_OF_MEMORY),
@@ -3260,6 +3919,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                                                 errhint("You might need to increase max_pred_locks_per_transaction.")));
                        if (found)
                        {
+                               Assert(predlock->commitSeqNo != 0);
+                               Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
                                if (predlock->commitSeqNo < sxact->commitSeqNo)
                                        predlock->commitSeqNo = sxact->commitSeqNo;
                        }
@@ -3286,14 +3947,16 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
         */
        SHMQueueInit(&sxact->predicateLocks);
 
+       if (IsInParallelMode())
+               LWLockRelease(&sxact->predicateLockListLock);
        LWLockRelease(SerializablePredicateLockListLock);
 
        sxidtag.xid = sxact->topXid;
        LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
+       /* Release all outConflicts (unless 'partial' is true) */
        if (!partial)
        {
-               /* Release all outConflicts. */
                conflict = (RWConflict)
                        SHMQueueNext(&sxact->outConflicts,
                                                 &sxact->outConflicts,
@@ -3328,9 +3991,9 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                conflict = nextConflict;
        }
 
+       /* Finally, get rid of the xid and the record of the transaction itself. */
        if (!partial)
        {
-               /* Get rid of the xid and the record of the transaction itself. */
                if (sxidtag.xid != InvalidTransactionId)
                        hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
                ReleasePredXact(sxact);
@@ -3376,7 +4039,7 @@ XidIsConcurrent(TransactionId xid)
 /*
  * CheckForSerializableConflictOut
  *             We are reading a tuple which has been modified.  If it is visible to
- *             us but has been deleted, that indicates a rw-conflict out.      If it's
+ *             us but has been deleted, that indicates a rw-conflict out.  If it's
  *             not visible and was created by a concurrent (overlapping)
  *             serializable transaction, that is also a rw-conflict out,
  *
@@ -3391,8 +4054,9 @@ XidIsConcurrent(TransactionId xid)
  * currently no known reason to call this function from an index AM.
  */
 void
-CheckForSerializableConflictOut(const bool visible, const Relation relation,
-                                                               const HeapTuple tuple, const Buffer buffer)
+CheckForSerializableConflictOut(bool visible, Relation relation,
+                                                               HeapTuple tuple, Buffer buffer,
+                                                               Snapshot snapshot)
 {
        TransactionId xid;
        SERIALIZABLEXIDTAG sxidtag;
@@ -3400,15 +4064,16 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
        SERIALIZABLEXACT *sxact;
        HTSV_Result htsvResult;
 
-       if (SkipSerialization(relation))
+       if (!SerializationNeededForRead(relation, snapshot))
                return;
 
-       if (SxactIsMarkedForDeath(MySerializableXact))
+       /* Check if someone else has already decided that we need to die */
+       if (SxactIsDoomed(MySerializableXact))
        {
                ereport(ERROR,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                errdetail("Cancelled on identification as a pivot, during conflict out checking."),
+                                errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
                                 errhint("The transaction might succeed if retried.")));
        }
 
@@ -3419,7 +4084,7 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
         * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
         * is going on with it.
         */
-       htsvResult = HeapTupleSatisfiesVacuum(tuple->t_data, TransactionXmin, buffer);
+       htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
        switch (htsvResult)
        {
                case HEAPTUPLE_LIVE:
@@ -3430,10 +4095,10 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
                case HEAPTUPLE_RECENTLY_DEAD:
                        if (!visible)
                                return;
-                       xid = HeapTupleHeaderGetXmax(tuple->t_data);
+                       xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
                        break;
                case HEAPTUPLE_DELETE_IN_PROGRESS:
-                       xid = HeapTupleHeaderGetXmax(tuple->t_data);
+                       xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
                        break;
                case HEAPTUPLE_INSERT_IN_PROGRESS:
                        xid = HeapTupleHeaderGetXmin(tuple->t_data);
@@ -3461,7 +4126,7 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
        Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
 
        /*
-        * Find top level xid.  Bail out if xid is too early to be a conflict, or
+        * Find top level xid.  Bail out if xid is too early to be a conflict, or
         * if it's our own xid.
         */
        if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
@@ -3497,16 +4162,16 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
                                ereport(ERROR,
                                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                                 errmsg("could not serialize access due to read/write dependencies among transactions"),
-                               errdetail("Cancelled on conflict out to old pivot %u.", xid),
-                                         errhint("The transaction might succeed if retried.")));
+                                                errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
+                                                errhint("The transaction might succeed if retried.")));
 
                        if (SxactHasSummaryConflictIn(MySerializableXact)
-                       || !SHMQueueEmpty((SHM_QUEUE *) &MySerializableXact->inConflicts))
+                               || !SHMQueueEmpty(&MySerializableXact->inConflicts))
                                ereport(ERROR,
                                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                                 errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                                errdetail("Cancelled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
-                                         errhint("The transaction might succeed if retried.")));
+                                                errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
+                                                errhint("The transaction might succeed if retried.")));
 
                        MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
                }
@@ -3517,26 +4182,24 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
        }
        sxact = sxid->myXact;
        Assert(TransactionIdEquals(sxact->topXid, xid));
-       if (sxact == MySerializableXact
-               || SxactIsRolledBack(sxact)
-               || SxactIsMarkedForDeath(sxact))
+       if (sxact == MySerializableXact || SxactIsDoomed(sxact))
        {
-               /* We can't conflict with our own transaction or one rolled back. */
+               /* Can't conflict with ourself or a transaction that will roll back. */
                LWLockRelease(SerializableXactHashLock);
                return;
        }
 
        /*
         * We have a conflict out to a transaction which has a conflict out to a
-        * summarized transaction.      That summarized transaction must have
+        * summarized transaction.  That summarized transaction must have
         * committed first, and we can't tell when it committed in relation to our
-        * snapshot acquisition, so something needs to be cancelled.
+        * snapshot acquisition, so something needs to be canceled.
         */
        if (SxactHasSummaryConflictOut(sxact))
        {
                if (!SxactIsPrepared(sxact))
                {
-                       sxact->flags |= SXACT_FLAG_MARKED_FOR_DEATH;
+                       sxact->flags |= SXACT_FLAG_DOOMED;
                        LWLockRelease(SerializableXactHashLock);
                        return;
                }
@@ -3546,7 +4209,7 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
                        ereport(ERROR,
                                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                         errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                        errdetail("Cancelled on conflict out to old pivot."),
+                                        errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
                                         errhint("The transaction might succeed if retried.")));
                }
        }
@@ -3562,7 +4225,7 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
                && (!SxactHasConflictOut(sxact)
                        || MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
        {
-               /* Read-only transaction will appear to run first.      No conflict. */
+               /* Read-only transaction will appear to run first.  No conflict. */
                LWLockRelease(SerializableXactHashLock);
                return;
        }
@@ -3574,7 +4237,7 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
                return;
        }
 
-       if (RWConflictExists((SERIALIZABLEXACT *) MySerializableXact, sxact))
+       if (RWConflictExists(MySerializableXact, sxact))
        {
                /* We don't want duplicate conflict records in the list. */
                LWLockRelease(SerializableXactHashLock);
@@ -3585,20 +4248,23 @@ CheckForSerializableConflictOut(const bool visible, const Relation relation,
         * Flag the conflict.  But first, if this conflict creates a dangerous
         * structure, ereport an error.
         */
-       FlagRWConflict((SERIALIZABLEXACT *) MySerializableXact, sxact);
+       FlagRWConflict(MySerializableXact, sxact);
        LWLockRelease(SerializableXactHashLock);
 }
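
The signature change above adds the snapshot used for the read. A minimal sketch of how a heap read path would now invoke it; the surrounding helper is hypothetical, and only the CheckForSerializableConflictOut() parameter list comes from this hunk:

    /* Hypothetical call site reflecting the new five-argument form. */
    static void
    example_after_tuple_fetch(Relation rel, HeapTuple tuple, Buffer buf,
                              Snapshot snapshot, bool visible)
    {
        CheckForSerializableConflictOut(visible, rel, tuple, buf, snapshot);
    }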
 
 /*
- * Check a particular target for rw-dependency conflict in.
+ * Check a particular target for rw-dependency conflict in. A subroutine of
+ * CheckForSerializableConflictIn().
  */
 static void
 CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
 {
        uint32          targettaghash;
-       LWLockId        partitionLock;
+       LWLock     *partitionLock;
        PREDICATELOCKTARGET *target;
        PREDICATELOCK *predlock;
+       PREDICATELOCK *mypredlock = NULL;
+       PREDICATELOCKTAG mypredlocktag;
 
        Assert(MySerializableXact != InvalidSerializableXact);
 
@@ -3644,134 +4310,43 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
                if (sxact == MySerializableXact)
                {
                        /*
-                        * If we're getting a write lock on the tuple, we don't need a
-                        * predicate (SIREAD) lock. At this point our transaction already
-                        * has an ExclusiveRowLock on the relation, so we are OK to drop
-                        * the predicate lock on the tuple, if found, without fearing that
-                        * another write against the tuple will occur before the MVCC
-                        * information makes it to the buffer.
+                        * If we're getting a write lock on a tuple, we don't need a
+                        * predicate (SIREAD) lock on the same tuple. We can safely remove
+                        * our SIREAD lock, but we'll defer doing so until after the loop
+                        * because that requires upgrading to an exclusive partition lock.
+                        *
+                        * We can't use this optimization within a subtransaction because
+                        * the subtransaction could roll back, and we would be left
+                        * without any lock at the top level.
                         */
-                       if (GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
+                       if (!IsSubTransaction()
+                               && GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
                        {
-                               uint32          predlockhashcode;
-                               PREDICATELOCKTARGET *rmtarget = NULL;
-                               PREDICATELOCK *rmpredlock;
-                               LOCALPREDICATELOCK *locallock,
-                                                  *rmlocallock;
-
-                               /*
-                                * This is a tuple on which we have a tuple predicate lock. We
-                                * only have shared LW locks now; release those, and get
-                                * exclusive locks only while we modify things.
-                                */
-                               LWLockRelease(SerializableXactHashLock);
-                               LWLockRelease(partitionLock);
-                               LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
-                               LWLockAcquire(partitionLock, LW_EXCLUSIVE);
-                               LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
-
-                               /*
-                                * Remove the predicate lock from shared memory, if it wasn't
-                                * removed while the locks were released.  One way that could
-                                * happen is from autovacuum cleaning up an index.
-                                */
-                               predlockhashcode = PredicateLockHashCodeFromTargetHashCode
-                                       (&(predlock->tag), targettaghash);
-                               rmpredlock = (PREDICATELOCK *)
-                                       hash_search_with_hash_value(PredicateLockHash,
-                                                                                               &(predlock->tag),
-                                                                                               predlockhashcode,
-                                                                                               HASH_FIND, NULL);
-                               if (rmpredlock)
-                               {
-                                       Assert(rmpredlock == predlock);
-
-                                       SHMQueueDelete(predlocktargetlink);
-                                       SHMQueueDelete(&(predlock->xactLink));
-
-                                       rmpredlock = (PREDICATELOCK *)
-                                               hash_search_with_hash_value(PredicateLockHash,
-                                                                                                       &(predlock->tag),
-                                                                                                       predlockhashcode,
-                                                                                                       HASH_REMOVE, NULL);
-                                       Assert(rmpredlock == predlock);
-
-                                       RemoveTargetIfNoLongerUsed(target, targettaghash);
-
-                                       LWLockRelease(SerializableXactHashLock);
-                                       LWLockRelease(partitionLock);
-                                       LWLockRelease(SerializablePredicateLockListLock);
-
-                                       locallock = (LOCALPREDICATELOCK *)
-                                               hash_search_with_hash_value(LocalPredicateLockHash,
-                                                                                                       targettag, targettaghash,
-                                                                                                       HASH_FIND, NULL);
-
-                                       /*
-                                        * Remove entry in local lock table if it exists and has
-                                        * no children. It's OK if it doesn't exist; that means
-                                        * the lock was transferred to a new target by a
-                                        * different backend.
-                                        */
-                                       if (locallock != NULL)
-                                       {
-                                               locallock->held = false;
-
-                                               if (locallock->childLocks == 0)
-                                               {
-                                                       rmlocallock = (LOCALPREDICATELOCK *)
-                                                               hash_search_with_hash_value(LocalPredicateLockHash,
-                                                                                                                       targettag, targettaghash,
-                                                                                                                       HASH_REMOVE, NULL);
-                                                       Assert(rmlocallock == locallock);
-                                               }
-                                       }
-
-                                       DecrementParentLocks(targettag);
-
-                                       /*
-                                        * If we've cleaned up the last of the predicate locks for
-                                        * the target, bail out before re-acquiring the locks.
-                                        */
-                                       if (rmtarget)
-                                               return;
-
-                                       /*
-                                        * The list has been altered.  Start over at the front.
-                                        */
-                                       LWLockAcquire(partitionLock, LW_SHARED);
-                                       nextpredlock = (PREDICATELOCK *)
-                                               SHMQueueNext(&(target->predicateLocks),
-                                                                        &(target->predicateLocks),
-                                                                        offsetof(PREDICATELOCK, targetLink));
-
-                                       LWLockAcquire(SerializableXactHashLock, LW_SHARED);
-                               }
-                               else
-                               {
-                                       /*
-                                        * The predicate lock was cleared while we were attempting
-                                        * to upgrade our lightweight locks. Revert to the shared
-                                        * locks.
-                                        */
-                                       LWLockRelease(SerializableXactHashLock);
-                                       LWLockRelease(partitionLock);
-                                       LWLockRelease(SerializablePredicateLockListLock);
-                                       LWLockAcquire(partitionLock, LW_SHARED);
-                                       LWLockAcquire(SerializableXactHashLock, LW_SHARED);
-                               }
+                               mypredlock = predlock;
+                               mypredlocktag = predlock->tag;
                        }
                }
-               else if (!SxactIsRolledBack(sxact)
+               else if (!SxactIsDoomed(sxact)
                                 && (!SxactIsCommitted(sxact)
                                         || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
                                                                                          sxact->finishedBefore))
-               && !RWConflictExists(sxact, (SERIALIZABLEXACT *) MySerializableXact))
+                                && !RWConflictExists(sxact, MySerializableXact))
                {
                        LWLockRelease(SerializableXactHashLock);
                        LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
-                       FlagRWConflict(sxact, (SERIALIZABLEXACT *) MySerializableXact);
+                       /*
+                        * Re-check after getting exclusive lock because the other
+                        * transaction may have flagged a conflict.
+                        */
+                       if (!SxactIsDoomed(sxact)
+                               && (!SxactIsCommitted(sxact)
+                                       || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
+                                                                                        sxact->finishedBefore))
+                               && !RWConflictExists(sxact, MySerializableXact))
+                       {
+                               FlagRWConflict(sxact, MySerializableXact);
+                       }
 
                        LWLockRelease(SerializableXactHashLock);
                        LWLockAcquire(SerializableXactHashLock, LW_SHARED);
@@ -3781,6 +4356,75 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
        }
        LWLockRelease(SerializableXactHashLock);
        LWLockRelease(partitionLock);
+
+       /*
+        * If we found one of our own SIREAD locks to remove, remove it now.
+        *
+        * At this point our transaction already has a RowExclusiveLock on the
+        * relation, so we are OK to drop the predicate lock on the tuple, if
+        * found, without fearing that another write against the tuple will occur
+        * before the MVCC information makes it to the buffer.
+        */
+       if (mypredlock != NULL)
+       {
+               uint32          predlockhashcode;
+               PREDICATELOCK *rmpredlock;
+
+               LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
+               if (IsInParallelMode())
+                       LWLockAcquire(&MySerializableXact->predicateLockListLock, LW_EXCLUSIVE);
+               LWLockAcquire(partitionLock, LW_EXCLUSIVE);
+               LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+               /*
+                * Remove the predicate lock from shared memory, if it wasn't removed
+                * while the locks were released.  One way that could happen is from
+                * autovacuum cleaning up an index.
+                */
+               predlockhashcode = PredicateLockHashCodeFromTargetHashCode
+                       (&mypredlocktag, targettaghash);
+               rmpredlock = (PREDICATELOCK *)
+                       hash_search_with_hash_value(PredicateLockHash,
+                                                                               &mypredlocktag,
+                                                                               predlockhashcode,
+                                                                               HASH_FIND, NULL);
+               if (rmpredlock != NULL)
+               {
+                       Assert(rmpredlock == mypredlock);
+
+                       SHMQueueDelete(&(mypredlock->targetLink));
+                       SHMQueueDelete(&(mypredlock->xactLink));
+
+                       rmpredlock = (PREDICATELOCK *)
+                               hash_search_with_hash_value(PredicateLockHash,
+                                                                                       &mypredlocktag,
+                                                                                       predlockhashcode,
+                                                                                       HASH_REMOVE, NULL);
+                       Assert(rmpredlock == mypredlock);
+
+                       RemoveTargetIfNoLongerUsed(target, targettaghash);
+               }
+
+               LWLockRelease(SerializableXactHashLock);
+               LWLockRelease(partitionLock);
+               if (IsInParallelMode())
+                       LWLockRelease(&MySerializableXact->predicateLockListLock);
+               LWLockRelease(SerializablePredicateLockListLock);
+
+               if (rmpredlock != NULL)
+               {
+                       /*
+                        * Remove entry in local lock table if it exists. It's OK if it
+                        * doesn't exist; that means the lock was transferred to a new
+                        * target by a different backend.
+                        */
+                       hash_search_with_hash_value(LocalPredicateLockHash,
+                                                                               targettag, targettaghash,
+                                                                               HASH_REMOVE, NULL);
+
+                       DecrementParentLocks(targettag);
+               }
+       }
 }
 
 /*
@@ -3795,22 +4439,27 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
  * tuple itself.
  */
 void
-CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple,
-                                                          const Buffer buffer)
+CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
+                                                          Buffer buffer)
 {
        PREDICATELOCKTARGETTAG targettag;
 
-       if (SkipSerialization(relation))
+       if (!SerializationNeededForWrite(relation))
                return;
 
-       if (SxactIsMarkedForDeath(MySerializableXact))
+       /* Check if someone else has already decided that we need to die */
+       if (SxactIsDoomed(MySerializableXact))
                ereport(ERROR,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                errdetail("Cancelled on identification as a pivot, during conflict in checking."),
+                                errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
                                 errhint("The transaction might succeed if retried.")));
 
-       MySerializableXact->flags |= SXACT_FLAG_DID_WRITE;
+       /*
+        * We're doing a write which might cause rw-conflicts now or later.
+        * Memorize that fact.
+        */
+       MyXactDidWrite = true;
 
        /*
         * It is important that we check for locks from the finest granularity to
@@ -3826,9 +4475,8 @@ CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple,
                SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
                                                                                 relation->rd_node.dbNode,
                                                                                 relation->rd_id,
-                                                ItemPointerGetBlockNumber(&(tuple->t_data->t_ctid)),
-                                          ItemPointerGetOffsetNumber(&(tuple->t_data->t_ctid)),
-                                          HeapTupleHeaderGetXmin(tuple->t_data));
+                                                                                ItemPointerGetBlockNumber(&(tuple->t_self)),
+                                                                                ItemPointerGetOffsetNumber(&(tuple->t_self)));
                CheckTargetForConflictsIn(&targettag);
        }
 
@@ -3847,6 +4495,119 @@ CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple,
        CheckTargetForConflictsIn(&targettag);
 }
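
For orientation, the function above checks targets from finest to coarsest granularity, and with this patch the tuple tag is built from t_self and no longer carries an xmin. A schematic sketch follows; the helper is hypothetical, and the page- and relation-level steps are shown here only to illustrate the ordering the comment above describes:

    /* Schematic only: finest-to-coarsest conflict-in checks for one tuple. */
    static void
    example_check_write_targets(Relation rel, HeapTuple tuple)
    {
        PREDICATELOCKTARGETTAG tag;

        SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
                                         rel->rd_node.dbNode,
                                         rel->rd_id,
                                         ItemPointerGetBlockNumber(&tuple->t_self),
                                         ItemPointerGetOffsetNumber(&tuple->t_self));
        CheckTargetForConflictsIn(&tag);

        SET_PREDICATELOCKTARGETTAG_PAGE(tag,
                                        rel->rd_node.dbNode,
                                        rel->rd_id,
                                        ItemPointerGetBlockNumber(&tuple->t_self));
        CheckTargetForConflictsIn(&tag);

        SET_PREDICATELOCKTARGETTAG_RELATION(tag,
                                            rel->rd_node.dbNode,
                                            rel->rd_id);
        CheckTargetForConflictsIn(&tag);
    }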
 
+/*
+ * CheckTableForSerializableConflictIn
+ *             The entire table is going through a DDL-style logical mass delete
+ *             like TRUNCATE or DROP TABLE.  If that causes a rw-conflict in from
+ *             another serializable transaction, take appropriate action.
+ *
+ * While these operations do not operate entirely within the bounds of
+ * snapshot isolation, they can occur inside a serializable transaction, and
+ * will logically occur after any reads which saw rows which were destroyed
+ * by these operations, so we do what we can to serialize properly under
+ * SSI.
+ *
+ * The relation passed in must be a heap relation. Any predicate lock of any
+ * granularity on the heap will cause an rw-conflict in to this transaction.
+ * Predicate locks on indexes do not matter because they only exist to guard
+ * against conflicting inserts into the index, and this is a mass *delete*.
+ * When a table is truncated or dropped, the index will also be truncated
+ * or dropped, and we'll deal with locks on the index when that happens.
+ *
+ * Dropping or truncating a table also needs to drop any existing predicate
+ * locks on heap tuples or pages, because they're about to go away. This
+ * should be done before altering the predicate locks because the transaction
+ * could be rolled back because of a conflict, in which case the lock changes
+ * are not needed. (At the moment, we don't actually bother to drop the
+ * existing locks on a dropped or truncated table. That might lead to some
+ * false positives, but it doesn't seem worth the trouble.)
+ */
+void
+CheckTableForSerializableConflictIn(Relation relation)
+{
+       HASH_SEQ_STATUS seqstat;
+       PREDICATELOCKTARGET *target;
+       Oid                     dbId;
+       Oid                     heapId;
+       int                     i;
+
+       /*
+        * Bail out quickly if there are no serializable transactions running.
+        * It's safe to check this without taking locks because the caller is
+        * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
+        * would matter here can be acquired while that is held.
+        */
+       if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+               return;
+
+       if (!SerializationNeededForWrite(relation))
+               return;
+
+       /*
+        * We're doing a write which might cause rw-conflicts now or later.
+        * Memorize that fact.
+        */
+       MyXactDidWrite = true;
+
+       Assert(relation->rd_index == NULL); /* not an index relation */
+
+       dbId = relation->rd_node.dbNode;
+       heapId = relation->rd_id;
+
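+       /*
+        * Acquire the lock list lock, every predicate lock partition lock, and
+        * the xact hash lock, so that we can safely scan the whole target hash
+        * and flag conflicts. They are released below in the reverse order.
+        */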
+       LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
+       for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
+               LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
+       LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+       /* Scan through target list */
+       hash_seq_init(&seqstat, PredicateLockTargetHash);
+
+       while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+       {
+               PREDICATELOCK *predlock;
+
+               /*
+                * Check whether this is a target which needs attention.
+                */
+               if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
+                       continue;                       /* wrong relation id */
+               if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
+                       continue;                       /* wrong database id */
+
+               /*
+                * Loop through locks for this target and flag conflicts.
+                */
+               predlock = (PREDICATELOCK *)
+                       SHMQueueNext(&(target->predicateLocks),
+                                                &(target->predicateLocks),
+                                                offsetof(PREDICATELOCK, targetLink));
+               while (predlock)
+               {
+                       PREDICATELOCK *nextpredlock;
+
+                       nextpredlock = (PREDICATELOCK *)
+                               SHMQueueNext(&(target->predicateLocks),
+                                                        &(predlock->targetLink),
+                                                        offsetof(PREDICATELOCK, targetLink));
+
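+                       /*
+                        * Skip locks belonging to our own transaction, and don't flag a
+                        * conflict that has already been recorded.
+                        */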
+                       if (predlock->tag.myXact != MySerializableXact
+                               && !RWConflictExists(predlock->tag.myXact, MySerializableXact))
+                       {
+                               FlagRWConflict(predlock->tag.myXact, MySerializableXact);
+                       }
+
+                       predlock = nextpredlock;
+               }
+       }
+
+       /* Release locks in reverse order */
+       LWLockRelease(SerializableXactHashLock);
+       for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
+               LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
+       LWLockRelease(SerializablePredicateLockListLock);
+}
+
+
 /*
  * Flag a rw-dependency between two serializable transactions.
  *
@@ -3870,9 +4631,23 @@ FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
                SetRWConflict(reader, writer);
 }
 
-/*
- * Check whether we should roll back one of these transactions
- * instead of flagging a new rw-conflict.
+/*----------------------------------------------------------------------------
+ * We are about to add a RW-edge to the dependency graph - check that we don't
+ * introduce a dangerous structure by doing so, and abort one of the
+ * transactions if so.
+ *
+ * A serialization failure can only occur if there is a dangerous structure
+ * in the dependency graph:
+ *
+ *             Tin ------> Tpivot ------> Tout
+ *                       rw                     rw
+ *
+ * Furthermore, Tout must commit first.
+ *
+ * One more optimization is that if Tin is declared READ ONLY (or commits
+ * without writing), we can only have a problem if Tout committed before Tin
+ * acquired its snapshot.
+ *----------------------------------------------------------------------------
  */
 static void
 OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
@@ -3885,108 +4660,151 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
 
        failure = false;
 
-       /*
-        * Check for already-committed writer with rw-conflict out flagged. This
-        * means that the reader must immediately fail.
+       /*------------------------------------------------------------------------
+        * Check for already-committed writer with rw-conflict out flagged
+        * (conflict-flag on W means that T2 committed before W):
+        *
+        *              R ------> W ------> T2
+        *                      rw                rw
+        *
+        * That is a dangerous structure, so we must abort. (Since the writer
+        * has already committed, we must be the reader.)
+        *------------------------------------------------------------------------
         */
        if (SxactIsCommitted(writer)
-         && (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
+               && (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
                failure = true;
 
-       /*
-        * Check whether the reader has become a pivot with a committed writer. If
-        * so, we must roll back unless every in-conflict either committed before
-        * the writer committed or is READ ONLY and overlaps the writer.
+       /*------------------------------------------------------------------------
+        * Check whether the writer has become a pivot with an out-conflict
+        * committed transaction (T2), and T2 committed first:
+        *
+        *              R ------> W ------> T2
+        *                      rw                rw
+        *
+        * Because T2 must've committed first, there is no anomaly if:
+        * - the reader committed before T2
+        * - the writer committed before T2
+        * - the reader is a READ ONLY transaction and the reader was concurrent
+        *       with T2 (= reader acquired its snapshot before T2 committed)
+        *
+        * Here we also handle the case where T2 is prepared but not yet
+        * committed. In that case T2 has already checked for conflicts, so if
+        * it commits first, making the above conflict real, it's too late for
+        * it to abort.
+        *------------------------------------------------------------------------
         */
-       if (!failure && SxactIsCommitted(writer) && !SxactIsReadOnly(reader))
+       if (!failure)
        {
-               if (SxactHasSummaryConflictIn(reader))
+               if (SxactHasSummaryConflictOut(writer))
                {
                        failure = true;
                        conflict = NULL;
                }
                else
                        conflict = (RWConflict)
-                               SHMQueueNext(&reader->inConflicts,
-                                                        &reader->inConflicts,
-                                                        offsetof(RWConflictData, inLink));
+                               SHMQueueNext(&writer->outConflicts,
+                                                        &writer->outConflicts,
+                                                        offsetof(RWConflictData, outLink));
                while (conflict)
                {
-                       if (!SxactIsRolledBack(conflict->sxactOut)
-                               && (!SxactIsCommitted(conflict->sxactOut)
-                                       || conflict->sxactOut->commitSeqNo >= writer->commitSeqNo)
-                               && (!SxactIsReadOnly(conflict->sxactOut)
-                                       || conflict->sxactOut->SeqNo.lastCommitBeforeSnapshot >= writer->commitSeqNo))
+                       SERIALIZABLEXACT *t2 = conflict->sxactIn;
+
+                       if (SxactIsPrepared(t2)
+                               && (!SxactIsCommitted(reader)
+                                       || t2->prepareSeqNo <= reader->commitSeqNo)
+                               && (!SxactIsCommitted(writer)
+                                       || t2->prepareSeqNo <= writer->commitSeqNo)
+                               && (!SxactIsReadOnly(reader)
+                                       || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
                        {
                                failure = true;
                                break;
                        }
                        conflict = (RWConflict)
-                               SHMQueueNext(&reader->inConflicts,
-                                                        &conflict->inLink,
-                                                        offsetof(RWConflictData, inLink));
+                               SHMQueueNext(&writer->outConflicts,
+                                                        &conflict->outLink,
+                                                        offsetof(RWConflictData, outLink));
                }
        }
 
-       /*
-        * Check whether the writer has become a pivot with an out-conflict
-        * committed transaction, while neither reader nor writer is committed. If
-        * the reader is a READ ONLY transaction, there is only a serialization
-        * failure if an out-conflict transaction causing the pivot committed
-        * before the reader acquired its snapshot.  (That is, the reader must not
-        * have been concurrent with the out-conflict transaction.)
+       /*------------------------------------------------------------------------
+        * Check whether the reader has become a pivot with a writer
+        * that's committed (or prepared):
+        *
+        *              T0 ------> R ------> W
+        *                       rw                rw
+        *
+        * Because W must've committed first for an anomaly to occur, there is no
+        * anomaly if:
+        * - T0 committed before the writer
+        * - T0 is READ ONLY, and overlaps the writer
+        *------------------------------------------------------------------------
         */
-       if (!failure && !SxactIsCommitted(writer))
+       if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
        {
-               if (SxactHasSummaryConflictOut(reader))
+               if (SxactHasSummaryConflictIn(reader))
                {
                        failure = true;
                        conflict = NULL;
                }
                else
                        conflict = (RWConflict)
-                               SHMQueueNext(&writer->outConflicts,
-                                                        &writer->outConflicts,
-                                                        offsetof(RWConflictData, outLink));
+                               SHMQueueNext(&reader->inConflicts,
+                                                        &reader->inConflicts,
+                                                        offsetof(RWConflictData, inLink));
                while (conflict)
                {
-                       if ((reader == conflict->sxactIn && SxactIsCommitted(reader))
-                               || (SxactIsCommitted(conflict->sxactIn)
-                                       && !SxactIsCommitted(reader)
-                                       && (!SxactIsReadOnly(reader)
-                                               || conflict->sxactIn->commitSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot)))
+                       SERIALIZABLEXACT *t0 = conflict->sxactOut;
+
+                       if (!SxactIsDoomed(t0)
+                               && (!SxactIsCommitted(t0)
+                                       || t0->commitSeqNo >= writer->prepareSeqNo)
+                               && (!SxactIsReadOnly(t0)
+                                       || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
                        {
                                failure = true;
                                break;
                        }
                        conflict = (RWConflict)
-                               SHMQueueNext(&writer->outConflicts,
-                                                        &conflict->outLink,
-                                                        offsetof(RWConflictData, outLink));
+                               SHMQueueNext(&reader->inConflicts,
+                                                        &conflict->inLink,
+                                                        offsetof(RWConflictData, inLink));
                }
        }
 
        if (failure)
        {
+               /*
+                * We have to kill a transaction to prevent a possible anomaly from
+                * occurring. If the writer is us, we can just ereport() to cause a
+                * transaction abort. Otherwise we flag the writer for termination,
+                * causing it to abort when it tries to commit. However, if the
+                * writer has already prepared, we can't abort it anymore, so we
+                * have to kill the reader instead.
+                */
                if (MySerializableXact == writer)
                {
                        LWLockRelease(SerializableXactHashLock);
                        ereport(ERROR,
                                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                         errmsg("could not serialize access due to read/write dependencies among transactions"),
-                       errdetail("Cancelled on identification as pivot, during write."),
+                                        errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
                                         errhint("The transaction might succeed if retried.")));
                }
                else if (SxactIsPrepared(writer))
                {
                        LWLockRelease(SerializableXactHashLock);
+
+                       /* if we're not the writer, we have to be the reader */
+                       Assert(MySerializableXact == reader);
                        ereport(ERROR,
                                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                         errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                        errdetail("Cancelled on conflict out to pivot %u, during read.", writer->topXid),
+                                        errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
                                         errhint("The transaction might succeed if retried.")));
                }
-               writer->flags |= SXACT_FLAG_MARKED_FOR_DEATH;
+               writer->flags |= SXACT_FLAG_DOOMED;
        }
 }
 
@@ -4002,9 +4820,9 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
  *
  * If a dangerous structure is found, the pivot (the near conflict) is
  * marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we flail without ever making progress.  This transaction is
  * committing writes, so letting it commit ensures progress.  If we
- * cancelled the far conflict, it might immediately fail again on retry.
+ * canceled the far conflict, it might immediately fail again on retry.
  */
 void
 PreCommit_CheckForSerializationFailure(void)
@@ -4018,25 +4836,26 @@ PreCommit_CheckForSerializationFailure(void)
 
        LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
-       if (SxactIsMarkedForDeath(MySerializableXact))
+       /* Check if someone else has already decided that we need to die */
+       if (SxactIsDoomed(MySerializableXact))
        {
+               Assert(!SxactIsPartiallyReleased(MySerializableXact));
                LWLockRelease(SerializableXactHashLock);
                ereport(ERROR,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("could not serialize access due to read/write dependencies among transactions"),
-                                errdetail("Cancelled on identification as a pivot, during commit attempt."),
+                                errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
                                 errhint("The transaction might succeed if retried.")));
        }
 
        nearConflict = (RWConflict)
-               SHMQueueNext((SHM_QUEUE *) &MySerializableXact->inConflicts,
-                                        (SHM_QUEUE *) &MySerializableXact->inConflicts,
+               SHMQueueNext(&MySerializableXact->inConflicts,
+                                        &MySerializableXact->inConflicts,
                                         offsetof(RWConflictData, inLink));
        while (nearConflict)
        {
                if (!SxactIsCommitted(nearConflict->sxactOut)
-                       && !SxactIsRolledBack(nearConflict->sxactOut)
-                       && !SxactIsMarkedForDeath(nearConflict->sxactOut))
+                       && !SxactIsDoomed(nearConflict->sxactOut))
                {
                        RWConflict      farConflict;
 
@@ -4049,10 +4868,24 @@ PreCommit_CheckForSerializationFailure(void)
                                if (farConflict->sxactOut == MySerializableXact
                                        || (!SxactIsCommitted(farConflict->sxactOut)
                                                && !SxactIsReadOnly(farConflict->sxactOut)
-                                               && !SxactIsRolledBack(farConflict->sxactOut)
-                                               && !SxactIsMarkedForDeath(farConflict->sxactOut)))
+                                               && !SxactIsDoomed(farConflict->sxactOut)))
                                {
-                                       nearConflict->sxactOut->flags |= SXACT_FLAG_MARKED_FOR_DEATH;
+                                       /*
+                                        * Normally, we kill the pivot transaction to make sure we
+                                        * make progress if the failing transaction is retried.
+                                        * However, we can't kill it if it's already prepared, so
+                                        * in that case we commit suicide instead.
+                                        */
+                                       if (SxactIsPrepared(nearConflict->sxactOut))
+                                       {
+                                               LWLockRelease(SerializableXactHashLock);
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                                                                errmsg("could not serialize access due to read/write dependencies among transactions"),
+                                                                errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
+                                                                errhint("The transaction might succeed if retried.")));
+                                       }
+                                       nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
                                        break;
                                }
                                farConflict = (RWConflict)
@@ -4063,11 +4896,12 @@ PreCommit_CheckForSerializationFailure(void)
                }
 
                nearConflict = (RWConflict)
-                       SHMQueueNext((SHM_QUEUE *) &MySerializableXact->inConflicts,
+                       SHMQueueNext(&MySerializableXact->inConflicts,
                                                 &nearConflict->inLink,
                                                 offsetof(RWConflictData, inLink));
        }
 
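+       /*
+        * Take the next number from the commit sequence counter to record when
+        * we prepared, then mark ourselves as prepared.
+        */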
+       MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
        MySerializableXact->flags |= SXACT_FLAG_PREPARED;
 
        LWLockRelease(SerializableXactHashLock);
@@ -4093,27 +4927,24 @@ AtPrepare_PredicateLocks(void)
        TwoPhasePredicateXactRecord *xactRecord;
        TwoPhasePredicateLockRecord *lockRecord;
 
-       sxact = (SERIALIZABLEXACT *) MySerializableXact;
+       sxact = MySerializableXact;
        xactRecord = &(record.data.xactRecord);
        lockRecord = &(record.data.lockRecord);
 
        if (MySerializableXact == InvalidSerializableXact)
                return;
 
-       /* Generate a xact record for our SERIALIZABLEXACT */
+       /* Generate an xact record for our SERIALIZABLEXACT */
        record.type = TWOPHASEPREDICATERECORD_XACT;
        xactRecord->xmin = MySerializableXact->xmin;
        xactRecord->flags = MySerializableXact->flags;
 
        /*
-        * Tweak the flags. Since we're not going to output the inConflicts and
-        * outConflicts lists, if they're non-empty we'll represent that by
-        * setting the appropriate summary conflict flags.
+        * Note that we don't include the lists of conflicts in and out in the
+        * statefile, because new conflicts can be added even after the
+        * transaction prepares. We'll just make a conservative assumption during
+        * recovery instead.
         */
-       if (!SHMQueueEmpty((SHM_QUEUE *) &MySerializableXact->inConflicts))
-               xactRecord->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
-       if (!SHMQueueEmpty((SHM_QUEUE *) &MySerializableXact->outConflicts))
-               xactRecord->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
 
        RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
                                                   &record, sizeof(record));
@@ -4127,6 +4958,13 @@ AtPrepare_PredicateLocks(void)
         */
        LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
 
+       /*
+        * No need to take sxact->predicateLockListLock in parallel mode because
+        * there cannot be any parallel workers running while we are preparing a
+        * transaction.
+        */
+       Assert(!IsParallelWorker() && !ParallelContextActive());
+
        predlock = (PREDICATELOCK *)
                SHMQueueNext(&(sxact->predicateLocks),
                                         &(sxact->predicateLocks),
@@ -4170,6 +5008,7 @@ PostPrepare_PredicateLocks(TransactionId xid)
        LocalPredicateLockHash = NULL;
 
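+       /*
+        * We are done with this transaction in this backend, but its
+        * SERIALIZABLEXACT and predicate locks live on until the prepared
+        * transaction commits or is rolled back.
+        */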
        MySerializableXact = InvalidSerializableXact;
+       MyXactDidWrite = false;
 }
 
 /*
@@ -4196,7 +5035,9 @@ PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
 
        /* Release its locks */
        MySerializableXact = sxid->myXact;
-       ReleasePredicateLocks(isCommit);
+       MyXactDidWrite = true;          /* conservatively assume that we wrote
+                                                                * something */
+       ReleasePredicateLocks(isCommit, false);
 }
 
 /*
@@ -4239,20 +5080,12 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
                sxact->pid = 0;
 
                /* a prepared xact hasn't committed yet */
+               sxact->prepareSeqNo = RecoverySerCommitSeqNo;
                sxact->commitSeqNo = InvalidSerCommitSeqNo;
                sxact->finishedBefore = InvalidTransactionId;
 
                sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;
 
-
-               /*
-                * We don't need the details of a prepared transaction's conflicts,
-                * just whether it had conflicts in or out (which we get from the
-                * flags)
-                */
-               SHMQueueInit(&(sxact->outConflicts));
-               SHMQueueInit(&(sxact->inConflicts));
-
                /*
                 * Don't need to track this; no transactions running at the time the
                 * recovered xact started are still active, except possibly other
@@ -4274,15 +5107,22 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
                                   (MaxBackends + max_prepared_xacts));
                }
 
+               /*
+                * We don't know whether the transaction had any conflicts or not, so
+                * we'll conservatively assume that it had both a conflict in and a
+                * conflict out, and represent that with the summary conflict flags.
+                */
+               SHMQueueInit(&(sxact->outConflicts));
+               SHMQueueInit(&(sxact->inConflicts));
+               sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
+               sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
+
                /* Register the transaction's xid */
                sxidtag.xid = xid;
                sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
                                                                                           &sxidtag,
                                                                                           HASH_ENTER, &found);
-               if (!sxid)
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_OUT_OF_MEMORY),
-                                        errmsg("out of shared memory")));
+               Assert(sxid != NULL);
                Assert(!found);
                sxid->myXact = (SERIALIZABLEXACT *) sxact;
 
@@ -4333,3 +5173,28 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
                CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
        }
 }
+
+/*
+ * Prepare to share the current SERIALIZABLEXACT with parallel workers.
+ * Return a handle object that can be used by AttachSerializableXact() in a
+ * parallel worker.
+ */
+SerializableXactHandle
+ShareSerializableXact(void)
+{
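+       /*
+        * The handle is simply the pointer to our SERIALIZABLEXACT in shared
+        * memory, which parallel workers can use directly.
+        */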
+       return MySerializableXact;
+}
+
+/*
+ * Allow parallel workers to import the leader's SERIALIZABLEXACT.
+ */
+void
+AttachSerializableXact(SerializableXactHandle handle)
+{
+       Assert(MySerializableXact == InvalidSerializableXact);
+
+       MySerializableXact = (SERIALIZABLEXACT *) handle;
+       if (MySerializableXact != InvalidSerializableXact)
+               CreateLocalPredicateLockHash();
+}