]> granicus.if.org Git - postgresql/blob - src/backend/storage/lmgr/predicate.c
Update copyright for 2015
[postgresql] / src / backend / storage / lmgr / predicate.c
1 /*-------------------------------------------------------------------------
2  *
3  * predicate.c
4  *        POSTGRES predicate locking
5  *        to support full serializable transaction isolation
6  *
7  *
8  * The approach taken is to implement Serializable Snapshot Isolation (SSI)
9  * as initially described in this paper:
10  *
11  *      Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
12  *      Serializable isolation for snapshot databases.
13  *      In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
14  *      international conference on Management of data,
15  *      pages 729-738, New York, NY, USA. ACM.
16  *      http://doi.acm.org/10.1145/1376616.1376690
17  *
18  * and further elaborated in Cahill's doctoral thesis:
19  *
20  *      Michael James Cahill. 2009.
21  *      Serializable Isolation for Snapshot Databases.
22  *      Sydney Digital Theses.
23  *      University of Sydney, School of Information Technologies.
24  *      http://hdl.handle.net/2123/5353
25  *
26  *
27  * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
28  * locks, which are so different from normal locks that a distinct set of
29  * structures is required to handle them.  They are needed to detect
30  * rw-conflicts when the read happens before the write.  (When the write
31  * occurs first, the reading transaction can check for a conflict by
32  * examining the MVCC data.)
33  *
34  * (1)  Besides tuples actually read, they must cover ranges of tuples
35  *              which would have been read based on the predicate.  This will
36  *              require modelling the predicates through locks against database
37  *              objects such as pages, index ranges, or entire tables.
38  *
39  * (2)  They must be kept in RAM for quick access.  Because of this, it
40  *              isn't possible to always maintain tuple-level granularity -- when
41  *              the space allocated to store these approaches exhaustion, a
42  *              request for a lock may need to scan for situations where a single
43  *              transaction holds many fine-grained locks which can be coalesced
44  *              into a single coarser-grained lock.
45  *
46  * (3)  They never block anything; they are more like flags than locks
47  *              in that regard; although they refer to database objects and are
48  *              used to identify rw-conflicts with normal write locks.
49  *
50  * (4)  While they are associated with a transaction, they must survive
51  *              a successful COMMIT of that transaction, and remain until all
52  *              overlapping transactions complete.  This even means that they
53  *              must survive termination of the transaction's process.  If a
54  *              top level transaction is rolled back, however, it is immediately
55  *              flagged so that it can be ignored, and its SIREAD locks can be
56  *              released any time after that.
57  *
58  * (5)  The only transactions which create SIREAD locks or check for
59  *              conflicts with them are serializable transactions.
60  *
61  * (6)  When a write lock for a top level transaction is found to cover
62  *              an existing SIREAD lock for the same transaction, the SIREAD lock
63  *              can be deleted.
64  *
65  * (7)  A write from a serializable transaction must ensure that a xact
66  *              record exists for the transaction, with the same lifespan (until
67  *              all concurrent transaction complete or the transaction is rolled
68  *              back) so that rw-dependencies to that transaction can be
69  *              detected.
70  *
71  * We use an optimization for read-only transactions. Under certain
72  * circumstances, a read-only transaction's snapshot can be shown to
73  * never have conflicts with other transactions.  This is referred to
74  * as a "safe" snapshot (and one known not to be is "unsafe").
75  * However, it can't be determined whether a snapshot is safe until
76  * all concurrent read/write transactions complete.
77  *
78  * Once a read-only transaction is known to have a safe snapshot, it
79  * can release its predicate locks and exempt itself from further
80  * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
81  * on safe snapshots, waiting as necessary for one to be available.
82  *
83  *
84  * Lightweight locks to manage access to the predicate locking shared
85  * memory objects must be taken in this order, and should be released in
86  * reverse order:
87  *
88  *      SerializableFinishedListLock
89  *              - Protects the list of transactions which have completed but which
90  *                      may yet matter because they overlap still-active transactions.
91  *
92  *      SerializablePredicateLockListLock
93  *              - Protects the linked list of locks held by a transaction.  Note
94  *                      that the locks themselves are also covered by the partition
95  *                      locks of their respective lock targets; this lock only affects
96  *                      the linked list connecting the locks related to a transaction.
97  *              - All transactions share this single lock (with no partitioning).
98  *              - There is never a need for a process other than the one running
99  *                      an active transaction to walk the list of locks held by that
100  *                      transaction.
101  *              - It is relatively infrequent that another process needs to
102  *                      modify the list for a transaction, but it does happen for such
103  *                      things as index page splits for pages with predicate locks and
104  *                      freeing of predicate locked pages by a vacuum process.  When
105  *                      removing a lock in such cases, the lock itself contains the
106  *                      pointers needed to remove it from the list.  When adding a
107  *                      lock in such cases, the lock can be added using the anchor in
108  *                      the transaction structure.  Neither requires walking the list.
109  *              - Cleaning up the list for a terminated transaction is sometimes
110  *                      not done on a retail basis, in which case no lock is required.
111  *              - Due to the above, a process accessing its active transaction's
112  *                      list always uses a shared lock, regardless of whether it is
113  *                      walking or maintaining the list.  This improves concurrency
114  *                      for the common access patterns.
115  *              - A process which needs to alter the list of a transaction other
116  *                      than its own active transaction must acquire an exclusive
117  *                      lock.
118  *
119  *      FirstPredicateLockMgrLock based partition locks
120  *              - The same lock protects a target, all locks on that target, and
121  *                      the linked list of locks on the target..
122  *              - When more than one is needed, acquire in ascending order.
123  *
124  *      SerializableXactHashLock
125  *              - Protects both PredXact and SerializableXidHash.
126  *
127  *
128  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
129  * Portions Copyright (c) 1994, Regents of the University of California
130  *
131  *
132  * IDENTIFICATION
133  *        src/backend/storage/lmgr/predicate.c
134  *
135  *-------------------------------------------------------------------------
136  */
137 /*
138  * INTERFACE ROUTINES
139  *
140  * housekeeping for setting up shared memory predicate lock structures
141  *              InitPredicateLocks(void)
142  *              PredicateLockShmemSize(void)
143  *
144  * predicate lock reporting
145  *              GetPredicateLockStatusData(void)
146  *              PageIsPredicateLocked(Relation relation, BlockNumber blkno)
147  *
148  * predicate lock maintenance
149  *              GetSerializableTransactionSnapshot(Snapshot snapshot)
150  *              SetSerializableTransactionSnapshot(Snapshot snapshot,
151  *                                                                                 TransactionId sourcexid)
152  *              RegisterPredicateLockingXid(void)
153  *              PredicateLockRelation(Relation relation, Snapshot snapshot)
154  *              PredicateLockPage(Relation relation, BlockNumber blkno,
155  *                                              Snapshot snapshot)
156  *              PredicateLockTuple(Relation relation, HeapTuple tuple,
157  *                                              Snapshot snapshot)
158  *              PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
159  *                                                         BlockNumber newblkno)
160  *              PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
161  *                                                               BlockNumber newblkno)
162  *              TransferPredicateLocksToHeapRelation(Relation relation)
163  *              ReleasePredicateLocks(bool isCommit)
164  *
165  * conflict detection (may also trigger rollback)
166  *              CheckForSerializableConflictOut(bool visible, Relation relation,
167  *                                                                              HeapTupleData *tup, Buffer buffer,
168  *                                                                              Snapshot snapshot)
169  *              CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
170  *                                                                         Buffer buffer)
171  *              CheckTableForSerializableConflictIn(Relation relation)
172  *
173  * final rollback checking
174  *              PreCommit_CheckForSerializationFailure(void)
175  *
176  * two-phase commit support
177  *              AtPrepare_PredicateLocks(void);
178  *              PostPrepare_PredicateLocks(TransactionId xid);
179  *              PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
180  *              predicatelock_twophase_recover(TransactionId xid, uint16 info,
181  *                                                                         void *recdata, uint32 len);
182  */
183
184 #include "postgres.h"
185
186 #include "access/htup_details.h"
187 #include "access/slru.h"
188 #include "access/subtrans.h"
189 #include "access/transam.h"
190 #include "access/twophase.h"
191 #include "access/twophase_rmgr.h"
192 #include "access/xact.h"
193 #include "access/xlog.h"
194 #include "miscadmin.h"
195 #include "storage/bufmgr.h"
196 #include "storage/predicate.h"
197 #include "storage/predicate_internals.h"
198 #include "storage/proc.h"
199 #include "storage/procarray.h"
200 #include "utils/rel.h"
201 #include "utils/snapmgr.h"
202 #include "utils/tqual.h"
203
204 /* Uncomment the next line to test the graceful degradation code. */
205 /* #define TEST_OLDSERXID */
206
207 /*
208  * Test the most selective fields first, for performance.
209  *
210  * a is covered by b if all of the following hold:
211  *      1) a.database = b.database
212  *      2) a.relation = b.relation
213  *      3) b.offset is invalid (b is page-granularity or higher)
214  *      4) either of the following:
215  *              4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
216  *       or 4b) a.offset is invalid and b.page is invalid (a is
217  *                      page-granularity and b is relation-granularity
218  */
219 #define TargetTagIsCoveredBy(covered_target, covering_target)                   \
220         ((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */      \
221           GET_PREDICATELOCKTARGETTAG_RELATION(covering_target))                         \
222          && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) ==                      \
223                  InvalidOffsetNumber)                                                            /* (3) */      \
224          && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) !=                     \
225                    InvalidOffsetNumber)                                                          /* (4a) */ \
226                   && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==               \
227                           GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)))                     \
228                  || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==               \
229                           InvalidBlockNumber)                                                    /* (4b) */ \
230                          && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)            \
231                                  != InvalidBlockNumber)))                                                               \
232          && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) ==    /* (1) */      \
233                  GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
234
235 /*
236  * The predicate locking target and lock shared hash tables are partitioned to
237  * reduce contention.  To determine which partition a given target belongs to,
238  * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
239  * apply one of these macros.
240  * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
241  */
242 #define PredicateLockHashPartition(hashcode) \
243         ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
244 #define PredicateLockHashPartitionLock(hashcode) \
245         (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
246                 PredicateLockHashPartition(hashcode)].lock)
247 #define PredicateLockHashPartitionLockByIndex(i) \
248         (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
249
250 #define NPREDICATELOCKTARGETENTS() \
251         mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
252
253 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
254
255 /*
256  * Note that a sxact is marked "prepared" once it has passed
257  * PreCommit_CheckForSerializationFailure, even if it isn't using
258  * 2PC. This is the point at which it can no longer be aborted.
259  *
260  * The PREPARED flag remains set after commit, so SxactIsCommitted
261  * implies SxactIsPrepared.
262  */
263 #define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
264 #define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
265 #define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
266 #define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
267 #define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
268 #define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
269 #define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
270 /*
271  * The following macro actually means that the specified transaction has a
272  * conflict out *to a transaction which committed ahead of it*.  It's hard
273  * to get that into a name of a reasonable length.
274  */
275 #define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
276 #define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
277 #define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
278 #define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
279
280 /*
281  * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
282  *
283  * To avoid unnecessary recomputations of the hash code, we try to do this
284  * just once per function, and then pass it around as needed.  Aside from
285  * passing the hashcode to hash_search_with_hash_value(), we can extract
286  * the lock partition number from the hashcode.
287  */
288 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
289         get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
290
291 /*
292  * Given a predicate lock tag, and the hash for its target,
293  * compute the lock hash.
294  *
295  * To make the hash code also depend on the transaction, we xor the sxid
296  * struct's address into the hash code, left-shifted so that the
297  * partition-number bits don't change.  Since this is only a hash, we
298  * don't care if we lose high-order bits of the address; use an
299  * intermediate variable to suppress cast-pointer-to-int warnings.
300  */
301 #define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
302         ((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
303          << LOG2_NUM_PREDICATELOCK_PARTITIONS)
304
305
306 /*
307  * The SLRU buffer area through which we access the old xids.
308  */
309 static SlruCtlData OldSerXidSlruCtlData;
310
311 #define OldSerXidSlruCtl                        (&OldSerXidSlruCtlData)
312
313 #define OLDSERXID_PAGESIZE                      BLCKSZ
314 #define OLDSERXID_ENTRYSIZE                     sizeof(SerCommitSeqNo)
315 #define OLDSERXID_ENTRIESPERPAGE        (OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)
316
317 /*
318  * Set maximum pages based on the lesser of the number needed to track all
319  * transactions and the maximum that SLRU supports.
320  */
321 #define OLDSERXID_MAX_PAGE                      Min(SLRU_PAGES_PER_SEGMENT * 0x10000 - 1, \
322                                                                                 (MaxTransactionId) / OLDSERXID_ENTRIESPERPAGE)
323
324 #define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)
325
326 #define OldSerXidValue(slotno, xid) (*((SerCommitSeqNo *) \
327         (OldSerXidSlruCtl->shared->page_buffer[slotno] + \
328         ((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))
329
330 #define OldSerXidPage(xid)      ((((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE) % (OLDSERXID_MAX_PAGE + 1))
331 #define OldSerXidSegment(page)  ((page) / SLRU_PAGES_PER_SEGMENT)
332
333 typedef struct OldSerXidControlData
334 {
335         int                     headPage;               /* newest initialized page */
336         TransactionId headXid;          /* newest valid Xid in the SLRU */
337         TransactionId tailXid;          /* oldest xmin we might be interested in */
338         bool            warningIssued;  /* have we issued SLRU wrap-around warning? */
339 }       OldSerXidControlData;
340
341 typedef struct OldSerXidControlData *OldSerXidControl;
342
343 static OldSerXidControl oldSerXidControl;
344
345 /*
346  * When the oldest committed transaction on the "finished" list is moved to
347  * SLRU, its predicate locks will be moved to this "dummy" transaction,
348  * collapsing duplicate targets.  When a duplicate is found, the later
349  * commitSeqNo is used.
350  */
351 static SERIALIZABLEXACT *OldCommittedSxact;
352
353
354 /* This configuration variable is used to set the predicate lock table size */
355 int                     max_predicate_locks_per_xact;           /* set by guc.c */
356
357 /*
358  * This provides a list of objects in order to track transactions
359  * participating in predicate locking.  Entries in the list are fixed size,
360  * and reside in shared memory.  The memory address of an entry must remain
361  * fixed during its lifetime.  The list will be protected from concurrent
362  * update externally; no provision is made in this code to manage that.  The
363  * number of entries in the list, and the size allowed for each entry is
364  * fixed upon creation.
365  */
366 static PredXactList PredXact;
367
368 /*
369  * This provides a pool of RWConflict data elements to use in conflict lists
370  * between transactions.
371  */
372 static RWConflictPoolHeader RWConflictPool;
373
374 /*
375  * The predicate locking hash tables are in shared memory.
376  * Each backend keeps pointers to them.
377  */
378 static HTAB *SerializableXidHash;
379 static HTAB *PredicateLockTargetHash;
380 static HTAB *PredicateLockHash;
381 static SHM_QUEUE *FinishedSerializableTransactions;
382
383 /*
384  * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
385  * this entry, you can ensure that there's enough scratch space available for
386  * inserting one entry in the hash table. This is an otherwise-invalid tag.
387  */
388 static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
389 static uint32 ScratchTargetTagHash;
390 static LWLock *ScratchPartitionLock;
391
392 /*
393  * The local hash table used to determine when to combine multiple fine-
394  * grained locks into a single courser-grained lock.
395  */
396 static HTAB *LocalPredicateLockHash = NULL;
397
398 /*
399  * Keep a pointer to the currently-running serializable transaction (if any)
400  * for quick reference. Also, remember if we have written anything that could
401  * cause a rw-conflict.
402  */
403 static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
404 static bool MyXactDidWrite = false;
405
406 /* local functions */
407
408 static SERIALIZABLEXACT *CreatePredXact(void);
409 static void ReleasePredXact(SERIALIZABLEXACT *sxact);
410 static SERIALIZABLEXACT *FirstPredXact(void);
411 static SERIALIZABLEXACT *NextPredXact(SERIALIZABLEXACT *sxact);
412
413 static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
414 static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
415 static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
416 static void ReleaseRWConflict(RWConflict conflict);
417 static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);
418
419 static bool OldSerXidPagePrecedesLogically(int p, int q);
420 static void OldSerXidInit(void);
421 static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
422 static SerCommitSeqNo OldSerXidGetMinConflictCommitSeqNo(TransactionId xid);
423 static void OldSerXidSetActiveSerXmin(TransactionId xid);
424
425 static uint32 predicatelock_hash(const void *key, Size keysize);
426 static void SummarizeOldestCommittedSxact(void);
427 static Snapshot GetSafeSnapshot(Snapshot snapshot);
428 static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
429                                                                           TransactionId sourcexid);
430 static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
431 static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
432                                                   PREDICATELOCKTARGETTAG *parent);
433 static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
434 static void RemoveScratchTarget(bool lockheld);
435 static void RestoreScratchTarget(bool lockheld);
436 static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
437                                                    uint32 targettaghash);
438 static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
439 static int      PredicateLockPromotionThreshold(const PREDICATELOCKTARGETTAG *tag);
440 static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag);
441 static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
442 static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
443                                         uint32 targettaghash,
444                                         SERIALIZABLEXACT *sxact);
445 static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
446 static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
447                                                                   PREDICATELOCKTARGETTAG newtargettag,
448                                                                   bool removeOld);
449 static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
450 static void DropAllPredicateLocksFromTable(Relation relation,
451                                                            bool transfer);
452 static void SetNewSxactGlobalXmin(void);
453 static void ClearOldPredicateLocks(void);
454 static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
455                                                    bool summarize);
456 static bool XidIsConcurrent(TransactionId xid);
457 static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
458 static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
459 static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
460                                                                                 SERIALIZABLEXACT *writer);
461
462
463 /*------------------------------------------------------------------------*/
464
465 /*
466  * Does this relation participate in predicate locking? Temporary and system
467  * relations are exempt, as are materialized views.
468  */
469 static inline bool
470 PredicateLockingNeededForRelation(Relation relation)
471 {
472         return !(relation->rd_id < FirstBootstrapObjectId ||
473                          RelationUsesLocalBuffers(relation) ||
474                          relation->rd_rel->relkind == RELKIND_MATVIEW);
475 }
476
477 /*
478  * When a public interface method is called for a read, this is the test to
479  * see if we should do a quick return.
480  *
481  * Note: this function has side-effects! If this transaction has been flagged
482  * as RO-safe since the last call, we release all predicate locks and reset
483  * MySerializableXact. That makes subsequent calls to return quickly.
484  *
485  * This is marked as 'inline' to make to eliminate the function call overhead
486  * in the common case that serialization is not needed.
487  */
488 static inline bool
489 SerializationNeededForRead(Relation relation, Snapshot snapshot)
490 {
491         /* Nothing to do if this is not a serializable transaction */
492         if (MySerializableXact == InvalidSerializableXact)
493                 return false;
494
495         /*
496          * Don't acquire locks or conflict when scanning with a special snapshot.
497          * This excludes things like CLUSTER and REINDEX. They use the wholesale
498          * functions TransferPredicateLocksToHeapRelation() and
499          * CheckTableForSerializableConflictIn() to participate serialization, but
500          * the scans involved don't need serialization.
501          */
502         if (!IsMVCCSnapshot(snapshot))
503                 return false;
504
505         /*
506          * Check if we have just become "RO-safe". If we have, immediately release
507          * all locks as they're not needed anymore. This also resets
508          * MySerializableXact, so that subsequent calls to this function can exit
509          * quickly.
510          *
511          * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
512          * commit without having conflicts out to an earlier snapshot, thus
513          * ensuring that no conflicts are possible for this transaction.
514          */
515         if (SxactIsROSafe(MySerializableXact))
516         {
517                 ReleasePredicateLocks(false);
518                 return false;
519         }
520
521         /* Check if the relation doesn't participate in predicate locking */
522         if (!PredicateLockingNeededForRelation(relation))
523                 return false;
524
525         return true;                            /* no excuse to skip predicate locking */
526 }
527
528 /*
529  * Like SerializationNeededForRead(), but called on writes.
530  * The logic is the same, but there is no snapshot and we can't be RO-safe.
531  */
532 static inline bool
533 SerializationNeededForWrite(Relation relation)
534 {
535         /* Nothing to do if this is not a serializable transaction */
536         if (MySerializableXact == InvalidSerializableXact)
537                 return false;
538
539         /* Check if the relation doesn't participate in predicate locking */
540         if (!PredicateLockingNeededForRelation(relation))
541                 return false;
542
543         return true;                            /* no excuse to skip predicate locking */
544 }
545
546
547 /*------------------------------------------------------------------------*/
548
549 /*
550  * These functions are a simple implementation of a list for this specific
551  * type of struct.  If there is ever a generalized shared memory list, we
552  * should probably switch to that.
553  */
554 static SERIALIZABLEXACT *
555 CreatePredXact(void)
556 {
557         PredXactListElement ptle;
558
559         ptle = (PredXactListElement)
560                 SHMQueueNext(&PredXact->availableList,
561                                          &PredXact->availableList,
562                                          offsetof(PredXactListElementData, link));
563         if (!ptle)
564                 return NULL;
565
566         SHMQueueDelete(&ptle->link);
567         SHMQueueInsertBefore(&PredXact->activeList, &ptle->link);
568         return &ptle->sxact;
569 }
570
571 static void
572 ReleasePredXact(SERIALIZABLEXACT *sxact)
573 {
574         PredXactListElement ptle;
575
576         Assert(ShmemAddrIsValid(sxact));
577
578         ptle = (PredXactListElement)
579                 (((char *) sxact)
580                  - offsetof(PredXactListElementData, sxact)
581                  + offsetof(PredXactListElementData, link));
582         SHMQueueDelete(&ptle->link);
583         SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
584 }
585
586 static SERIALIZABLEXACT *
587 FirstPredXact(void)
588 {
589         PredXactListElement ptle;
590
591         ptle = (PredXactListElement)
592                 SHMQueueNext(&PredXact->activeList,
593                                          &PredXact->activeList,
594                                          offsetof(PredXactListElementData, link));
595         if (!ptle)
596                 return NULL;
597
598         return &ptle->sxact;
599 }
600
601 static SERIALIZABLEXACT *
602 NextPredXact(SERIALIZABLEXACT *sxact)
603 {
604         PredXactListElement ptle;
605
606         Assert(ShmemAddrIsValid(sxact));
607
608         ptle = (PredXactListElement)
609                 (((char *) sxact)
610                  - offsetof(PredXactListElementData, sxact)
611                  + offsetof(PredXactListElementData, link));
612         ptle = (PredXactListElement)
613                 SHMQueueNext(&PredXact->activeList,
614                                          &ptle->link,
615                                          offsetof(PredXactListElementData, link));
616         if (!ptle)
617                 return NULL;
618
619         return &ptle->sxact;
620 }
621
622 /*------------------------------------------------------------------------*/
623
624 /*
625  * These functions manage primitive access to the RWConflict pool and lists.
626  */
627 static bool
628 RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
629 {
630         RWConflict      conflict;
631
632         Assert(reader != writer);
633
634         /* Check the ends of the purported conflict first. */
635         if (SxactIsDoomed(reader)
636                 || SxactIsDoomed(writer)
637                 || SHMQueueEmpty(&reader->outConflicts)
638                 || SHMQueueEmpty(&writer->inConflicts))
639                 return false;
640
641         /* A conflict is possible; walk the list to find out. */
642         conflict = (RWConflict)
643                 SHMQueueNext(&reader->outConflicts,
644                                          &reader->outConflicts,
645                                          offsetof(RWConflictData, outLink));
646         while (conflict)
647         {
648                 if (conflict->sxactIn == writer)
649                         return true;
650                 conflict = (RWConflict)
651                         SHMQueueNext(&reader->outConflicts,
652                                                  &conflict->outLink,
653                                                  offsetof(RWConflictData, outLink));
654         }
655
656         /* No conflict found. */
657         return false;
658 }
659
660 static void
661 SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
662 {
663         RWConflict      conflict;
664
665         Assert(reader != writer);
666         Assert(!RWConflictExists(reader, writer));
667
668         conflict = (RWConflict)
669                 SHMQueueNext(&RWConflictPool->availableList,
670                                          &RWConflictPool->availableList,
671                                          offsetof(RWConflictData, outLink));
672         if (!conflict)
673                 ereport(ERROR,
674                                 (errcode(ERRCODE_OUT_OF_MEMORY),
675                                  errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
676                                  errhint("You might need to run fewer transactions at a time or increase max_connections.")));
677
678         SHMQueueDelete(&conflict->outLink);
679
680         conflict->sxactOut = reader;
681         conflict->sxactIn = writer;
682         SHMQueueInsertBefore(&reader->outConflicts, &conflict->outLink);
683         SHMQueueInsertBefore(&writer->inConflicts, &conflict->inLink);
684 }
685
686 static void
687 SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
688                                                   SERIALIZABLEXACT *activeXact)
689 {
690         RWConflict      conflict;
691
692         Assert(roXact != activeXact);
693         Assert(SxactIsReadOnly(roXact));
694         Assert(!SxactIsReadOnly(activeXact));
695
696         conflict = (RWConflict)
697                 SHMQueueNext(&RWConflictPool->availableList,
698                                          &RWConflictPool->availableList,
699                                          offsetof(RWConflictData, outLink));
700         if (!conflict)
701                 ereport(ERROR,
702                                 (errcode(ERRCODE_OUT_OF_MEMORY),
703                                  errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
704                                  errhint("You might need to run fewer transactions at a time or increase max_connections.")));
705
706         SHMQueueDelete(&conflict->outLink);
707
708         conflict->sxactOut = activeXact;
709         conflict->sxactIn = roXact;
710         SHMQueueInsertBefore(&activeXact->possibleUnsafeConflicts,
711                                                  &conflict->outLink);
712         SHMQueueInsertBefore(&roXact->possibleUnsafeConflicts,
713                                                  &conflict->inLink);
714 }
715
716 static void
717 ReleaseRWConflict(RWConflict conflict)
718 {
719         SHMQueueDelete(&conflict->inLink);
720         SHMQueueDelete(&conflict->outLink);
721         SHMQueueInsertBefore(&RWConflictPool->availableList, &conflict->outLink);
722 }
723
724 static void
725 FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
726 {
727         RWConflict      conflict,
728                                 nextConflict;
729
730         Assert(SxactIsReadOnly(sxact));
731         Assert(!SxactIsROSafe(sxact));
732
733         sxact->flags |= SXACT_FLAG_RO_UNSAFE;
734
735         /*
736          * We know this isn't a safe snapshot, so we can stop looking for other
737          * potential conflicts.
738          */
739         conflict = (RWConflict)
740                 SHMQueueNext(&sxact->possibleUnsafeConflicts,
741                                          &sxact->possibleUnsafeConflicts,
742                                          offsetof(RWConflictData, inLink));
743         while (conflict)
744         {
745                 nextConflict = (RWConflict)
746                         SHMQueueNext(&sxact->possibleUnsafeConflicts,
747                                                  &conflict->inLink,
748                                                  offsetof(RWConflictData, inLink));
749
750                 Assert(!SxactIsReadOnly(conflict->sxactOut));
751                 Assert(sxact == conflict->sxactIn);
752
753                 ReleaseRWConflict(conflict);
754
755                 conflict = nextConflict;
756         }
757 }
758
759 /*------------------------------------------------------------------------*/
760
761 /*
762  * We will work on the page range of 0..OLDSERXID_MAX_PAGE.
763  * Compares using wraparound logic, as is required by slru.c.
764  */
765 static bool
766 OldSerXidPagePrecedesLogically(int p, int q)
767 {
768         int                     diff;
769
770         /*
771          * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2.  Both inputs should
772          * be in the range 0..OLDSERXID_MAX_PAGE.
773          */
774         Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
775         Assert(q >= 0 && q <= OLDSERXID_MAX_PAGE);
776
777         diff = p - q;
778         if (diff >= ((OLDSERXID_MAX_PAGE + 1) / 2))
779                 diff -= OLDSERXID_MAX_PAGE + 1;
780         else if (diff < -((int) (OLDSERXID_MAX_PAGE + 1) / 2))
781                 diff += OLDSERXID_MAX_PAGE + 1;
782         return diff < 0;
783 }
784
785 /*
786  * Initialize for the tracking of old serializable committed xids.
787  */
788 static void
789 OldSerXidInit(void)
790 {
791         bool            found;
792
793         /*
794          * Set up SLRU management of the pg_serial data.
795          */
796         OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
797         SimpleLruInit(OldSerXidSlruCtl, "OldSerXid SLRU Ctl",
798                                   NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial");
799         /* Override default assumption that writes should be fsync'd */
800         OldSerXidSlruCtl->do_fsync = false;
801
802         /*
803          * Create or attach to the OldSerXidControl structure.
804          */
805         oldSerXidControl = (OldSerXidControl)
806                 ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);
807
808         if (!found)
809         {
810                 /*
811                  * Set control information to reflect empty SLRU.
812                  */
813                 oldSerXidControl->headPage = -1;
814                 oldSerXidControl->headXid = InvalidTransactionId;
815                 oldSerXidControl->tailXid = InvalidTransactionId;
816                 oldSerXidControl->warningIssued = false;
817         }
818 }
819
820 /*
821  * Record a committed read write serializable xid and the minimum
822  * commitSeqNo of any transactions to which this xid had a rw-conflict out.
823  * An invalid seqNo means that there were no conflicts out from xid.
824  */
825 static void
826 OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
827 {
828         TransactionId tailXid;
829         int                     targetPage;
830         int                     slotno;
831         int                     firstZeroPage;
832         bool            isNewPage;
833
834         Assert(TransactionIdIsValid(xid));
835
836         targetPage = OldSerXidPage(xid);
837
838         LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
839
840         /*
841          * If no serializable transactions are active, there shouldn't be anything
842          * to push out to the SLRU.  Hitting this assert would mean there's
843          * something wrong with the earlier cleanup logic.
844          */
845         tailXid = oldSerXidControl->tailXid;
846         Assert(TransactionIdIsValid(tailXid));
847
848         /*
849          * If the SLRU is currently unused, zero out the whole active region from
850          * tailXid to headXid before taking it into use. Otherwise zero out only
851          * any new pages that enter the tailXid-headXid range as we advance
852          * headXid.
853          */
854         if (oldSerXidControl->headPage < 0)
855         {
856                 firstZeroPage = OldSerXidPage(tailXid);
857                 isNewPage = true;
858         }
859         else
860         {
861                 firstZeroPage = OldSerXidNextPage(oldSerXidControl->headPage);
862                 isNewPage = OldSerXidPagePrecedesLogically(oldSerXidControl->headPage,
863                                                                                                    targetPage);
864         }
865
866         if (!TransactionIdIsValid(oldSerXidControl->headXid)
867                 || TransactionIdFollows(xid, oldSerXidControl->headXid))
868                 oldSerXidControl->headXid = xid;
869         if (isNewPage)
870                 oldSerXidControl->headPage = targetPage;
871
872         /*
873          * Give a warning if we're about to run out of SLRU pages.
874          *
875          * slru.c has a maximum of 64k segments, with 32 (SLRU_PAGES_PER_SEGMENT)
876          * pages each. We need to store a 64-bit integer for each Xid, and with
877          * default 8k block size, 65536*32 pages is only enough to cover 2^30
878          * XIDs. If we're about to hit that limit and wrap around, warn the user.
879          *
880          * To avoid spamming the user, we only give one warning when we've used 1
881          * billion XIDs, and stay silent until the situation is fixed and the
882          * number of XIDs used falls below 800 million again.
883          *
884          * XXX: We have no safeguard to actually *prevent* the wrap-around,
885          * though. All you get is a warning.
886          */
887         if (oldSerXidControl->warningIssued)
888         {
889                 TransactionId lowWatermark;
890
891                 lowWatermark = tailXid + 800000000;
892                 if (lowWatermark < FirstNormalTransactionId)
893                         lowWatermark = FirstNormalTransactionId;
894                 if (TransactionIdPrecedes(xid, lowWatermark))
895                         oldSerXidControl->warningIssued = false;
896         }
897         else
898         {
899                 TransactionId highWatermark;
900
901                 highWatermark = tailXid + 1000000000;
902                 if (highWatermark < FirstNormalTransactionId)
903                         highWatermark = FirstNormalTransactionId;
904                 if (TransactionIdFollows(xid, highWatermark))
905                 {
906                         oldSerXidControl->warningIssued = true;
907                         ereport(WARNING,
908                                         (errmsg("memory for serializable conflict tracking is nearly exhausted"),
909                                          errhint("There might be an idle transaction or a forgotten prepared transaction causing this.")));
910                 }
911         }
912
913         if (isNewPage)
914         {
915                 /* Initialize intervening pages. */
916                 while (firstZeroPage != targetPage)
917                 {
918                         (void) SimpleLruZeroPage(OldSerXidSlruCtl, firstZeroPage);
919                         firstZeroPage = OldSerXidNextPage(firstZeroPage);
920                 }
921                 slotno = SimpleLruZeroPage(OldSerXidSlruCtl, targetPage);
922         }
923         else
924                 slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);
925
926         OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
927         OldSerXidSlruCtl->shared->page_dirty[slotno] = true;
928
929         LWLockRelease(OldSerXidLock);
930 }
931
932 /*
933  * Get the minimum commitSeqNo for any conflict out for the given xid.  For
934  * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
935  * will be returned.
936  */
937 static SerCommitSeqNo
938 OldSerXidGetMinConflictCommitSeqNo(TransactionId xid)
939 {
940         TransactionId headXid;
941         TransactionId tailXid;
942         SerCommitSeqNo val;
943         int                     slotno;
944
945         Assert(TransactionIdIsValid(xid));
946
947         LWLockAcquire(OldSerXidLock, LW_SHARED);
948         headXid = oldSerXidControl->headXid;
949         tailXid = oldSerXidControl->tailXid;
950         LWLockRelease(OldSerXidLock);
951
952         if (!TransactionIdIsValid(headXid))
953                 return 0;
954
955         Assert(TransactionIdIsValid(tailXid));
956
957         if (TransactionIdPrecedes(xid, tailXid)
958                 || TransactionIdFollows(xid, headXid))
959                 return 0;
960
961         /*
962          * The following function must be called without holding OldSerXidLock,
963          * but will return with that lock held, which must then be released.
964          */
965         slotno = SimpleLruReadPage_ReadOnly(OldSerXidSlruCtl,
966                                                                                 OldSerXidPage(xid), xid);
967         val = OldSerXidValue(slotno, xid);
968         LWLockRelease(OldSerXidLock);
969         return val;
970 }
971
972 /*
973  * Call this whenever there is a new xmin for active serializable
974  * transactions.  We don't need to keep information on transactions which
975  * precede that.  InvalidTransactionId means none active, so everything in
976  * the SLRU can be discarded.
977  */
978 static void
979 OldSerXidSetActiveSerXmin(TransactionId xid)
980 {
981         LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
982
983         /*
984          * When no sxacts are active, nothing overlaps, set the xid values to
985          * invalid to show that there are no valid entries.  Don't clear headPage,
986          * though.  A new xmin might still land on that page, and we don't want to
987          * repeatedly zero out the same page.
988          */
989         if (!TransactionIdIsValid(xid))
990         {
991                 oldSerXidControl->tailXid = InvalidTransactionId;
992                 oldSerXidControl->headXid = InvalidTransactionId;
993                 LWLockRelease(OldSerXidLock);
994                 return;
995         }
996
997         /*
998          * When we're recovering prepared transactions, the global xmin might move
999          * backwards depending on the order they're recovered. Normally that's not
1000          * OK, but during recovery no serializable transactions will commit, so
1001          * the SLRU is empty and we can get away with it.
1002          */
1003         if (RecoveryInProgress())
1004         {
1005                 Assert(oldSerXidControl->headPage < 0);
1006                 if (!TransactionIdIsValid(oldSerXidControl->tailXid)
1007                         || TransactionIdPrecedes(xid, oldSerXidControl->tailXid))
1008                 {
1009                         oldSerXidControl->tailXid = xid;
1010                 }
1011                 LWLockRelease(OldSerXidLock);
1012                 return;
1013         }
1014
1015         Assert(!TransactionIdIsValid(oldSerXidControl->tailXid)
1016                    || TransactionIdFollows(xid, oldSerXidControl->tailXid));
1017
1018         oldSerXidControl->tailXid = xid;
1019
1020         LWLockRelease(OldSerXidLock);
1021 }
1022
1023 /*
1024  * Perform a checkpoint --- either during shutdown, or on-the-fly
1025  *
1026  * We don't have any data that needs to survive a restart, but this is a
1027  * convenient place to truncate the SLRU.
1028  */
1029 void
1030 CheckPointPredicate(void)
1031 {
1032         int                     tailPage;
1033
1034         LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
1035
1036         /* Exit quickly if the SLRU is currently not in use. */
1037         if (oldSerXidControl->headPage < 0)
1038         {
1039                 LWLockRelease(OldSerXidLock);
1040                 return;
1041         }
1042
1043         if (TransactionIdIsValid(oldSerXidControl->tailXid))
1044         {
1045                 /* We can truncate the SLRU up to the page containing tailXid */
1046                 tailPage = OldSerXidPage(oldSerXidControl->tailXid);
1047         }
1048         else
1049         {
1050                 /*
1051                  * The SLRU is no longer needed. Truncate to head before we set head
1052                  * invalid.
1053                  *
1054                  * XXX: It's possible that the SLRU is not needed again until XID
1055                  * wrap-around has happened, so that the segment containing headPage
1056                  * that we leave behind will appear to be new again. In that case it
1057                  * won't be removed until XID horizon advances enough to make it
1058                  * current again.
1059                  */
1060                 tailPage = oldSerXidControl->headPage;
1061                 oldSerXidControl->headPage = -1;
1062         }
1063
1064         LWLockRelease(OldSerXidLock);
1065
1066         /* Truncate away pages that are no longer required */
1067         SimpleLruTruncate(OldSerXidSlruCtl, tailPage);
1068
1069         /*
1070          * Flush dirty SLRU pages to disk
1071          *
1072          * This is not actually necessary from a correctness point of view. We do
1073          * it merely as a debugging aid.
1074          *
1075          * We're doing this after the truncation to avoid writing pages right
1076          * before deleting the file in which they sit, which would be completely
1077          * pointless.
1078          */
1079         SimpleLruFlush(OldSerXidSlruCtl, true);
1080 }
1081
1082 /*------------------------------------------------------------------------*/
1083
1084 /*
1085  * InitPredicateLocks -- Initialize the predicate locking data structures.
1086  *
1087  * This is called from CreateSharedMemoryAndSemaphores(), which see for
1088  * more comments.  In the normal postmaster case, the shared hash tables
1089  * are created here.  Backends inherit the pointers
1090  * to the shared tables via fork().  In the EXEC_BACKEND case, each
1091  * backend re-executes this code to obtain pointers to the already existing
1092  * shared hash tables.
1093  */
1094 void
1095 InitPredicateLocks(void)
1096 {
1097         HASHCTL         info;
1098         long            max_table_size;
1099         Size            requestSize;
1100         bool            found;
1101
1102         /*
1103          * Compute size of predicate lock target hashtable. Note these
1104          * calculations must agree with PredicateLockShmemSize!
1105          */
1106         max_table_size = NPREDICATELOCKTARGETENTS();
1107
1108         /*
1109          * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
1110          * per-predicate-lock-target information.
1111          */
1112         MemSet(&info, 0, sizeof(info));
1113         info.keysize = sizeof(PREDICATELOCKTARGETTAG);
1114         info.entrysize = sizeof(PREDICATELOCKTARGET);
1115         info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
1116
1117         PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
1118                                                                                         max_table_size,
1119                                                                                         max_table_size,
1120                                                                                         &info,
1121                                                                                         HASH_ELEM | HASH_BLOBS |
1122                                                                                         HASH_PARTITION | HASH_FIXED_SIZE);
1123
1124         /* Assume an average of 2 xacts per target */
1125         max_table_size *= 2;
1126
1127         /*
1128          * Reserve a dummy entry in the hash table; we use it to make sure there's
1129          * always one entry available when we need to split or combine a page,
1130          * because running out of space there could mean aborting a
1131          * non-serializable transaction.
1132          */
1133         hash_search(PredicateLockTargetHash, &ScratchTargetTag, HASH_ENTER, NULL);
1134
1135         /*
1136          * Allocate hash table for PREDICATELOCK structs.  This stores per
1137          * xact-lock-of-a-target information.
1138          */
1139         MemSet(&info, 0, sizeof(info));
1140         info.keysize = sizeof(PREDICATELOCKTAG);
1141         info.entrysize = sizeof(PREDICATELOCK);
1142         info.hash = predicatelock_hash;
1143         info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
1144
1145         PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
1146                                                                           max_table_size,
1147                                                                           max_table_size,
1148                                                                           &info,
1149                                                                           HASH_ELEM | HASH_FUNCTION |
1150                                                                           HASH_PARTITION | HASH_FIXED_SIZE);
1151
1152         /*
1153          * Compute size for serializable transaction hashtable. Note these
1154          * calculations must agree with PredicateLockShmemSize!
1155          */
1156         max_table_size = (MaxBackends + max_prepared_xacts);
1157
1158         /*
1159          * Allocate a list to hold information on transactions participating in
1160          * predicate locking.
1161          *
1162          * Assume an average of 10 predicate locking transactions per backend.
1163          * This allows aggressive cleanup while detail is present before data must
1164          * be summarized for storage in SLRU and the "dummy" transaction.
1165          */
1166         max_table_size *= 10;
1167
1168         PredXact = ShmemInitStruct("PredXactList",
1169                                                            PredXactListDataSize,
1170                                                            &found);
1171         if (!found)
1172         {
1173                 int                     i;
1174
1175                 SHMQueueInit(&PredXact->availableList);
1176                 SHMQueueInit(&PredXact->activeList);
1177                 PredXact->SxactGlobalXmin = InvalidTransactionId;
1178                 PredXact->SxactGlobalXminCount = 0;
1179                 PredXact->WritableSxactCount = 0;
1180                 PredXact->LastSxactCommitSeqNo = FirstNormalSerCommitSeqNo - 1;
1181                 PredXact->CanPartialClearThrough = 0;
1182                 PredXact->HavePartialClearedThrough = 0;
1183                 requestSize = mul_size((Size) max_table_size,
1184                                                            PredXactListElementDataSize);
1185                 PredXact->element = ShmemAlloc(requestSize);
1186                 if (PredXact->element == NULL)
1187                         ereport(ERROR,
1188                                         (errcode(ERRCODE_OUT_OF_MEMORY),
1189                          errmsg("not enough shared memory for elements of data structure"
1190                                         " \"%s\" (%zu bytes requested)",
1191                                         "PredXactList", requestSize)));
1192                 /* Add all elements to available list, clean. */
1193                 memset(PredXact->element, 0, requestSize);
1194                 for (i = 0; i < max_table_size; i++)
1195                 {
1196                         SHMQueueInsertBefore(&(PredXact->availableList),
1197                                                                  &(PredXact->element[i].link));
1198                 }
1199                 PredXact->OldCommittedSxact = CreatePredXact();
1200                 SetInvalidVirtualTransactionId(PredXact->OldCommittedSxact->vxid);
1201                 PredXact->OldCommittedSxact->prepareSeqNo = 0;
1202                 PredXact->OldCommittedSxact->commitSeqNo = 0;
1203                 PredXact->OldCommittedSxact->SeqNo.lastCommitBeforeSnapshot = 0;
1204                 SHMQueueInit(&PredXact->OldCommittedSxact->outConflicts);
1205                 SHMQueueInit(&PredXact->OldCommittedSxact->inConflicts);
1206                 SHMQueueInit(&PredXact->OldCommittedSxact->predicateLocks);
1207                 SHMQueueInit(&PredXact->OldCommittedSxact->finishedLink);
1208                 SHMQueueInit(&PredXact->OldCommittedSxact->possibleUnsafeConflicts);
1209                 PredXact->OldCommittedSxact->topXid = InvalidTransactionId;
1210                 PredXact->OldCommittedSxact->finishedBefore = InvalidTransactionId;
1211                 PredXact->OldCommittedSxact->xmin = InvalidTransactionId;
1212                 PredXact->OldCommittedSxact->flags = SXACT_FLAG_COMMITTED;
1213                 PredXact->OldCommittedSxact->pid = 0;
1214         }
1215         /* This never changes, so let's keep a local copy. */
1216         OldCommittedSxact = PredXact->OldCommittedSxact;
1217
1218         /*
1219          * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
1220          * information for serializable transactions which have accessed data.
1221          */
1222         MemSet(&info, 0, sizeof(info));
1223         info.keysize = sizeof(SERIALIZABLEXIDTAG);
1224         info.entrysize = sizeof(SERIALIZABLEXID);
1225
1226         SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
1227                                                                                 max_table_size,
1228                                                                                 max_table_size,
1229                                                                                 &info,
1230                                                                                 HASH_ELEM | HASH_BLOBS |
1231                                                                                 HASH_FIXED_SIZE);
1232
1233         /*
1234          * Allocate space for tracking rw-conflicts in lists attached to the
1235          * transactions.
1236          *
1237          * Assume an average of 5 conflicts per transaction.  Calculations suggest
1238          * that this will prevent resource exhaustion in even the most pessimal
1239          * loads up to max_connections = 200 with all 200 connections pounding the
1240          * database with serializable transactions.  Beyond that, there may be
1241          * occasional transactions canceled when trying to flag conflicts. That's
1242          * probably OK.
1243          */
1244         max_table_size *= 5;
1245
1246         RWConflictPool = ShmemInitStruct("RWConflictPool",
1247                                                                          RWConflictPoolHeaderDataSize,
1248                                                                          &found);
1249         if (!found)
1250         {
1251                 int                     i;
1252
1253                 SHMQueueInit(&RWConflictPool->availableList);
1254                 requestSize = mul_size((Size) max_table_size,
1255                                                            RWConflictDataSize);
1256                 RWConflictPool->element = ShmemAlloc(requestSize);
1257                 if (RWConflictPool->element == NULL)
1258                         ereport(ERROR,
1259                                         (errcode(ERRCODE_OUT_OF_MEMORY),
1260                          errmsg("not enough shared memory for elements of data structure"
1261                                         " \"%s\" (%zu bytes requested)",
1262                                         "RWConflictPool", requestSize)));
1263                 /* Add all elements to available list, clean. */
1264                 memset(RWConflictPool->element, 0, requestSize);
1265                 for (i = 0; i < max_table_size; i++)
1266                 {
1267                         SHMQueueInsertBefore(&(RWConflictPool->availableList),
1268                                                                  &(RWConflictPool->element[i].outLink));
1269                 }
1270         }
1271
1272         /*
1273          * Create or attach to the header for the list of finished serializable
1274          * transactions.
1275          */
1276         FinishedSerializableTransactions = (SHM_QUEUE *)
1277                 ShmemInitStruct("FinishedSerializableTransactions",
1278                                                 sizeof(SHM_QUEUE),
1279                                                 &found);
1280         if (!found)
1281                 SHMQueueInit(FinishedSerializableTransactions);
1282
1283         /*
1284          * Initialize the SLRU storage for old committed serializable
1285          * transactions.
1286          */
1287         OldSerXidInit();
1288
1289         /* Pre-calculate the hash and partition lock of the scratch entry */
1290         ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
1291         ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
1292 }
1293
1294 /*
1295  * Estimate shared-memory space used for predicate lock table
1296  */
1297 Size
1298 PredicateLockShmemSize(void)
1299 {
1300         Size            size = 0;
1301         long            max_table_size;
1302
1303         /* predicate lock target hash table */
1304         max_table_size = NPREDICATELOCKTARGETENTS();
1305         size = add_size(size, hash_estimate_size(max_table_size,
1306                                                                                          sizeof(PREDICATELOCKTARGET)));
1307
1308         /* predicate lock hash table */
1309         max_table_size *= 2;
1310         size = add_size(size, hash_estimate_size(max_table_size,
1311                                                                                          sizeof(PREDICATELOCK)));
1312
1313         /*
1314          * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1315          * margin.
1316          */
1317         size = add_size(size, size / 10);
1318
1319         /* transaction list */
1320         max_table_size = MaxBackends + max_prepared_xacts;
1321         max_table_size *= 10;
1322         size = add_size(size, PredXactListDataSize);
1323         size = add_size(size, mul_size((Size) max_table_size,
1324                                                                    PredXactListElementDataSize));
1325
1326         /* transaction xid table */
1327         size = add_size(size, hash_estimate_size(max_table_size,
1328                                                                                          sizeof(SERIALIZABLEXID)));
1329
1330         /* rw-conflict pool */
1331         max_table_size *= 5;
1332         size = add_size(size, RWConflictPoolHeaderDataSize);
1333         size = add_size(size, mul_size((Size) max_table_size,
1334                                                                    RWConflictDataSize));
1335
1336         /* Head for list of finished serializable transactions. */
1337         size = add_size(size, sizeof(SHM_QUEUE));
1338
1339         /* Shared memory structures for SLRU tracking of old committed xids. */
1340         size = add_size(size, sizeof(OldSerXidControlData));
1341         size = add_size(size, SimpleLruShmemSize(NUM_OLDSERXID_BUFFERS, 0));
1342
1343         return size;
1344 }
1345
1346
1347 /*
1348  * Compute the hash code associated with a PREDICATELOCKTAG.
1349  *
1350  * Because we want to use just one set of partition locks for both the
1351  * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1352  * that PREDICATELOCKs fall into the same partition number as their
1353  * associated PREDICATELOCKTARGETs.  dynahash.c expects the partition number
1354  * to be the low-order bits of the hash code, and therefore a
1355  * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1356  * associated PREDICATELOCKTARGETTAG's hash code.  We achieve this with this
1357  * specialized hash function.
1358  */
1359 static uint32
1360 predicatelock_hash(const void *key, Size keysize)
1361 {
1362         const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1363         uint32          targethash;
1364
1365         Assert(keysize == sizeof(PREDICATELOCKTAG));
1366
1367         /* Look into the associated target object, and compute its hash code */
1368         targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1369
1370         return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1371 }
1372
1373
1374 /*
1375  * GetPredicateLockStatusData
1376  *              Return a table containing the internal state of the predicate
1377  *              lock manager for use in pg_lock_status.
1378  *
1379  * Like GetLockStatusData, this function tries to hold the partition LWLocks
1380  * for as short a time as possible by returning two arrays that simply
1381  * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1382  * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1383  * SERIALIZABLEXACT will likely appear.
1384  */
1385 PredicateLockData *
1386 GetPredicateLockStatusData(void)
1387 {
1388         PredicateLockData *data;
1389         int                     i;
1390         int                     els,
1391                                 el;
1392         HASH_SEQ_STATUS seqstat;
1393         PREDICATELOCK *predlock;
1394
1395         data = (PredicateLockData *) palloc(sizeof(PredicateLockData));
1396
1397         /*
1398          * To ensure consistency, take simultaneous locks on all partition locks
1399          * in ascending order, then SerializableXactHashLock.
1400          */
1401         for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
1402                 LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
1403         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1404
1405         /* Get number of locks and allocate appropriately-sized arrays. */
1406         els = hash_get_num_entries(PredicateLockHash);
1407         data->nelements = els;
1408         data->locktags = (PREDICATELOCKTARGETTAG *)
1409                 palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
1410         data->xacts = (SERIALIZABLEXACT *)
1411                 palloc(sizeof(SERIALIZABLEXACT) * els);
1412
1413
1414         /* Scan through PredicateLockHash and copy contents */
1415         hash_seq_init(&seqstat, PredicateLockHash);
1416
1417         el = 0;
1418
1419         while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
1420         {
1421                 data->locktags[el] = predlock->tag.myTarget->tag;
1422                 data->xacts[el] = *predlock->tag.myXact;
1423                 el++;
1424         }
1425
1426         Assert(el == els);
1427
1428         /* Release locks in reverse order */
1429         LWLockRelease(SerializableXactHashLock);
1430         for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
1431                 LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
1432
1433         return data;
1434 }
1435
1436 /*
1437  * Free up shared memory structures by pushing the oldest sxact (the one at
1438  * the front of the SummarizeOldestCommittedSxact queue) into summary form.
1439  * Each call will free exactly one SERIALIZABLEXACT structure and may also
1440  * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1441  * PREDICATELOCKTARGET, RWConflictData.
1442  */
1443 static void
1444 SummarizeOldestCommittedSxact(void)
1445 {
1446         SERIALIZABLEXACT *sxact;
1447
1448         LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
1449
1450         /*
1451          * This function is only called if there are no sxact slots available.
1452          * Some of them must belong to old, already-finished transactions, so
1453          * there should be something in FinishedSerializableTransactions list that
1454          * we can summarize. However, there's a race condition: while we were not
1455          * holding any locks, a transaction might have ended and cleaned up all
1456          * the finished sxact entries already, freeing up their sxact slots. In
1457          * that case, we have nothing to do here. The caller will find one of the
1458          * slots released by the other backend when it retries.
1459          */
1460         if (SHMQueueEmpty(FinishedSerializableTransactions))
1461         {
1462                 LWLockRelease(SerializableFinishedListLock);
1463                 return;
1464         }
1465
1466         /*
1467          * Grab the first sxact off the finished list -- this will be the earliest
1468          * commit.  Remove it from the list.
1469          */
1470         sxact = (SERIALIZABLEXACT *)
1471                 SHMQueueNext(FinishedSerializableTransactions,
1472                                          FinishedSerializableTransactions,
1473                                          offsetof(SERIALIZABLEXACT, finishedLink));
1474         SHMQueueDelete(&(sxact->finishedLink));
1475
1476         /* Add to SLRU summary information. */
1477         if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
1478                 OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
1479                    ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
1480
1481         /* Summarize and release the detail. */
1482         ReleaseOneSerializableXact(sxact, false, true);
1483
1484         LWLockRelease(SerializableFinishedListLock);
1485 }
1486
1487 /*
1488  * GetSafeSnapshot
1489  *              Obtain and register a snapshot for a READ ONLY DEFERRABLE
1490  *              transaction. Ensures that the snapshot is "safe", i.e. a
1491  *              read-only transaction running on it can execute serializably
1492  *              without further checks. This requires waiting for concurrent
1493  *              transactions to complete, and retrying with a new snapshot if
1494  *              one of them could possibly create a conflict.
1495  *
1496  *              As with GetSerializableTransactionSnapshot (which this is a subroutine
1497  *              for), the passed-in Snapshot pointer should reference a static data
1498  *              area that can safely be passed to GetSnapshotData.
1499  */
1500 static Snapshot
1501 GetSafeSnapshot(Snapshot origSnapshot)
1502 {
1503         Snapshot        snapshot;
1504
1505         Assert(XactReadOnly && XactDeferrable);
1506
1507         while (true)
1508         {
1509                 /*
1510                  * GetSerializableTransactionSnapshotInt is going to call
1511                  * GetSnapshotData, so we need to provide it the static snapshot area
1512                  * our caller passed to us.  The pointer returned is actually the same
1513                  * one passed to it, but we avoid assuming that here.
1514                  */
1515                 snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
1516                                                                                                            InvalidTransactionId);
1517
1518                 if (MySerializableXact == InvalidSerializableXact)
1519                         return snapshot;        /* no concurrent r/w xacts; it's safe */
1520
1521                 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1522
1523                 /*
1524                  * Wait for concurrent transactions to finish. Stop early if one of
1525                  * them marked us as conflicted.
1526                  */
1527                 MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
1528                 while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
1529                                  SxactIsROUnsafe(MySerializableXact)))
1530                 {
1531                         LWLockRelease(SerializableXactHashLock);
1532                         ProcWaitForSignal();
1533                         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1534                 }
1535                 MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
1536
1537                 if (!SxactIsROUnsafe(MySerializableXact))
1538                 {
1539                         LWLockRelease(SerializableXactHashLock);
1540                         break;                          /* success */
1541                 }
1542
1543                 LWLockRelease(SerializableXactHashLock);
1544
1545                 /* else, need to retry... */
1546                 ereport(DEBUG2,
1547                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1548                                  errmsg("deferrable snapshot was unsafe; trying a new one")));
1549                 ReleasePredicateLocks(false);
1550         }
1551
1552         /*
1553          * Now we have a safe snapshot, so we don't need to do any further checks.
1554          */
1555         Assert(SxactIsROSafe(MySerializableXact));
1556         ReleasePredicateLocks(false);
1557
1558         return snapshot;
1559 }
1560
1561 /*
1562  * Acquire a snapshot that can be used for the current transaction.
1563  *
1564  * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1565  * It should be current for this process and be contained in PredXact.
1566  *
1567  * The passed-in Snapshot pointer should reference a static data area that
1568  * can safely be passed to GetSnapshotData.  The return value is actually
1569  * always this same pointer; no new snapshot data structure is allocated
1570  * within this function.
1571  */
1572 Snapshot
1573 GetSerializableTransactionSnapshot(Snapshot snapshot)
1574 {
1575         Assert(IsolationIsSerializable());
1576
1577         /*
1578          * Can't use serializable mode while recovery is still active, as it is,
1579          * for example, on a hot standby.  We could get here despite the check in
1580          * check_XactIsoLevel() if default_transaction_isolation is set to
1581          * serializable, so phrase the hint accordingly.
1582          */
1583         if (RecoveryInProgress())
1584                 ereport(ERROR,
1585                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1586                                  errmsg("cannot use serializable mode in a hot standby"),
1587                                  errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1588                                  errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1589
1590         /*
1591          * A special optimization is available for SERIALIZABLE READ ONLY
1592          * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1593          * thereby avoid all SSI overhead once it's running.
1594          */
1595         if (XactReadOnly && XactDeferrable)
1596                 return GetSafeSnapshot(snapshot);
1597
1598         return GetSerializableTransactionSnapshotInt(snapshot,
1599                                                                                                  InvalidTransactionId);
1600 }
1601
1602 /*
1603  * Import a snapshot to be used for the current transaction.
1604  *
1605  * This is nearly the same as GetSerializableTransactionSnapshot, except that
1606  * we don't take a new snapshot, but rather use the data we're handed.
1607  *
1608  * The caller must have verified that the snapshot came from a serializable
1609  * transaction; and if we're read-write, the source transaction must not be
1610  * read-only.
1611  */
1612 void
1613 SetSerializableTransactionSnapshot(Snapshot snapshot,
1614                                                                    TransactionId sourcexid)
1615 {
1616         Assert(IsolationIsSerializable());
1617
1618         /*
1619          * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1620          * import snapshots, since there's no way to wait for a safe snapshot when
1621          * we're using the snap we're told to.  (XXX instead of throwing an error,
1622          * we could just ignore the XactDeferrable flag?)
1623          */
1624         if (XactReadOnly && XactDeferrable)
1625                 ereport(ERROR,
1626                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1627                                  errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1628
1629         (void) GetSerializableTransactionSnapshotInt(snapshot, sourcexid);
1630 }
1631
1632 /*
1633  * Guts of GetSerializableTransactionSnapshot
1634  *
1635  * If sourcexid is valid, this is actually an import operation and we should
1636  * skip calling GetSnapshotData, because the snapshot contents are already
1637  * loaded up.  HOWEVER: to avoid race conditions, we must check that the
1638  * source xact is still running after we acquire SerializableXactHashLock.
1639  * We do that by calling ProcArrayInstallImportedXmin.
1640  */
1641 static Snapshot
1642 GetSerializableTransactionSnapshotInt(Snapshot snapshot,
1643                                                                           TransactionId sourcexid)
1644 {
1645         PGPROC     *proc;
1646         VirtualTransactionId vxid;
1647         SERIALIZABLEXACT *sxact,
1648                            *othersxact;
1649         HASHCTL         hash_ctl;
1650
1651         /* We only do this for serializable transactions.  Once. */
1652         Assert(MySerializableXact == InvalidSerializableXact);
1653
1654         Assert(!RecoveryInProgress());
1655
1656         proc = MyProc;
1657         Assert(proc != NULL);
1658         GET_VXID_FROM_PGPROC(vxid, *proc);
1659
1660         /*
1661          * First we get the sxact structure, which may involve looping and access
1662          * to the "finished" list to free a structure for use.
1663          *
1664          * We must hold SerializableXactHashLock when taking/checking the snapshot
1665          * to avoid race conditions, for much the same reasons that
1666          * GetSnapshotData takes the ProcArrayLock.  Since we might have to
1667          * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
1668          * this means we have to create the sxact first, which is a bit annoying
1669          * (in particular, an elog(ERROR) in procarray.c would cause us to leak
1670          * the sxact).  Consider refactoring to avoid this.
1671          */
1672 #ifdef TEST_OLDSERXID
1673         SummarizeOldestCommittedSxact();
1674 #endif
1675         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1676         do
1677         {
1678                 sxact = CreatePredXact();
1679                 /* If null, push out committed sxact to SLRU summary & retry. */
1680                 if (!sxact)
1681                 {
1682                         LWLockRelease(SerializableXactHashLock);
1683                         SummarizeOldestCommittedSxact();
1684                         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1685                 }
1686         } while (!sxact);
1687
1688         /* Get the snapshot, or check that it's safe to use */
1689         if (!TransactionIdIsValid(sourcexid))
1690                 snapshot = GetSnapshotData(snapshot);
1691         else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcexid))
1692         {
1693                 ReleasePredXact(sxact);
1694                 LWLockRelease(SerializableXactHashLock);
1695                 ereport(ERROR,
1696                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1697                                  errmsg("could not import the requested snapshot"),
1698                            errdetail("The source transaction %u is not running anymore.",
1699                                                  sourcexid)));
1700         }
1701
1702         /*
1703          * If there are no serializable transactions which are not read-only, we
1704          * can "opt out" of predicate locking and conflict checking for a
1705          * read-only transaction.
1706          *
1707          * The reason this is safe is that a read-only transaction can only become
1708          * part of a dangerous structure if it overlaps a writable transaction
1709          * which in turn overlaps a writable transaction which committed before
1710          * the read-only transaction started.  A new writable transaction can
1711          * overlap this one, but it can't meet the other condition of overlapping
1712          * a transaction which committed before this one started.
1713          */
1714         if (XactReadOnly && PredXact->WritableSxactCount == 0)
1715         {
1716                 ReleasePredXact(sxact);
1717                 LWLockRelease(SerializableXactHashLock);
1718                 return snapshot;
1719         }
1720
1721         /* Maintain serializable global xmin info. */
1722         if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
1723         {
1724                 Assert(PredXact->SxactGlobalXminCount == 0);
1725                 PredXact->SxactGlobalXmin = snapshot->xmin;
1726                 PredXact->SxactGlobalXminCount = 1;
1727                 OldSerXidSetActiveSerXmin(snapshot->xmin);
1728         }
1729         else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
1730         {
1731                 Assert(PredXact->SxactGlobalXminCount > 0);
1732                 PredXact->SxactGlobalXminCount++;
1733         }
1734         else
1735         {
1736                 Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
1737         }
1738
1739         /* Initialize the structure. */
1740         sxact->vxid = vxid;
1741         sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
1742         sxact->prepareSeqNo = InvalidSerCommitSeqNo;
1743         sxact->commitSeqNo = InvalidSerCommitSeqNo;
1744         SHMQueueInit(&(sxact->outConflicts));
1745         SHMQueueInit(&(sxact->inConflicts));
1746         SHMQueueInit(&(sxact->possibleUnsafeConflicts));
1747         sxact->topXid = GetTopTransactionIdIfAny();
1748         sxact->finishedBefore = InvalidTransactionId;
1749         sxact->xmin = snapshot->xmin;
1750         sxact->pid = MyProcPid;
1751         SHMQueueInit(&(sxact->predicateLocks));
1752         SHMQueueElemInit(&(sxact->finishedLink));
1753         sxact->flags = 0;
1754         if (XactReadOnly)
1755         {
1756                 sxact->flags |= SXACT_FLAG_READ_ONLY;
1757
1758                 /*
1759                  * Register all concurrent r/w transactions as possible conflicts; if
1760                  * all of them commit without any outgoing conflicts to earlier
1761                  * transactions then this snapshot can be deemed safe (and we can run
1762                  * without tracking predicate locks).
1763                  */
1764                 for (othersxact = FirstPredXact();
1765                          othersxact != NULL;
1766                          othersxact = NextPredXact(othersxact))
1767                 {
1768                         if (!SxactIsCommitted(othersxact)
1769                                 && !SxactIsDoomed(othersxact)
1770                                 && !SxactIsReadOnly(othersxact))
1771                         {
1772                                 SetPossibleUnsafeConflict(sxact, othersxact);
1773                         }
1774                 }
1775         }
1776         else
1777         {
1778                 ++(PredXact->WritableSxactCount);
1779                 Assert(PredXact->WritableSxactCount <=
1780                            (MaxBackends + max_prepared_xacts));
1781         }
1782
1783         MySerializableXact = sxact;
1784         MyXactDidWrite = false;         /* haven't written anything yet */
1785
1786         LWLockRelease(SerializableXactHashLock);
1787
1788         /* Initialize the backend-local hash table of parent locks */
1789         Assert(LocalPredicateLockHash == NULL);
1790         MemSet(&hash_ctl, 0, sizeof(hash_ctl));
1791         hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1792         hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1793         LocalPredicateLockHash = hash_create("Local predicate lock",
1794                                                                                  max_predicate_locks_per_xact,
1795                                                                                  &hash_ctl,
1796                                                                                  HASH_ELEM | HASH_BLOBS);
1797
1798         return snapshot;
1799 }
1800
1801 /*
1802  * Register the top level XID in SerializableXidHash.
1803  * Also store it for easy reference in MySerializableXact.
1804  */
1805 void
1806 RegisterPredicateLockingXid(TransactionId xid)
1807 {
1808         SERIALIZABLEXIDTAG sxidtag;
1809         SERIALIZABLEXID *sxid;
1810         bool            found;
1811
1812         /*
1813          * If we're not tracking predicate lock data for this transaction, we
1814          * should ignore the request and return quickly.
1815          */
1816         if (MySerializableXact == InvalidSerializableXact)
1817                 return;
1818
1819         /* We should have a valid XID and be at the top level. */
1820         Assert(TransactionIdIsValid(xid));
1821
1822         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1823
1824         /* This should only be done once per transaction. */
1825         Assert(MySerializableXact->topXid == InvalidTransactionId);
1826
1827         MySerializableXact->topXid = xid;
1828
1829         sxidtag.xid = xid;
1830         sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
1831                                                                                    &sxidtag,
1832                                                                                    HASH_ENTER, &found);
1833         Assert(!found);
1834
1835         /* Initialize the structure. */
1836         sxid->myXact = MySerializableXact;
1837         LWLockRelease(SerializableXactHashLock);
1838 }
1839
1840
1841 /*
1842  * Check whether there are any predicate locks held by any transaction
1843  * for the page at the given block number.
1844  *
1845  * Note that the transaction may be completed but not yet subject to
1846  * cleanup due to overlapping serializable transactions.  This must
1847  * return valid information regardless of transaction isolation level.
1848  *
1849  * Also note that this doesn't check for a conflicting relation lock,
1850  * just a lock specifically on the given page.
1851  *
1852  * One use is to support proper behavior during GiST index vacuum.
1853  */
1854 bool
1855 PageIsPredicateLocked(Relation relation, BlockNumber blkno)
1856 {
1857         PREDICATELOCKTARGETTAG targettag;
1858         uint32          targettaghash;
1859         LWLock     *partitionLock;
1860         PREDICATELOCKTARGET *target;
1861
1862         SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
1863                                                                         relation->rd_node.dbNode,
1864                                                                         relation->rd_id,
1865                                                                         blkno);
1866
1867         targettaghash = PredicateLockTargetTagHashCode(&targettag);
1868         partitionLock = PredicateLockHashPartitionLock(targettaghash);
1869         LWLockAcquire(partitionLock, LW_SHARED);
1870         target = (PREDICATELOCKTARGET *)
1871                 hash_search_with_hash_value(PredicateLockTargetHash,
1872                                                                         &targettag, targettaghash,
1873                                                                         HASH_FIND, NULL);
1874         LWLockRelease(partitionLock);
1875
1876         return (target != NULL);
1877 }
1878
1879
1880 /*
1881  * Check whether a particular lock is held by this transaction.
1882  *
1883  * Important note: this function may return false even if the lock is
1884  * being held, because it uses the local lock table which is not
1885  * updated if another transaction modifies our lock list (e.g. to
1886  * split an index page). It can also return true when a coarser
1887  * granularity lock that covers this target is being held. Be careful
1888  * to only use this function in circumstances where such errors are
1889  * acceptable!
1890  */
1891 static bool
1892 PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag)
1893 {
1894         LOCALPREDICATELOCK *lock;
1895
1896         /* check local hash table */
1897         lock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
1898                                                                                           targettag,
1899                                                                                           HASH_FIND, NULL);
1900
1901         if (!lock)
1902                 return false;
1903
1904         /*
1905          * Found entry in the table, but still need to check whether it's actually
1906          * held -- it could just be a parent of some held lock.
1907          */
1908         return lock->held;
1909 }
1910
1911 /*
1912  * Return the parent lock tag in the lock hierarchy: the next coarser
1913  * lock that covers the provided tag.
1914  *
1915  * Returns true and sets *parent to the parent tag if one exists,
1916  * returns false if none exists.
1917  */
1918 static bool
1919 GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
1920                                                   PREDICATELOCKTARGETTAG *parent)
1921 {
1922         switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
1923         {
1924                 case PREDLOCKTAG_RELATION:
1925                         /* relation locks have no parent lock */
1926                         return false;
1927
1928                 case PREDLOCKTAG_PAGE:
1929                         /* parent lock is relation lock */
1930                         SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
1931                                                                                  GET_PREDICATELOCKTARGETTAG_DB(*tag),
1932                                                                   GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
1933
1934                         return true;
1935
1936                 case PREDLOCKTAG_TUPLE:
1937                         /* parent lock is page lock */
1938                         SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
1939                                                                                  GET_PREDICATELOCKTARGETTAG_DB(*tag),
1940                                                                    GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
1941                                                                           GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
1942                         return true;
1943         }
1944
1945         /* not reachable */
1946         Assert(false);
1947         return false;
1948 }
1949
1950 /*
1951  * Check whether the lock we are considering is already covered by a
1952  * coarser lock for our transaction.
1953  *
1954  * Like PredicateLockExists, this function might return a false
1955  * negative, but it will never return a false positive.
1956  */
1957 static bool
1958 CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
1959 {
1960         PREDICATELOCKTARGETTAG targettag,
1961                                 parenttag;
1962
1963         targettag = *newtargettag;
1964
1965         /* check parents iteratively until no more */
1966         while (GetParentPredicateLockTag(&targettag, &parenttag))
1967         {
1968                 targettag = parenttag;
1969                 if (PredicateLockExists(&targettag))
1970                         return true;
1971         }
1972
1973         /* no more parents to check; lock is not covered */
1974         return false;
1975 }
1976
1977 /*
1978  * Remove the dummy entry from the predicate lock target hash, to free up some
1979  * scratch space. The caller must be holding SerializablePredicateLockListLock,
1980  * and must restore the entry with RestoreScratchTarget() before releasing the
1981  * lock.
1982  *
1983  * If lockheld is true, the caller is already holding the partition lock
1984  * of the partition containing the scratch entry.
1985  */
1986 static void
1987 RemoveScratchTarget(bool lockheld)
1988 {
1989         bool            found;
1990
1991         Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
1992
1993         if (!lockheld)
1994                 LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
1995         hash_search_with_hash_value(PredicateLockTargetHash,
1996                                                                 &ScratchTargetTag,
1997                                                                 ScratchTargetTagHash,
1998                                                                 HASH_REMOVE, &found);
1999         Assert(found);
2000         if (!lockheld)
2001                 LWLockRelease(ScratchPartitionLock);
2002 }
2003
2004 /*
2005  * Re-insert the dummy entry in predicate lock target hash.
2006  */
2007 static void
2008 RestoreScratchTarget(bool lockheld)
2009 {
2010         bool            found;
2011
2012         Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2013
2014         if (!lockheld)
2015                 LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2016         hash_search_with_hash_value(PredicateLockTargetHash,
2017                                                                 &ScratchTargetTag,
2018                                                                 ScratchTargetTagHash,
2019                                                                 HASH_ENTER, &found);
2020         Assert(!found);
2021         if (!lockheld)
2022                 LWLockRelease(ScratchPartitionLock);
2023 }
2024
2025 /*
2026  * Check whether the list of related predicate locks is empty for a
2027  * predicate lock target, and remove the target if it is.
2028  */
2029 static void
2030 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
2031 {
2032         PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
2033
2034         Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2035
2036         /* Can't remove it until no locks at this target. */
2037         if (!SHMQueueEmpty(&target->predicateLocks))
2038                 return;
2039
2040         /* Actually remove the target. */
2041         rmtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2042                                                                                    &target->tag,
2043                                                                                    targettaghash,
2044                                                                                    HASH_REMOVE, NULL);
2045         Assert(rmtarget == target);
2046 }
2047
2048 /*
2049  * Delete child target locks owned by this process.
2050  * This implementation is assuming that the usage of each target tag field
2051  * is uniform.  No need to make this hard if we don't have to.
2052  *
2053  * We aren't acquiring lightweight locks for the predicate lock or lock
2054  * target structures associated with this transaction unless we're going
2055  * to modify them, because no other process is permitted to modify our
2056  * locks.
2057  */
2058 static void
2059 DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
2060 {
2061         SERIALIZABLEXACT *sxact;
2062         PREDICATELOCK *predlock;
2063
2064         LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2065         sxact = MySerializableXact;
2066         predlock = (PREDICATELOCK *)
2067                 SHMQueueNext(&(sxact->predicateLocks),
2068                                          &(sxact->predicateLocks),
2069                                          offsetof(PREDICATELOCK, xactLink));
2070         while (predlock)
2071         {
2072                 SHM_QUEUE  *predlocksxactlink;
2073                 PREDICATELOCK *nextpredlock;
2074                 PREDICATELOCKTAG oldlocktag;
2075                 PREDICATELOCKTARGET *oldtarget;
2076                 PREDICATELOCKTARGETTAG oldtargettag;
2077
2078                 predlocksxactlink = &(predlock->xactLink);
2079                 nextpredlock = (PREDICATELOCK *)
2080                         SHMQueueNext(&(sxact->predicateLocks),
2081                                                  predlocksxactlink,
2082                                                  offsetof(PREDICATELOCK, xactLink));
2083
2084                 oldlocktag = predlock->tag;
2085                 Assert(oldlocktag.myXact == sxact);
2086                 oldtarget = oldlocktag.myTarget;
2087                 oldtargettag = oldtarget->tag;
2088
2089                 if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
2090                 {
2091                         uint32          oldtargettaghash;
2092                         LWLock     *partitionLock;
2093                         PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
2094
2095                         oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2096                         partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2097
2098                         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2099
2100                         SHMQueueDelete(predlocksxactlink);
2101                         SHMQueueDelete(&(predlock->targetLink));
2102                         rmpredlock = hash_search_with_hash_value
2103                                 (PredicateLockHash,
2104                                  &oldlocktag,
2105                                  PredicateLockHashCodeFromTargetHashCode(&oldlocktag,
2106                                                                                                                  oldtargettaghash),
2107                                  HASH_REMOVE, NULL);
2108                         Assert(rmpredlock == predlock);
2109
2110                         RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2111
2112                         LWLockRelease(partitionLock);
2113
2114                         DecrementParentLocks(&oldtargettag);
2115                 }
2116
2117                 predlock = nextpredlock;
2118         }
2119         LWLockRelease(SerializablePredicateLockListLock);
2120 }
2121
2122 /*
2123  * Returns the promotion threshold for a given predicate lock
2124  * target. This is the number of descendant locks required to promote
2125  * to the specified tag. Note that the threshold includes non-direct
2126  * descendants, e.g. both tuples and pages for a relation lock.
2127  *
2128  * TODO SSI: We should do something more intelligent about what the
2129  * thresholds are, either making it proportional to the number of
2130  * tuples in a page & pages in a relation, or at least making it a
2131  * GUC. Currently the threshold is 3 for a page lock, and
2132  * max_pred_locks_per_transaction/2 for a relation lock, chosen
2133  * entirely arbitrarily (and without benchmarking).
2134  */
2135 static int
2136 PredicateLockPromotionThreshold(const PREDICATELOCKTARGETTAG *tag)
2137 {
2138         switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2139         {
2140                 case PREDLOCKTAG_RELATION:
2141                         return max_predicate_locks_per_xact / 2;
2142
2143                 case PREDLOCKTAG_PAGE:
2144                         return 3;
2145
2146                 case PREDLOCKTAG_TUPLE:
2147
2148                         /*
2149                          * not reachable: nothing is finer-granularity than a tuple, so we
2150                          * should never try to promote to it.
2151                          */
2152                         Assert(false);
2153                         return 0;
2154         }
2155
2156         /* not reachable */
2157         Assert(false);
2158         return 0;
2159 }
2160
2161 /*
2162  * For all ancestors of a newly-acquired predicate lock, increment
2163  * their child count in the parent hash table. If any of them have
2164  * more descendants than their promotion threshold, acquire the
2165  * coarsest such lock.
2166  *
2167  * Returns true if a parent lock was acquired and false otherwise.
2168  */
2169 static bool
2170 CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
2171 {
2172         PREDICATELOCKTARGETTAG targettag,
2173                                 nexttag,
2174                                 promotiontag;
2175         LOCALPREDICATELOCK *parentlock;
2176         bool            found,
2177                                 promote;
2178
2179         promote = false;
2180
2181         targettag = *reqtag;
2182
2183         /* check parents iteratively */
2184         while (GetParentPredicateLockTag(&targettag, &nexttag))
2185         {
2186                 targettag = nexttag;
2187                 parentlock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2188                                                                                                                 &targettag,
2189                                                                                                                 HASH_ENTER,
2190                                                                                                                 &found);
2191                 if (!found)
2192                 {
2193                         parentlock->held = false;
2194                         parentlock->childLocks = 1;
2195                 }
2196                 else
2197                         parentlock->childLocks++;
2198
2199                 if (parentlock->childLocks >=
2200                         PredicateLockPromotionThreshold(&targettag))
2201                 {
2202                         /*
2203                          * We should promote to this parent lock. Continue to check its
2204                          * ancestors, however, both to get their child counts right and to
2205                          * check whether we should just go ahead and promote to one of
2206                          * them.
2207                          */
2208                         promotiontag = targettag;
2209                         promote = true;
2210                 }
2211         }
2212
2213         if (promote)
2214         {
2215                 /* acquire coarsest ancestor eligible for promotion */
2216                 PredicateLockAcquire(&promotiontag);
2217                 return true;
2218         }
2219         else
2220                 return false;
2221 }
2222
2223 /*
2224  * When releasing a lock, decrement the child count on all ancestor
2225  * locks.
2226  *
2227  * This is called only when releasing a lock via
2228  * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
2229  * we've acquired its parent, possibly due to promotion) or when a new
2230  * MVCC write lock makes the predicate lock unnecessary. There's no
2231  * point in calling it when locks are released at transaction end, as
2232  * this information is no longer needed.
2233  */
2234 static void
2235 DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
2236 {
2237         PREDICATELOCKTARGETTAG parenttag,
2238                                 nexttag;
2239
2240         parenttag = *targettag;
2241
2242         while (GetParentPredicateLockTag(&parenttag, &nexttag))
2243         {
2244                 uint32          targettaghash;
2245                 LOCALPREDICATELOCK *parentlock,
2246                                    *rmlock PG_USED_FOR_ASSERTS_ONLY;
2247
2248                 parenttag = nexttag;
2249                 targettaghash = PredicateLockTargetTagHashCode(&parenttag);
2250                 parentlock = (LOCALPREDICATELOCK *)
2251                         hash_search_with_hash_value(LocalPredicateLockHash,
2252                                                                                 &parenttag, targettaghash,
2253                                                                                 HASH_FIND, NULL);
2254
2255                 /*
2256                  * There's a small chance the parent lock doesn't exist in the lock
2257                  * table. This can happen if we prematurely removed it because an
2258                  * index split caused the child refcount to be off.
2259                  */
2260                 if (parentlock == NULL)
2261                         continue;
2262
2263                 parentlock->childLocks--;
2264
2265                 /*
2266                  * Under similar circumstances the parent lock's refcount might be
2267                  * zero. This only happens if we're holding that lock (otherwise we
2268                  * would have removed the entry).
2269                  */
2270                 if (parentlock->childLocks < 0)
2271                 {
2272                         Assert(parentlock->held);
2273                         parentlock->childLocks = 0;
2274                 }
2275
2276                 if ((parentlock->childLocks == 0) && (!parentlock->held))
2277                 {
2278                         rmlock = (LOCALPREDICATELOCK *)
2279                                 hash_search_with_hash_value(LocalPredicateLockHash,
2280                                                                                         &parenttag, targettaghash,
2281                                                                                         HASH_REMOVE, NULL);
2282                         Assert(rmlock == parentlock);
2283                 }
2284         }
2285 }
2286
2287 /*
2288  * Indicate that a predicate lock on the given target is held by the
2289  * specified transaction. Has no effect if the lock is already held.
2290  *
2291  * This updates the lock table and the sxact's lock list, and creates
2292  * the lock target if necessary, but does *not* do anything related to
2293  * granularity promotion or the local lock table. See
2294  * PredicateLockAcquire for that.
2295  */
2296 static void
2297 CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
2298                                         uint32 targettaghash,
2299                                         SERIALIZABLEXACT *sxact)
2300 {
2301         PREDICATELOCKTARGET *target;
2302         PREDICATELOCKTAG locktag;
2303         PREDICATELOCK *lock;
2304         LWLock     *partitionLock;
2305         bool            found;
2306
2307         partitionLock = PredicateLockHashPartitionLock(targettaghash);
2308
2309         LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2310         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2311
2312         /* Make sure that the target is represented. */
2313         target = (PREDICATELOCKTARGET *)
2314                 hash_search_with_hash_value(PredicateLockTargetHash,
2315                                                                         targettag, targettaghash,
2316                                                                         HASH_ENTER_NULL, &found);
2317         if (!target)
2318                 ereport(ERROR,
2319                                 (errcode(ERRCODE_OUT_OF_MEMORY),
2320                                  errmsg("out of shared memory"),
2321                                  errhint("You might need to increase max_pred_locks_per_transaction.")));
2322         if (!found)
2323                 SHMQueueInit(&(target->predicateLocks));
2324
2325         /* We've got the sxact and target, make sure they're joined. */
2326         locktag.myTarget = target;
2327         locktag.myXact = sxact;
2328         lock = (PREDICATELOCK *)
2329                 hash_search_with_hash_value(PredicateLockHash, &locktag,
2330                         PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
2331                                                                         HASH_ENTER_NULL, &found);
2332         if (!lock)
2333                 ereport(ERROR,
2334                                 (errcode(ERRCODE_OUT_OF_MEMORY),
2335                                  errmsg("out of shared memory"),
2336                                  errhint("You might need to increase max_pred_locks_per_transaction.")));
2337
2338         if (!found)
2339         {
2340                 SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
2341                 SHMQueueInsertBefore(&(sxact->predicateLocks),
2342                                                          &(lock->xactLink));
2343                 lock->commitSeqNo = InvalidSerCommitSeqNo;
2344         }
2345
2346         LWLockRelease(partitionLock);
2347         LWLockRelease(SerializablePredicateLockListLock);
2348 }
2349
2350 /*
2351  * Acquire a predicate lock on the specified target for the current
2352  * connection if not already held. This updates the local lock table
2353  * and uses it to implement granularity promotion. It will consolidate
2354  * multiple locks into a coarser lock if warranted, and will release
2355  * any finer-grained locks covered by the new one.
2356  */
2357 static void
2358 PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
2359 {
2360         uint32          targettaghash;
2361         bool            found;
2362         LOCALPREDICATELOCK *locallock;
2363
2364         /* Do we have the lock already, or a covering lock? */
2365         if (PredicateLockExists(targettag))
2366                 return;
2367
2368         if (CoarserLockCovers(targettag))
2369                 return;
2370
2371         /* the same hash and LW lock apply to the lock target and the local lock. */
2372         targettaghash = PredicateLockTargetTagHashCode(targettag);
2373
2374         /* Acquire lock in local table */
2375         locallock = (LOCALPREDICATELOCK *)
2376                 hash_search_with_hash_value(LocalPredicateLockHash,
2377                                                                         targettag, targettaghash,
2378                                                                         HASH_ENTER, &found);
2379         locallock->held = true;
2380         if (!found)
2381                 locallock->childLocks = 0;
2382
2383         /* Actually create the lock */
2384         CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2385
2386         /*
2387          * Lock has been acquired. Check whether it should be promoted to a
2388          * coarser granularity, or whether there are finer-granularity locks to
2389          * clean up.
2390          */
2391         if (CheckAndPromotePredicateLockRequest(targettag))
2392         {
2393                 /*
2394                  * Lock request was promoted to a coarser-granularity lock, and that
2395                  * lock was acquired. It will delete this lock and any of its
2396                  * children, so we're done.
2397                  */
2398         }
2399         else
2400         {
2401                 /* Clean up any finer-granularity locks */
2402                 if (GET_PREDICATELOCKTARGETTAG_TYPE(*targettag) != PREDLOCKTAG_TUPLE)
2403                         DeleteChildTargetLocks(targettag);
2404         }
2405 }
2406
2407
2408 /*
2409  *              PredicateLockRelation
2410  *
2411  * Gets a predicate lock at the relation level.
2412  * Skip if not in full serializable transaction isolation level.
2413  * Skip if this is a temporary table.
2414  * Clear any finer-grained predicate locks this session has on the relation.
2415  */
2416 void
2417 PredicateLockRelation(Relation relation, Snapshot snapshot)
2418 {
2419         PREDICATELOCKTARGETTAG tag;
2420
2421         if (!SerializationNeededForRead(relation, snapshot))
2422                 return;
2423
2424         SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2425                                                                                 relation->rd_node.dbNode,
2426                                                                                 relation->rd_id);
2427         PredicateLockAcquire(&tag);
2428 }
2429
2430 /*
2431  *              PredicateLockPage
2432  *
2433  * Gets a predicate lock at the page level.
2434  * Skip if not in full serializable transaction isolation level.
2435  * Skip if this is a temporary table.
2436  * Skip if a coarser predicate lock already covers this page.
2437  * Clear any finer-grained predicate locks this session has on the relation.
2438  */
2439 void
2440 PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
2441 {
2442         PREDICATELOCKTARGETTAG tag;
2443
2444         if (!SerializationNeededForRead(relation, snapshot))
2445                 return;
2446
2447         SET_PREDICATELOCKTARGETTAG_PAGE(tag,
2448                                                                         relation->rd_node.dbNode,
2449                                                                         relation->rd_id,
2450                                                                         blkno);
2451         PredicateLockAcquire(&tag);
2452 }
2453
2454 /*
2455  *              PredicateLockTuple
2456  *
2457  * Gets a predicate lock at the tuple level.
2458  * Skip if not in full serializable transaction isolation level.
2459  * Skip if this is a temporary table.
2460  */
2461 void
2462 PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
2463 {
2464         PREDICATELOCKTARGETTAG tag;
2465         ItemPointer tid;
2466         TransactionId targetxmin;
2467
2468         if (!SerializationNeededForRead(relation, snapshot))
2469                 return;
2470
2471         /*
2472          * If it's a heap tuple, return if this xact wrote it.
2473          */
2474         if (relation->rd_index == NULL)
2475         {
2476                 TransactionId myxid;
2477
2478                 targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
2479
2480                 myxid = GetTopTransactionIdIfAny();
2481                 if (TransactionIdIsValid(myxid))
2482                 {
2483                         if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
2484                         {
2485                                 TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
2486
2487                                 if (TransactionIdEquals(xid, myxid))
2488                                 {
2489                                         /* We wrote it; we already have a write lock. */
2490                                         return;
2491                                 }
2492                         }
2493                 }
2494         }
2495
2496         /*
2497          * Do quick-but-not-definitive test for a relation lock first.  This will
2498          * never cause a return when the relation is *not* locked, but will
2499          * occasionally let the check continue when there really *is* a relation
2500          * level lock.
2501          */
2502         SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2503                                                                                 relation->rd_node.dbNode,
2504                                                                                 relation->rd_id);
2505         if (PredicateLockExists(&tag))
2506                 return;
2507
2508         tid = &(tuple->t_self);
2509         SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
2510                                                                          relation->rd_node.dbNode,
2511                                                                          relation->rd_id,
2512                                                                          ItemPointerGetBlockNumber(tid),
2513                                                                          ItemPointerGetOffsetNumber(tid));
2514         PredicateLockAcquire(&tag);
2515 }
2516
2517
2518 /*
2519  *              DeleteLockTarget
2520  *
2521  * Remove a predicate lock target along with any locks held for it.
2522  *
2523  * Caller must hold SerializablePredicateLockListLock and the
2524  * appropriate hash partition lock for the target.
2525  */
2526 static void
2527 DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
2528 {
2529         PREDICATELOCK *predlock;
2530         SHM_QUEUE  *predlocktargetlink;
2531         PREDICATELOCK *nextpredlock;
2532         bool            found;
2533
2534         Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2535         Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
2536
2537         predlock = (PREDICATELOCK *)
2538                 SHMQueueNext(&(target->predicateLocks),
2539                                          &(target->predicateLocks),
2540                                          offsetof(PREDICATELOCK, targetLink));
2541         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2542         while (predlock)
2543         {
2544                 predlocktargetlink = &(predlock->targetLink);
2545                 nextpredlock = (PREDICATELOCK *)
2546                         SHMQueueNext(&(target->predicateLocks),
2547                                                  predlocktargetlink,
2548                                                  offsetof(PREDICATELOCK, targetLink));
2549
2550                 SHMQueueDelete(&(predlock->xactLink));
2551                 SHMQueueDelete(&(predlock->targetLink));
2552
2553                 hash_search_with_hash_value
2554                         (PredicateLockHash,
2555                          &predlock->tag,
2556                          PredicateLockHashCodeFromTargetHashCode(&predlock->tag,
2557                                                                                                          targettaghash),
2558                          HASH_REMOVE, &found);
2559                 Assert(found);
2560
2561                 predlock = nextpredlock;
2562         }
2563         LWLockRelease(SerializableXactHashLock);
2564
2565         /* Remove the target itself, if possible. */
2566         RemoveTargetIfNoLongerUsed(target, targettaghash);
2567 }
2568
2569
2570 /*
2571  *              TransferPredicateLocksToNewTarget
2572  *
2573  * Move or copy all the predicate locks for a lock target, for use by
2574  * index page splits/combines and other things that create or replace
2575  * lock targets. If 'removeOld' is true, the old locks and the target
2576  * will be removed.
2577  *
2578  * Returns true on success, or false if we ran out of shared memory to
2579  * allocate the new target or locks. Guaranteed to always succeed if
2580  * removeOld is set (by using the scratch entry in PredicateLockTargetHash
2581  * for scratch space).
2582  *
2583  * Warning: the "removeOld" option should be used only with care,
2584  * because this function does not (indeed, can not) update other
2585  * backends' LocalPredicateLockHash. If we are only adding new
2586  * entries, this is not a problem: the local lock table is used only
2587  * as a hint, so missing entries for locks that are held are
2588  * OK. Having entries for locks that are no longer held, as can happen
2589  * when using "removeOld", is not in general OK. We can only use it
2590  * safely when replacing a lock with a coarser-granularity lock that
2591  * covers it, or if we are absolutely certain that no one will need to
2592  * refer to that lock in the future.
2593  *
2594  * Caller must hold SerializablePredicateLockListLock.
2595  */
2596 static bool
2597 TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
2598                                                                   PREDICATELOCKTARGETTAG newtargettag,
2599                                                                   bool removeOld)
2600 {
2601         uint32          oldtargettaghash;
2602         LWLock     *oldpartitionLock;
2603         PREDICATELOCKTARGET *oldtarget;
2604         uint32          newtargettaghash;
2605         LWLock     *newpartitionLock;
2606         bool            found;
2607         bool            outOfShmem = false;
2608
2609         Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2610
2611         oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2612         newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
2613         oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2614         newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
2615
2616         if (removeOld)
2617         {
2618                 /*
2619                  * Remove the dummy entry to give us scratch space, so we know we'll
2620                  * be able to create the new lock target.
2621                  */
2622                 RemoveScratchTarget(false);
2623         }
2624
2625         /*
2626          * We must get the partition locks in ascending sequence to avoid
2627          * deadlocks. If old and new partitions are the same, we must request the
2628          * lock only once.
2629          */
2630         if (oldpartitionLock < newpartitionLock)
2631         {
2632                 LWLockAcquire(oldpartitionLock,
2633                                           (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2634                 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2635         }
2636         else if (oldpartitionLock > newpartitionLock)
2637         {
2638                 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2639                 LWLockAcquire(oldpartitionLock,
2640                                           (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2641         }
2642         else
2643                 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2644
2645         /*
2646          * Look for the old target.  If not found, that's OK; no predicate locks
2647          * are affected, so we can just clean up and return. If it does exist,
2648          * walk its list of predicate locks and move or copy them to the new
2649          * target.
2650          */
2651         oldtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2652                                                                                         &oldtargettag,
2653                                                                                         oldtargettaghash,
2654                                                                                         HASH_FIND, NULL);
2655
2656         if (oldtarget)
2657         {
2658                 PREDICATELOCKTARGET *newtarget;
2659                 PREDICATELOCK *oldpredlock;
2660                 PREDICATELOCKTAG newpredlocktag;
2661
2662                 newtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2663                                                                                                 &newtargettag,
2664                                                                                                 newtargettaghash,
2665                                                                                                 HASH_ENTER_NULL, &found);
2666
2667                 if (!newtarget)
2668                 {
2669                         /* Failed to allocate due to insufficient shmem */
2670                         outOfShmem = true;
2671                         goto exit;
2672                 }
2673
2674                 /* If we created a new entry, initialize it */
2675                 if (!found)
2676                         SHMQueueInit(&(newtarget->predicateLocks));
2677
2678                 newpredlocktag.myTarget = newtarget;
2679
2680                 /*
2681                  * Loop through all the locks on the old target, replacing them with
2682                  * locks on the new target.
2683                  */
2684                 oldpredlock = (PREDICATELOCK *)
2685                         SHMQueueNext(&(oldtarget->predicateLocks),
2686                                                  &(oldtarget->predicateLocks),
2687                                                  offsetof(PREDICATELOCK, targetLink));
2688                 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2689                 while (oldpredlock)
2690                 {
2691                         SHM_QUEUE  *predlocktargetlink;
2692                         PREDICATELOCK *nextpredlock;
2693                         PREDICATELOCK *newpredlock;
2694                         SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
2695
2696                         predlocktargetlink = &(oldpredlock->targetLink);
2697                         nextpredlock = (PREDICATELOCK *)
2698                                 SHMQueueNext(&(oldtarget->predicateLocks),
2699                                                          predlocktargetlink,
2700                                                          offsetof(PREDICATELOCK, targetLink));
2701                         newpredlocktag.myXact = oldpredlock->tag.myXact;
2702
2703                         if (removeOld)
2704                         {
2705                                 SHMQueueDelete(&(oldpredlock->xactLink));
2706                                 SHMQueueDelete(&(oldpredlock->targetLink));
2707
2708                                 hash_search_with_hash_value
2709                                         (PredicateLockHash,
2710                                          &oldpredlock->tag,
2711                                    PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
2712                                                                                                                    oldtargettaghash),
2713                                          HASH_REMOVE, &found);
2714                                 Assert(found);
2715                         }
2716
2717                         newpredlock = (PREDICATELOCK *)
2718                                 hash_search_with_hash_value(PredicateLockHash,
2719                                                                                         &newpredlocktag,
2720                                          PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2721                                                                                                                    newtargettaghash),
2722                                                                                         HASH_ENTER_NULL,
2723                                                                                         &found);
2724                         if (!newpredlock)
2725                         {
2726                                 /* Out of shared memory. Undo what we've done so far. */
2727                                 LWLockRelease(SerializableXactHashLock);
2728                                 DeleteLockTarget(newtarget, newtargettaghash);
2729                                 outOfShmem = true;
2730                                 goto exit;
2731                         }
2732                         if (!found)
2733                         {
2734                                 SHMQueueInsertBefore(&(newtarget->predicateLocks),
2735                                                                          &(newpredlock->targetLink));
2736                                 SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
2737                                                                          &(newpredlock->xactLink));
2738                                 newpredlock->commitSeqNo = oldCommitSeqNo;
2739                         }
2740                         else
2741                         {
2742                                 if (newpredlock->commitSeqNo < oldCommitSeqNo)
2743                                         newpredlock->commitSeqNo = oldCommitSeqNo;
2744                         }
2745
2746                         Assert(newpredlock->commitSeqNo != 0);
2747                         Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2748                                    || (newpredlock->tag.myXact == OldCommittedSxact));
2749
2750                         oldpredlock = nextpredlock;
2751                 }
2752                 LWLockRelease(SerializableXactHashLock);
2753
2754                 if (removeOld)
2755                 {
2756                         Assert(SHMQueueEmpty(&oldtarget->predicateLocks));
2757                         RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2758                 }
2759         }
2760
2761
2762 exit:
2763         /* Release partition locks in reverse order of acquisition. */
2764         if (oldpartitionLock < newpartitionLock)
2765         {
2766                 LWLockRelease(newpartitionLock);
2767                 LWLockRelease(oldpartitionLock);
2768         }
2769         else if (oldpartitionLock > newpartitionLock)
2770         {
2771                 LWLockRelease(oldpartitionLock);
2772                 LWLockRelease(newpartitionLock);
2773         }
2774         else
2775                 LWLockRelease(newpartitionLock);
2776
2777         if (removeOld)
2778         {
2779                 /* We shouldn't run out of memory if we're moving locks */
2780                 Assert(!outOfShmem);
2781
2782                 /* Put the scrach entry back */
2783                 RestoreScratchTarget(false);
2784         }
2785
2786         return !outOfShmem;
2787 }
2788
2789 /*
2790  * Drop all predicate locks of any granularity from the specified relation,
2791  * which can be a heap relation or an index relation.  If 'transfer' is true,
2792  * acquire a relation lock on the heap for any transactions with any lock(s)
2793  * on the specified relation.
2794  *
2795  * This requires grabbing a lot of LW locks and scanning the entire lock
2796  * target table for matches.  That makes this more expensive than most
2797  * predicate lock management functions, but it will only be called for DDL
2798  * type commands that are expensive anyway, and there are fast returns when
2799  * no serializable transactions are active or the relation is temporary.
2800  *
2801  * We don't use the TransferPredicateLocksToNewTarget function because it
2802  * acquires its own locks on the partitions of the two targets involved,
2803  * and we'll already be holding all partition locks.
2804  *
2805  * We can't throw an error from here, because the call could be from a
2806  * transaction which is not serializable.
2807  *
2808  * NOTE: This is currently only called with transfer set to true, but that may
2809  * change.  If we decide to clean up the locks from a table on commit of a
2810  * transaction which executed DROP TABLE, the false condition will be useful.
2811  */
2812 static void
2813 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
2814 {
2815         HASH_SEQ_STATUS seqstat;
2816         PREDICATELOCKTARGET *oldtarget;
2817         PREDICATELOCKTARGET *heaptarget;
2818         Oid                     dbId;
2819         Oid                     relId;
2820         Oid                     heapId;
2821         int                     i;
2822         bool            isIndex;
2823         bool            found;
2824         uint32          heaptargettaghash;
2825
2826         /*
2827          * Bail out quickly if there are no serializable transactions running.
2828          * It's safe to check this without taking locks because the caller is
2829          * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
2830          * would matter here can be acquired while that is held.
2831          */
2832         if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
2833                 return;
2834
2835         if (!PredicateLockingNeededForRelation(relation))
2836                 return;
2837
2838         dbId = relation->rd_node.dbNode;
2839         relId = relation->rd_id;
2840         if (relation->rd_index == NULL)
2841         {
2842                 isIndex = false;
2843                 heapId = relId;
2844         }
2845         else
2846         {
2847                 isIndex = true;
2848                 heapId = relation->rd_index->indrelid;
2849         }
2850         Assert(heapId != InvalidOid);
2851         Assert(transfer || !isIndex);           /* index OID only makes sense with
2852                                                                                  * transfer */
2853
2854         /* Retrieve first time needed, then keep. */
2855         heaptargettaghash = 0;
2856         heaptarget = NULL;
2857
2858         /* Acquire locks on all lock partitions */
2859         LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
2860         for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
2861                 LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
2862         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2863
2864         /*
2865          * Remove the dummy entry to give us scratch space, so we know we'll be
2866          * able to create the new lock target.
2867          */
2868         if (transfer)
2869                 RemoveScratchTarget(true);
2870
2871         /* Scan through target map */
2872         hash_seq_init(&seqstat, PredicateLockTargetHash);
2873
2874         while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
2875         {
2876                 PREDICATELOCK *oldpredlock;
2877
2878                 /*
2879                  * Check whether this is a target which needs attention.
2880                  */
2881                 if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
2882                         continue;                       /* wrong relation id */
2883                 if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
2884                         continue;                       /* wrong database id */
2885                 if (transfer && !isIndex
2886                         && GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
2887                         continue;                       /* already the right lock */
2888
2889                 /*
2890                  * If we made it here, we have work to do.  We make sure the heap
2891                  * relation lock exists, then we walk the list of predicate locks for
2892                  * the old target we found, moving all locks to the heap relation lock
2893                  * -- unless they already hold that.
2894                  */
2895
2896                 /*
2897                  * First make sure we have the heap relation target.  We only need to
2898                  * do this once.
2899                  */
2900                 if (transfer && heaptarget == NULL)
2901                 {
2902                         PREDICATELOCKTARGETTAG heaptargettag;
2903
2904                         SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
2905                         heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
2906                         heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
2907                                                                                                          &heaptargettag,
2908                                                                                                          heaptargettaghash,
2909                                                                                                          HASH_ENTER, &found);
2910                         if (!found)
2911                                 SHMQueueInit(&heaptarget->predicateLocks);
2912                 }
2913
2914                 /*
2915                  * Loop through all the locks on the old target, replacing them with
2916                  * locks on the new target.
2917                  */
2918                 oldpredlock = (PREDICATELOCK *)
2919                         SHMQueueNext(&(oldtarget->predicateLocks),
2920                                                  &(oldtarget->predicateLocks),
2921                                                  offsetof(PREDICATELOCK, targetLink));
2922                 while (oldpredlock)
2923                 {
2924                         PREDICATELOCK *nextpredlock;
2925                         PREDICATELOCK *newpredlock;
2926                         SerCommitSeqNo oldCommitSeqNo;
2927                         SERIALIZABLEXACT *oldXact;
2928
2929                         nextpredlock = (PREDICATELOCK *)
2930                                 SHMQueueNext(&(oldtarget->predicateLocks),
2931                                                          &(oldpredlock->targetLink),
2932                                                          offsetof(PREDICATELOCK, targetLink));
2933
2934                         /*
2935                          * Remove the old lock first. This avoids the chance of running
2936                          * out of lock structure entries for the hash table.
2937                          */
2938                         oldCommitSeqNo = oldpredlock->commitSeqNo;
2939                         oldXact = oldpredlock->tag.myXact;
2940
2941                         SHMQueueDelete(&(oldpredlock->xactLink));
2942
2943                         /*
2944                          * No need for retail delete from oldtarget list, we're removing
2945                          * the whole target anyway.
2946                          */
2947                         hash_search(PredicateLockHash,
2948                                                 &oldpredlock->tag,
2949                                                 HASH_REMOVE, &found);
2950                         Assert(found);
2951
2952                         if (transfer)
2953                         {
2954                                 PREDICATELOCKTAG newpredlocktag;
2955
2956                                 newpredlocktag.myTarget = heaptarget;
2957                                 newpredlocktag.myXact = oldXact;
2958                                 newpredlock = (PREDICATELOCK *)
2959                                         hash_search_with_hash_value(PredicateLockHash,
2960                                                                                                 &newpredlocktag,
2961                                          PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2962                                                                                                                   heaptargettaghash),
2963                                                                                                 HASH_ENTER,
2964                                                                                                 &found);
2965                                 if (!found)
2966                                 {
2967                                         SHMQueueInsertBefore(&(heaptarget->predicateLocks),
2968                                                                                  &(newpredlock->targetLink));
2969                                         SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
2970                                                                                  &(newpredlock->xactLink));
2971                                         newpredlock->commitSeqNo = oldCommitSeqNo;
2972                                 }
2973                                 else
2974                                 {
2975                                         if (newpredlock->commitSeqNo < oldCommitSeqNo)
2976                                                 newpredlock->commitSeqNo = oldCommitSeqNo;
2977                                 }
2978
2979                                 Assert(newpredlock->commitSeqNo != 0);
2980                                 Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2981                                            || (newpredlock->tag.myXact == OldCommittedSxact));
2982                         }
2983
2984                         oldpredlock = nextpredlock;
2985                 }
2986
2987                 hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
2988                                         &found);
2989                 Assert(found);
2990         }
2991
2992         /* Put the scratch entry back */
2993         if (transfer)
2994                 RestoreScratchTarget(true);
2995
2996         /* Release locks in reverse order */
2997         LWLockRelease(SerializableXactHashLock);
2998         for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
2999                 LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
3000         LWLockRelease(SerializablePredicateLockListLock);
3001 }
3002
3003 /*
3004  * TransferPredicateLocksToHeapRelation
3005  *              For all transactions, transfer all predicate locks for the given
3006  *              relation to a single relation lock on the heap.
3007  */
3008 void
3009 TransferPredicateLocksToHeapRelation(Relation relation)
3010 {
3011         DropAllPredicateLocksFromTable(relation, true);
3012 }
3013
3014
3015 /*
3016  *              PredicateLockPageSplit
3017  *
3018  * Copies any predicate locks for the old page to the new page.
3019  * Skip if this is a temporary table or toast table.
3020  *
3021  * NOTE: A page split (or overflow) affects all serializable transactions,
3022  * even if it occurs in the context of another transaction isolation level.
3023  *
3024  * NOTE: This currently leaves the local copy of the locks without
3025  * information on the new lock which is in shared memory.  This could cause
3026  * problems if enough page splits occur on locked pages without the processes
3027  * which hold the locks getting in and noticing.
3028  */
3029 void
3030 PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
3031                                            BlockNumber newblkno)
3032 {
3033         PREDICATELOCKTARGETTAG oldtargettag;
3034         PREDICATELOCKTARGETTAG newtargettag;
3035         bool            success;
3036
3037         /*
3038          * Bail out quickly if there are no serializable transactions running.
3039          *
3040          * It's safe to do this check without taking any additional locks. Even if
3041          * a serializable transaction starts concurrently, we know it can't take
3042          * any SIREAD locks on the page being split because the caller is holding
3043          * the associated buffer page lock. Memory reordering isn't an issue; the
3044          * memory barrier in the LWLock acquisition guarantees that this read
3045          * occurs while the buffer page lock is held.
3046          */
3047         if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
3048                 return;
3049
3050         if (!PredicateLockingNeededForRelation(relation))
3051                 return;
3052
3053         Assert(oldblkno != newblkno);
3054         Assert(BlockNumberIsValid(oldblkno));
3055         Assert(BlockNumberIsValid(newblkno));
3056
3057         SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
3058                                                                         relation->rd_node.dbNode,
3059                                                                         relation->rd_id,
3060                                                                         oldblkno);
3061         SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
3062                                                                         relation->rd_node.dbNode,
3063                                                                         relation->rd_id,
3064                                                                         newblkno);
3065
3066         LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
3067
3068         /*
3069          * Try copying the locks over to the new page's tag, creating it if
3070          * necessary.
3071          */
3072         success = TransferPredicateLocksToNewTarget(oldtargettag,
3073                                                                                                 newtargettag,
3074                                                                                                 false);
3075
3076         if (!success)
3077         {
3078                 /*
3079                  * No more predicate lock entries are available. Failure isn't an
3080                  * option here, so promote the page lock to a relation lock.
3081                  */
3082
3083                 /* Get the parent relation lock's lock tag */
3084                 success = GetParentPredicateLockTag(&oldtargettag,
3085                                                                                         &newtargettag);
3086                 Assert(success);
3087
3088                 /*
3089                  * Move the locks to the parent. This shouldn't fail.
3090                  *
3091                  * Note that here we are removing locks held by other backends,
3092                  * leading to a possible inconsistency in their local lock hash table.
3093                  * This is OK because we're replacing it with a lock that covers the
3094                  * old one.
3095                  */
3096                 success = TransferPredicateLocksToNewTarget(oldtargettag,
3097                                                                                                         newtargettag,
3098                                                                                                         true);
3099                 Assert(success);
3100         }
3101
3102         LWLockRelease(SerializablePredicateLockListLock);
3103 }
3104
3105 /*
3106  *              PredicateLockPageCombine
3107  *
3108  * Combines predicate locks for two existing pages.
3109  * Skip if this is a temporary table or toast table.
3110  *
3111  * NOTE: A page combine affects all serializable transactions, even if it
3112  * occurs in the context of another transaction isolation level.
3113  */
3114 void
3115 PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
3116                                                  BlockNumber newblkno)
3117 {
3118         /*
3119          * Page combines differ from page splits in that we ought to be able to
3120          * remove the locks on the old page after transferring them to the new
3121          * page, instead of duplicating them. However, because we can't edit other
3122          * backends' local lock tables, removing the old lock would leave them
3123          * with an entry in their LocalPredicateLockHash for a lock they're not
3124          * holding, which isn't acceptable. So we wind up having to do the same
3125          * work as a page split, acquiring a lock on the new page and keeping the
3126          * old page locked too. That can lead to some false positives, but should
3127          * be rare in practice.
3128          */
3129         PredicateLockPageSplit(relation, oldblkno, newblkno);
3130 }
3131
3132 /*
3133  * Walk the list of in-progress serializable transactions and find the new
3134  * xmin.
3135  */
3136 static void
3137 SetNewSxactGlobalXmin(void)
3138 {
3139         SERIALIZABLEXACT *sxact;
3140
3141         Assert(LWLockHeldByMe(SerializableXactHashLock));
3142
3143         PredXact->SxactGlobalXmin = InvalidTransactionId;
3144         PredXact->SxactGlobalXminCount = 0;
3145
3146         for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
3147         {
3148                 if (!SxactIsRolledBack(sxact)
3149                         && !SxactIsCommitted(sxact)
3150                         && sxact != OldCommittedSxact)
3151                 {
3152                         Assert(sxact->xmin != InvalidTransactionId);
3153                         if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3154                                 || TransactionIdPrecedes(sxact->xmin,
3155                                                                                  PredXact->SxactGlobalXmin))
3156                         {
3157                                 PredXact->SxactGlobalXmin = sxact->xmin;
3158                                 PredXact->SxactGlobalXminCount = 1;
3159                         }
3160                         else if (TransactionIdEquals(sxact->xmin,
3161                                                                                  PredXact->SxactGlobalXmin))
3162                                 PredXact->SxactGlobalXminCount++;
3163                 }
3164         }
3165
3166         OldSerXidSetActiveSerXmin(PredXact->SxactGlobalXmin);
3167 }
3168
3169 /*
3170  *              ReleasePredicateLocks
3171  *
3172  * Releases predicate locks based on completion of the current transaction,
3173  * whether committed or rolled back.  It can also be called for a read only
3174  * transaction when it becomes impossible for the transaction to become
3175  * part of a dangerous structure.
3176  *
3177  * We do nothing unless this is a serializable transaction.
3178  *
3179  * This method must ensure that shared memory hash tables are cleaned
3180  * up in some relatively timely fashion.
3181  *
3182  * If this transaction is committing and is holding any predicate locks,
3183  * it must be added to a list of completed serializable transactions still
3184  * holding locks.
3185  */
3186 void
3187 ReleasePredicateLocks(bool isCommit)
3188 {
3189         bool            needToClear;
3190         RWConflict      conflict,
3191                                 nextConflict,
3192                                 possibleUnsafeConflict;
3193         SERIALIZABLEXACT *roXact;
3194
3195         /*
3196          * We can't trust XactReadOnly here, because a transaction which started
3197          * as READ WRITE can show as READ ONLY later, e.g., within
3198          * substransactions.  We want to flag a transaction as READ ONLY if it
3199          * commits without writing so that de facto READ ONLY transactions get the
3200          * benefit of some RO optimizations, so we will use this local variable to
3201          * get some cleanup logic right which is based on whether the transaction
3202          * was declared READ ONLY at the top level.
3203          */
3204         bool            topLevelIsDeclaredReadOnly;
3205
3206         if (MySerializableXact == InvalidSerializableXact)
3207         {
3208                 Assert(LocalPredicateLockHash == NULL);
3209                 return;
3210         }
3211
3212         Assert(!isCommit || SxactIsPrepared(MySerializableXact));
3213         Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
3214         Assert(!SxactIsCommitted(MySerializableXact));
3215         Assert(!SxactIsRolledBack(MySerializableXact));
3216
3217         /* may not be serializable during COMMIT/ROLLBACK PREPARED */
3218         if (MySerializableXact->pid != 0)
3219                 Assert(IsolationIsSerializable());
3220
3221         /* We'd better not already be on the cleanup list. */
3222         Assert(!SxactIsOnFinishedList(MySerializableXact));
3223
3224         topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
3225
3226         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3227
3228         /*
3229          * We don't hold XidGenLock lock here, assuming that TransactionId is
3230          * atomic!
3231          *
3232          * If this value is changing, we don't care that much whether we get the
3233          * old or new value -- it is just used to determine how far
3234          * GlobalSerializableXmin must advance before this transaction can be
3235          * fully cleaned up.  The worst that could happen is we wait for one more
3236          * transaction to complete before freeing some RAM; correctness of visible
3237          * behavior is not affected.
3238          */
3239         MySerializableXact->finishedBefore = ShmemVariableCache->nextXid;
3240
3241         /*
3242          * If it's not a commit it's a rollback, and we can clear our locks
3243          * immediately.
3244          */
3245         if (isCommit)
3246         {
3247                 MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
3248                 MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
3249                 /* Recognize implicit read-only transaction (commit without write). */
3250                 if (!MyXactDidWrite)
3251                         MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
3252         }
3253         else
3254         {
3255                 /*
3256                  * The DOOMED flag indicates that we intend to roll back this
3257                  * transaction and so it should not cause serialization failures for
3258                  * other transactions that conflict with it. Note that this flag might
3259                  * already be set, if another backend marked this transaction for
3260                  * abort.
3261                  *
3262                  * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
3263                  * has been called, and so the SerializableXact is eligible for
3264                  * cleanup. This means it should not be considered when calculating
3265                  * SxactGlobalXmin.
3266                  */
3267                 MySerializableXact->flags |= SXACT_FLAG_DOOMED;
3268                 MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
3269
3270                 /*
3271                  * If the transaction was previously prepared, but is now failing due
3272                  * to a ROLLBACK PREPARED or (hopefully very rare) error after the
3273                  * prepare, clear the prepared flag.  This simplifies conflict
3274                  * checking.
3275                  */
3276                 MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
3277         }
3278
3279         if (!topLevelIsDeclaredReadOnly)
3280         {
3281                 Assert(PredXact->WritableSxactCount > 0);
3282                 if (--(PredXact->WritableSxactCount) == 0)
3283                 {
3284                         /*
3285                          * Release predicate locks and rw-conflicts in for all committed
3286                          * transactions.  There are no longer any transactions which might
3287                          * conflict with the locks and no chance for new transactions to
3288                          * overlap.  Similarly, existing conflicts in can't cause pivots,
3289                          * and any conflicts in which could have completed a dangerous
3290                          * structure would already have caused a rollback, so any
3291                          * remaining ones must be benign.
3292                          */
3293                         PredXact->CanPartialClearThrough = PredXact->LastSxactCommitSeqNo;
3294                 }
3295         }
3296         else
3297         {
3298                 /*
3299                  * Read-only transactions: clear the list of transactions that might
3300                  * make us unsafe. Note that we use 'inLink' for the iteration as
3301                  * opposed to 'outLink' for the r/w xacts.
3302                  */
3303                 possibleUnsafeConflict = (RWConflict)
3304                         SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3305                                                  &MySerializableXact->possibleUnsafeConflicts,
3306                                                  offsetof(RWConflictData, inLink));
3307                 while (possibleUnsafeConflict)
3308                 {
3309                         nextConflict = (RWConflict)
3310                                 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3311                                                          &possibleUnsafeConflict->inLink,
3312                                                          offsetof(RWConflictData, inLink));
3313
3314                         Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
3315                         Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
3316
3317                         ReleaseRWConflict(possibleUnsafeConflict);
3318
3319                         possibleUnsafeConflict = nextConflict;
3320                 }
3321         }
3322
3323         /* Check for conflict out to old committed transactions. */
3324         if (isCommit
3325                 && !SxactIsReadOnly(MySerializableXact)
3326                 && SxactHasSummaryConflictOut(MySerializableXact))
3327         {
3328                 /*
3329                  * we don't know which old committed transaction we conflicted with,
3330                  * so be conservative and use FirstNormalSerCommitSeqNo here
3331                  */
3332                 MySerializableXact->SeqNo.earliestOutConflictCommit =
3333                         FirstNormalSerCommitSeqNo;
3334                 MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3335         }
3336
3337         /*
3338          * Release all outConflicts to committed transactions.  If we're rolling
3339          * back clear them all.  Set SXACT_FLAG_CONFLICT_OUT if any point to
3340          * previously committed transactions.
3341          */
3342         conflict = (RWConflict)
3343                 SHMQueueNext(&MySerializableXact->outConflicts,
3344                                          &MySerializableXact->outConflicts,
3345                                          offsetof(RWConflictData, outLink));
3346         while (conflict)
3347         {
3348                 nextConflict = (RWConflict)
3349                         SHMQueueNext(&MySerializableXact->outConflicts,
3350                                                  &conflict->outLink,
3351                                                  offsetof(RWConflictData, outLink));
3352
3353                 if (isCommit
3354                         && !SxactIsReadOnly(MySerializableXact)
3355                         && SxactIsCommitted(conflict->sxactIn))
3356                 {
3357                         if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
3358                                 || conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
3359                                 MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
3360                         MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3361                 }
3362
3363                 if (!isCommit
3364                         || SxactIsCommitted(conflict->sxactIn)
3365                         || (conflict->sxactIn->SeqNo.lastCommitBeforeSnapshot >= PredXact->LastSxactCommitSeqNo))
3366                         ReleaseRWConflict(conflict);
3367
3368                 conflict = nextConflict;
3369         }
3370
3371         /*
3372          * Release all inConflicts from committed and read-only transactions. If
3373          * we're rolling back, clear them all.
3374          */
3375         conflict = (RWConflict)
3376                 SHMQueueNext(&MySerializableXact->inConflicts,
3377                                          &MySerializableXact->inConflicts,
3378                                          offsetof(RWConflictData, inLink));
3379         while (conflict)
3380         {
3381                 nextConflict = (RWConflict)
3382                         SHMQueueNext(&MySerializableXact->inConflicts,
3383                                                  &conflict->inLink,
3384                                                  offsetof(RWConflictData, inLink));
3385
3386                 if (!isCommit
3387                         || SxactIsCommitted(conflict->sxactOut)
3388                         || SxactIsReadOnly(conflict->sxactOut))
3389                         ReleaseRWConflict(conflict);
3390
3391                 conflict = nextConflict;
3392         }
3393
3394         if (!topLevelIsDeclaredReadOnly)
3395         {
3396                 /*
3397                  * Remove ourselves from the list of possible conflicts for concurrent
3398                  * READ ONLY transactions, flagging them as unsafe if we have a
3399                  * conflict out. If any are waiting DEFERRABLE transactions, wake them
3400                  * up if they are known safe or known unsafe.
3401                  */
3402                 possibleUnsafeConflict = (RWConflict)
3403                         SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3404                                                  &MySerializableXact->possibleUnsafeConflicts,
3405                                                  offsetof(RWConflictData, outLink));
3406                 while (possibleUnsafeConflict)
3407                 {
3408                         nextConflict = (RWConflict)
3409                                 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3410                                                          &possibleUnsafeConflict->outLink,
3411                                                          offsetof(RWConflictData, outLink));
3412
3413                         roXact = possibleUnsafeConflict->sxactIn;
3414                         Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
3415                         Assert(SxactIsReadOnly(roXact));
3416
3417                         /* Mark conflicted if necessary. */
3418                         if (isCommit
3419                                 && MyXactDidWrite
3420                                 && SxactHasConflictOut(MySerializableXact)
3421                                 && (MySerializableXact->SeqNo.earliestOutConflictCommit
3422                                         <= roXact->SeqNo.lastCommitBeforeSnapshot))
3423                         {
3424                                 /*
3425                                  * This releases possibleUnsafeConflict (as well as all other
3426                                  * possible conflicts for roXact)
3427                                  */
3428                                 FlagSxactUnsafe(roXact);
3429                         }
3430                         else
3431                         {
3432                                 ReleaseRWConflict(possibleUnsafeConflict);
3433
3434                                 /*
3435                                  * If we were the last possible conflict, flag it safe. The
3436                                  * transaction can now safely release its predicate locks (but
3437                                  * that transaction's backend has to do that itself).
3438                                  */
3439                                 if (SHMQueueEmpty(&roXact->possibleUnsafeConflicts))
3440                                         roXact->flags |= SXACT_FLAG_RO_SAFE;
3441                         }
3442
3443                         /*
3444                          * Wake up the process for a waiting DEFERRABLE transaction if we
3445                          * now know it's either safe or conflicted.
3446                          */
3447                         if (SxactIsDeferrableWaiting(roXact) &&
3448                                 (SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
3449                                 ProcSendSignal(roXact->pid);
3450
3451                         possibleUnsafeConflict = nextConflict;
3452                 }
3453         }
3454
3455         /*
3456          * Check whether it's time to clean up old transactions. This can only be
3457          * done when the last serializable transaction with the oldest xmin among
3458          * serializable transactions completes.  We then find the "new oldest"
3459          * xmin and purge any transactions which finished before this transaction
3460          * was launched.
3461          */
3462         needToClear = false;
3463         if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
3464         {
3465                 Assert(PredXact->SxactGlobalXminCount > 0);
3466                 if (--(PredXact->SxactGlobalXminCount) == 0)
3467                 {
3468                         SetNewSxactGlobalXmin();
3469                         needToClear = true;
3470                 }
3471         }
3472
3473         LWLockRelease(SerializableXactHashLock);
3474
3475         LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3476
3477         /* Add this to the list of transactions to check for later cleanup. */
3478         if (isCommit)
3479                 SHMQueueInsertBefore(FinishedSerializableTransactions,
3480                                                          &MySerializableXact->finishedLink);
3481
3482         if (!isCommit)
3483                 ReleaseOneSerializableXact(MySerializableXact, false, false);
3484
3485         LWLockRelease(SerializableFinishedListLock);
3486
3487         if (needToClear)
3488                 ClearOldPredicateLocks();
3489
3490         MySerializableXact = InvalidSerializableXact;
3491         MyXactDidWrite = false;
3492
3493         /* Delete per-transaction lock table */
3494         if (LocalPredicateLockHash != NULL)
3495         {
3496                 hash_destroy(LocalPredicateLockHash);
3497                 LocalPredicateLockHash = NULL;
3498         }
3499 }
3500
3501 /*
3502  * Clear old predicate locks, belonging to committed transactions that are no
3503  * longer interesting to any in-progress transaction.
3504  */
3505 static void
3506 ClearOldPredicateLocks(void)
3507 {
3508         SERIALIZABLEXACT *finishedSxact;
3509         PREDICATELOCK *predlock;
3510
3511         /*
3512          * Loop through finished transactions. They are in commit order, so we can
3513          * stop as soon as we find one that's still interesting.
3514          */
3515         LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3516         finishedSxact = (SERIALIZABLEXACT *)
3517                 SHMQueueNext(FinishedSerializableTransactions,
3518                                          FinishedSerializableTransactions,
3519                                          offsetof(SERIALIZABLEXACT, finishedLink));
3520         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3521         while (finishedSxact)
3522         {
3523                 SERIALIZABLEXACT *nextSxact;
3524
3525                 nextSxact = (SERIALIZABLEXACT *)
3526                         SHMQueueNext(FinishedSerializableTransactions,
3527                                                  &(finishedSxact->finishedLink),
3528                                                  offsetof(SERIALIZABLEXACT, finishedLink));
3529                 if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3530                         || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
3531                                                                                          PredXact->SxactGlobalXmin))
3532                 {
3533                         /*
3534                          * This transaction committed before any in-progress transaction
3535                          * took its snapshot. It's no longer interesting.
3536                          */
3537                         LWLockRelease(SerializableXactHashLock);
3538                         SHMQueueDelete(&(finishedSxact->finishedLink));
3539                         ReleaseOneSerializableXact(finishedSxact, false, false);
3540                         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3541                 }
3542                 else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
3543                    && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
3544                 {
3545                         /*
3546                          * Any active transactions that took their snapshot before this
3547                          * transaction committed are read-only, so we can clear part of
3548                          * its state.
3549                          */
3550                         LWLockRelease(SerializableXactHashLock);
3551
3552                         if (SxactIsReadOnly(finishedSxact))
3553                         {
3554                                 /* A read-only transaction can be removed entirely */
3555                                 SHMQueueDelete(&(finishedSxact->finishedLink));
3556                                 ReleaseOneSerializableXact(finishedSxact, false, false);
3557                         }
3558                         else
3559                         {
3560                                 /*
3561                                  * A read-write transaction can only be partially cleared. We
3562                                  * need to keep the SERIALIZABLEXACT but can release the
3563                                  * SIREAD locks and conflicts in.
3564                                  */
3565                                 ReleaseOneSerializableXact(finishedSxact, true, false);
3566                         }
3567
3568                         PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
3569                         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3570                 }
3571                 else
3572                 {
3573                         /* Still interesting. */
3574                         break;
3575                 }
3576                 finishedSxact = nextSxact;
3577         }
3578         LWLockRelease(SerializableXactHashLock);
3579
3580         /*
3581          * Loop through predicate locks on dummy transaction for summarized data.
3582          */
3583         LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3584         predlock = (PREDICATELOCK *)
3585                 SHMQueueNext(&OldCommittedSxact->predicateLocks,
3586                                          &OldCommittedSxact->predicateLocks,
3587                                          offsetof(PREDICATELOCK, xactLink));
3588         while (predlock)
3589         {
3590                 PREDICATELOCK *nextpredlock;
3591                 bool            canDoPartialCleanup;
3592
3593                 nextpredlock = (PREDICATELOCK *)
3594                         SHMQueueNext(&OldCommittedSxact->predicateLocks,
3595                                                  &predlock->xactLink,
3596                                                  offsetof(PREDICATELOCK, xactLink));
3597
3598                 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3599                 Assert(predlock->commitSeqNo != 0);
3600                 Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3601                 canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
3602                 LWLockRelease(SerializableXactHashLock);
3603
3604                 /*
3605                  * If this lock originally belonged to an old enough transaction, we
3606                  * can release it.
3607                  */
3608                 if (canDoPartialCleanup)
3609                 {
3610                         PREDICATELOCKTAG tag;
3611                         PREDICATELOCKTARGET *target;
3612                         PREDICATELOCKTARGETTAG targettag;
3613                         uint32          targettaghash;
3614                         LWLock     *partitionLock;
3615
3616                         tag = predlock->tag;
3617                         target = tag.myTarget;
3618                         targettag = target->tag;
3619                         targettaghash = PredicateLockTargetTagHashCode(&targettag);
3620                         partitionLock = PredicateLockHashPartitionLock(targettaghash);
3621
3622                         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3623
3624                         SHMQueueDelete(&(predlock->targetLink));
3625                         SHMQueueDelete(&(predlock->xactLink));
3626
3627                         hash_search_with_hash_value(PredicateLockHash, &tag,
3628                                                                 PredicateLockHashCodeFromTargetHashCode(&tag,
3629                                                                                                                           targettaghash),
3630                                                                                 HASH_REMOVE, NULL);
3631                         RemoveTargetIfNoLongerUsed(target, targettaghash);
3632
3633                         LWLockRelease(partitionLock);
3634                 }
3635
3636                 predlock = nextpredlock;
3637         }
3638
3639         LWLockRelease(SerializablePredicateLockListLock);
3640         LWLockRelease(SerializableFinishedListLock);
3641 }
3642
3643 /*
3644  * This is the normal way to delete anything from any of the predicate
3645  * locking hash tables.  Given a transaction which we know can be deleted:
3646  * delete all predicate locks held by that transaction and any predicate
3647  * lock targets which are now unreferenced by a lock; delete all conflicts
3648  * for the transaction; delete all xid values for the transaction; then
3649  * delete the transaction.
3650  *
3651  * When the partial flag is set, we can release all predicate locks and
3652  * in-conflict information -- we've established that there are no longer
3653  * any overlapping read write transactions for which this transaction could
3654  * matter -- but keep the transaction entry itself and any outConflicts.
3655  *
3656  * When the summarize flag is set, we've run short of room for sxact data
3657  * and must summarize to the SLRU.  Predicate locks are transferred to a
3658  * dummy "old" transaction, with duplicate locks on a single target
3659  * collapsing to a single lock with the "latest" commitSeqNo from among
3660  * the conflicting locks..
3661  */
3662 static void
3663 ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
3664                                                    bool summarize)
3665 {
3666         PREDICATELOCK *predlock;
3667         SERIALIZABLEXIDTAG sxidtag;
3668         RWConflict      conflict,
3669                                 nextConflict;
3670
3671         Assert(sxact != NULL);
3672         Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
3673         Assert(partial || !SxactIsOnFinishedList(sxact));
3674         Assert(LWLockHeldByMe(SerializableFinishedListLock));
3675
3676         /*
3677          * First release all the predicate locks held by this xact (or transfer
3678          * them to OldCommittedSxact if summarize is true)
3679          */
3680         LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3681         predlock = (PREDICATELOCK *)
3682                 SHMQueueNext(&(sxact->predicateLocks),
3683                                          &(sxact->predicateLocks),
3684                                          offsetof(PREDICATELOCK, xactLink));
3685         while (predlock)
3686         {
3687                 PREDICATELOCK *nextpredlock;
3688                 PREDICATELOCKTAG tag;
3689                 SHM_QUEUE  *targetLink;
3690                 PREDICATELOCKTARGET *target;
3691                 PREDICATELOCKTARGETTAG targettag;
3692                 uint32          targettaghash;
3693                 LWLock     *partitionLock;
3694
3695                 nextpredlock = (PREDICATELOCK *)
3696                         SHMQueueNext(&(sxact->predicateLocks),
3697                                                  &(predlock->xactLink),
3698                                                  offsetof(PREDICATELOCK, xactLink));
3699
3700                 tag = predlock->tag;
3701                 targetLink = &(predlock->targetLink);
3702                 target = tag.myTarget;
3703                 targettag = target->tag;
3704                 targettaghash = PredicateLockTargetTagHashCode(&targettag);
3705                 partitionLock = PredicateLockHashPartitionLock(targettaghash);
3706
3707                 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3708
3709                 SHMQueueDelete(targetLink);
3710
3711                 hash_search_with_hash_value(PredicateLockHash, &tag,
3712                                                                 PredicateLockHashCodeFromTargetHashCode(&tag,
3713                                                                                                                           targettaghash),
3714                                                                         HASH_REMOVE, NULL);
3715                 if (summarize)
3716                 {
3717                         bool            found;
3718
3719                         /* Fold into dummy transaction list. */
3720                         tag.myXact = OldCommittedSxact;
3721                         predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
3722                                                                 PredicateLockHashCodeFromTargetHashCode(&tag,
3723                                                                                                                           targettaghash),
3724                                                                                                    HASH_ENTER_NULL, &found);
3725                         if (!predlock)
3726                                 ereport(ERROR,
3727                                                 (errcode(ERRCODE_OUT_OF_MEMORY),
3728                                                  errmsg("out of shared memory"),
3729                                                  errhint("You might need to increase max_pred_locks_per_transaction.")));
3730                         if (found)
3731                         {
3732                                 Assert(predlock->commitSeqNo != 0);
3733                                 Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3734                                 if (predlock->commitSeqNo < sxact->commitSeqNo)
3735                                         predlock->commitSeqNo = sxact->commitSeqNo;
3736                         }
3737                         else
3738                         {
3739                                 SHMQueueInsertBefore(&(target->predicateLocks),
3740                                                                          &(predlock->targetLink));
3741                                 SHMQueueInsertBefore(&(OldCommittedSxact->predicateLocks),
3742                                                                          &(predlock->xactLink));
3743                                 predlock->commitSeqNo = sxact->commitSeqNo;
3744                         }
3745                 }
3746                 else
3747                         RemoveTargetIfNoLongerUsed(target, targettaghash);
3748
3749                 LWLockRelease(partitionLock);
3750
3751                 predlock = nextpredlock;
3752         }
3753
3754         /*
3755          * Rather than retail removal, just re-init the head after we've run
3756          * through the list.
3757          */
3758         SHMQueueInit(&sxact->predicateLocks);
3759
3760         LWLockRelease(SerializablePredicateLockListLock);
3761
3762         sxidtag.xid = sxact->topXid;
3763         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3764
3765         /* Release all outConflicts (unless 'partial' is true) */
3766         if (!partial)
3767         {
3768                 conflict = (RWConflict)
3769                         SHMQueueNext(&sxact->outConflicts,
3770                                                  &sxact->outConflicts,
3771                                                  offsetof(RWConflictData, outLink));
3772                 while (conflict)
3773                 {
3774                         nextConflict = (RWConflict)
3775                                 SHMQueueNext(&sxact->outConflicts,
3776                                                          &conflict->outLink,
3777                                                          offsetof(RWConflictData, outLink));
3778                         if (summarize)
3779                                 conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
3780                         ReleaseRWConflict(conflict);
3781                         conflict = nextConflict;
3782                 }
3783         }
3784
3785         /* Release all inConflicts. */
3786         conflict = (RWConflict)
3787                 SHMQueueNext(&sxact->inConflicts,
3788                                          &sxact->inConflicts,
3789                                          offsetof(RWConflictData, inLink));
3790         while (conflict)
3791         {
3792                 nextConflict = (RWConflict)
3793                         SHMQueueNext(&sxact->inConflicts,
3794                                                  &conflict->inLink,
3795                                                  offsetof(RWConflictData, inLink));
3796                 if (summarize)
3797                         conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
3798                 ReleaseRWConflict(conflict);
3799                 conflict = nextConflict;
3800         }
3801
3802         /* Finally, get rid of the xid and the record of the transaction itself. */
3803         if (!partial)
3804         {
3805                 if (sxidtag.xid != InvalidTransactionId)
3806                         hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
3807                 ReleasePredXact(sxact);
3808         }
3809
3810         LWLockRelease(SerializableXactHashLock);
3811 }
3812
3813 /*
3814  * Tests whether the given top level transaction is concurrent with
3815  * (overlaps) our current transaction.
3816  *
3817  * We need to identify the top level transaction for SSI, anyway, so pass
3818  * that to this function to save the overhead of checking the snapshot's
3819  * subxip array.
3820  */
3821 static bool
3822 XidIsConcurrent(TransactionId xid)
3823 {
3824         Snapshot        snap;
3825         uint32          i;
3826
3827         Assert(TransactionIdIsValid(xid));
3828         Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));
3829
3830         snap = GetTransactionSnapshot();
3831
3832         if (TransactionIdPrecedes(xid, snap->xmin))
3833                 return false;
3834
3835         if (TransactionIdFollowsOrEquals(xid, snap->xmax))
3836                 return true;
3837
3838         for (i = 0; i < snap->xcnt; i++)
3839         {
3840                 if (xid == snap->xip[i])
3841                         return true;
3842         }
3843
3844         return false;
3845 }
3846
3847 /*
3848  * CheckForSerializableConflictOut
3849  *              We are reading a tuple which has been modified.  If it is visible to
3850  *              us but has been deleted, that indicates a rw-conflict out.  If it's
3851  *              not visible and was created by a concurrent (overlapping)
3852  *              serializable transaction, that is also a rw-conflict out,
3853  *
3854  * We will determine the top level xid of the writing transaction with which
3855  * we may be in conflict, and check for overlap with our own transaction.
3856  * If the transactions overlap (i.e., they cannot see each other's writes),
3857  * then we have a conflict out.
3858  *
3859  * This function should be called just about anywhere in heapam.c where a
3860  * tuple has been read. The caller must hold at least a shared lock on the
3861  * buffer, because this function might set hint bits on the tuple. There is
3862  * currently no known reason to call this function from an index AM.
3863  */
3864 void
3865 CheckForSerializableConflictOut(bool visible, Relation relation,
3866                                                                 HeapTuple tuple, Buffer buffer,
3867                                                                 Snapshot snapshot)
3868 {
3869         TransactionId xid;
3870         SERIALIZABLEXIDTAG sxidtag;
3871         SERIALIZABLEXID *sxid;
3872         SERIALIZABLEXACT *sxact;
3873         HTSV_Result htsvResult;
3874
3875         if (!SerializationNeededForRead(relation, snapshot))
3876                 return;
3877
3878         /* Check if someone else has already decided that we need to die */
3879         if (SxactIsDoomed(MySerializableXact))
3880         {
3881                 ereport(ERROR,
3882                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3883                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
3884                                  errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
3885                                  errhint("The transaction might succeed if retried.")));
3886         }
3887
3888         /*
3889          * Check to see whether the tuple has been written to by a concurrent
3890          * transaction, either to create it not visible to us, or to delete it
3891          * while it is visible to us.  The "visible" bool indicates whether the
3892          * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
3893          * is going on with it.
3894          */
3895         htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
3896         switch (htsvResult)
3897         {
3898                 case HEAPTUPLE_LIVE:
3899                         if (visible)
3900                                 return;
3901                         xid = HeapTupleHeaderGetXmin(tuple->t_data);
3902                         break;
3903                 case HEAPTUPLE_RECENTLY_DEAD:
3904                         if (!visible)
3905                                 return;
3906                         xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
3907                         break;
3908                 case HEAPTUPLE_DELETE_IN_PROGRESS:
3909                         xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
3910                         break;
3911                 case HEAPTUPLE_INSERT_IN_PROGRESS:
3912                         xid = HeapTupleHeaderGetXmin(tuple->t_data);
3913                         break;
3914                 case HEAPTUPLE_DEAD:
3915                         return;
3916                 default:
3917
3918                         /*
3919                          * The only way to get to this default clause is if a new value is
3920                          * added to the enum type without adding it to this switch
3921                          * statement.  That's a bug, so elog.
3922                          */
3923                         elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
3924
3925                         /*
3926                          * In spite of having all enum values covered and calling elog on
3927                          * this default, some compilers think this is a code path which
3928                          * allows xid to be used below without initialization. Silence
3929                          * that warning.
3930                          */
3931                         xid = InvalidTransactionId;
3932         }
3933         Assert(TransactionIdIsValid(xid));
3934         Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
3935
3936         /*
3937          * Find top level xid.  Bail out if xid is too early to be a conflict, or
3938          * if it's our own xid.
3939          */
3940         if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
3941                 return;
3942         xid = SubTransGetTopmostTransaction(xid);
3943         if (TransactionIdPrecedes(xid, TransactionXmin))
3944                 return;
3945         if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
3946                 return;
3947
3948         /*
3949          * Find sxact or summarized info for the top level xid.
3950          */
3951         sxidtag.xid = xid;
3952         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3953         sxid = (SERIALIZABLEXID *)
3954                 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
3955         if (!sxid)
3956         {
3957                 /*
3958                  * Transaction not found in "normal" SSI structures.  Check whether it
3959                  * got pushed out to SLRU storage for "old committed" transactions.
3960                  */
3961                 SerCommitSeqNo conflictCommitSeqNo;
3962
3963                 conflictCommitSeqNo = OldSerXidGetMinConflictCommitSeqNo(xid);
3964                 if (conflictCommitSeqNo != 0)
3965                 {
3966                         if (conflictCommitSeqNo != InvalidSerCommitSeqNo
3967                                 && (!SxactIsReadOnly(MySerializableXact)
3968                                         || conflictCommitSeqNo
3969                                         <= MySerializableXact->SeqNo.lastCommitBeforeSnapshot))
3970                                 ereport(ERROR,
3971                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3972                                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
3973                                                  errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
3974                                           errhint("The transaction might succeed if retried.")));
3975
3976                         if (SxactHasSummaryConflictIn(MySerializableXact)
3977                                 || !SHMQueueEmpty(&MySerializableXact->inConflicts))
3978                                 ereport(ERROR,
3979                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3980                                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
3981                                                  errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
3982                                           errhint("The transaction might succeed if retried.")));
3983
3984                         MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
3985                 }
3986
3987                 /* It's not serializable or otherwise not important. */
3988                 LWLockRelease(SerializableXactHashLock);
3989                 return;
3990         }
3991         sxact = sxid->myXact;
3992         Assert(TransactionIdEquals(sxact->topXid, xid));
3993         if (sxact == MySerializableXact || SxactIsDoomed(sxact))
3994         {
3995                 /* Can't conflict with ourself or a transaction that will roll back. */
3996                 LWLockRelease(SerializableXactHashLock);
3997                 return;
3998         }
3999
4000         /*
4001          * We have a conflict out to a transaction which has a conflict out to a
4002          * summarized transaction.  That summarized transaction must have
4003          * committed first, and we can't tell when it committed in relation to our
4004          * snapshot acquisition, so something needs to be canceled.
4005          */
4006         if (SxactHasSummaryConflictOut(sxact))
4007         {
4008                 if (!SxactIsPrepared(sxact))
4009                 {
4010                         sxact->flags |= SXACT_FLAG_DOOMED;
4011                         LWLockRelease(SerializableXactHashLock);
4012                         return;
4013                 }
4014                 else
4015                 {
4016                         LWLockRelease(SerializableXactHashLock);
4017                         ereport(ERROR,
4018                                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4019                                          errmsg("could not serialize access due to read/write dependencies among transactions"),
4020                                          errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
4021                                          errhint("The transaction might succeed if retried.")));
4022                 }
4023         }
4024
4025         /*
4026          * If this is a read-only transaction and the writing transaction has
4027          * committed, and it doesn't have a rw-conflict to a transaction which
4028          * committed before it, no conflict.
4029          */
4030         if (SxactIsReadOnly(MySerializableXact)
4031                 && SxactIsCommitted(sxact)
4032                 && !SxactHasSummaryConflictOut(sxact)
4033                 && (!SxactHasConflictOut(sxact)
4034                         || MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
4035         {
4036                 /* Read-only transaction will appear to run first.  No conflict. */
4037                 LWLockRelease(SerializableXactHashLock);
4038                 return;
4039         }
4040
4041         if (!XidIsConcurrent(xid))
4042         {
4043                 /* This write was already in our snapshot; no conflict. */
4044                 LWLockRelease(SerializableXactHashLock);
4045                 return;
4046         }
4047
4048         if (RWConflictExists(MySerializableXact, sxact))
4049         {
4050                 /* We don't want duplicate conflict records in the list. */
4051                 LWLockRelease(SerializableXactHashLock);
4052                 return;
4053         }
4054
4055         /*
4056          * Flag the conflict.  But first, if this conflict creates a dangerous
4057          * structure, ereport an error.
4058          */
4059         FlagRWConflict(MySerializableXact, sxact);
4060         LWLockRelease(SerializableXactHashLock);
4061 }
4062
4063 /*
4064  * Check a particular target for rw-dependency conflict in. A subroutine of
4065  * CheckForSerializableConflictIn().
4066  */
4067 static void
4068 CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
4069 {
4070         uint32          targettaghash;
4071         LWLock     *partitionLock;
4072         PREDICATELOCKTARGET *target;
4073         PREDICATELOCK *predlock;
4074         PREDICATELOCK *mypredlock = NULL;
4075         PREDICATELOCKTAG mypredlocktag;
4076
4077         Assert(MySerializableXact != InvalidSerializableXact);
4078
4079         /*
4080          * The same hash and LW lock apply to the lock target and the lock itself.
4081          */
4082         targettaghash = PredicateLockTargetTagHashCode(targettag);
4083         partitionLock = PredicateLockHashPartitionLock(targettaghash);
4084         LWLockAcquire(partitionLock, LW_SHARED);
4085         target = (PREDICATELOCKTARGET *)
4086                 hash_search_with_hash_value(PredicateLockTargetHash,
4087                                                                         targettag, targettaghash,
4088                                                                         HASH_FIND, NULL);
4089         if (!target)
4090         {
4091                 /* Nothing has this target locked; we're done here. */
4092                 LWLockRelease(partitionLock);
4093                 return;
4094         }
4095
4096         /*
4097          * Each lock for an overlapping transaction represents a conflict: a
4098          * rw-dependency in to this transaction.
4099          */
4100         predlock = (PREDICATELOCK *)
4101                 SHMQueueNext(&(target->predicateLocks),
4102                                          &(target->predicateLocks),
4103                                          offsetof(PREDICATELOCK, targetLink));
4104         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4105         while (predlock)
4106         {
4107                 SHM_QUEUE  *predlocktargetlink;
4108                 PREDICATELOCK *nextpredlock;
4109                 SERIALIZABLEXACT *sxact;
4110
4111                 predlocktargetlink = &(predlock->targetLink);
4112                 nextpredlock = (PREDICATELOCK *)
4113                         SHMQueueNext(&(target->predicateLocks),
4114                                                  predlocktargetlink,
4115                                                  offsetof(PREDICATELOCK, targetLink));
4116
4117                 sxact = predlock->tag.myXact;
4118                 if (sxact == MySerializableXact)
4119                 {
4120                         /*
4121                          * If we're getting a write lock on a tuple, we don't need a
4122                          * predicate (SIREAD) lock on the same tuple. We can safely remove
4123                          * our SIREAD lock, but we'll defer doing so until after the loop
4124                          * because that requires upgrading to an exclusive partition lock.
4125                          *
4126                          * We can't use this optimization within a subtransaction because
4127                          * the subtransaction could roll back, and we would be left
4128                          * without any lock at the top level.
4129                          */
4130                         if (!IsSubTransaction()
4131                                 && GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
4132                         {
4133                                 mypredlock = predlock;
4134                                 mypredlocktag = predlock->tag;
4135                         }
4136                 }
4137                 else if (!SxactIsDoomed(sxact)
4138                                  && (!SxactIsCommitted(sxact)
4139                                          || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4140                                                                                           sxact->finishedBefore))
4141                                  && !RWConflictExists(sxact, MySerializableXact))
4142                 {
4143                         LWLockRelease(SerializableXactHashLock);
4144                         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4145
4146                         /*
4147                          * Re-check after getting exclusive lock because the other
4148                          * transaction may have flagged a conflict.
4149                          */
4150                         if (!SxactIsDoomed(sxact)
4151                                 && (!SxactIsCommitted(sxact)
4152                                         || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4153                                                                                          sxact->finishedBefore))
4154                                 && !RWConflictExists(sxact, MySerializableXact))
4155                         {
4156                                 FlagRWConflict(sxact, MySerializableXact);
4157                         }
4158
4159                         LWLockRelease(SerializableXactHashLock);
4160                         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4161                 }
4162
4163                 predlock = nextpredlock;
4164         }
4165         LWLockRelease(SerializableXactHashLock);
4166         LWLockRelease(partitionLock);
4167
4168         /*
4169          * If we found one of our own SIREAD locks to remove, remove it now.
4170          *
4171          * At this point our transaction already has an ExclusiveRowLock on the
4172          * relation, so we are OK to drop the predicate lock on the tuple, if
4173          * found, without fearing that another write against the tuple will occur
4174          * before the MVCC information makes it to the buffer.
4175          */
4176         if (mypredlock != NULL)
4177         {
4178                 uint32          predlockhashcode;
4179                 PREDICATELOCK *rmpredlock;
4180
4181                 LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
4182                 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4183                 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4184
4185                 /*
4186                  * Remove the predicate lock from shared memory, if it wasn't removed
4187                  * while the locks were released.  One way that could happen is from
4188                  * autovacuum cleaning up an index.
4189                  */
4190                 predlockhashcode = PredicateLockHashCodeFromTargetHashCode
4191                         (&mypredlocktag, targettaghash);
4192                 rmpredlock = (PREDICATELOCK *)
4193                         hash_search_with_hash_value(PredicateLockHash,
4194                                                                                 &mypredlocktag,
4195                                                                                 predlockhashcode,
4196                                                                                 HASH_FIND, NULL);
4197                 if (rmpredlock != NULL)
4198                 {
4199                         Assert(rmpredlock == mypredlock);
4200
4201                         SHMQueueDelete(&(mypredlock->targetLink));
4202                         SHMQueueDelete(&(mypredlock->xactLink));
4203
4204                         rmpredlock = (PREDICATELOCK *)
4205                                 hash_search_with_hash_value(PredicateLockHash,
4206                                                                                         &mypredlocktag,
4207                                                                                         predlockhashcode,
4208                                                                                         HASH_REMOVE, NULL);
4209                         Assert(rmpredlock == mypredlock);
4210
4211                         RemoveTargetIfNoLongerUsed(target, targettaghash);
4212                 }
4213
4214                 LWLockRelease(SerializableXactHashLock);
4215                 LWLockRelease(partitionLock);
4216                 LWLockRelease(SerializablePredicateLockListLock);
4217
4218                 if (rmpredlock != NULL)
4219                 {
4220                         /*
4221                          * Remove entry in local lock table if it exists. It's OK if it
4222                          * doesn't exist; that means the lock was transferred to a new
4223                          * target by a different backend.
4224                          */
4225                         hash_search_with_hash_value(LocalPredicateLockHash,
4226                                                                                 targettag, targettaghash,
4227                                                                                 HASH_REMOVE, NULL);
4228
4229                         DecrementParentLocks(targettag);
4230                 }
4231         }
4232 }
4233
4234 /*
4235  * CheckForSerializableConflictIn
4236  *              We are writing the given tuple.  If that indicates a rw-conflict
4237  *              in from another serializable transaction, take appropriate action.
4238  *
4239  * Skip checking for any granularity for which a parameter is missing.
4240  *
4241  * A tuple update or delete is in conflict if we have a predicate lock
4242  * against the relation or page in which the tuple exists, or against the
4243  * tuple itself.
4244  */
4245 void
4246 CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
4247                                                            Buffer buffer)
4248 {
4249         PREDICATELOCKTARGETTAG targettag;
4250
4251         if (!SerializationNeededForWrite(relation))
4252                 return;
4253
4254         /* Check if someone else has already decided that we need to die */
4255         if (SxactIsDoomed(MySerializableXact))
4256                 ereport(ERROR,
4257                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4258                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
4259                                  errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
4260                                  errhint("The transaction might succeed if retried.")));
4261
4262         /*
4263          * We're doing a write which might cause rw-conflicts now or later.
4264          * Memorize that fact.
4265          */
4266         MyXactDidWrite = true;
4267
4268         /*
4269          * It is important that we check for locks from the finest granularity to
4270          * the coarsest granularity, so that granularity promotion doesn't cause
4271          * us to miss a lock.  The new (coarser) lock will be acquired before the
4272          * old (finer) locks are released.
4273          *
4274          * It is not possible to take and hold a lock across the checks for all
4275          * granularities because each target could be in a separate partition.
4276          */
4277         if (tuple != NULL)
4278         {
4279                 SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
4280                                                                                  relation->rd_node.dbNode,
4281                                                                                  relation->rd_id,
4282                                                                  ItemPointerGetBlockNumber(&(tuple->t_self)),
4283                                                            ItemPointerGetOffsetNumber(&(tuple->t_self)));
4284                 CheckTargetForConflictsIn(&targettag);
4285         }
4286
4287         if (BufferIsValid(buffer))
4288         {
4289                 SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
4290                                                                                 relation->rd_node.dbNode,
4291                                                                                 relation->rd_id,
4292                                                                                 BufferGetBlockNumber(buffer));
4293                 CheckTargetForConflictsIn(&targettag);
4294         }
4295
4296         SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
4297                                                                                 relation->rd_node.dbNode,
4298                                                                                 relation->rd_id);
4299         CheckTargetForConflictsIn(&targettag);
4300 }
4301
4302 /*
4303  * CheckTableForSerializableConflictIn
4304  *              The entire table is going through a DDL-style logical mass delete
4305  *              like TRUNCATE or DROP TABLE.  If that causes a rw-conflict in from
4306  *              another serializable transaction, take appropriate action.
4307  *
4308  * While these operations do not operate entirely within the bounds of
4309  * snapshot isolation, they can occur inside a serializable transaction, and
4310  * will logically occur after any reads which saw rows which were destroyed
4311  * by these operations, so we do what we can to serialize properly under
4312  * SSI.
4313  *
4314  * The relation passed in must be a heap relation. Any predicate lock of any
4315  * granularity on the heap will cause a rw-conflict in to this transaction.
4316  * Predicate locks on indexes do not matter because they only exist to guard
4317  * against conflicting inserts into the index, and this is a mass *delete*.
4318  * When a table is truncated or dropped, the index will also be truncated
4319  * or dropped, and we'll deal with locks on the index when that happens.
4320  *
4321  * Dropping or truncating a table also needs to drop any existing predicate
4322  * locks on heap tuples or pages, because they're about to go away. This
4323  * should be done before altering the predicate locks because the transaction
4324  * could be rolled back because of a conflict, in which case the lock changes
4325  * are not needed. (At the moment, we don't actually bother to drop the
4326  * existing locks on a dropped or truncated table at the moment. That might
4327  * lead to some false positives, but it doesn't seem worth the trouble.)
4328  */
4329 void
4330 CheckTableForSerializableConflictIn(Relation relation)
4331 {
4332         HASH_SEQ_STATUS seqstat;
4333         PREDICATELOCKTARGET *target;
4334         Oid                     dbId;
4335         Oid                     heapId;
4336         int                     i;
4337
4338         /*
4339          * Bail out quickly if there are no serializable transactions running.
4340          * It's safe to check this without taking locks because the caller is
4341          * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
4342          * would matter here can be acquired while that is held.
4343          */
4344         if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
4345                 return;
4346
4347         if (!SerializationNeededForWrite(relation))
4348                 return;
4349
4350         /*
4351          * We're doing a write which might cause rw-conflicts now or later.
4352          * Memorize that fact.
4353          */
4354         MyXactDidWrite = true;
4355
4356         Assert(relation->rd_index == NULL); /* not an index relation */
4357
4358         dbId = relation->rd_node.dbNode;
4359         heapId = relation->rd_id;
4360
4361         LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
4362         for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
4363                 LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
4364         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4365
4366         /* Scan through target list */
4367         hash_seq_init(&seqstat, PredicateLockTargetHash);
4368
4369         while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
4370         {
4371                 PREDICATELOCK *predlock;
4372
4373                 /*
4374                  * Check whether this is a target which needs attention.
4375                  */
4376                 if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
4377                         continue;                       /* wrong relation id */
4378                 if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
4379                         continue;                       /* wrong database id */
4380
4381                 /*
4382                  * Loop through locks for this target and flag conflicts.
4383                  */
4384                 predlock = (PREDICATELOCK *)
4385                         SHMQueueNext(&(target->predicateLocks),
4386                                                  &(target->predicateLocks),
4387                                                  offsetof(PREDICATELOCK, targetLink));
4388                 while (predlock)
4389                 {
4390                         PREDICATELOCK *nextpredlock;
4391
4392                         nextpredlock = (PREDICATELOCK *)
4393                                 SHMQueueNext(&(target->predicateLocks),
4394                                                          &(predlock->targetLink),
4395                                                          offsetof(PREDICATELOCK, targetLink));
4396
4397                         if (predlock->tag.myXact != MySerializableXact
4398                           && !RWConflictExists(predlock->tag.myXact, MySerializableXact))
4399                         {
4400                                 FlagRWConflict(predlock->tag.myXact, MySerializableXact);
4401                         }
4402
4403                         predlock = nextpredlock;
4404                 }
4405         }
4406
4407         /* Release locks in reverse order */
4408         LWLockRelease(SerializableXactHashLock);
4409         for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
4410                 LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
4411         LWLockRelease(SerializablePredicateLockListLock);
4412 }
4413
4414
4415 /*
4416  * Flag a rw-dependency between two serializable transactions.
4417  *
4418  * The caller is responsible for ensuring that we have a LW lock on
4419  * the transaction hash table.
4420  */
4421 static void
4422 FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
4423 {
4424         Assert(reader != writer);
4425
4426         /* First, see if this conflict causes failure. */
4427         OnConflict_CheckForSerializationFailure(reader, writer);
4428
4429         /* Actually do the conflict flagging. */
4430         if (reader == OldCommittedSxact)
4431                 writer->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
4432         else if (writer == OldCommittedSxact)
4433                 reader->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4434         else
4435                 SetRWConflict(reader, writer);
4436 }
4437
4438 /*----------------------------------------------------------------------------
4439  * We are about to add a RW-edge to the dependency graph - check that we don't
4440  * introduce a dangerous structure by doing so, and abort one of the
4441  * transactions if so.
4442  *
4443  * A serialization failure can only occur if there is a dangerous structure
4444  * in the dependency graph:
4445  *
4446  *              Tin ------> Tpivot ------> Tout
4447  *                        rw                     rw
4448  *
4449  * Furthermore, Tout must commit first.
4450  *
4451  * One more optimization is that if Tin is declared READ ONLY (or commits
4452  * without writing), we can only have a problem if Tout committed before Tin
4453  * acquired its snapshot.
4454  *----------------------------------------------------------------------------
4455  */
4456 static void
4457 OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
4458                                                                                 SERIALIZABLEXACT *writer)
4459 {
4460         bool            failure;
4461         RWConflict      conflict;
4462
4463         Assert(LWLockHeldByMe(SerializableXactHashLock));
4464
4465         failure = false;
4466
4467         /*------------------------------------------------------------------------
4468          * Check for already-committed writer with rw-conflict out flagged
4469          * (conflict-flag on W means that T2 committed before W):
4470          *
4471          *              R ------> W ------> T2
4472          *                      rw                rw
4473          *
4474          * That is a dangerous structure, so we must abort. (Since the writer
4475          * has already committed, we must be the reader)
4476          *------------------------------------------------------------------------
4477          */
4478         if (SxactIsCommitted(writer)
4479           && (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
4480                 failure = true;
4481
4482         /*------------------------------------------------------------------------
4483          * Check whether the writer has become a pivot with an out-conflict
4484          * committed transaction (T2), and T2 committed first:
4485          *
4486          *              R ------> W ------> T2
4487          *                      rw                rw
4488          *
4489          * Because T2 must've committed first, there is no anomaly if:
4490          * - the reader committed before T2
4491          * - the writer committed before T2
4492          * - the reader is a READ ONLY transaction and the reader was concurrent
4493          *       with T2 (= reader acquired its snapshot before T2 committed)
4494          *
4495          * We also handle the case that T2 is prepared but not yet committed
4496          * here. In that case T2 has already checked for conflicts, so if it
4497          * commits first, making the above conflict real, it's too late for it
4498          * to abort.
4499          *------------------------------------------------------------------------
4500          */
4501         if (!failure)
4502         {
4503                 if (SxactHasSummaryConflictOut(writer))
4504                 {
4505                         failure = true;
4506                         conflict = NULL;
4507                 }
4508                 else
4509                         conflict = (RWConflict)
4510                                 SHMQueueNext(&writer->outConflicts,
4511                                                          &writer->outConflicts,
4512                                                          offsetof(RWConflictData, outLink));
4513                 while (conflict)
4514                 {
4515                         SERIALIZABLEXACT *t2 = conflict->sxactIn;
4516
4517                         if (SxactIsPrepared(t2)
4518                                 && (!SxactIsCommitted(reader)
4519                                         || t2->prepareSeqNo <= reader->commitSeqNo)
4520                                 && (!SxactIsCommitted(writer)
4521                                         || t2->prepareSeqNo <= writer->commitSeqNo)
4522                                 && (!SxactIsReadOnly(reader)
4523                           || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
4524                         {
4525                                 failure = true;
4526                                 break;
4527                         }
4528                         conflict = (RWConflict)
4529                                 SHMQueueNext(&writer->outConflicts,
4530                                                          &conflict->outLink,
4531                                                          offsetof(RWConflictData, outLink));
4532                 }
4533         }
4534
4535         /*------------------------------------------------------------------------
4536          * Check whether the reader has become a pivot with a writer
4537          * that's committed (or prepared):
4538          *
4539          *              T0 ------> R ------> W
4540          *                       rw                rw
4541          *
4542          * Because W must've committed first for an anomaly to occur, there is no
4543          * anomaly if:
4544          * - T0 committed before the writer
4545          * - T0 is READ ONLY, and overlaps the writer
4546          *------------------------------------------------------------------------
4547          */
4548         if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
4549         {
4550                 if (SxactHasSummaryConflictIn(reader))
4551                 {
4552                         failure = true;
4553                         conflict = NULL;
4554                 }
4555                 else
4556                         conflict = (RWConflict)
4557                                 SHMQueueNext(&reader->inConflicts,
4558                                                          &reader->inConflicts,
4559                                                          offsetof(RWConflictData, inLink));
4560                 while (conflict)
4561                 {
4562                         SERIALIZABLEXACT *t0 = conflict->sxactOut;
4563
4564                         if (!SxactIsDoomed(t0)
4565                                 && (!SxactIsCommitted(t0)
4566                                         || t0->commitSeqNo >= writer->prepareSeqNo)
4567                                 && (!SxactIsReadOnly(t0)
4568                           || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
4569                         {
4570                                 failure = true;
4571                                 break;
4572                         }
4573                         conflict = (RWConflict)
4574                                 SHMQueueNext(&reader->inConflicts,
4575                                                          &conflict->inLink,
4576                                                          offsetof(RWConflictData, inLink));
4577                 }
4578         }
4579
4580         if (failure)
4581         {
4582                 /*
4583                  * We have to kill a transaction to avoid a possible anomaly from
4584                  * occurring. If the writer is us, we can just ereport() to cause a
4585                  * transaction abort. Otherwise we flag the writer for termination,
4586                  * causing it to abort when it tries to commit. However, if the writer
4587                  * is a prepared transaction, already prepared, we can't abort it
4588                  * anymore, so we have to kill the reader instead.
4589                  */
4590                 if (MySerializableXact == writer)
4591                 {
4592                         LWLockRelease(SerializableXactHashLock);
4593                         ereport(ERROR,
4594                                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4595                                          errmsg("could not serialize access due to read/write dependencies among transactions"),
4596                                          errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
4597                                          errhint("The transaction might succeed if retried.")));
4598                 }
4599                 else if (SxactIsPrepared(writer))
4600                 {
4601                         LWLockRelease(SerializableXactHashLock);
4602
4603                         /* if we're not the writer, we have to be the reader */
4604                         Assert(MySerializableXact == reader);
4605                         ereport(ERROR,
4606                                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4607                                          errmsg("could not serialize access due to read/write dependencies among transactions"),
4608                                          errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
4609                                          errhint("The transaction might succeed if retried.")));
4610                 }
4611                 writer->flags |= SXACT_FLAG_DOOMED;
4612         }
4613 }
4614
4615 /*
4616  * PreCommit_CheckForSerializableConflicts
4617  *              Check for dangerous structures in a serializable transaction
4618  *              at commit.
4619  *
4620  * We're checking for a dangerous structure as each conflict is recorded.
4621  * The only way we could have a problem at commit is if this is the "out"
4622  * side of a pivot, and neither the "in" side nor the pivot has yet
4623  * committed.
4624  *
4625  * If a dangerous structure is found, the pivot (the near conflict) is
4626  * marked for death, because rolling back another transaction might mean
4627  * that we flail without ever making progress.  This transaction is
4628  * committing writes, so letting it commit ensures progress.  If we
4629  * canceled the far conflict, it might immediately fail again on retry.
4630  */
4631 void
4632 PreCommit_CheckForSerializationFailure(void)
4633 {
4634         RWConflict      nearConflict;
4635
4636         if (MySerializableXact == InvalidSerializableXact)
4637                 return;
4638
4639         Assert(IsolationIsSerializable());
4640
4641         LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4642
4643         /* Check if someone else has already decided that we need to die */
4644         if (SxactIsDoomed(MySerializableXact))
4645         {
4646                 LWLockRelease(SerializableXactHashLock);
4647                 ereport(ERROR,
4648                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4649                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
4650                                  errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
4651                                  errhint("The transaction might succeed if retried.")));
4652         }
4653
4654         nearConflict = (RWConflict)
4655                 SHMQueueNext(&MySerializableXact->inConflicts,
4656                                          &MySerializableXact->inConflicts,
4657                                          offsetof(RWConflictData, inLink));
4658         while (nearConflict)
4659         {
4660                 if (!SxactIsCommitted(nearConflict->sxactOut)
4661                         && !SxactIsDoomed(nearConflict->sxactOut))
4662                 {
4663                         RWConflict      farConflict;
4664
4665                         farConflict = (RWConflict)
4666                                 SHMQueueNext(&nearConflict->sxactOut->inConflicts,
4667                                                          &nearConflict->sxactOut->inConflicts,
4668                                                          offsetof(RWConflictData, inLink));
4669                         while (farConflict)
4670                         {
4671                                 if (farConflict->sxactOut == MySerializableXact
4672                                         || (!SxactIsCommitted(farConflict->sxactOut)
4673                                                 && !SxactIsReadOnly(farConflict->sxactOut)
4674                                                 && !SxactIsDoomed(farConflict->sxactOut)))
4675                                 {
4676                                         /*
4677                                          * Normally, we kill the pivot transaction to make sure we
4678                                          * make progress if the failing transaction is retried.
4679                                          * However, we can't kill it if it's already prepared, so
4680                                          * in that case we commit suicide instead.
4681                                          */
4682                                         if (SxactIsPrepared(nearConflict->sxactOut))
4683                                         {
4684                                                 LWLockRelease(SerializableXactHashLock);
4685                                                 ereport(ERROR,
4686                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4687                                                                  errmsg("could not serialize access due to read/write dependencies among transactions"),
4688                                                                  errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
4689                                                                  errhint("The transaction might succeed if retried.")));
4690                                         }
4691                                         nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
4692                                         break;
4693                                 }
4694                                 farConflict = (RWConflict)
4695                                         SHMQueueNext(&nearConflict->sxactOut->inConflicts,
4696                                                                  &farConflict->inLink,
4697                                                                  offsetof(RWConflictData, inLink));
4698                         }
4699                 }
4700
4701                 nearConflict = (RWConflict)
4702                         SHMQueueNext(&MySerializableXact->inConflicts,
4703                                                  &nearConflict->inLink,
4704                                                  offsetof(RWConflictData, inLink));
4705         }
4706
4707         MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
4708         MySerializableXact->flags |= SXACT_FLAG_PREPARED;
4709
4710         LWLockRelease(SerializableXactHashLock);
4711 }
4712
4713 /*------------------------------------------------------------------------*/
4714
4715 /*
4716  * Two-phase commit support
4717  */
4718
4719 /*
4720  * AtPrepare_Locks
4721  *              Do the preparatory work for a PREPARE: make 2PC state file
4722  *              records for all predicate locks currently held.
4723  */
4724 void
4725 AtPrepare_PredicateLocks(void)
4726 {
4727         PREDICATELOCK *predlock;
4728         SERIALIZABLEXACT *sxact;
4729         TwoPhasePredicateRecord record;
4730         TwoPhasePredicateXactRecord *xactRecord;
4731         TwoPhasePredicateLockRecord *lockRecord;
4732
4733         sxact = MySerializableXact;
4734         xactRecord = &(record.data.xactRecord);
4735         lockRecord = &(record.data.lockRecord);
4736
4737         if (MySerializableXact == InvalidSerializableXact)
4738                 return;
4739
4740         /* Generate a xact record for our SERIALIZABLEXACT */
4741         record.type = TWOPHASEPREDICATERECORD_XACT;
4742         xactRecord->xmin = MySerializableXact->xmin;
4743         xactRecord->flags = MySerializableXact->flags;
4744
4745         /*
4746          * Note that we don't include the list of conflicts in our out in the
4747          * statefile, because new conflicts can be added even after the
4748          * transaction prepares. We'll just make a conservative assumption during
4749          * recovery instead.
4750          */
4751
4752         RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
4753                                                    &record, sizeof(record));
4754
4755         /*
4756          * Generate a lock record for each lock.
4757          *
4758          * To do this, we need to walk the predicate lock list in our sxact rather
4759          * than using the local predicate lock table because the latter is not
4760          * guaranteed to be accurate.
4761          */
4762         LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
4763
4764         predlock = (PREDICATELOCK *)
4765                 SHMQueueNext(&(sxact->predicateLocks),
4766                                          &(sxact->predicateLocks),
4767                                          offsetof(PREDICATELOCK, xactLink));
4768
4769         while (predlock != NULL)
4770         {
4771                 record.type = TWOPHASEPREDICATERECORD_LOCK;
4772                 lockRecord->target = predlock->tag.myTarget->tag;
4773
4774                 RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
4775                                                            &record, sizeof(record));
4776
4777                 predlock = (PREDICATELOCK *)
4778                         SHMQueueNext(&(sxact->predicateLocks),
4779                                                  &(predlock->xactLink),
4780                                                  offsetof(PREDICATELOCK, xactLink));
4781         }
4782
4783         LWLockRelease(SerializablePredicateLockListLock);
4784 }
4785
4786 /*
4787  * PostPrepare_Locks
4788  *              Clean up after successful PREPARE. Unlike the non-predicate
4789  *              lock manager, we do not need to transfer locks to a dummy
4790  *              PGPROC because our SERIALIZABLEXACT will stay around
4791  *              anyway. We only need to clean up our local state.
4792  */
4793 void
4794 PostPrepare_PredicateLocks(TransactionId xid)
4795 {
4796         if (MySerializableXact == InvalidSerializableXact)
4797                 return;
4798
4799         Assert(SxactIsPrepared(MySerializableXact));
4800
4801         MySerializableXact->pid = 0;
4802
4803         hash_destroy(LocalPredicateLockHash);
4804         LocalPredicateLockHash = NULL;
4805
4806         MySerializableXact = InvalidSerializableXact;
4807         MyXactDidWrite = false;
4808 }
4809
4810 /*
4811  * PredicateLockTwoPhaseFinish
4812  *              Release a prepared transaction's predicate locks once it
4813  *              commits or aborts.
4814  */
4815 void
4816 PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
4817 {
4818         SERIALIZABLEXID *sxid;
4819         SERIALIZABLEXIDTAG sxidtag;
4820
4821         sxidtag.xid = xid;
4822
4823         LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4824         sxid = (SERIALIZABLEXID *)
4825                 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4826         LWLockRelease(SerializableXactHashLock);
4827
4828         /* xid will not be found if it wasn't a serializable transaction */
4829         if (sxid == NULL)
4830                 return;
4831
4832         /* Release its locks */
4833         MySerializableXact = sxid->myXact;
4834         MyXactDidWrite = true;          /* conservatively assume that we wrote
4835                                                                  * something */
4836         ReleasePredicateLocks(isCommit);
4837 }
4838
4839 /*
4840  * Re-acquire a predicate lock belonging to a transaction that was prepared.
4841  */
4842 void
4843 predicatelock_twophase_recover(TransactionId xid, uint16 info,
4844                                                            void *recdata, uint32 len)
4845 {
4846         TwoPhasePredicateRecord *record;
4847
4848         Assert(len == sizeof(TwoPhasePredicateRecord));
4849
4850         record = (TwoPhasePredicateRecord *) recdata;
4851
4852         Assert((record->type == TWOPHASEPREDICATERECORD_XACT) ||
4853                    (record->type == TWOPHASEPREDICATERECORD_LOCK));
4854
4855         if (record->type == TWOPHASEPREDICATERECORD_XACT)
4856         {
4857                 /* Per-transaction record. Set up a SERIALIZABLEXACT. */
4858                 TwoPhasePredicateXactRecord *xactRecord;
4859                 SERIALIZABLEXACT *sxact;
4860                 SERIALIZABLEXID *sxid;
4861                 SERIALIZABLEXIDTAG sxidtag;
4862                 bool            found;
4863
4864                 xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;
4865
4866                 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4867                 sxact = CreatePredXact();
4868                 if (!sxact)
4869                         ereport(ERROR,
4870                                         (errcode(ERRCODE_OUT_OF_MEMORY),
4871                                          errmsg("out of shared memory")));
4872
4873                 /* vxid for a prepared xact is InvalidBackendId/xid; no pid */
4874                 sxact->vxid.backendId = InvalidBackendId;
4875                 sxact->vxid.localTransactionId = (LocalTransactionId) xid;
4876                 sxact->pid = 0;
4877
4878                 /* a prepared xact hasn't committed yet */
4879                 sxact->prepareSeqNo = RecoverySerCommitSeqNo;
4880                 sxact->commitSeqNo = InvalidSerCommitSeqNo;
4881                 sxact->finishedBefore = InvalidTransactionId;
4882
4883                 sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;
4884
4885                 /*
4886                  * Don't need to track this; no transactions running at the time the
4887                  * recovered xact started are still active, except possibly other
4888                  * prepared xacts and we don't care whether those are RO_SAFE or not.
4889                  */
4890                 SHMQueueInit(&(sxact->possibleUnsafeConflicts));
4891
4892                 SHMQueueInit(&(sxact->predicateLocks));
4893                 SHMQueueElemInit(&(sxact->finishedLink));
4894
4895                 sxact->topXid = xid;
4896                 sxact->xmin = xactRecord->xmin;
4897                 sxact->flags = xactRecord->flags;
4898                 Assert(SxactIsPrepared(sxact));
4899                 if (!SxactIsReadOnly(sxact))
4900                 {
4901                         ++(PredXact->WritableSxactCount);
4902                         Assert(PredXact->WritableSxactCount <=
4903                                    (MaxBackends + max_prepared_xacts));
4904                 }
4905
4906                 /*
4907                  * We don't know whether the transaction had any conflicts or not, so
4908                  * we'll conservatively assume that it had both a conflict in and a
4909                  * conflict out, and represent that with the summary conflict flags.
4910                  */
4911                 SHMQueueInit(&(sxact->outConflicts));
4912                 SHMQueueInit(&(sxact->inConflicts));
4913                 sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
4914                 sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4915
4916                 /* Register the transaction's xid */
4917                 sxidtag.xid = xid;
4918                 sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
4919                                                                                            &sxidtag,
4920                                                                                            HASH_ENTER, &found);
4921                 Assert(sxid != NULL);
4922                 Assert(!found);
4923                 sxid->myXact = (SERIALIZABLEXACT *) sxact;
4924
4925                 /*
4926                  * Update global xmin. Note that this is a special case compared to
4927                  * registering a normal transaction, because the global xmin might go
4928                  * backwards. That's OK, because until recovery is over we're not
4929                  * going to complete any transactions or create any non-prepared
4930                  * transactions, so there's no danger of throwing away.
4931                  */
4932                 if ((!TransactionIdIsValid(PredXact->SxactGlobalXmin)) ||
4933                         (TransactionIdFollows(PredXact->SxactGlobalXmin, sxact->xmin)))
4934                 {
4935                         PredXact->SxactGlobalXmin = sxact->xmin;
4936                         PredXact->SxactGlobalXminCount = 1;
4937                         OldSerXidSetActiveSerXmin(sxact->xmin);
4938                 }
4939                 else if (TransactionIdEquals(sxact->xmin, PredXact->SxactGlobalXmin))
4940                 {
4941                         Assert(PredXact->SxactGlobalXminCount > 0);
4942                         PredXact->SxactGlobalXminCount++;
4943                 }
4944
4945                 LWLockRelease(SerializableXactHashLock);
4946         }
4947         else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
4948         {
4949                 /* Lock record. Recreate the PREDICATELOCK */
4950                 TwoPhasePredicateLockRecord *lockRecord;
4951                 SERIALIZABLEXID *sxid;
4952                 SERIALIZABLEXACT *sxact;
4953                 SERIALIZABLEXIDTAG sxidtag;
4954                 uint32          targettaghash;
4955
4956                 lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
4957                 targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);
4958
4959                 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4960                 sxidtag.xid = xid;
4961                 sxid = (SERIALIZABLEXID *)
4962                         hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4963                 LWLockRelease(SerializableXactHashLock);
4964
4965                 Assert(sxid != NULL);
4966                 sxact = sxid->myXact;
4967                 Assert(sxact != InvalidSerializableXact);
4968
4969                 CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
4970         }
4971 }