1 /*-------------------------------------------------------------------------
4 * Lightweight lock manager
7 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
10 * src/include/storage/lwlock.h
12 *-------------------------------------------------------------------------
17 #include "lib/ilist.h"
18 #include "storage/s_lock.h"
19 #include "port/atomics.h"
24 * It's occasionally necessary to identify a particular LWLock "by name"; e.g.
25 * because we wish to report the lock to dtrace. We could store a name or
26 * other identifying information in the lock itself, but since it's common
27 * to have many nearly-identical locks (e.g. one per buffer) this would end
28 * up wasting significant amounts of memory. Instead, each lwlock stores a
29 * tranche ID which tells us which array it's part of. Based on that, we can
30 * figure out where the lwlock lies within the array using the data structure
31 * shown below; the lock is then identified based on the tranche name and
32 * computed array index. We need the array stride because the array might not
33 * be an array of lwlocks, but rather some larger data structure that includes
34 * one or more lwlocks per element.
36 typedef struct LWLockTranche
44 * Code outside of lwlock.c should not manipulate the contents of this
45 * structure directly, but we have to declare it here to allow LWLocks to be
46 * incorporated into other data structures.
50 slock_t mutex; /* Protects LWLock and queue of PGPROCs */
51 uint16 tranche; /* tranche ID */
	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
55 pg_atomic_uint32 nwaiters; /* number of waiters */
57 dlist_head waiters; /* list of waiting PGPROCs */
	struct PGPROC *owner;		/* last exclusive owner of the lock */
64 * Prior to PostgreSQL 9.4, every lightweight lock in the system was stored
65 * in a single array. For convenience and for compatibility with past
66 * releases, we still have a main array, but it's now also permissible to
67 * store LWLocks elsewhere in the main shared memory segment or in a dynamic
68 * shared memory segment. In the main array, we force the array stride to
69 * be a power of 2, which saves a few cycles in indexing, but more importantly
70 * also ensures that individual LWLocks don't cross cache line boundaries.
71 * This reduces cache contention problems, especially on AMD Opterons.
72 * (Of course, we have to also ensure that the array start address is suitably
 * On 32-bit platforms an LWLock will these days fit into 16 bytes, but since
76 * that didn't use to be the case and cramming more lwlocks into a cacheline
77 * might be detrimental performancewise we still use 32 byte alignment
78 * there. So, both on 32 and 64 bit platforms, it should fit into 32 bytes
79 * unless slock_t is really big. We allow for that just in case.
/* Pad to 32 bytes, or 64 if an oversized slock_t pushes sizeof(LWLock) past 32 */
#define LWLOCK_PADDED_SIZE	(sizeof(LWLock) <= 32 ? 32 : 64)
83 typedef union LWLockPadded
86 char pad[LWLOCK_PADDED_SIZE];
/* The main array of fixed LWLocks, allocated in the main shared memory segment */
extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
91 * Some commonly-used locks have predefined positions within MainLWLockArray;
92 * defining macros here makes it much easier to keep track of these. If you
93 * add a lock, add it to the end to avoid renumbering the existing locks;
94 * if you remove a lock, consider leaving a gap in the numbering sequence for
95 * the benefit of DTrace and other external debugging scripts.
/* 0 is available; was formerly BufFreelistLock */
#define ShmemIndexLock (&MainLWLockArray[1].lock)
#define OidGenLock (&MainLWLockArray[2].lock)
#define XidGenLock (&MainLWLockArray[3].lock)
#define ProcArrayLock (&MainLWLockArray[4].lock)
#define SInvalReadLock (&MainLWLockArray[5].lock)
#define SInvalWriteLock (&MainLWLockArray[6].lock)
#define WALBufMappingLock (&MainLWLockArray[7].lock)
#define WALWriteLock (&MainLWLockArray[8].lock)
#define ControlFileLock (&MainLWLockArray[9].lock)
#define CheckpointLock (&MainLWLockArray[10].lock)
#define CLogControlLock (&MainLWLockArray[11].lock)
#define SubtransControlLock (&MainLWLockArray[12].lock)
#define MultiXactGenLock (&MainLWLockArray[13].lock)
#define MultiXactOffsetControlLock (&MainLWLockArray[14].lock)
#define MultiXactMemberControlLock (&MainLWLockArray[15].lock)
#define RelCacheInitLock (&MainLWLockArray[16].lock)
#define CheckpointerCommLock (&MainLWLockArray[17].lock)
#define TwoPhaseStateLock (&MainLWLockArray[18].lock)
#define TablespaceCreateLock (&MainLWLockArray[19].lock)
#define BtreeVacuumLock (&MainLWLockArray[20].lock)
#define AddinShmemInitLock (&MainLWLockArray[21].lock)
#define AutovacuumLock (&MainLWLockArray[22].lock)
#define AutovacuumScheduleLock (&MainLWLockArray[23].lock)
#define SyncScanLock (&MainLWLockArray[24].lock)
#define RelationMappingLock (&MainLWLockArray[25].lock)
#define AsyncCtlLock (&MainLWLockArray[26].lock)
#define AsyncQueueLock (&MainLWLockArray[27].lock)
#define SerializableXactHashLock (&MainLWLockArray[28].lock)
#define SerializableFinishedListLock (&MainLWLockArray[29].lock)
#define SerializablePredicateLockListLock (&MainLWLockArray[30].lock)
#define OldSerXidLock (&MainLWLockArray[31].lock)
#define SyncRepLock (&MainLWLockArray[32].lock)
#define BackgroundWorkerLock (&MainLWLockArray[33].lock)
#define DynamicSharedMemoryControlLock (&MainLWLockArray[34].lock)
#define AutoFileLock (&MainLWLockArray[35].lock)
#define ReplicationSlotAllocationLock (&MainLWLockArray[36].lock)
#define ReplicationSlotControlLock (&MainLWLockArray[37].lock)
#define CommitTsControlLock (&MainLWLockArray[38].lock)
#define CommitTsLock (&MainLWLockArray[39].lock)

/* Must be one more than the highest MainLWLockArray index used above. */
#define NUM_INDIVIDUAL_LWLOCKS 40
141 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
142 * here, but we need them to figure out offsets within MainLWLockArray, and
143 * having this file include lock.h or bufmgr.h would be backwards.
/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS 128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS 4
#define NUM_LOCK_PARTITIONS (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS 4
#define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
/* Total number of LWLocks preallocated in MainLWLockArray */
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
166 typedef enum LWLockMode
170 LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode,
171 * when waiting for lock to become free. Not
172 * to be used as LWLockAcquire argument */
/* Debug/trace flag; presumably a GUC consumed by lwlock.c — confirm there */
extern bool Trace_lwlocks;

/* Core acquire/release API; bool results report whether the lock was obtained */
extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);

/* Variants that manage a uint64 value protected together with the lock */
extern bool LWLockAcquireWithVar(LWLock *lock, uint64 *valptr, uint64 val);
extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);

/* Shared-memory sizing, creation, and per-backend initialization */
extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);
196 * The traditional method for obtaining an lwlock for use by an extension is
197 * to call RequestAddinLWLocks() during postmaster startup; this will reserve
198 * space for the indicated number of locks in MainLWLockArray. Subsequently,
199 * a lock can be allocated using LWLockAssign.
extern void RequestAddinLWLocks(int n); /* reserve n locks; postmaster startup only */
extern LWLock *LWLockAssign(void);		/* hand out one previously-reserved lock */
205 * There is another, more flexible method of obtaining lwlocks. First, call
206 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
207 * a shared counter. Next, each individual process using the tranche should
208 * call LWLockRegisterTranche() to associate that tranche ID with appropriate
209 * metadata. Finally, LWLockInitialize should be called just once per lwlock,
210 * passing the tranche ID as an argument.
212 * It may seem strange that each process using the tranche must register it
213 * separately, but dynamic shared memory segments aren't guaranteed to be
214 * mapped at the same address in all coordinating backends, so storing the
215 * registration in the main shared memory segment wouldn't work for that case.
extern int LWLockNewTrancheId(void);	/* allocate tranche ID from shared counter */
extern void LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche); /* per process */
extern void LWLockInitialize(LWLock *lock, int tranche_id); /* once per lwlock */
222 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
223 * to LWLocks. New code should instead use LWLock *. However, for the
224 * convenience of third-party code, we include the following typedef.
typedef LWLock *LWLockId;		/* deprecated: new code should use LWLock * directly */
228 #endif /* LWLOCK_H */