X-Git-Url: https://granicus.if.org/sourcecode?a=blobdiff_plain;f=src%2Finclude%2Fstorage%2Flwlock.h;h=96c773200667e775922270dd7027e9da5b3b88da;hb=97c39498e5ca9208d3de5a443a2282923619bf91;hp=ed9025babd183c98f2f360ac0ac044dba6e09100;hpb=3fed417452b226d9bd85a3a54d7056b06eb14897;p=postgresql

diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index ed9025babd..96c7732006 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -4,7 +4,7 @@
  * Lightweight lock manager
  *
  *
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * src/include/storage/lwlock.h
@@ -18,32 +18,12 @@
 #error "lwlock.h may not be included from frontend code"
 #endif
 
-#include "lib/ilist.h"
+#include "storage/proclist_types.h"
 #include "storage/s_lock.h"
 #include "port/atomics.h"
 
 struct PGPROC;
 
-/*
- * It's occasionally necessary to identify a particular LWLock "by name"; e.g.
- * because we wish to report the lock to dtrace. We could store a name or
- * other identifying information in the lock itself, but since it's common
- * to have many nearly-identical locks (e.g. one per buffer) this would end
- * up wasting significant amounts of memory. Instead, each lwlock stores a
- * tranche ID which tells us which array it's part of. Based on that, we can
- * figure out where the lwlock lies within the array using the data structure
- * shown below; the lock is then identified based on the tranche name and
- * computed array index. We need the array stride because the array might not
- * be an array of lwlocks, but rather some larger data structure that includes
- * one or more lwlocks per element.
- */
-typedef struct LWLockTranche
-{
- const char *name;
- void *array_base;
- Size array_stride;
-} LWLockTranche;
-
 /*
  * Code outside of lwlock.c should not manipulate the contents of this
  * structure directly, but we have to declare it here to allow LWLocks to be
@@ -51,46 +31,74 @@ typedef struct LWLock
  */
 typedef struct LWLock
 {
- slock_t mutex; /* Protects LWLock and queue of PGPROCs */
  uint16 tranche; /* tranche ID */
- pg_atomic_uint32 state; /* state of exclusive/nonexclusive lockers */
+ proclist_head waiters; /* list of waiting PGPROCs */
 #ifdef LOCK_DEBUG
  pg_atomic_uint32 nwaiters; /* number of waiters */
-#endif
- dlist_head waiters; /* list of waiting PGPROCs */
-#ifdef LOCK_DEBUG
  struct PGPROC *owner; /* last exclusive owner of the lock */
 #endif
 } LWLock;
 
 /*
- * Prior to PostgreSQL 9.4, every lightweight lock in the system was stored
- * in a single array. For convenience and for compatibility with past
- * releases, we still have a main array, but it's now also permissible to
- * store LWLocks elsewhere in the main shared memory segment or in a dynamic
- * shared memory segment. In the main array, we force the array stride to
- * be a power of 2, which saves a few cycles in indexing, but more importantly
- * also ensures that individual LWLocks don't cross cache line boundaries.
- * This reduces cache contention problems, especially on AMD Opterons.
- * (Of course, we have to also ensure that the array start address is suitably
- * aligned.)
+ * In most cases, it's desirable to force each tranche of LWLocks to be aligned
+ * on a cache line boundary and make the array stride a power of 2. This saves
+ * a few cycles in indexing, but more importantly ensures that individual
+ * LWLocks don't cross cache line boundaries. This reduces cache contention
+ * problems, especially on AMD Opterons. In some cases, it's useful to add
+ * even more padding so that each LWLock takes up an entire cache line; this is
+ * useful, for example, in the main LWLock array, where the overall number of
+ * locks is small but some are heavily contended.
+ *
+ * When allocating a tranche that contains data other than LWLocks, it is
+ * probably best to include a bare LWLock and then pad the resulting structure
+ * as necessary for performance. For an array that contains only LWLocks,
+ * LWLockMinimallyPadded can be used for cases where we just want to ensure
+ * that we don't cross cache line boundaries within a single lock, while
+ * LWLockPadded can be used for cases where we want each lock to be an entire
+ * cache line.
+ *
+ * An LWLockMinimallyPadded might contain more than the absolute minimum amount
+ * of padding required to keep a lock from crossing a cache line boundary,
+ * because an unpadded LWLock will normally fit into 16 bytes. We ignore that
+ * possibility when determining the minimal amount of padding. Older releases
+ * had larger LWLocks, so 32 really was the minimum, and packing them in
+ * tighter might hurt performance.
  *
- * On a 32-bit platforms a LWLock will these days fit into 16 bytes, but since
- * that didn't use to be the case and cramming more lwlocks into a cacheline
- * might be detrimental performancewise we still use 32 byte alignment
- * there. So, both on 32 and 64 bit platforms, it should fit into 32 bytes
- * unless slock_t is really big. We allow for that just in case.
+ * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
+ * because pg_atomic_uint32 is more than 4 bytes on some obscure platforms, we
+ * allow for the possibility that it might be 64. Even on those platforms,
+ * we probably won't exceed 32 bytes unless LOCK_DEBUG is defined.
  */
-#define LWLOCK_PADDED_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
+#define LWLOCK_PADDED_SIZE PG_CACHE_LINE_SIZE
+#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
 
+/* LWLock, padded to a full cache line size */
 typedef union LWLockPadded
 {
  LWLock lock;
  char pad[LWLOCK_PADDED_SIZE];
 } LWLockPadded;
+
+/* LWLock, minimally padded */
+typedef union LWLockMinimallyPadded
+{
+ LWLock lock;
+ char pad[LWLOCK_MINIMAL_SIZE];
+} LWLockMinimallyPadded;
+
 extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
-extern char *MainLWLockNames[];
+extern const char *const MainLWLockNames[];
+
+/* struct for storing named tranche information */
+typedef struct NamedLWLockTranche
+{
+ int trancheId;
+ char *trancheName;
+} NamedLWLockTranche;
+
+extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
+extern PGDLLIMPORT int NamedLWLockTrancheRequests;
 
 /* Names for fixed lwlocks */
 #include "storage/lwlocknames.h"
@@ -142,6 +150,7 @@ extern void LWLockRelease(LWLock *lock);
 extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
 extern void LWLockReleaseAll(void);
 extern bool LWLockHeldByMe(LWLock *lock);
+extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
 extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval,
 uint64 *newval);
 extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
@@ -150,22 +159,24 @@ extern Size LWLockShmemSize(void);
 extern void CreateLWLocks(void);
 extern void InitLWLockAccess(void);
 
+extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);
+
 /*
- * The traditional method for obtaining an lwlock for use by an extension is
- * to call RequestAddinLWLocks() during postmaster startup; this will reserve
- * space for the indicated number of locks in MainLWLockArray. Subsequently,
- * a lock can be allocated using LWLockAssign.
+ * Extensions (or core code) can obtain LWLocks by calling
+ * RequestNamedLWLockTranche() during postmaster startup. Subsequently,
+ * call GetNamedLWLockTranche() to obtain a pointer to an array containing
+ * the number of LWLocks requested.
  */
-extern void RequestAddinLWLocks(int n);
-extern LWLock *LWLockAssign(void);
+extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
+extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
 
 /*
  * There is another, more flexible method of obtaining lwlocks. First, call
  * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
  * a shared counter. Next, each individual process using the tranche should
- * call LWLockRegisterTranche() to associate that tranche ID with appropriate
- * metadata. Finally, LWLockInitialize should be called just once per lwlock,
- * passing the tranche ID as an argument.
+ * call LWLockRegisterTranche() to associate that tranche ID with a name.
+ * Finally, LWLockInitialize should be called just once per lwlock, passing
+ * the tranche ID as an argument.
  *
  * It may seem strange that each process using the tranche must register it
  * separately, but dynamic shared memory segments aren't guaranteed to be
@@ -173,19 +184,43 @@ extern LWLock *LWLockAssign(void);
  * registration in the main shared memory segment wouldn't work for that case.
  */
 extern int LWLockNewTrancheId(void);
-extern void LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche);
+extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
 extern void LWLockInitialize(LWLock *lock, int tranche_id);
 
 /*
- * We reserve a few predefined tranche IDs. A call to LWLockNewTrancheId
- * will never return a value less than LWTRANCHE_FIRST_USER_DEFINED.
+ * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
+ * we reserve additional tranche IDs for builtin tranches not included in
+ * the set of individual LWLocks. A call to LWLockNewTrancheId will never
+ * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
  */
 typedef enum BuiltinTrancheIds
 {
- LWTRANCHE_MAIN,
+ LWTRANCHE_CLOG_BUFFERS = NUM_INDIVIDUAL_LWLOCKS,
+ LWTRANCHE_COMMITTS_BUFFERS,
+ LWTRANCHE_SUBTRANS_BUFFERS,
+ LWTRANCHE_MXACTOFFSET_BUFFERS,
+ LWTRANCHE_MXACTMEMBER_BUFFERS,
+ LWTRANCHE_ASYNC_BUFFERS,
+ LWTRANCHE_OLDSERXID_BUFFERS,
  LWTRANCHE_WAL_INSERT,
+ LWTRANCHE_BUFFER_CONTENT,
+ LWTRANCHE_BUFFER_IO_IN_PROGRESS,
+ LWTRANCHE_REPLICATION_ORIGIN,
+ LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
+ LWTRANCHE_PROC,
+ LWTRANCHE_BUFFER_MAPPING,
+ LWTRANCHE_LOCK_MANAGER,
+ LWTRANCHE_PREDICATE_LOCK_MANAGER,
+ LWTRANCHE_PARALLEL_HASH_JOIN,
+ LWTRANCHE_PARALLEL_QUERY_DSA,
+ LWTRANCHE_SESSION_DSA,
+ LWTRANCHE_SESSION_RECORD_TABLE,
+ LWTRANCHE_SESSION_TYPMOD_TABLE,
+ LWTRANCHE_SHARED_TUPLESTORE,
+ LWTRANCHE_TBM,
+ LWTRANCHE_PARALLEL_APPEND,
  LWTRANCHE_FIRST_USER_DEFINED
-} BuiltinTrancheIds;
+} BuiltinTrancheIds;
 
 /*
  * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
@@ -194,4 +229,4 @@ typedef enum BuiltinTrancheIds
  */
 typedef LWLock *LWLockId;
 
-#endif /* LWLOCK_H */
+#endif /* LWLOCK_H */
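
The new named-tranche API in this diff (RequestNamedLWLockTranche / GetNamedLWLockTranche) replaces RequestAddinLWLocks / LWLockAssign, and the header comment only describes the call sequence in prose. The following is a minimal sketch of how an extension might use it; the module name "my_ext", the shmem startup hook wiring, and the reservation of a single lock are illustrative assumptions, while the lwlock.h functions themselves match the declarations in the diff above.

/*
 * Sketch of the named-tranche method, assuming a hypothetical extension
 * "my_ext" loaded via shared_preload_libraries.  Only the lwlock.h calls
 * (RequestNamedLWLockTranche, GetNamedLWLockTranche) come from the header
 * changed above; the surrounding scaffolding is illustrative.
 */
#include "postgres.h"
#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"

PG_MODULE_MAGIC;

static LWLock *my_ext_lock = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

static void
my_ext_shmem_startup(void)
{
	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* The tranche name must match the one passed at request time. */
	my_ext_lock = &(GetNamedLWLockTranche("my_ext"))->lock;
}

void
_PG_init(void)
{
	/* Named tranches can only be requested during postmaster startup. */
	if (!process_shared_preload_libraries_in_progress)
		return;

	RequestNamedLWLockTranche("my_ext", 1);	/* reserve one LWLock */

	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = my_ext_shmem_startup;
}

Once initialized, the lock is used like any built-in one: LWLockAcquire(my_ext_lock, LW_EXCLUSIVE), do the protected work, then LWLockRelease(my_ext_lock).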
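The second, more flexible method (LWLockNewTrancheId, LWLockRegisterTranche, LWLockInitialize) is typically used for locks embedded in some larger shared structure, for example one placed in a dynamic shared memory segment. The structure and helper names below are hypothetical; only the three lwlock.h calls come from the header.

#include "postgres.h"
#include "storage/lwlock.h"

/* Hypothetical structure living in shared (possibly dynamic) memory. */
typedef struct MySharedState
{
	int		lock_tranche_id;
	LWLock	lock;
} MySharedState;

/* Run once, in the process that creates and initializes the structure. */
static void
my_shared_state_create(MySharedState *state)
{
	state->lock_tranche_id = LWLockNewTrancheId();
	LWLockRegisterTranche(state->lock_tranche_id, "my_ext_dsm");
	LWLockInitialize(&state->lock, state->lock_tranche_id);
}

/* Run in every other process that attaches to the structure. */
static void
my_shared_state_attach(MySharedState *state)
{
	/* Each backend registers the tranche ID -> name mapping locally. */
	LWLockRegisterTranche(state->lock_tranche_id, "my_ext_dsm");
}

As the header comment explains, the per-process LWLockRegisterTranche call is needed because a dynamic shared memory segment is not guaranteed to be mapped at the same address in every backend, so the registration cannot simply be stored in the main shared memory segment.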