From 4a6fab03f23ed15f33e607b321c339d0c96c6b58 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Wed, 18 Apr 2012 11:29:34 -0400
Subject: [PATCH] Finish rename of FastPathStrongLocks to
 FastPathStrongRelationLocks.

Commit 8e5ac74c1249820ca55481223a95b9124b4a4f95 tried to do this renaming,
but I relied on gcc to tell me where I needed to make changes, instead of
grep.

Noted by Jeff Davis.
---
 src/backend/storage/lmgr/README | 10 +++++-----
 src/backend/storage/lmgr/lock.c |  4 ++--
 src/include/storage/lock.h      |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/backend/storage/lmgr/README b/src/backend/storage/lmgr/README
index ee94725bb8..3c7c9e55e8 100644
--- a/src/backend/storage/lmgr/README
+++ b/src/backend/storage/lmgr/README
@@ -314,18 +314,18 @@ A performs a store, A and B both acquire an LWLock in either order, and B
 then performs a load on the same memory location, it is guaranteed to see
 A's store. In this case, each backend's fast-path lock queue is protected
 by an LWLock. A backend wishing to acquire a fast-path lock grabs this
-LWLock before examining FastPathStrongLocks to check for the presence of a
-conflicting strong lock. And the backend attempting to acquire a strong
+LWLock before examining FastPathStrongRelationLocks to check for the presence of
+a conflicting strong lock. And the backend attempting to acquire a strong
 lock, because it must transfer any matching weak locks taken via the fast-path
 mechanism to the shared lock table, will acquire every LWLock protecting
-a backend fast-path queue in turn. Thus, if we examine FastPathStrongLocks
+a backend fast-path queue in turn. So, if we examine FastPathStrongRelationLocks
 and see a zero, then either the value is truly zero, or if it is a stale
 value, the strong locker has yet to acquire the per-backend LWLock we now
 hold (or, indeed, even the first per-backend LWLock) and will notice any
 weak lock we take when it does.
 
-Fast-path VXID locks do not use the FastPathStrongLocks table. The first
-lock taken on a VXID is always the ExclusiveLock taken by its owner. Any
+Fast-path VXID locks do not use the FastPathStrongRelationLocks table. The
+first lock taken on a VXID is always the ExclusiveLock taken by its owner. Any
 subsequent lockers are share lockers waiting for the VXID to terminate.
 Indeed, the only reason VXID locks use the lock manager at all (rather than
 waiting for the VXID to terminate via some other method) is for deadlock
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 568de68beb..a216fb90ae 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -723,8 +723,8 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		/*
 		 * LWLockAcquire acts as a memory sequencing point, so it's safe
 		 * to assume that any strong locker whose increment to
-		 * FastPathStrongLocks->counts becomes visible after we test it has
-		 * yet to begin to transfer fast-path locks.
+		 * FastPathStrongRelationLocks->counts becomes visible after we test
+		 * it has yet to begin to transfer fast-path locks.
 		 */
 		LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
 		if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 92b6d9d1b4..faddef579c 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -412,7 +412,7 @@ typedef struct LOCALLOCK
 	int64		nLocks;			/* total number of times lock is held */
 	int			numLockOwners;	/* # of relevant ResourceOwners */
 	int			maxLockOwners;	/* allocated size of array */
-	bool		holdsStrongLockCount;	/* did we bump FastPathStrongLocks? */
+	bool		holdsStrongLockCount;	/* bumped FastPathStrongRelationLocks? */
 	LOCALLOCKOWNER *lockOwners; /* dynamically resizable array */
 } LOCALLOCK;
 
-- 
2.40.0
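
For readers following the README hunk above: the renamed FastPathStrongRelationLocks
table is the shared counter a weak (fast-path) locker checks, while holding its own
per-backend LWLock, before recording a relation lock in its local queue; a strong
locker bumps that counter first and only then sweeps every backend's queue under
those same LWLocks. The standalone C program below is a minimal sketch of that
ordering argument, not PostgreSQL source: the backend/slot/partition sizes, the
pthread mutexes standing in for per-backend LWLocks, the C11 atomic standing in for
the spinlock-protected counter, and all function names are assumptions made purely
for illustration.

/* fastpath_sketch.c -- illustrative only; build with: cc -pthread fastpath_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_BACKENDS   4          /* assumption: tiny fixed backend count */
#define FP_SLOTS     16         /* assumption: fast-path slots per backend */
#define N_PARTITIONS 1024       /* assumption: strong-lock count partitions */

typedef struct BackendQueue
{
	pthread_mutex_t backendLock;            /* stand-in for the per-backend LWLock */
	uint32_t        slot_relid[FP_SLOTS];   /* 0 means "slot unused" */
} BackendQueue;

static BackendQueue backends[N_BACKENDS];

/* Stand-in for FastPathStrongRelationLocks->count[]; an atomic keeps the sketch
 * race-free where PostgreSQL instead uses a spinlock plus the LWLock ordering. */
static atomic_int strong_count[N_PARTITIONS];

static uint32_t
partition(uint32_t relid)
{
	return relid % N_PARTITIONS;
}

/* Weak locker: check for an advertised strong lock while holding our own
 * backendLock; only then is it safe to record the lock in the local queue. */
static bool
weak_lock_fastpath(int me, uint32_t relid)
{
	bool		granted = false;

	pthread_mutex_lock(&backends[me].backendLock);
	if (atomic_load(&strong_count[partition(relid)]) == 0)
	{
		for (int i = 0; i < FP_SLOTS; i++)
		{
			if (backends[me].slot_relid[i] == 0)
			{
				backends[me].slot_relid[i] = relid;
				granted = true;
				break;
			}
		}
	}
	pthread_mutex_unlock(&backends[me].backendLock);
	return granted;     /* false => caller must go to the shared lock table */
}

/* Strong locker: advertise intent first, then visit every backend's queue.
 * A weak locker that read a stale zero still holds (or will later take) a
 * backendLock we have not yet acquired, so its lock cannot be missed. */
static void
strong_lock_prepare(uint32_t relid)
{
	atomic_fetch_add(&strong_count[partition(relid)], 1);

	for (int b = 0; b < N_BACKENDS; b++)
	{
		pthread_mutex_lock(&backends[b].backendLock);
		for (int i = 0; i < FP_SLOTS; i++)
		{
			if (backends[b].slot_relid[i] == relid)
			{
				backends[b].slot_relid[i] = 0;  /* "transfer" to shared table */
				printf("transferred weak lock on %u from backend %d\n",
					   (unsigned) relid, b);
			}
		}
		pthread_mutex_unlock(&backends[b].backendLock);
	}
}

int
main(void)
{
	for (int b = 0; b < N_BACKENDS; b++)
		pthread_mutex_init(&backends[b].backendLock, NULL);

	printf("weak lock on 42 via fast path: %s\n",
		   weak_lock_fastpath(0, 42) ? "granted" : "fallback");
	strong_lock_prepare(42);
	printf("weak lock on 42 after strong locker: %s\n",
		   weak_lock_fastpath(1, 42) ? "granted" : "fallback");
	return 0;
}

The point of the sketch is the recovery argument the README makes: if
weak_lock_fastpath() reads a stale zero, the strong locker has not yet taken that
backend's mutex, so it will still find and transfer the just-recorded slot when it
gets there.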