1 /*-------------------------------------------------------------------------
4 * POSTGRES low-level lock mechanism
6 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.75 2001/01/02 04:33:16 tgl Exp $
14 * Outside modules can create a lock table and acquire/release
15 * locks. A lock table is a shared memory hash table. When
16 * a process tries to acquire a lock of a type that conflicts
17 * with existing locks, it is put to sleep using the routines
18 * in storage/lmgr/proc.c.
20 * For the most part, this code should be invoked via lmgr.c
21 * or another lock-management module, not directly.
25 * LockAcquire(), LockRelease(), LockMethodTableInit(),
26 * LockMethodTableRename(), LockReleaseAll,
27 * LockResolveConflicts(), GrantLock()
29 *-------------------------------------------------------------------------
31 #include <sys/types.h>
37 #include "access/xact.h"
38 #include "miscadmin.h"
39 #include "storage/proc.h"
40 #include "utils/memutils.h"
41 #include "utils/ps_status.h"
43 static int WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
44 LOCK *lock, HOLDER *holder);
45 static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc,
47 static int LockGetMyHoldLocks(SHMEM_OFFSET lockOffset, PROC *proc);
49 static char *lock_types[] =
56 "ShareRowExclusiveLock",
/* Message reported (via elog) when a deadlock forces a lock wait to abort. */
61 static char *DeadLockMessage = "Deadlock detected.\n\tSee the lock(l) manual page for a possible cause.";
67 * The following configuration options are available for lock debugging:
69 * trace_locks -- give a bunch of output what's going on in this file
70 * trace_userlocks -- same but for user locks
71 * trace_lock_oidmin-- do not trace locks for tables below this oid
72 * (use to avoid output on system tables)
73 * trace_lock_table -- trace locks on this table (oid) unconditionally
74 * debug_deadlocks -- currently dumps locks at untimely occasions ;)
75 * Furthermore, but in storage/ipc/spin.c:
76 * trace_spinlocks -- trace spinlocks (pretty useless)
78 * Define LOCK_DEBUG at compile time to get all this enabled.
/* Do not trace locks on tables whose OID is below this (skips system catalogs). */
81 int Trace_lock_oidmin = BootstrapObjectIdData;
/* Trace activity on normal (DEFAULT_LOCKMETHOD) locks. */
82 bool Trace_locks = false;
/* Trace activity on user (USER_LOCKMETHOD) locks. */
83 bool Trace_userlocks = false;
/* If nonzero, unconditionally trace locks on this table OID. */
84 int Trace_lock_table = 0;
/* Dump lock state at untimely occasions, for deadlock debugging. */
85 bool Debug_deadlocks = false;
89 LOCK_DEBUG_ENABLED(const LOCK * lock)
92 (((LOCK_LOCKMETHOD(*lock) == DEFAULT_LOCKMETHOD && Trace_locks)
93 || (LOCK_LOCKMETHOD(*lock) == USER_LOCKMETHOD && Trace_userlocks))
94 && (lock->tag.relId >= Trace_lock_oidmin))
95 || (Trace_lock_table && (lock->tag.relId == Trace_lock_table));
100 LOCK_PRINT(const char * where, const LOCK * lock, LOCKMODE type)
102 if (LOCK_DEBUG_ENABLED(lock))
104 "%s: lock(%lx) tbl(%d) rel(%u) db(%u) obj(%u) mask(%x) "
105 "hold(%d,%d,%d,%d,%d,%d,%d)=%d "
106 "act(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
107 where, MAKE_OFFSET(lock),
108 lock->tag.lockmethod, lock->tag.relId, lock->tag.dbId,
109 lock->tag.objId.blkno, lock->mask,
110 lock->holders[1], lock->holders[2], lock->holders[3], lock->holders[4],
111 lock->holders[5], lock->holders[6], lock->holders[7], lock->nHolding,
112 lock->activeHolders[1], lock->activeHolders[2], lock->activeHolders[3],
113 lock->activeHolders[4], lock->activeHolders[5], lock->activeHolders[6],
114 lock->activeHolders[7], lock->nActive,
115 lock->waitProcs.size, lock_types[type]);
120 HOLDER_PRINT(const char * where, const HOLDER * holderP)
123 (((HOLDER_LOCKMETHOD(*holderP) == DEFAULT_LOCKMETHOD && Trace_locks)
124 || (HOLDER_LOCKMETHOD(*holderP) == USER_LOCKMETHOD && Trace_userlocks))
125 && (((LOCK *)MAKE_PTR(holderP->tag.lock))->tag.relId >= Trace_lock_oidmin))
126 || (Trace_lock_table && (((LOCK *)MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
129 "%s: holder(%lx) lock(%lx) tbl(%d) pid(%d) xid(%u) hold(%d,%d,%d,%d,%d,%d,%d)=%d",
130 where, MAKE_OFFSET(holderP), holderP->tag.lock,
131 HOLDER_LOCKMETHOD(*(holderP)),
132 holderP->tag.pid, holderP->tag.xid,
133 holderP->holders[1], holderP->holders[2], holderP->holders[3], holderP->holders[4],
134 holderP->holders[5], holderP->holders[6], holderP->holders[7], holderP->nHolding);
137 #else /* not LOCK_DEBUG */
139 #define LOCK_PRINT(where, lock, type)
140 #define HOLDER_PRINT(where, holderP)
142 #endif /* not LOCK_DEBUG */
/* Master spinlock protecting all lock-manager shared state. */
146 SPINLOCK LockMgrLock; /* in Shmem or created in
147 * CreateSpinlocks() */
149 /* This is to simplify/speed up some bit arithmetic */
/*
 * BITS_ON[i] has the bit for lockmode i set; BITS_OFF[i] has it cleared.
 * NOTE(review): presumably filled in by InitLocks() — the initializing loop
 * is only partially visible in this extraction; confirm against the full file.
 */
151 static LOCKMASK BITS_OFF[MAX_LOCKMODES];
152 static LOCKMASK BITS_ON[MAX_LOCKMODES];
/* Global kill switch, toggled via LockDisable(); checked by LockAcquire/LockRelease. */
158 static bool LockingIsDisabled;
160 /* -------------------
161 * map from lockmethod to the lock table structure
162 * -------------------
164 static LOCKMETHODTABLE *LockMethodTable[MAX_LOCK_METHODS];
/* Number of lockmethod IDs handed out so far (see LockMethodTableInit/Rename). */
166 static int NumLockMethods;
168 /* -------------------
169 * InitLocks -- Init the lock module. Create a private data
170 * structure for constructing conflict masks.
171 * -------------------
180 for (i = 0; i < MAX_LOCKMODES; i++, bit <<= 1)
187 /* -------------------
188 * LockDisable -- sets LockingIsDisabled flag to TRUE or FALSE.
192 LockDisable(bool status)
194 LockingIsDisabled = status;
198 * Boolean function to determine current locking status
202 LockingDisabled(void)
204 return LockingIsDisabled;
209 * LockMethodInit -- initialize the lock table's lock type
212 * Notes: just copying. Should only be called once.
/*
 * LockMethodInit -- copy the caller-supplied conflict table and priority
 * array into this lock table's shared LOCKMETHODCTL.
 *
 * Notes: just copying; should only be called once per lock table.
 * NOTE(review): the extraction dropped interior lines here (remaining
 * parameters, braces, the loop-index declaration, and possibly an
 * adjustment of 'numModes' before the copy loop, since conflictTab/prio
 * appear to be used 1-based elsewhere) — confirm against the full file.
 */
215 LockMethodInit(LOCKMETHODTABLE *lockMethodTable,
216 LOCKMASK *conflictsP,
222 lockMethodTable->ctl->numLockModes = numModes;
224 for (i = 0; i < numModes; i++, prioP++, conflictsP++)
226 lockMethodTable->ctl->conflictTab[i] = *conflictsP;
227 lockMethodTable->ctl->prio[i] = *prioP;
232 * LockMethodTableInit -- initialize a lock table structure
235 * (a) a lock table has four separate entries in the shmem index
236 * table. This is because every shared hash table and spinlock
237 * has its name stored in the shmem index at its creation. It
238 * is wasteful, in this case, but not much space is involved.
240 * NOTE: data structures allocated here are allocated permanently, using
241 * TopMemoryContext and shared memory. We don't ever release them anyway,
242 * and in normal multi-backend operation the lock table structures set up
243 * by the postmaster are inherited by each backend, so they must be in
247 LockMethodTableInit(char *tabName,
248 LOCKMASK *conflictsP,
253 LOCKMETHODTABLE *lockMethodTable;
258 long init_table_size,
261 if (numModes > MAX_LOCKMODES)
263 elog(NOTICE, "LockMethodTableInit: too many lock types %d greater than %d",
264 numModes, MAX_LOCKMODES);
265 return INVALID_LOCKMETHOD;
268 /* Compute init/max size to request for lock hashtables */
269 max_table_size = NLOCKENTS(maxBackends);
270 init_table_size = max_table_size / 10;
272 /* Allocate a string for the shmem index table lookups. */
273 /* This is just temp space in this routine, so palloc is OK. */
274 shmemName = (char *) palloc(strlen(tabName) + 32);
276 /* each lock table has a non-shared, permanent header */
277 lockMethodTable = (LOCKMETHODTABLE *)
278 MemoryContextAlloc(TopMemoryContext, sizeof(LOCKMETHODTABLE));
280 /* ------------------------
281 * find/acquire the spinlock for the table
282 * ------------------------
284 SpinAcquire(LockMgrLock);
286 /* -----------------------
287 * allocate a control structure from shared memory or attach to it
288 * if it already exists.
289 * -----------------------
291 sprintf(shmemName, "%s (ctl)", tabName);
292 lockMethodTable->ctl = (LOCKMETHODCTL *)
293 ShmemInitStruct(shmemName, sizeof(LOCKMETHODCTL), &found);
295 if (!lockMethodTable->ctl)
296 elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
298 /* -------------------
300 * -------------------
305 * we're first - initialize
310 MemSet(lockMethodTable->ctl, 0, sizeof(LOCKMETHODCTL));
311 lockMethodTable->ctl->masterLock = LockMgrLock;
312 lockMethodTable->ctl->lockmethod = NumLockMethods;
315 /* --------------------
316 * other modules refer to the lock table by a lockmethod ID
317 * --------------------
319 LockMethodTable[NumLockMethods] = lockMethodTable;
321 Assert(NumLockMethods <= MAX_LOCK_METHODS);
323 /* ----------------------
324 * allocate a hash table for LOCK structs. This is used
325 * to store per-locked-object information.
326 * ----------------------
328 info.keysize = SHMEM_LOCKTAB_KEYSIZE;
329 info.datasize = SHMEM_LOCKTAB_DATASIZE;
330 info.hash = tag_hash;
331 hash_flags = (HASH_ELEM | HASH_FUNCTION);
333 sprintf(shmemName, "%s (lock hash)", tabName);
334 lockMethodTable->lockHash = ShmemInitHash(shmemName,
340 if (!lockMethodTable->lockHash)
341 elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
342 Assert(lockMethodTable->lockHash->hash == tag_hash);
344 /* -------------------------
345 * allocate a hash table for HOLDER structs. This is used
346 * to store per-lock-holder information.
347 * -------------------------
349 info.keysize = SHMEM_HOLDERTAB_KEYSIZE;
350 info.datasize = SHMEM_HOLDERTAB_DATASIZE;
351 info.hash = tag_hash;
352 hash_flags = (HASH_ELEM | HASH_FUNCTION);
354 sprintf(shmemName, "%s (holder hash)", tabName);
355 lockMethodTable->holderHash = ShmemInitHash(shmemName,
361 if (!lockMethodTable->holderHash)
362 elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
364 /* init ctl data structures */
365 LockMethodInit(lockMethodTable, conflictsP, prioP, numModes);
367 SpinRelease(LockMgrLock);
371 return lockMethodTable->ctl->lockmethod;
375 * LockMethodTableRename -- allocate another lockmethod ID to the same
378 * NOTES: Both the lock module and the lock chain (lchain.c)
379 * module use table id's to distinguish between different
380 * kinds of locks. Short term and long term locks look
381 * the same to the lock table, but are handled differently
382 * by the lock chain manager. This function allows the
383 * client to use different lockmethods when acquiring/releasing
384 * short term and long term locks, yet store them all in one hashtable.
388 LockMethodTableRename(LOCKMETHOD lockmethod)
390 LOCKMETHOD newLockMethod;
392 if (NumLockMethods >= MAX_LOCK_METHODS)
393 return INVALID_LOCKMETHOD;
394 if (LockMethodTable[lockmethod] == INVALID_LOCKMETHOD)
395 return INVALID_LOCKMETHOD;
397 /* other modules refer to the lock table by a lockmethod ID */
398 newLockMethod = NumLockMethods;
401 LockMethodTable[newLockMethod] = LockMethodTable[lockmethod];
402 return newLockMethod;
406 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
407 * set lock if/when no conflicts.
409 * Returns: TRUE if parameters are correct, FALSE otherwise.
411 * Side Effects: The lock is always acquired. No way to abort
412 * a lock acquisition other than aborting the transaction.
413 * Lock is recorded in the lkchain.
416 * Note on User Locks:
418 * User locks are handled totally on the application side as
419 * long term cooperative locks which extend beyond the normal
420 * transaction boundaries. Their purpose is to indicate to an
421 * application that someone is `working' on an item. So it is
422 * possible to put an user lock on a tuple's oid, retrieve the
423 * tuple, work on it for an hour and then update it and remove
424 * the lock. While the lock is active other clients can still
425 * read and write the tuple but they can be aware that it has
426 * been locked at the application level by someone.
427 * User locks use lock tags made of an uint16 and an uint32, for
428 * example 0 and a tuple oid, or any other arbitrary pair of
429 * numbers following a convention established by the application.
430 * In this sense tags don't refer to tuples or database entities.
431 * User locks and normal locks are completely orthogonal and
432 * they don't interfere with each other, so it is possible
433 * to acquire a normal lock on an user-locked tuple or user-lock
434 * a tuple for which a normal write lock already exists.
435 * User locks are always non blocking, therefore they are never
436 * acquired if already held by another process. They must be
437 * released explicitly by the application but they are released
438 * automatically when a backend terminates.
439 * They are indicated by a lockmethod 2 which is an alias for the
440 * normal lock table, and are distinguished from normal locks
441 * by the following differences:
443 * normal lock user lock
446 * tag.dbId database oid database oid
447 * tag.relId rel oid or 0 0
448 * tag.objId block id lock id2
450 * tag.offnum 0 lock id1
451 * xid.pid backend pid backend pid
453 * persistence transaction user or backend
456 * The lockmode parameter can have the same values for normal locks
457 * although probably only WRITE_LOCK can have some practical use.
463 LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
464 TransactionId xid, LOCKMODE lockmode)
472 LOCKMETHODTABLE *lockMethodTable;
474 int myHolders[MAX_LOCKMODES];
478 if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
479 elog(DEBUG, "LockAcquire: user lock [%u] %s",
480 locktag->objId.blkno, lock_types[lockmode]);
483 /* ???????? This must be changed when short term locks will be used */
484 locktag->lockmethod = lockmethod;
486 Assert(lockmethod < NumLockMethods);
487 lockMethodTable = LockMethodTable[lockmethod];
488 if (!lockMethodTable)
490 elog(NOTICE, "LockAcquire: bad lock table %d", lockmethod);
494 if (LockingIsDisabled)
497 masterLock = lockMethodTable->ctl->masterLock;
499 SpinAcquire(masterLock);
502 * Find or create a lock with this tag
504 Assert(lockMethodTable->lockHash->hash == tag_hash);
505 lock = (LOCK *) hash_search(lockMethodTable->lockHash, (Pointer) locktag,
509 SpinRelease(masterLock);
510 elog(FATAL, "LockAcquire: lock table %d is corrupted", lockmethod);
514 /* --------------------
515 * if it's a new lock object, initialize it
516 * --------------------
523 MemSet((char *) lock->holders, 0, sizeof(int) * MAX_LOCKMODES);
524 MemSet((char *) lock->activeHolders, 0, sizeof(int) * MAX_LOCKMODES);
525 ProcQueueInit(&(lock->waitProcs));
526 LOCK_PRINT("LockAcquire: new", lock, lockmode);
530 LOCK_PRINT("LockAcquire: found", lock, lockmode);
531 Assert((lock->nHolding > 0) && (lock->holders[lockmode] >= 0));
532 Assert((lock->nActive > 0) && (lock->activeHolders[lockmode] >= 0));
533 Assert(lock->nActive <= lock->nHolding);
536 /* ------------------
537 * Create the hash key for the holder table.
540 MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding, needed */
541 holdertag.lock = MAKE_OFFSET(lock);
542 holdertag.pid = MyProcPid;
543 TransactionIdStore(xid, &holdertag.xid);
546 * Find or create a holder entry with this tag
548 holderTable = lockMethodTable->holderHash;
549 holder = (HOLDER *) hash_search(holderTable, (Pointer) &holdertag,
553 SpinRelease(masterLock);
554 elog(NOTICE, "LockAcquire: holder table corrupted");
559 * If new, initialize the new entry
563 holder->nHolding = 0;
564 MemSet((char *) holder->holders, 0, sizeof(int) * MAX_LOCKMODES);
565 ProcAddLock(&holder->queue);
566 HOLDER_PRINT("LockAcquire: new", holder);
570 HOLDER_PRINT("LockAcquire: found", holder);
571 Assert((holder->nHolding > 0) && (holder->holders[lockmode] >= 0));
572 Assert(holder->nHolding <= lock->nActive);
574 #ifdef CHECK_DEADLOCK_RISK
576 * Issue warning if we already hold a lower-level lock on this
577 * object and do not hold a lock of the requested level or higher.
578 * This indicates a deadlock-prone coding practice (eg, we'd have
579 * a deadlock if another backend were following the same code path
580 * at about the same time).
582 * This is not enabled by default, because it may generate log entries
583 * about user-level coding practices that are in fact safe in context.
584 * It can be enabled to help find system-level problems.
586 * XXX Doing numeric comparison on the lockmodes is a hack;
587 * it'd be better to use a table. For now, though, this works.
589 for (i = lockMethodTable->ctl->numLockModes; i > 0; i--)
591 if (holder->holders[i] > 0)
593 if (i >= (int) lockmode)
594 break; /* safe: we have a lock >= req level */
595 elog(DEBUG, "Deadlock risk: raising lock level"
596 " from %s to %s on object %u/%u/%u",
597 lock_types[i], lock_types[lockmode],
598 lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
602 #endif /* CHECK_DEADLOCK_RISK */
606 * lock->nHolding and lock->holders count the total number of holders
607 * either holding or waiting for the lock, so increment those immediately.
608 * The other counts don't increment till we get the lock.
612 lock->holders[lockmode]++;
613 Assert((lock->nHolding > 0) && (lock->holders[lockmode] > 0));
615 /* --------------------
616 * If I'm the only one holding any lock on this object, then there
617 * cannot be a conflict. The same is true if I already hold this lock.
618 * --------------------
620 if (holder->nHolding == lock->nActive || holder->holders[lockmode] != 0)
622 GrantLock(lock, holder, lockmode);
623 HOLDER_PRINT("LockAcquire: owning", holder);
624 SpinRelease(masterLock);
628 /* --------------------
629 * If this process (under any XID) is a holder of the lock,
630 * then there is no conflict, either.
631 * --------------------
633 LockCountMyLocks(holder->tag.lock, MyProc, myHolders);
634 if (myHolders[lockmode] != 0)
636 GrantLock(lock, holder, lockmode);
637 HOLDER_PRINT("LockAcquire: my other XID owning", holder);
638 SpinRelease(masterLock);
643 * If lock requested conflicts with locks requested by waiters...
645 if (lockMethodTable->ctl->conflictTab[lockmode] & lock->waitMask)
648 * If my process doesn't hold any locks that conflict with waiters
649 * then force to sleep, so that prior waiters get first chance.
651 for (i = 1; i <= lockMethodTable->ctl->numLockModes; i++)
653 if (myHolders[i] > 0 &&
654 lockMethodTable->ctl->conflictTab[i] & lock->waitMask)
655 break; /* yes, there is a conflict */
658 if (i > lockMethodTable->ctl->numLockModes)
660 HOLDER_PRINT("LockAcquire: another proc already waiting",
662 status = STATUS_FOUND;
665 status = LockResolveConflicts(lockmethod, lockmode,
670 status = LockResolveConflicts(lockmethod, lockmode,
674 if (status == STATUS_OK)
675 GrantLock(lock, holder, lockmode);
676 else if (status == STATUS_FOUND)
681 * User locks are non blocking. If we can't acquire a lock we must
682 * remove the holder entry and return FALSE without waiting.
684 if (lockmethod == USER_LOCKMETHOD)
686 if (holder->nHolding == 0)
688 SHMQueueDelete(&holder->queue);
689 holder = (HOLDER *) hash_search(holderTable,
691 HASH_REMOVE, &found);
692 if (!holder || !found)
693 elog(NOTICE, "LockAcquire: remove holder, table corrupted");
696 HOLDER_PRINT("LockAcquire: NHOLDING", holder);
698 lock->holders[lockmode]--;
699 LOCK_PRINT("LockAcquire: user lock failed", lock, lockmode);
700 Assert((lock->nHolding > 0) && (lock->holders[lockmode] >= 0));
701 Assert(lock->nActive <= lock->nHolding);
702 SpinRelease(masterLock);
705 #endif /* USER_LOCKS */
708 * Construct bitmask of locks this process holds on this object.
714 for (i = 1, tmpMask = 2;
715 i <= lockMethodTable->ctl->numLockModes;
718 if (myHolders[i] > 0)
721 MyProc->holdLock = holdLock;
725 * Sleep till someone wakes me up.
727 status = WaitOnLock(lockmethod, lockmode, lock, holder);
730 * Check the holder entry status, in case something in the ipc
731 * communication doesn't work correctly.
733 if (!((holder->nHolding > 0) && (holder->holders[lockmode] > 0)))
735 HOLDER_PRINT("LockAcquire: INCONSISTENT", holder);
736 LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
737 /* Should we retry ? */
738 SpinRelease(masterLock);
741 HOLDER_PRINT("LockAcquire: granted", holder);
742 LOCK_PRINT("LockAcquire: granted", lock, lockmode);
745 SpinRelease(masterLock);
747 return status == STATUS_OK;
750 /* ----------------------------
751 * LockResolveConflicts -- test for lock conflicts
754 * Here's what makes this complicated: one transaction's
755 * locks don't conflict with one another. When many processes
756 * hold locks, each has to subtract off the other's locks when
757 * determining whether or not any new lock acquired conflicts with
760 * The caller can optionally pass the process's total holders counts, if
761 * known. If NULL is passed then these values will be computed internally.
762 * ----------------------------
/*
 * LockResolveConflicts -- decide whether the requested lock can be granted
 * now or conflicts with locks held by *other* processes (one transaction's
 * own locks never conflict with one another).
 *
 * NOTE(review): the extraction dropped many interior lines (the rest of
 * the parameter list, braces, the 'result'/'bitmask'/'tmpMask'/'i'
 * declarations, the STATUS_OK/STATUS_FOUND return statements, and the
 * bitmask-accumulation statement) — the code below is not compilable
 * as-is; confirm against the full file.
 */
765 LockResolveConflicts(LOCKMETHOD lockmethod,
770 int *myHolders) /* myHolders[] array or NULL */
772 LOCKMETHODCTL *lockctl = LockMethodTable[lockmethod]->ctl;
773 int numLockModes = lockctl->numLockModes;
777 int localHolders[MAX_LOCKMODES];
779 Assert((holder->nHolding >= 0) && (holder->holders[lockmode] >= 0));
781 /* ----------------------------
782 * first check for global conflicts: If no locks conflict
783 * with mine, then I get the lock.
785 * Checking for conflict: lock->mask represents the types of
786 * currently held locks. conflictTable[lockmode] has a bit
787 * set for each type of lock that conflicts with mine. Bitwise
788 * compare tells if there is a conflict.
789 * ----------------------------
791 if (!(lockctl->conflictTab[lockmode] & lock->mask))
793 HOLDER_PRINT("LockResolveConflicts: no conflict", holder);
797 /* ------------------------
798 * Rats. Something conflicts. But it could still be my own
799 * lock. We have to construct a conflict mask
800 * that does not reflect our own locks. Locks held by the current
801 * process under another XID also count as "our own locks".
802 * ------------------------
804 if (myHolders == NULL)
806 /* Caller didn't do calculation of total holding for me */
807 LockCountMyLocks(holder->tag.lock, proc, localHolders);
808 myHolders = localHolders;
811 /* Compute mask of lock types held by other processes */
/*
 * NOTE(review): the statement that ORs tmpMask into the conflict bitmask
 * when other processes hold locks of type i was dropped by extraction.
 */
814 for (i = 1; i <= numLockModes; i++, tmpMask <<= 1)
816 if (lock->activeHolders[i] != myHolders[i])
820 /* ------------------------
821 * now check again for conflicts. 'bitmask' describes the types
822 * of locks held by other processes. If one of these
823 * conflicts with the kind of lock that I want, there is a
824 * conflict and I have to sleep.
825 * ------------------------
827 if (!(lockctl->conflictTab[lockmode] & bitmask))
829 /* no conflict. OK to get the lock */
830 HOLDER_PRINT("LockResolveConflicts: resolved", holder);
834 HOLDER_PRINT("LockResolveConflicts: conflicting", holder);
839 * LockCountMyLocks --- Count total number of locks held on a given lockable
840 * object by a given process (under any transaction ID).
842 * XXX This could be rather slow if the process holds a large number of locks.
843 * Perhaps it could be sped up if we kept yet a third hashtable of per-
844 * process lock information. However, for the normal case where a transaction
845 * doesn't hold a large number of locks, keeping such a table would probably
849 LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc, int *myHolders)
851 HOLDER *holder = NULL;
852 HOLDER *nextHolder = NULL;
853 SHM_QUEUE *lockQueue = &(proc->lockQueue);
854 SHMEM_OFFSET end = MAKE_OFFSET(lockQueue);
857 MemSet(myHolders, 0, MAX_LOCKMODES * sizeof(int));
859 if (SHMQueueEmpty(lockQueue))
862 SHMQueueFirst(lockQueue, (Pointer *) &holder, &holder->queue);
866 /* ---------------------------
867 * XXX Here we assume the shared memory queue is circular and
868 * that we know its internal structure. Should have some sort of
869 * macros to allow one to walk it. mer 20 July 1991
870 * ---------------------------
872 if (holder->queue.next == end)
875 SHMQueueFirst(&holder->queue,
876 (Pointer *) &nextHolder, &nextHolder->queue);
878 if (lockOffset == holder->tag.lock)
880 for (i = 1; i < MAX_LOCKMODES; i++)
882 myHolders[i] += holder->holders[i];
891 * LockGetMyHoldLocks -- compute bitmask of lock types held by a process
892 * for a given lockable object.
895 LockGetMyHoldLocks(SHMEM_OFFSET lockOffset, PROC *proc)
897 int myHolders[MAX_LOCKMODES];
902 LockCountMyLocks(lockOffset, proc, myHolders);
904 for (i = 1, tmpMask = 2;
908 if (myHolders[i] > 0)
915 * GrantLock -- update the lock and holder data structures to show
916 * the new lock has been granted.
919 GrantLock(LOCK *lock, HOLDER *holder, LOCKMODE lockmode)
922 lock->activeHolders[lockmode]++;
923 lock->mask |= BITS_ON[lockmode];
924 LOCK_PRINT("GrantLock", lock, lockmode);
925 Assert((lock->nActive > 0) && (lock->activeHolders[lockmode] > 0));
926 Assert(lock->nActive <= lock->nHolding);
927 holder->holders[lockmode]++;
929 Assert((holder->nHolding > 0) && (holder->holders[lockmode] > 0));
933 * WaitOnLock -- wait to acquire a lock
935 * The locktable spinlock must be held at entry.
/*
 * WaitOnLock -- sleep (via ProcSleep) until this lock is granted, showing
 * " waiting" in the ps status line while blocked.  On deadlock, backs out
 * the pending-hold counts and elog(ERROR)s; on success returns to the
 * caller with the lock granted.
 *
 * The locktable spinlock must be held at entry.
 *
 * NOTE(review): the extraction dropped interior lines here (the
 * old_status/new_status declarations, braces, most ProcSleep arguments,
 * and the final return) — the code below is not compilable as-is; confirm
 * against the full file.
 */
938 WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
939 LOCK *lock, HOLDER *holder)
941 LOCKMETHODTABLE *lockMethodTable = LockMethodTable[lockmethod];
945 Assert(lockmethod < NumLockMethods);
948 * the waitqueue is ordered by priority. I insert myself according to
949 * the priority of the lock I am acquiring.
951 * SYNC NOTE: I am assuming that the lock table spinlock is sufficient
952 * synchronization for this queue. That will not be true if/when
953 * people can be deleted from the queue by a SIGINT or something.
955 LOCK_PRINT("WaitOnLock: sleeping on lock", lock, lockmode);
/* Append " waiting" to the current ps display while we sleep. */
957 old_status = pstrdup(get_ps_display());
958 new_status = (char *) palloc(strlen(old_status) + 10);
959 strcpy(new_status, old_status);
960 strcat(new_status, " waiting");
961 set_ps_display(new_status);
963 if (ProcSleep(lockMethodTable->ctl,
968 /* -------------------
969 * We failed as a result of a deadlock, see HandleDeadLock().
970 * Decrement the lock nHolding and holders fields as
971 * we are no longer waiting on this lock. Removal of the holder and
972 * lock objects, if no longer needed, will happen in xact cleanup.
973 * -------------------
976 lock->holders[lockmode]--;
977 LOCK_PRINT("WaitOnLock: aborting on lock", lock, lockmode);
978 Assert((lock->nHolding >= 0) && (lock->holders[lockmode] >= 0));
979 Assert(lock->nActive <= lock->nHolding);
/* No one left waiting for this mode: clear its bit in the wait mask. */
980 if (lock->activeHolders[lockmode] == lock->holders[lockmode])
981 lock->waitMask &= BITS_OFF[lockmode];
982 SpinRelease(lockMethodTable->ctl->masterLock);
/*
 * NOTE(review): non-literal format string passed to elog; safe only
 * because DeadLockMessage (defined above) contains no '%'.
 */
983 elog(ERROR, DeadLockMessage);
987 if (lock->activeHolders[lockmode] == lock->holders[lockmode])
988 lock->waitMask &= BITS_OFF[lockmode];
/* Restore the original ps display now that we have been granted the lock. */
990 set_ps_display(old_status);
994 LOCK_PRINT("WaitOnLock: wakeup on lock", lock, lockmode);
999 * LockRelease -- look up 'locktag' in lock table 'lockmethod' and
1002 * Side Effects: if the lock no longer conflicts with the highest
1003 * priority waiting process, that process is granted the lock
1004 * and awoken. (We have to grant the lock here to avoid a
1005 * race between the waking process and any new process to
1006 * come along and request the lock.)
1009 LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
1010 TransactionId xid, LOCKMODE lockmode)
1013 SPINLOCK masterLock;
1015 LOCKMETHODTABLE *lockMethodTable;
1017 HOLDERTAG holdertag;
1019 bool wakeupNeeded = true;
1022 if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
1023 elog(DEBUG, "LockRelease: user lock tag [%u] %d", locktag->objId.blkno, lockmode);
1026 /* ???????? This must be changed when short term locks will be used */
1027 locktag->lockmethod = lockmethod;
1029 Assert(lockmethod < NumLockMethods);
1030 lockMethodTable = LockMethodTable[lockmethod];
1031 if (!lockMethodTable)
1033 elog(NOTICE, "lockMethodTable is null in LockRelease");
1037 if (LockingIsDisabled)
1040 masterLock = lockMethodTable->ctl->masterLock;
1041 SpinAcquire(masterLock);
1044 * Find a lock with this tag
1046 Assert(lockMethodTable->lockHash->hash == tag_hash);
1047 lock = (LOCK *) hash_search(lockMethodTable->lockHash, (Pointer) locktag,
1051 * let the caller print its own error message, too. Do not
1056 SpinRelease(masterLock);
1057 elog(NOTICE, "LockRelease: locktable corrupted");
1063 SpinRelease(masterLock);
1064 elog(NOTICE, "LockRelease: no such lock");
1067 LOCK_PRINT("LockRelease: found", lock, lockmode);
1068 Assert((lock->nHolding > 0) && (lock->holders[lockmode] >= 0));
1069 Assert((lock->nActive > 0) && (lock->activeHolders[lockmode] >= 0));
1070 Assert(lock->nActive <= lock->nHolding);
1073 * Find the holder entry for this holder.
1075 MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding, needed */
1076 holdertag.lock = MAKE_OFFSET(lock);
1077 holdertag.pid = MyProcPid;
1078 TransactionIdStore(xid, &holdertag.xid);
1080 holderTable = lockMethodTable->holderHash;
1081 holder = (HOLDER *) hash_search(holderTable, (Pointer) &holdertag,
1082 HASH_FIND_SAVE, &found);
1083 if (!holder || !found)
1085 SpinRelease(masterLock);
1087 if (!found && lockmethod == USER_LOCKMETHOD)
1088 elog(NOTICE, "LockRelease: no lock with this tag");
1091 elog(NOTICE, "LockRelease: holder table corrupted");
1094 HOLDER_PRINT("LockRelease: found", holder);
1095 Assert(holder->tag.lock == MAKE_OFFSET(lock));
1098 * Check that we are actually holding a lock of the type we want to
1101 if (!(holder->holders[lockmode] > 0))
1103 SpinRelease(masterLock);
1104 HOLDER_PRINT("LockRelease: WRONGTYPE", holder);
1105 elog(NOTICE, "LockRelease: you don't own a lock of type %s",
1106 lock_types[lockmode]);
1107 Assert(holder->holders[lockmode] >= 0);
1110 Assert(holder->nHolding > 0);
1113 * fix the general lock stats
1116 lock->holders[lockmode]--;
1118 lock->activeHolders[lockmode]--;
1120 if (!(lock->activeHolders[lockmode]))
1122 /* change the conflict mask. No more of this lock type. */
1123 lock->mask &= BITS_OFF[lockmode];
1127 /* --------------------------
1128 * If there are still active locks of the type I just released, no one
1129 * should be woken up. Whoever is asleep will still conflict
1130 * with the remaining locks.
1131 * --------------------------
1133 if (lock->activeHolders[lockmode])
1134 wakeupNeeded = false;
1139 * Above is not valid any more (due to MVCC lock modes). Actually
1140 * we should compare activeHolders[lockmode] with number of
1141 * waiters holding lock of this type and try to wakeup only if
1142 * these numbers are equal (and lock released conflicts with locks
1143 * requested by waiters). For the moment we only check the last
1146 if (lockMethodTable->ctl->conflictTab[lockmode] & lock->waitMask)
1147 wakeupNeeded = true;
1149 LOCK_PRINT("LockRelease: updated", lock, lockmode);
1150 Assert((lock->nHolding >= 0) && (lock->holders[lockmode] >= 0));
1151 Assert((lock->nActive >= 0) && (lock->activeHolders[lockmode] >= 0));
1152 Assert(lock->nActive <= lock->nHolding);
1154 if (!lock->nHolding)
1156 /* ------------------
1157 * if there's no one waiting in the queue,
1158 * we just released the last lock on this object.
1159 * Delete it from the lock table.
1160 * ------------------
1162 Assert(lockMethodTable->lockHash->hash == tag_hash);
1163 lock = (LOCK *) hash_search(lockMethodTable->lockHash,
1164 (Pointer) &(lock->tag),
1167 Assert(lock && found);
1168 wakeupNeeded = false;
1172 * Now fix the per-holder lock stats.
1174 holder->holders[lockmode]--;
1176 HOLDER_PRINT("LockRelease: updated", holder);
1177 Assert((holder->nHolding >= 0) && (holder->holders[lockmode] >= 0));
1180 * If this was my last hold on this lock, delete my entry in the holder
1183 if (!holder->nHolding)
1185 if (holder->queue.prev == INVALID_OFFSET)
1186 elog(NOTICE, "LockRelease: holder.prev == INVALID_OFFSET");
1187 if (holder->queue.next == INVALID_OFFSET)
1188 elog(NOTICE, "LockRelease: holder.next == INVALID_OFFSET");
1189 if (holder->queue.next != INVALID_OFFSET)
1190 SHMQueueDelete(&holder->queue);
1191 HOLDER_PRINT("LockRelease: deleting", holder);
1192 holder = (HOLDER *) hash_search(holderTable, (Pointer) &holder,
1193 HASH_REMOVE_SAVED, &found);
1194 if (!holder || !found)
1196 SpinRelease(masterLock);
1197 elog(NOTICE, "LockRelease: remove holder, table corrupted");
1203 ProcLockWakeup(lockmethod, lock);
1205 else if (LOCK_DEBUG_ENABLED(lock))
1206 elog(DEBUG, "LockRelease: no wakeup needed");
1209 SpinRelease(masterLock);
/*
 * NOTE(review): this is an elided numbered listing -- the embedded original
 * line numbers jump (e.g. 1233 -> 1241), so several local declarations
 * (lock, i, numLockModes, found), braces, and the continue/goto statements
 * following some if-conditions are not visible here.  Comments below describe
 * only what the visible lines demonstrate.
 */
1214 * LockReleaseAll -- Release all locks in a process's lock queue.
1216 * Well, not really *all* locks.
1218 * If 'allxids' is TRUE, all locks of the specified lock method are
1219 * released, regardless of transaction affiliation.
1221 * If 'allxids' is FALSE, all locks of the specified lock method and
1222 * specified XID are released.
1225 LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
1226 bool allxids, TransactionId xid)
1228 HOLDER *holder = NULL;
1229 HOLDER *nextHolder = NULL;
/* 'end' marks the queue header so we can detect wraparound of the circular list */
1230 SHM_QUEUE *lockQueue = &(proc->lockQueue);
1231 SHMEM_OFFSET end = MAKE_OFFSET(lockQueue);
1232 SPINLOCK masterLock;
1233 LOCKMETHODTABLE *lockMethodTable;
1241 if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
1242 elog(DEBUG, "LockReleaseAll: lockmethod=%d, pid=%d",
1243 lockmethod, MyProcPid);
/* Validate the lock method before touching shared state */
1246 Assert(lockmethod < NumLockMethods);
1247 lockMethodTable = LockMethodTable[lockmethod];
1248 if (!lockMethodTable)
1250 elog(NOTICE, "LockReleaseAll: bad lockmethod %d", lockmethod);
/* Nothing to do if this backend holds no locks at all */
1254 if (SHMQueueEmpty(lockQueue))
1257 numLockModes = lockMethodTable->ctl->numLockModes;
1258 masterLock = lockMethodTable->ctl->masterLock;
/* All lock-table manipulation below happens under the master spinlock */
1260 SpinAcquire(masterLock);
1262 SHMQueueFirst(lockQueue, (Pointer *) &holder, &holder->queue);
/* Per-holder flag: set when releasing may unblock a conflicting waiter */
1268 bool wakeupNeeded = false;
1270 /* ---------------------------
1271 * XXX Here we assume the shared memory queue is circular and
1272 * that we know its internal structure. Should have some sort of
1273 * macros to allow one to walk it. mer 20 July 1991
1274 * ---------------------------
/* Fetch the successor before we possibly delete the current holder entry */
1276 if (holder->queue.next == end)
1279 SHMQueueFirst(&holder->queue,
1280 (Pointer *) &nextHolder, &nextHolder->queue);
1282 Assert(holder->tag.pid == proc->pid);
1284 lock = (LOCK *) MAKE_PTR(holder->tag.lock);
1286 /* Ignore items that are not of the lockmethod to be removed */
1287 if (LOCK_LOCKMETHOD(*lock) != lockmethod)
1293 /* If not allxids, ignore items that are of the wrong xid */
1294 if (!allxids && xid != holder->tag.xid)
1300 HOLDER_PRINT("LockReleaseAll", holder);
1301 LOCK_PRINT("LockReleaseAll", lock, 0);
/* Sanity checks: holder's counts must be consistent with the shared lock */
1302 Assert(lock->nHolding > 0);
1303 Assert(lock->nActive > 0);
1304 Assert(lock->nActive <= lock->nHolding);
1305 Assert(holder->nHolding >= 0);
1306 Assert(holder->nHolding <= lock->nHolding);
1308 /* ------------------
1309 * fix the general lock stats
1310 * ------------------
/*
 * Partial-release path: other backends still hold this lock, so subtract
 * our per-mode counts from the shared lock's counters mode by mode.
 */
1312 if (lock->nHolding != holder->nHolding)
1314 for (i = 1; i <= numLockModes; i++)
1316 Assert(holder->holders[i] >= 0);
1317 lock->holders[i] -= holder->holders[i];
1318 lock->activeHolders[i] -= holder->holders[i];
1319 Assert((lock->holders[i] >= 0) \
1320 &&(lock->activeHolders[i] >= 0));
/* Clear the granted-mode bit once no one holds this mode any more */
1321 if (!lock->activeHolders[i])
1322 lock->mask &= BITS_OFF[i];
1325 * Read comments in LockRelease
/* Releasing a mode that conflicts with some waiter means we may wake someone */
1327 if (!wakeupNeeded && holder->holders[i] > 0 &&
1328 lockMethodTable->ctl->conflictTab[i] & lock->waitMask)
1329 wakeupNeeded = true;
1331 lock->nHolding -= holder->nHolding;
1332 lock->nActive -= holder->nHolding;
1333 Assert((lock->nHolding >= 0) && (lock->nActive >= 0));
1334 Assert(lock->nActive <= lock->nHolding);
/*
 * Full-release path (else branch; brace elided): we were the only holder,
 * so zero everything and let the lock be garbage-collected below.
 */
1339 * set nHolding to zero so that we can garbage collect the lock
1344 /* Fix the lock status, just for next LOCK_PRINT message. */
1345 for (i = 1; i <= numLockModes; i++)
1347 Assert(lock->holders[i] == lock->activeHolders[i]);
1348 lock->holders[i] = lock->activeHolders[i] = 0;
1351 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
1353 HOLDER_PRINT("LockReleaseAll: deleting", holder);
1356 * Remove the holder entry from the process' lock queue
1358 SHMQueueDelete(&holder->queue);
1361 * remove the holder entry from the hashtable
1363 holder = (HOLDER *) hash_search(lockMethodTable->holderHash,
1367 if (!holder || !found)
/* Corruption: release the spinlock before elog so we don't hold it on error */
1369 SpinRelease(masterLock);
1370 elog(NOTICE, "LockReleaseAll: holder table corrupted");
/* No holders left at all -> remove the LOCK object from the hash table */
1374 if (!lock->nHolding)
1376 /* --------------------
1377 * if there's no one waiting in the queue, we've just released
1379 * --------------------
1381 LOCK_PRINT("LockReleaseAll: deleting", lock, 0);
1382 Assert(lockMethodTable->lockHash->hash == tag_hash);
1383 lock = (LOCK *) hash_search(lockMethodTable->lockHash,
1384 (Pointer) &(lock->tag),
1385 HASH_REMOVE, &found);
1386 if ((!lock) || (!found))
1388 SpinRelease(masterLock);
1389 elog(NOTICE, "LockReleaseAll: cannot remove lock from HTAB");
/* Lock survives and a waiter may now be grantable: wake the wait queue */
1393 else if (wakeupNeeded)
1394 ProcLockWakeup(lockmethod, lock);
1397 holder = nextHolder;
1401 * Reinitialize the queue only if nothing has been left in.
1406 if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
1407 elog(DEBUG, "LockReleaseAll: reinitializing lockQueue");
1409 SHMQueueInit(lockQueue);
1412 SpinRelease(masterLock);
1414 if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
1415 elog(DEBUG, "LockReleaseAll: done");
/*
 * LockShmemSize -- estimate the shared-memory space needed by the lock
 * manager for 'maxBackends' backends: the proc header, per-backend PROC
 * structs, per-method control structs, and the two hash tables.
 * NOTE(review): return-type line and the final return statement are elided
 * in this extract; 'size' is presumably accumulated and returned -- confirm
 * against the full source.
 */
1422 LockShmemSize(int maxBackends)
1426 size += MAXALIGN(sizeof(PROC_HDR)); /* ProcGlobal */
1427 size += MAXALIGN(maxBackends * sizeof(PROC)); /* each MyProc */
1428 size += MAXALIGN(maxBackends * sizeof(LOCKMETHODCTL)); /* each
1429 * lockMethodTable->ctl */
1431 /* lockHash table */
1432 size += hash_estimate_size(NLOCKENTS(maxBackends),
1433 SHMEM_LOCKTAB_KEYSIZE,
1434 SHMEM_LOCKTAB_DATASIZE);
1436 /* holderHash table */
1437 size += hash_estimate_size(NLOCKENTS(maxBackends),
1438 SHMEM_HOLDERTAB_KEYSIZE,
1439 SHMEM_HOLDERTAB_DATASIZE);
/* Safety margin: the NLOCKENTS-based entry counts are only estimates */
1442 * Since the lockHash entry count above is only an estimate, add 10%
/*
 * NOTE(review): elided numbered listing -- braces, some declarations
 * (nprocs, lock, waitProc, lm, i, j, holdLock) and several statement tails
 * fall in the gaps of the embedded line numbering.  Comments describe only
 * what the visible lines show.
 */
1451 * DeadlockCheck -- Checks for deadlocks for a given process
1453 * We can't block on user locks, so no sense testing for deadlock
1454 * because there is no blocking, and no timer for the block.
1456 * This code takes a list of locks a process holds, and the lock that
1457 * the process is sleeping on, and tries to find if any of the processes
1458 * waiting on its locks hold the lock it is waiting for. If no deadlock
1459 * is found, it goes on to look at all the processes waiting on their locks.
1461 * We have already locked the master lock before being called.
1464 DeadLockCheck(PROC *thisProc, LOCK *findlock)
1466 HOLDER *holder = NULL;
1467 HOLDER *nextHolder = NULL;
1469 PROC_QUEUE *waitQueue;
1470 SHM_QUEUE *lockQueue = &(thisProc->lockQueue);
1471 SHMEM_OFFSET end = MAKE_OFFSET(lockQueue);
/* Only the default lock method participates in deadlock checking */
1472 LOCKMETHODCTL *lockctl = LockMethodTable[DEFAULT_LOCKMETHOD]->ctl;
/* first_run: true on the outermost (non-recursive) call, made by MyProc itself */
1476 bool first_run = (thisProc == MyProc);
/* Static across the recursion: processes already visited, to avoid cycles */
1478 static PROC *checked_procs[MAXBACKENDS];
1481 /* initialize at start of recursion */
1484 checked_procs[0] = thisProc;
1488 if (SHMQueueEmpty(lockQueue))
1491 SHMQueueFirst(lockQueue, (Pointer *) &holder, &holder->queue);
1495 /* ---------------------------
1496 * XXX Here we assume the shared memory queue is circular and
1497 * that we know its internal structure. Should have some sort of
1498 * macros to allow one to walk it. mer 20 July 1991
1499 * ---------------------------
/* Grab successor first; 'end' detects wraparound of the circular queue */
1501 if (holder->queue.next == end)
1504 SHMQueueFirst(&holder->queue,
1505 (Pointer *) &nextHolder, &nextHolder->queue);
1507 Assert(holder->tag.pid == thisProc->pid);
1509 lock = (LOCK *) MAKE_PTR(holder->tag.lock);
1511 /* Ignore user locks */
1512 if (lock->tag.lockmethod != DEFAULT_LOCKMETHOD)
1515 HOLDER_PRINT("DeadLockCheck", holder);
1516 LOCK_PRINT("DeadLockCheck", lock, 0);
1519 * waitLock is always in lockQueue of waiting proc, if !first_run
1520 * then upper caller will handle waitProcs queue of waitLock.
1522 if (thisProc->waitLock == lock && !first_run)
1526 * If we found proc holding findlock and sleeping on some my other
1527 * lock then we have to check does it block me or another waiters.
1529 if (lock == findlock && !first_run)
1533 Assert(holder->nHolding > 0);
/* Does any mode this proc holds conflict with what findlock's waiters want? */
1534 for (lm = 1; lm <= lockctl->numLockModes; lm++)
1536 if (holder->holders[lm] > 0 &&
1537 lockctl->conflictTab[lm] & findlock->waitMask)
1542 * Else - get the next lock from thisProc's lockQueue
/* Scan the wait queue of this lock, walking backward via links.prev */
1547 waitQueue = &(lock->waitProcs);
1548 waitProc = (PROC *) MAKE_PTR(waitQueue->links.prev);
1551 * NOTE: loop must count down because we want to examine each item
1552 * in the queue even if waitQueue->size decreases due to waking up
1553 * some of the processes.
1555 for (i = waitQueue->size; --i >= 0; )
1557 Assert(waitProc->waitLock == lock);
1558 if (waitProc == thisProc)
1560 /* This should only happen at first level */
1561 Assert(waitProc == MyProc);
1564 if (lock == findlock) /* first_run also true */
1567 * If me blocked by his holdlock...
1569 if (lockctl->conflictTab[MyProc->waitLockMode] & waitProc->holdLock)
1571 /* and he blocked by me -> deadlock */
1572 if (lockctl->conflictTab[waitProc->waitLockMode] & MyProc->holdLock)
1574 /* we shouldn't look at lockQueue of our blockers */
1579 * If he isn't blocked by me and we request
1580 * non-conflicting lock modes - no deadlock here because
1581 * he isn't blocked by me in any sense (explicitly or
1582 * implicitly). Note that we don't do like test if
1583 * !first_run (when thisProc is holder and non-waiter on
1584 * lock) and so we call DeadLockCheck below for every
1585 * waitProc in thisProc->lockQueue, even for waitProc-s
1586 * un-blocked by thisProc. Should we? This could save us
1589 if (!(lockctl->conflictTab[waitProc->waitLockMode] & MyProc->holdLock) &&
1590 !(lockctl->conflictTab[waitProc->waitLockMode] & (1 << MyProc->waitLockMode)))
1595 * Skip this waiter if already checked.
1597 for (j = 0; j < nprocs; j++)
1599 if (checked_procs[j] == waitProc)
1603 /* Recursively check this process's lockQueue. */
1604 Assert(nprocs < MAXBACKENDS);
1605 checked_procs[nprocs++] = waitProc;
/* Recursion: deadlock anywhere below propagates true all the way up */
1607 if (DeadLockCheck(waitProc, findlock))
1612 * Ok, but is waitProc waiting for me (thisProc) ?
1614 if (thisProc->waitLock == lock)
1617 holdLock = thisProc->holdLock;
1621 /* should we cache holdLock to speed this up? */
1622 holdLock = LockGetMyHoldLocks(holder->tag.lock, thisProc);
1623 Assert(holdLock != 0);
1625 if (lockctl->conflictTab[waitProc->waitLockMode] & holdLock)
1628 * Last attempt to avoid deadlock: try to wakeup myself.
/* If my own pending request is now grantable, grant it instead of aborting */
1632 if (LockResolveConflicts(DEFAULT_LOCKMETHOD,
1633 MyProc->waitLockMode,
1639 SetWaitingForLock(false);
1640 GrantLock(MyProc->waitLock,
1642 MyProc->waitLockMode);
1643 ProcWakeup(MyProc, NO_ERROR);
1651 * Hell! Is he blocked by any (other) holder ?
1653 if (LockResolveConflicts(DEFAULT_LOCKMETHOD,
1654 waitProc->waitLockMode,
1656 waitProc->waitHolder,
1661 * Blocked by others - no deadlock...
1663 LOCK_PRINT("DeadLockCheck: blocked by others",
1664 lock, waitProc->waitLockMode);
1669 * Well - wakeup this guy! This is the case of
1670 * implicit blocking: thisProc blocked someone who
1671 * blocked waitProc by the fact that he/someone is
1672 * already waiting for lock. We do this for
1675 GrantLock(lock, waitProc->waitHolder, waitProc->waitLockMode);
1676 waitProc = ProcWakeup(waitProc, NO_ERROR);
1678 * Use next-proc link returned by ProcWakeup, since this
1679 * proc's own links field is now cleared.
1685 waitProc = (PROC *) MAKE_PTR(waitProc->links.prev);
1689 holder = nextHolder;
1692 /* if we got here, no deadlock */
/*
 * DumpLocks -- debug aid (LOCK_DEBUG builds): print every holder entry in
 * the current backend's lockQueue via HOLDER_PRINT/LOCK_PRINT.
 * NOTE(review): the function's return-type/name lines are elided in this
 * extract (only the leading comment and body are visible) -- confirm the
 * signature against the full source.
 */
1698 * Dump all locks in the proc->lockQueue. Must have already acquired
1704 SHMEM_OFFSET location;
1706 SHM_QUEUE *lockQueue;
1707 HOLDER *holder = NULL;
1708 HOLDER *nextHolder = NULL;
1711 int lockmethod = DEFAULT_LOCKMETHOD;
1712 LOCKMETHODTABLE *lockMethodTable;
/* Find our own PROC struct in shared memory by pid */
1714 ShmemPIDLookup(MyProcPid, &location);
1715 if (location == INVALID_OFFSET)
1717 proc = (PROC *) MAKE_PTR(location);
1720 lockQueue = &proc->lockQueue;
1721 end = MAKE_OFFSET(lockQueue);
1723 Assert(lockmethod < NumLockMethods);
1724 lockMethodTable = LockMethodTable[lockmethod];
1725 if (!lockMethodTable)
/* If this backend is blocked, show the lock it sleeps on first */
1729 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
1731 if (SHMQueueEmpty(lockQueue))
1734 SHMQueueFirst(lockQueue, (Pointer *) &holder, &holder->queue);
1738 /* ---------------------------
1739 * XXX Here we assume the shared memory queue is circular and
1740 * that we know its internal structure. Should have some sort of
1741 * macros to allow one to walk it. mer 20 July 1991
1742 * ---------------------------
1744 if (holder->queue.next == end)
1747 SHMQueueFirst(&holder->queue,
1748 (Pointer *) &nextHolder, &nextHolder->queue);
1750 Assert(holder->tag.pid == proc->pid);
1752 lock = (LOCK *) MAKE_PTR(holder->tag.lock);
1754 HOLDER_PRINT("DumpLocks", holder);
1755 LOCK_PRINT("DumpLocks", lock, 0);
1757 holder = nextHolder;
/*
 * DumpAllLocks -- debug aid (LOCK_DEBUG builds): sequentially scan the
 * holder hash table of the default lock method and print every entry.
 * NOTE(review): the return-type/name lines are elided in this extract;
 * the visible ShmemPIDLookup(pid, ...) suggests a pid parameter -- confirm
 * against the full source.
 */
1762 * Dump all postgres locks. Must have already acquired the masterLock.
1767 SHMEM_OFFSET location;
1769 HOLDER *holder = NULL;
1772 int lockmethod = DEFAULT_LOCKMETHOD;
1773 LOCKMETHODTABLE *lockMethodTable;
1775 HASH_SEQ_STATUS status;
1778 ShmemPIDLookup(pid, &location);
1779 if (location == INVALID_OFFSET)
1781 proc = (PROC *) MAKE_PTR(location);
1785 Assert(lockmethod < NumLockMethods);
1786 lockMethodTable = LockMethodTable[lockmethod];
1787 if (!lockMethodTable)
1790 holderTable = lockMethodTable->holderHash;
/* If the target backend is blocked, show the lock it sleeps on */
1793 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
/* hash_seq_search returns (HOLDER *) TRUE as its end-of-table sentinel here */
1795 hash_seq_init(&status, holderTable);
1796 while ((holder = (HOLDER *) hash_seq_search(&status)) &&
1797 (holder != (HOLDER *) TRUE))
1799 HOLDER_PRINT("DumpAllLocks", holder);
1801 if (holder->tag.lock)
1803 lock = (LOCK *) MAKE_PTR(holder->tag.lock);
1804 LOCK_PRINT("DumpAllLocks", lock, 0);
1807 elog(DEBUG, "DumpAllLocks: holder->tag.lock = NULL");
1811 #endif /* LOCK_DEBUG */