1 /*-------------------------------------------------------------------------
4 * routines to manage per-process shared memory data structure
6 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.102 2001/05/25 15:45:33 momjian Exp $
13 *-------------------------------------------------------------------------
16 * Each postgres backend gets one of these. We'll use it to
17 * clean up after the process should the process suddenly die.
21 * ProcSleep(), ProcWakeup(),
22 * ProcQueueAlloc() -- create a shm queue for sleeping processes
23 * ProcQueueInit() -- create a queue without allocing memory
25 * Locking and waiting for buffers can cause the backend to be
26 * put to sleep. Whoever releases the lock, etc. wakes the
27 * process up again (and gives it an error code so it knows
28 * whether it was awoken on an error condition).
32 * ProcReleaseLocks -- frees the locks associated with current transaction
34 * ProcKill -- destroys the shared memory state (and locks)
35 * associated with the process.
37 * 5/15/91 -- removed the buffer pool based lock chain in favor
38 * of a shared memory lock chain. The write-protection is
39 * more expensive if the lock chain is in the buffer pool.
40 * The only reason I kept the lock chain in the buffer pool
41 * in the first place was to allow the lock table to grow larger
42 * than available shared memory and that isn't going to work
43 * without a lot of unimplemented support anyway.
45 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
46 * allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
47 * shared among backends (we keep a few sets of semaphores around).
48 * This is so that we can support more backends. (system-wide semaphore
49 * sets run out pretty fast.) -ay 4/95
57 #include <sys/types.h>
59 #if defined(solaris_sparc) || defined(__CYGWIN__)
64 #include "miscadmin.h"
66 #if defined(__darwin__)
67 #include "port/darwin/sem.h"
70 /* In Ultrix and QNX, sem.h must be included after ipc.h */
75 #include "access/xact.h"
76 #include "storage/proc.h"
/* Milliseconds to wait on a lock before running the deadlock detector
 * (used by ProcSleep to arm the timer that fires HandleDeadLock). */
79 int DeadlockTimeout = 1000;
81 /* --------------------
82 * Spin lock for manipulating the shared process data structure:
83 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
84 * hack to get around reading and updating this structure in shared
85 * memory. -mer 17 July 1991
86 * --------------------
88 SPINLOCK ProcStructLock;
/* Per-process pointer to the shared PROC_HDR; attached in InitProcGlobal
 * (postmaster) and again in InitProcess (each backend). */
90 static PROC_HDR *ProcGlobal = NULL;
/* True while this backend is blocked in ProcSleep waiting for a lock;
 * consulted by LockWaitCancel to decide whether wait-state cleanup is needed. */
94 static bool waitingForLock = false;
/* Forward declarations for routines private to this file. */
96 static void ProcKill(void);
97 static void ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum);
98 static void ProcFreeSem(IpcSemaphoreId semId, int semNum);
99 static void ZeroProcSemaphore(PROC *proc);
100 static void ProcFreeAllSemaphores(void);
/* NOTE(review): this excerpt elides some original source lines; visible code
 * is kept verbatim below, only comments are added. */
105 * initializes the global process table. We put it here so that
106 * the postmaster can do this initialization. (ProcFreeAllSemaphores needs
107 * to read this table on exiting the postmaster. If we have the first
108 * backend do this, starting up and killing the postmaster without
109 * starting any backends will be a problem.)
111 * We also allocate all the per-process semaphores we will need to support
112 * the requested number of backends. We used to allocate semaphores
113 * only when backends were actually started up, but that is bad because
114 * it lets Postgres fail under load --- a lot of Unix systems are
115 * (mis)configured with small limits on the number of semaphores, and
116 * running out when trying to start another backend is a common failure.
117 * So, now we grab enough semaphores to support the desired max number
118 * of backends immediately at initialization --- if the sysadmin has set
119 * MaxBackends higher than his kernel will support, he'll find out sooner
/* maxBackends: requested maximum number of concurrent backends; drives how
 * many semaphore sets are pre-created below. */
123 InitProcGlobal(int maxBackends)
127 /* attach to the free list */
128 ProcGlobal = (PROC_HDR *)
129 ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
131 /* --------------------
132 * We're the first - initialize.
133 * XXX if found should ever be true, it is a sign of impending doom ...
134 * ought to complain if so?
135 * --------------------
/* Start with an empty PROC free list and mark every semaphore-set slot
 * as uninitialized (-1 id) with no semaphores allocated (zero bitmap). */
141 ProcGlobal->freeProcs = INVALID_OFFSET;
142 for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
144 ProcGlobal->procSemIds[i] = -1;
145 ProcGlobal->freeSemMap[i] = 0;
149 * Arrange to delete semas on exit --- set this up now so that we
150 * will clean up if pre-allocation fails. We use our own
151 * freeproc, rather than IpcSemaphoreCreate's removeOnExit option,
152 * because we don't want to fill up the on_shmem_exit list with a
153 * separate entry for each semaphore set.
155 on_shmem_exit(ProcFreeAllSemaphores, 0);
158 * Pre-create the semaphores for the first maxBackends processes.
160 Assert(maxBackends > 0 && maxBackends <= MAXBACKENDS);
/* ceil(maxBackends / PROC_NSEMS_PER_SET) sets are needed to cover everyone. */
162 for (i = 0; i < ((maxBackends - 1) / PROC_NSEMS_PER_SET + 1); i++)
164 IpcSemaphoreId semId;
166 semId = IpcSemaphoreCreate(PROC_NSEMS_PER_SET,
170 ProcGlobal->procSemIds[i] = semId;
175 /* ------------------------
176 * InitProc -- create a per-process data structure for this process
177 * used by the lock manager on semaphore queues.
178 * ------------------------
184 unsigned long location,
/* Serialize access to ProcGlobal while we claim/allocate our PROC slot. */
187 SpinAcquire(ProcStructLock);
189 /* attach to the ProcGlobal structure */
190 ProcGlobal = (PROC_HDR *)
191 ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
194 /* this should not happen. InitProcGlobal() is called before this. */
195 elog(STOP, "InitProcess: Proc Header uninitialized");
200 SpinRelease(ProcStructLock);
201 elog(ERROR, "ProcInit: you already exist");
204 /* try to get a proc struct from the free list first */
206 myOffset = ProcGlobal->freeProcs;
208 if (myOffset != INVALID_OFFSET)
/* Recycle a PROC left behind by a dead backend; pop it off the free list. */
210 MyProc = (PROC *) MAKE_PTR(myOffset);
211 ProcGlobal->freeProcs = MyProc->links.next;
217 * have to allocate one. We can't use the normal shmem index
218 * table mechanism because the proc structure is stored by PID
219 * instead of by a global name (need to look it up by PID when we
220 * cleanup dead processes).
223 MyProc = (PROC *) ShmemAlloc(sizeof(PROC));
/* Must drop the spinlock before elog so we don't die holding it. */
226 SpinRelease(ProcStructLock);
227 elog(FATAL, "cannot create new proc: out of memory");
232 * zero out the spin lock counts and set the sLocks field for
233 * ProcStructLock to 1 as we have acquired this spinlock above but
234 * didn't record it since we didn't have MyProc until now.
236 MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
237 MyProc->sLocks[ProcStructLock] = 1;
240 * Set up a wait-semaphore for the proc.
242 if (IsUnderPostmaster)
244 ProcGetNewSemIdAndNum(&MyProc->sem.semId, &MyProc->sem.semNum);
247 * we might be reusing a semaphore that belongs to a dead backend.
248 * So be careful and reinitialize its value here.
250 ZeroProcSemaphore(MyProc);
/* Standalone backend: no postmaster-managed semaphores to use. */
254 MyProc->sem.semId = -1;
255 MyProc->sem.semNum = -1;
/* Initialize the rest of the PROC fields to "not waiting, no transaction". */
258 SHMQueueElemInit(&(MyProc->links));
259 MyProc->errType = STATUS_OK;
260 MyProc->pid = MyProcPid;
261 MyProc->databaseId = MyDatabaseId;
262 MyProc->xid = InvalidTransactionId;
263 MyProc->xmin = InvalidTransactionId;
264 MyProc->waitLock = NULL;
265 MyProc->waitHolder = NULL;
266 SHMQueueInit(&(MyProc->procHolders));
271 SpinRelease(ProcStructLock);
274 * Install ourselves in the shmem index table. The name to use is
275 * determined by the OS-assigned process id. That allows the cleanup
276 * process to find us after any untimely exit.
278 location = MAKE_OFFSET(MyProc);
279 if ((!ShmemPIDLookup(MyProcPid, &location)) ||
280 (location != MAKE_OFFSET(MyProc)))
281 elog(STOP, "InitProcess: ShmemPID table broken");
284 * Arrange to clean up at backend exit.
286 on_shmem_exit(ProcKill, 0);
289 * Now that we have a PROC, we could try to acquire locks, so
290 * initialize the deadlock checker.
292 InitDeadLockChecking();
296 * Initialize the proc's wait-semaphore to count zero.
299 ZeroProcSemaphore(PROC *proc)
/* SETVAL forces the semaphore back to 0 even if a previous (dead) owner
 * left it nonzero; failure is reported on stderr, not via elog, because
 * this can run during exit cleanup. */
304 if (semctl(proc->sem.semId, proc->sem.semNum, SETVAL, semun) < 0)
306 fprintf(stderr, "ZeroProcSemaphore: semctl(id=%d,SETVAL) failed: %s\n",
307 proc->sem.semId, strerror(errno));
313 * Cancel any pending wait for lock, when aborting a transaction.
315 * Returns true if we had been waiting for a lock, else false.
317 * (Normally, this would only happen if we accept a cancel/die
318 * interrupt while waiting; but an elog(ERROR) while waiting is
319 * within the realm of possibility, too.)
324 /* Nothing to do if we weren't waiting for a lock */
/* Clear the flag first so a recursive cancel is a no-op. */
328 waitingForLock = false;
330 /* Turn off the deadlock timer, if it's still running (see ProcSleep) */
333 struct itimerval timeval,
/* A zeroed itimerval disables ITIMER_REAL. */
336 MemSet(&timeval, 0, sizeof(struct itimerval));
337 setitimer(ITIMER_REAL, &timeval, &dummy);
340 /* BeOS doesn't have setitimer, but has set_alarm */
341 set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM);
342 #endif /* __BEOS__ */
344 /* Unlink myself from the wait queue, if on it (might not be anymore!) */
346 if (MyProc->links.next != INVALID_OFFSET)
347 RemoveFromWaitQueue(MyProc);
351 * Reset the proc wait semaphore to zero. This is necessary in the
352 * scenario where someone else granted us the lock we wanted before we
353 * were able to remove ourselves from the wait-list. The semaphore
354 * will have been bumped to 1 by the would-be grantor, and since we
355 * are no longer going to wait on the sema, we have to force it back
356 * to zero. Otherwise, our next attempt to wait for a lock will fall
357 * through prematurely.
359 ZeroProcSemaphore(MyProc);
362 * Return true even if we were kicked off the lock before we were able
363 * to remove ourselves.
370 * ProcReleaseLocks() -- release locks associated with current transaction
371 * at transaction commit or abort
373 * At commit, we release only locks tagged with the current transaction's XID,
374 * leaving those marked with XID 0 (ie, session locks) undisturbed. At abort,
375 * we release all locks including XID 0, because we need to clean up after
376 * a failure. This logic will need extension if we ever support nested
379 * Note that user locks are not released in either case.
/* isCommit: true at commit (release only this XID's locks),
 * false at abort (the allxids flag below goes true => release all). */
382 ProcReleaseLocks(bool isCommit)
386 /* If waiting, get off wait queue (should only be needed after error) */
389 LockReleaseAll(DEFAULT_LOCKMETHOD, MyProc,
390 !isCommit, GetCurrentTransactionId());
395 * called by the postmaster to clean up the global tables after a
396 * backend exits. This also frees up the proc's wait semaphore.
401 SHMEM_OFFSET location;
/* Look up (and delete) the dead backend's shmem-index entry by its PID;
 * an invalid offset means there was nothing registered for this PID. */
404 location = ShmemPIDDestroy(pid);
405 if (location == INVALID_OFFSET)
407 proc = (PROC *) MAKE_PTR(location);
409 SpinAcquire(ProcStructLock);
/* Return the dead backend's semaphore to the free pool for reuse. */
411 ProcFreeSem(proc->sem.semId, proc->sem.semNum);
413 /* Add PROC struct to freelist so space can be recycled in future */
414 proc->links.next = ProcGlobal->freeProcs;
415 ProcGlobal->freeProcs = MAKE_OFFSET(proc);
417 SpinRelease(ProcStructLock);
423 * ProcKill() -- Destroy the per-proc data structure for
424 * this process. Release any of its held spin locks.
426 * This is done inside the backend process before it exits.
427 * ProcRemove, above, will be done by the postmaster afterwards.
434 /* Release any spinlocks I am holding */
435 ProcReleaseSpins(MyProc);
437 /* Get off any wait queue I might be on */
440 /* Remove from the standard lock table */
/* allxids = true and InvalidTransactionId: drop every lock this proc holds,
 * session locks included, since the process is going away. */
441 LockReleaseAll(DEFAULT_LOCKMETHOD, MyProc, true, InvalidTransactionId);
444 /* Remove from the user lock table */
445 LockReleaseAll(USER_LOCKMETHOD, MyProc, true, InvalidTransactionId);
452 * ProcQueue package: routines for putting processes to sleep
457 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
459 * Returns: a pointer to the queue or NULL
460 * Side Effects: Initializes the queue if we allocated one
/* name: shmem-index key for the queue; existing queues are simply attached. */
464 ProcQueueAlloc(char *name)
467 PROC_QUEUE *queue = (PROC_QUEUE *)
468 ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
/* Only the first attacher initializes the queue contents. */
473 ProcQueueInit(queue);
480 * ProcQueueInit -- initialize a shared memory process queue
/* Resets the queue's link chain to empty; no memory is allocated here. */
483 ProcQueueInit(PROC_QUEUE *queue)
485 SHMQueueInit(&(queue->links));
491 * ProcSleep -- put a process to sleep
493 * Caller must have set MyProc->heldLocks to reflect locks already held
494 * on the lockable object by this process (under all XIDs).
496 * Locktable's spinlock must be held at entry, and will be held
499 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
501 * ASSUME: that no one will fiddle with the queue until after
502 * we release the spin lock.
504 * NOTES: The process queue is now a priority queue for locking.
506 * P() on the semaphore should put us to sleep. The process
507 * semaphore is normally zero, so when we try to acquire it, we sleep.
510 ProcSleep(LOCKMETHODTABLE *lockMethodTable,
515 LOCKMETHODCTL *lockctl = lockMethodTable->ctl;
516 SPINLOCK spinlock = lockctl->masterLock;
517 PROC_QUEUE *waitQueue = &(lock->waitProcs);
/* Bitmask of lock modes this process already holds on the target object. */
518 int myHeldLocks = MyProc->heldLocks;
523 struct itimerval timeval,
527 bigtime_t time_interval;
532 * Determine where to add myself in the wait queue.
534 * Normally I should go at the end of the queue. However, if I already
535 * hold locks that conflict with the request of any previous waiter,
536 * put myself in the queue just in front of the first such waiter.
537 * This is not a necessary step, since deadlock detection would move
538 * me to before that waiter anyway; but it's relatively cheap to
539 * detect such a conflict immediately, and avoid delaying till
542 * Special case: if I find I should go in front of some waiter, check to
543 * see if I conflict with already-held locks or the requests before
544 * that waiter. If not, then just grant myself the requested lock
545 * immediately. This is the same as the test for immediate grant in
546 * LockAcquire, except we are only considering the part of the wait
547 * queue before my insertion point.
550 if (myHeldLocks != 0)
/* Bitmask of lock modes requested by waiters ahead of my insertion point. */
552 int aheadRequests = 0;
554 proc = (PROC *) MAKE_PTR(waitQueue->links.next);
555 for (i = 0; i < waitQueue->size; i++)
557 /* Must he wait for me? */
558 if (lockctl->conflictTab[proc->waitLockMode] & myHeldLocks)
560 /* Must I wait for him ? */
561 if (lockctl->conflictTab[lockmode] & proc->heldLocks)
563 /* Yes, can report deadlock failure immediately */
564 MyProc->errType = STATUS_ERROR;
567 /* I must go before this waiter. Check special case. */
568 if ((lockctl->conflictTab[lockmode] & aheadRequests) == 0 &&
569 LockCheckConflicts(lockMethodTable,
576 /* Skip the wait and just grant myself the lock. */
577 GrantLock(lock, holder, lockmode);
580 /* Break out of loop to put myself before him */
583 /* Nope, so advance to next waiter */
584 aheadRequests |= (1 << proc->waitLockMode);
585 proc = (PROC *) MAKE_PTR(proc->links.next);
589 * If we fall out of loop normally, proc points to waitQueue head,
590 * so we will insert at tail of queue as desired.
595 /* I hold no locks, so I can't push in front of anyone. */
596 proc = (PROC *) &(waitQueue->links);
600 * Insert self into queue, ahead of the given proc (or at tail of
603 SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
/* Record that this lock mode now has a waiter. */
606 lock->waitMask |= (1 << lockmode);
608 /* Set up wait information in PROC object, too */
609 MyProc->waitLock = lock;
610 MyProc->waitHolder = holder;
611 MyProc->waitLockMode = lockmode;
613 MyProc->errType = STATUS_OK;/* initialize result for success */
615 /* mark that we are waiting for a lock */
616 waitingForLock = true;
619 * Release the locktable's spin lock.
621 * NOTE: this may also cause us to exit critical-section state, possibly
622 * allowing a cancel/die interrupt to be accepted. This is OK because
623 * we have recorded the fact that we are waiting for a lock, and so
624 * LockWaitCancel will clean up if cancel/die happens.
626 SpinRelease(spinlock);
629 * Set timer so we can wake up after awhile and check for a deadlock.
630 * If a deadlock is detected, the handler releases the process's
631 * semaphore and sets MyProc->errType = STATUS_ERROR, allowing us to
632 * know that we must report failure rather than success.
634 * By delaying the check until we've waited for a bit, we can avoid
635 * running the rather expensive deadlock-check code in most cases.
637 * Need to zero out struct to set the interval and the microseconds
/* DeadlockTimeout is in milliseconds; split into whole seconds + usecs. */
641 MemSet(&timeval, 0, sizeof(struct itimerval));
642 timeval.it_value.tv_sec = DeadlockTimeout / 1000;
643 timeval.it_value.tv_usec = (DeadlockTimeout % 1000) * 1000;
644 if (setitimer(ITIMER_REAL, &timeval, &dummy))
645 elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
/* BeOS path: set_alarm takes the interval in microseconds. */
647 time_interval = DeadlockTimeout * 1000000; /* usecs */
648 if (set_alarm(time_interval, B_ONE_SHOT_RELATIVE_ALARM) < 0)
649 elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
653 * If someone wakes us between SpinRelease and IpcSemaphoreLock,
654 * IpcSemaphoreLock will not block. The wakeup is "saved" by the
655 * semaphore implementation. Note also that if HandleDeadLock is
656 * invoked but does not detect a deadlock, IpcSemaphoreLock() will
657 * continue to wait. There used to be a loop here, but it was useless
660 * We pass interruptOK = true, which eliminates a window in which
661 * cancel/die interrupts would be held off undesirably. This is a
662 * promise that we don't mind losing control to a cancel/die interrupt
663 * here. We don't, because we have no state-change work to do after
664 * being granted the lock (the grantor did it all).
666 IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum, true);
669 * Disable the timer, if it's still running
672 MemSet(&timeval, 0, sizeof(struct itimerval));
673 if (setitimer(ITIMER_REAL, &timeval, &dummy))
674 elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
676 if (set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM) < 0)
677 elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
681 * Now there is nothing for LockWaitCancel to do.
683 waitingForLock = false;
686 * Re-acquire the locktable's spin lock.
688 * We could accept a cancel/die interrupt here. That's OK because the
689 * lock is now registered as being held by this process.
691 SpinAcquire(spinlock);
694 * We don't have to do anything else, because the awaker did all the
695 * necessary update of the lock table and MyProc.
/* STATUS_OK on grant; STATUS_ERROR if HandleDeadLock found a deadlock. */
697 return MyProc->errType;
702 * ProcWakeup -- wake up a process by releasing its private semaphore.
704 * Also remove the process from the wait queue and set its links invalid.
705 * RETURN: the next process in the wait queue.
/* errType: STATUS_OK or STATUS_ERROR, delivered to the sleeper via
 * proc->errType so ProcSleep can report success or failure. */
708 ProcWakeup(PROC *proc, int errType)
712 /* assume that spinlock has been acquired */
714 /* Proc should be sleeping ... */
715 if (proc->links.prev == INVALID_OFFSET ||
716 proc->links.next == INVALID_OFFSET)
717 return (PROC *) NULL;
719 /* Save next process before we zap the list link */
720 retProc = (PROC *) MAKE_PTR(proc->links.next);
722 /* Remove process from wait queue */
723 SHMQueueDelete(&(proc->links));
724 (proc->waitLock->waitProcs.size)--;
726 /* Clean up process' state and pass it the ok/fail signal */
727 proc->waitLock = NULL;
728 proc->waitHolder = NULL;
729 proc->errType = errType;
/* V() on the sleeper's semaphore releases it from IpcSemaphoreLock. */
732 IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);
738 * ProcLockWakeup -- routine for waking up processes when a lock is
739 * released (or a prior waiter is aborted). Scan all waiters
740 * for lock, waken any that are no longer blocked.
743 ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
745 LOCKMETHODCTL *lockctl = lockMethodTable->ctl;
746 PROC_QUEUE *waitQueue = &(lock->waitProcs);
/* Snapshot the count up front; the queue shrinks as waiters are woken. */
747 int queue_size = waitQueue->size;
/* Bitmask of lock modes requested by waiters we could NOT wake; later
 * waiters must not jump ahead of a conflicting earlier request. */
749 int aheadRequests = 0;
751 Assert(queue_size >= 0);
756 proc = (PROC *) MAKE_PTR(waitQueue->links.next);
758 while (queue_size-- > 0)
760 LOCKMODE lockmode = proc->waitLockMode;
763 * Waken if (a) doesn't conflict with requests of earlier waiters,
764 * and (b) doesn't conflict with already-held locks.
766 if ((lockctl->conflictTab[lockmode] & aheadRequests) == 0 &&
767 LockCheckConflicts(lockMethodTable,
/* Grant the lock before waking, so the sleeper finds it already held. */
775 GrantLock(lock, proc->waitHolder, lockmode);
776 proc = ProcWakeup(proc, STATUS_OK);
779 * ProcWakeup removes proc from the lock's waiting process
780 * queue and returns the next proc in chain; don't use proc's
781 * next-link, because it's been cleared.
788 * Cannot wake this guy. Remember his request for later
791 aheadRequests |= (1 << lockmode);
792 proc = (PROC *) MAKE_PTR(proc->links.next);
796 Assert(waitQueue->size >= 0);
799 /* --------------------
800 * We only get to this routine if we got SIGALRM after DeadlockTimeout
801 * while waiting for a lock to be released by some other process. Look
802 * to see if there's a deadlock; if not, just return and continue waiting.
803 * If we have a real deadlock, remove ourselves from the lock's wait queue
804 * and signal an error to ProcSleep.
805 * --------------------
808 HandleDeadLock(SIGNAL_ARGS)
/* Signal handler: preserve errno so interrupted library calls see the
 * errno they expect on return. */
810 int save_errno = errno;
813 * Acquire locktable lock. Note that the SIGALRM interrupt had better
814 * not be enabled anywhere that this process itself holds the
815 * locktable lock, else this will wait forever. Also note that this
816 * calls SpinAcquire which creates a critical section, so that this
817 * routine cannot be interrupted by cancel/die interrupts.
822 * Check to see if we've been awoken by anyone in the interim.
824 * If we have we can return and resume our transaction -- happy day.
825 * Before we are awoken the process releasing the lock grants it to us
826 * so we know that we don't have to wait anymore.
828 * We check by looking to see if we've been unlinked from the wait queue.
829 * This is quicker than checking our semaphore's state, since no
830 * kernel call is needed, and it is safe because we hold the locktable
834 if (MyProc->links.prev == INVALID_OFFSET ||
835 MyProc->links.next == INVALID_OFFSET)
847 if (!DeadLockCheck(MyProc))
849 /* No deadlock, so keep waiting */
856 * Oops. We have a deadlock.
858 * Get this process out of wait state.
860 RemoveFromWaitQueue(MyProc);
863 * Set MyProc->errType to STATUS_ERROR so that ProcSleep will report
864 * an error after we return from this signal handler.
866 MyProc->errType = STATUS_ERROR;
869 * Unlock my semaphore so that the interrupted ProcSleep() call can
872 IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum);
875 * We're done here. Transaction abort caused by the error that
876 * ProcSleep will raise will cause any other locks we hold to be
877 * released, thus allowing other processes to wake up; we don't need
878 * to do that here. NOTE: an exception is that releasing locks we hold
879 * doesn't consider the possibility of waiters that were blocked
880 * behind us on the lock we just failed to get, and might now be
881 * wakable because we're not in front of them anymore. However,
882 * RemoveFromWaitQueue took care of waking up any such processes.
/* Release every spinlock recorded as held in proc->sLocks (used during
 * error/exit cleanup; see ProcKill). */
889 ProcReleaseSpins(PROC *proc)
898 for (i = 0; i < (int) MAX_SPINS; i++)
/* A spinlock should never be recorded as held more than once. */
902 Assert(proc->sLocks[i] == 1);
909 /*****************************************************************************
911 *****************************************************************************/
914 * ProcGetNewSemIdAndNum -
915 * scan the free semaphore bitmap and allocate a single semaphore from
/* Out parameters: *semId receives the owning set's id, *semNum the index
 * of the allocated semaphore within that set. elog(ERROR)s if none free. */
919 ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum)
922 IpcSemaphoreId *procSemIds = ProcGlobal->procSemIds;
923 int32 *freeSemMap = ProcGlobal->freeSemMap;
/* Bitmap value meaning "every semaphore in this set is allocated". */
924 int32 fullmask = (1 << PROC_NSEMS_PER_SET) - 1;
927 * we hold ProcStructLock when entering this routine. We scan through
928 * the bitmap to look for a free semaphore.
931 for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
936 if (freeSemMap[i] == fullmask)
937 continue; /* this set is fully allocated */
938 if (procSemIds[i] < 0)
939 continue; /* this set hasn't been initialized */
941 for (j = 0; j < PROC_NSEMS_PER_SET; j++)
943 if ((freeSemMap[i] & mask) == 0)
947 * a free semaphore found. Mark it as allocated.
949 freeSemMap[i] |= mask;
951 *semId = procSemIds[i];
959 /* if we reach here, all the semaphores are in use. */
960 elog(ERROR, "ProcGetNewSemIdAndNum: cannot allocate a free semaphore");
965 * free up our semaphore in the semaphore set.
968 ProcFreeSem(IpcSemaphoreId semId, int semNum)
/* Mask with semNum's bit cleared; AND-ing it into the bitmap marks the
 * semaphore free again. */
973 mask = ~(1 << semNum);
975 for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
977 if (ProcGlobal->procSemIds[i] == semId)
979 ProcGlobal->freeSemMap[i] &= mask;
/* Falling through the loop means semId was not found; report, don't abort. */
983 fprintf(stderr, "ProcFreeSem: no ProcGlobal entry for semId %d\n", semId);
987 * ProcFreeAllSemaphores -
988 * called at shmem_exit time, ie when exiting the postmaster or
989 * destroying shared state for a failed set of backends.
990 * Free up all the semaphores allocated to the lmgrs of the backends.
993 ProcFreeAllSemaphores(void)
997 for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
/* Only kill sets that were actually created (id >= 0; -1 = never made). */
999 if (ProcGlobal->procSemIds[i] >= 0)
1000 IpcSemaphoreKill(ProcGlobal->procSemIds[i]);