* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.224 2005/12/28 23:22:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.225 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
LogwrtRqst = xlogctl->LogwrtRqst;
LogwrtResult = xlogctl->LogwrtResult;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
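
Every xlog.c hunk above and below follows one pattern: reach the shared XLogCtl state through a volatile-qualified pointer (so the compiler cannot move the protected loads and stores across the lock calls), copy a few fields while holding info_lck, and release. A minimal, self-contained sketch of that shape, using C11 atomics as a toy stand-in for the real s_lock.h primitives (the struct and field names here are invented for illustration):

#include <stdatomic.h>

typedef atomic_flag slock_t;        /* toy stand-in for s_lock.h */

static inline void
SpinLockAcquire(volatile slock_t *lock)
{
    while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
        ;                       /* spin; the real S_LOCK also times out */
}

static inline void
SpinLockRelease(volatile slock_t *lock)
{
    atomic_flag_clear_explicit(lock, memory_order_release);
}

typedef struct
{
    slock_t     info_lck;       /* protects the fields below */
    long        write_rqst;     /* hypothetical stand-ins for the */
    long        write_result;   /* LogwrtRqst/LogwrtResult fields */
} ToyXLogCtl;

static ToyXLogCtl *ToyXLogCtlShared;

static void
copy_out_shared(long *rqst, long *result)
{
    /* use volatile pointer to prevent code rearrangement */
    volatile ToyXLogCtl *xlogctl = ToyXLogCtlShared;

    SpinLockAcquire(&xlogctl->info_lck);
    *rqst = xlogctl->write_rqst;        /* copy shared values into */
    *result = xlogctl->write_result;    /* backend-local variables */
    SpinLockRelease(&xlogctl->info_lck);
}
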
/*
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
/* advance global request to include new block(s) */
if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst))
xlogctl->LogwrtRqst.Write = WriteRqst;
/* update local result copy while I have the chance */
LogwrtResult = xlogctl->LogwrtResult;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
ProcLastRecEnd = RecPtr;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr))
xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr;
LogwrtResult = xlogctl->LogwrtResult;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
update_needed = false; /* Did the shared-request update */
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
xlogctl->LogwrtResult = LogwrtResult;
if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write))
xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush))
xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
Write->LogwrtResult = LogwrtResult;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write))
WriteRqstPtr = xlogctl->LogwrtRqst.Write;
LogwrtResult = xlogctl->LogwrtResult;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
/* done already? */
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr));
RedoRecPtr = xlogctl->Insert.RedoRecPtr;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
return RedoRecPtr;
}
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- SpinLockAcquire_NoHoldoff(&xlogctl->info_lck);
+ SpinLockAcquire(&xlogctl->info_lck);
RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
- SpinLockRelease_NoHoldoff(&xlogctl->info_lck);
+ SpinLockRelease(&xlogctl->info_lck);
}
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.200 2005/11/22 18:17:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.201 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Need to lock the buffer header too in order to change its tag.
*/
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
/*
* Somebody could have pinned or re-dirtied the buffer while we were
if (buf->refcount == 1 && !(buf->flags & BM_DIRTY))
break;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
BufTableDelete(&newTag);
LWLockRelease(BufMappingLock);
UnpinBuffer(buf, true, false /* evidently recently used */ );
buf->flags |= BM_TAG_VALID;
buf->usage_count = 0;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
if (oldFlags & BM_TAG_VALID)
BufTableDelete(&oldTag);
*/
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
- /* Re-lock the buffer header (NoHoldoff since we have an LWLock) */
- LockBufHdr_NoHoldoff(buf);
+ /* Re-lock the buffer header */
+ LockBufHdr(buf);
/* If it's changed while we were waiting for lock, do nothing */
if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
{
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
LWLockRelease(BufMappingLock);
return;
}
*/
if (buf->refcount != 0)
{
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
LWLockRelease(BufMappingLock);
/* safety check: should definitely not be our *own* pin */
if (PrivateRefCount[buf->buf_id] != 0)
buf->flags = 0;
buf->usage_count = 0;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
/*
* Remove the buffer from the lookup hashtable, if it was in there.
if (PrivateRefCount[b] == 0)
{
- /*
- * Use NoHoldoff here because we don't want the unlock to be a
- * potential place to honor a QueryCancel request. (The caller should
- * be holding off interrupts anyway.)
- */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
buf->refcount++;
result = (buf->flags & BM_VALID) != 0;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
}
else
{
if (PrivateRefCount[b] == 0)
buf->refcount++;
- /* NoHoldoff since we mustn't accept cancel interrupt here */
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
PrivateRefCount[b]++;
Assert(PrivateRefCount[b] > 0);
ResourceOwnerRememberBuffer(CurrentResourceOwner,
BufferDescriptorGetBuffer(buf));
- /* Now we can accept cancel */
- RESUME_INTERRUPTS();
}
/*
Assert(!LWLockHeldByMe(buf->content_lock));
Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
- /* NoHoldoff ensures we don't lose control before sending signal */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
/* Decrement the shared reference count */
Assert(buf->refcount > 0);
int wait_backend_pid = buf->wait_backend_pid;
buf->flags &= ~BM_PIN_COUNT_WAITER;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
ProcSendSignal(wait_backend_pid);
}
else
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
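
The UnpinBuffer hunk just above also shows the discipline that makes the holdoff-free macros safe: the waiter's PID is captured and the flag cleared while the header spinlock is held, but ProcSendSignal() runs only after UnlockBufHdr(), so nothing slow or fallible ever executes inside the locked region. A hedged toy restatement of that ordering (all names invented; C11 atomics standing in for s_lock.h):

#include <stdatomic.h>

typedef atomic_flag slock_t;

static inline void
SpinLockAcquire(volatile slock_t *lock)
{
    while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
        ;                       /* spin */
}

static inline void
SpinLockRelease(volatile slock_t *lock)
{
    atomic_flag_clear_explicit(lock, memory_order_release);
}

#define TOY_PIN_COUNT_WAITER 0x01

typedef struct
{
    slock_t     hdr_lock;
    int         flags;
    int         wait_backend_pid;
} ToyBufferDesc;

static void
toy_send_signal(int pid)
{
    (void) pid;                 /* would be ProcSendSignal(pid) in reality */
}

static void
toy_unpin_wakeup(volatile ToyBufferDesc *buf)
{
    SpinLockAcquire(&buf->hdr_lock);
    if (buf->flags & TOY_PIN_COUNT_WAITER)
    {
        /* capture what we need, then get off the spinlock */
        int         pid = buf->wait_backend_pid;

        buf->flags &= ~TOY_PIN_COUNT_WAITER;
        SpinLockRelease(&buf->hdr_lock);
        toy_send_signal(pid);   /* potentially a kernel call: do it unlocked */
    }
    else
        SpinLockRelease(&buf->hdr_lock);
}
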
/*
* If VACUUM is releasing an otherwise-unused buffer, send it to the
*/
/* To check if block content changes while flushing. - vadim 01/17/97 */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
buf->flags &= ~BM_JUST_DIRTIED;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
smgrwrite(reln,
buf->tag.blockNum,
{
HOLD_INTERRUPTS(); /* don't want to die() partway through... */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
/*
* Don't complain if flag bit not set; it could have been reset but we
buf->wait_backend_pid == MyProcPid)
buf->flags &= ~BM_PIN_COUNT_WAITER;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
ProcCancelWaitForSignal();
* that it's critical to set dirty bit *before* logging changes with
* XLogInsert() - see comments in SyncOneBuffer().
*/
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
}
else
elog(ERROR, "unrecognized buffer lock mode: %d", mode);
* that it's critical to set dirty bit *before* logging changes with
* XLogInsert() - see comments in SyncOneBuffer().
*/
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
return true;
}
{
/* Try to acquire lock */
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- LockBufHdr_NoHoldoff(bufHdr);
+ LockBufHdr(bufHdr);
Assert(bufHdr->refcount > 0);
if (bufHdr->refcount == 1)
{
/* Successfully acquired exclusive lock with pincount 1 */
- UnlockBufHdr_NoHoldoff(bufHdr);
+ UnlockBufHdr(bufHdr);
return;
}
/* Failed, so mark myself as waiting for pincount 1 */
if (bufHdr->flags & BM_PIN_COUNT_WAITER)
{
- UnlockBufHdr_NoHoldoff(bufHdr);
+ UnlockBufHdr(bufHdr);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
elog(ERROR, "multiple backends attempting to wait for pincount 1");
}
bufHdr->wait_backend_pid = MyProcPid;
bufHdr->flags |= BM_PIN_COUNT_WAITER;
PinCountWaitBuf = bufHdr;
- UnlockBufHdr_NoHoldoff(bufHdr);
+ UnlockBufHdr(bufHdr);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* Wait to be signaled by UnpinBuffer() */
ProcWaitForSignal();
*/
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
- /* NoHoldoff is OK since we now have an LWLock */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
if (!(buf->flags & BM_IO_IN_PROGRESS))
break;
* an error (see AbortBufferIO). If that's the case, we must wait for
* him to get unwedged.
*/
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
LWLockRelease(buf->io_in_progress_lock);
WaitIO(buf);
}
if (forInput ? (buf->flags & BM_VALID) : !(buf->flags & BM_DIRTY))
{
/* someone else already did the I/O */
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
LWLockRelease(buf->io_in_progress_lock);
return false;
}
buf->flags |= BM_IO_IN_PROGRESS;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
InProgressBuf = buf;
IsForInput = forInput;
{
Assert(buf == InProgressBuf);
- /* NoHoldoff is OK since we must have an LWLock */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
Assert(buf->flags & BM_IO_IN_PROGRESS);
buf->flags &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
buf->flags &= ~BM_DIRTY;
buf->flags |= set_flag_bits;
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
InProgressBuf = NULL;
*/
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
- /* NoHoldoff is OK since we now have an LWLock */
- LockBufHdr_NoHoldoff(buf);
+ LockBufHdr(buf);
Assert(buf->flags & BM_IO_IN_PROGRESS);
if (IsForInput)
{
Assert(!(buf->flags & BM_DIRTY));
/* We'd better not think buffer is valid yet */
Assert(!(buf->flags & BM_VALID));
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
}
else
{
sv_flags = buf->flags;
Assert(sv_flags & BM_DIRTY);
- UnlockBufHdr_NoHoldoff(buf);
+ UnlockBufHdr(buf);
/* Issue notice if this is not the first failure... */
if (sv_flags & BM_IO_ERROR)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.88 2005/11/22 18:17:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.89 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/transam.h"
+#include "miscadmin.h"
#include "storage/pg_shmem.h"
#include "storage/spin.h"
#include "utils/tqual.h"
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.36 2005/12/11 21:02:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.37 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#include "access/clog.h"
#include "access/multixact.h"
#include "access/subtrans.h"
+#include "miscadmin.h"
#include "storage/lwlock.h"
#include "storage/proc.h"
#include "storage/spin.h"
bool mustwait;
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire_NoHoldoff(&lock->mutex);
+ SpinLockAcquire(&lock->mutex);
/* If retrying, allow LWLockRelease to release waiters again */
if (retry)
* memory initialization.
*/
if (proc == NULL)
- elog(FATAL, "cannot wait without a PGPROC structure");
+ elog(PANIC, "cannot wait without a PGPROC structure");
proc->lwWaiting = true;
proc->lwExclusive = (mode == LW_EXCLUSIVE);
lock->tail = proc;
/* Can release the mutex now */
- SpinLockRelease_NoHoldoff(&lock->mutex);
+ SpinLockRelease(&lock->mutex);
/*
* Wait until awakened.
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease_NoHoldoff(&lock->mutex);
+ SpinLockRelease(&lock->mutex);
/* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = lockid;
HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire_NoHoldoff(&lock->mutex);
+ SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease_NoHoldoff(&lock->mutex);
+ SpinLockRelease(&lock->mutex);
if (mustwait)
{
held_lwlocks[i] = held_lwlocks[i + 1];
/* Acquire mutex. Time spent holding mutex should be short! */
- SpinLockAcquire_NoHoldoff(&lock->mutex);
+ SpinLockAcquire(&lock->mutex);
/* Release my hold on lock */
if (lock->exclusive > 0)
}
/* We are done updating shared state of the lock itself. */
- SpinLockRelease_NoHoldoff(&lock->mutex);
+ SpinLockRelease(&lock->mutex);
/*
* Awaken any waiters I removed from the queue.
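
The lwlock.c hunks are the other half of the bargain: LWLockAcquire already does HOLD_INTERRUPTS() before ever touching lock->mutex, so the holdoff that the old SpinLockAcquire supplied was redundant there. (The FATAL-to-PANIC change above looks like a consequence of the same rule: erroring out while lock->mutex is held would leave the spinlock stuck, so the failure is promoted to PANIC. That reading is an editorial inference, not stated in the patch.) A toy sketch of the layering, with stand-in definitions throughout:

#include <stdatomic.h>

static volatile int InterruptHoldoffCount = 0;  /* stand-in for miscadmin.h */

#define HOLD_INTERRUPTS()   (InterruptHoldoffCount++)
#define RESUME_INTERRUPTS() (InterruptHoldoffCount--)

typedef atomic_flag slock_t;

static inline void
SpinLockAcquire(volatile slock_t *lock)
{
    while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
        ;                       /* spin */
}

static inline void
SpinLockRelease(volatile slock_t *lock)
{
    atomic_flag_clear_explicit(lock, memory_order_release);
}

typedef struct
{
    slock_t     mutex;
    int         exclusive;
} ToyLWLock;

static void
toy_lwlock_acquire(volatile ToyLWLock *lock)
{
    /* the holdoff covers the whole LWLock operation, not just the mutex */
    HOLD_INTERRUPTS();

    SpinLockAcquire(&lock->mutex);      /* now plain S_LOCK, no holdoff */
    lock->exclusive++;                  /* uncontended grab, for brevity */
    SpinLockRelease(&lock->mutex);

    /* interrupts stay held off until toy_lwlock_release() */
}

static void
toy_lwlock_release(volatile ToyLWLock *lock)
{
    SpinLockAcquire(&lock->mutex);
    lock->exclusive--;
    SpinLockRelease(&lock->mutex);

    RESUME_INTERRUPTS();
}
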
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.83 2005/11/22 18:17:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.84 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#define FREENEXT_NOT_IN_LIST (-2)
/*
- * Macros for acquiring/releasing a buffer header's spinlock. The
- * NoHoldoff cases may be used when we know that we hold some LWLock
- * and therefore interrupts are already held off. Do not apply these
- * to local buffers!
+ * Macros for acquiring/releasing a shared buffer header's spinlock.
+ * Do not apply these to local buffers!
*
* Note: as a general coding rule, if you are using these then you probably
- * want to be using a volatile-qualified pointer to the buffer header, to
+ * need to be using a volatile-qualified pointer to the buffer header, to
* ensure that the compiler doesn't rearrange accesses to the header to
* occur before or after the spinlock is acquired/released.
*/
-#define LockBufHdr(bufHdr) \
- SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
-#define UnlockBufHdr(bufHdr) \
- SpinLockRelease(&(bufHdr)->buf_hdr_lock)
-#define LockBufHdr_NoHoldoff(bufHdr) \
- SpinLockAcquire_NoHoldoff(&(bufHdr)->buf_hdr_lock)
-#define UnlockBufHdr_NoHoldoff(bufHdr) \
- SpinLockRelease_NoHoldoff(&(bufHdr)->buf_hdr_lock)
+#define LockBufHdr(bufHdr) SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
+#define UnlockBufHdr(bufHdr) SpinLockRelease(&(bufHdr)->buf_hdr_lock)
/* in buf_init.c */
* Acquire a spinlock, waiting if necessary.
* Time out and abort() if unable to acquire the lock in a
* "reasonable" amount of time --- typically ~ 1 minute.
- * Cancel/die interrupts are held off until the lock is released.
*
* void SpinLockRelease(volatile slock_t *lock)
* Unlock a previously acquired lock.
- * Release the cancel/die interrupt holdoff.
- *
- * void SpinLockAcquire_NoHoldoff(volatile slock_t *lock)
- * void SpinLockRelease_NoHoldoff(volatile slock_t *lock)
- * Same as above, except no interrupt holdoff processing is done.
- * This pair of macros may be used when there is a surrounding
- * interrupt holdoff.
*
* bool SpinLockFree(slock_t *lock)
* Tests if the lock is free. Returns TRUE if free, FALSE if locked.
* protects shared data with a spinlock MUST reference that shared
* data through a volatile pointer.
*
+ * Keep in mind the coding rule that spinlocks must not be held for more
+ * than a few instructions. In particular, we assume it is not possible
+ * for a CHECK_FOR_INTERRUPTS() to occur while holding a spinlock, and so
+ * it is not necessary to do HOLD/RESUME_INTERRUPTS() in these macros.
+ *
* These macros are implemented in terms of hardware-dependent macros
- * supplied by s_lock.h.
+ * supplied by s_lock.h. There is not currently any extra functionality
+ * added by this header, but there has been in the past and may someday
+ * be again.
*
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.26 2005/10/13 06:17:34 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.27 2005/12/29 18:08:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#define SPIN_H
#include "storage/s_lock.h"
-#include "miscadmin.h"
#define SpinLockInit(lock) S_INIT_LOCK(lock)
-#define SpinLockAcquire(lock) \
- do { \
- HOLD_INTERRUPTS(); \
- S_LOCK(lock); \
- } while (0)
-
-#define SpinLockAcquire_NoHoldoff(lock) S_LOCK(lock)
-
-#define SpinLockRelease(lock) \
- do { \
- S_UNLOCK(lock); \
- RESUME_INTERRUPTS(); \
- } while (0)
+#define SpinLockAcquire(lock) S_LOCK(lock)
-#define SpinLockRelease_NoHoldoff(lock) S_UNLOCK(lock)
+#define SpinLockRelease(lock) S_UNLOCK(lock)
#define SpinLockFree(lock) S_LOCK_FREE(lock)
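
Finally, a self-contained smoke test of the slimmed-down API surface: two threads bump one counter while observing the rules the new spin.h comment states, namely a volatile-qualified pointer to the protected struct, only a few straight-line instructions under the lock, and nothing inside the locked region that could error out or service an interrupt. C11 atomics and pthreads stand in for the platform s_lock.h; every name is illustrative. Build with cc -pthread.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

typedef atomic_flag slock_t;

#define SpinLockInit(lock)  atomic_flag_clear(lock)

static inline void
SpinLockAcquire(volatile slock_t *lock)
{
    while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
        ;                       /* spin */
}

static inline void
SpinLockRelease(volatile slock_t *lock)
{
    atomic_flag_clear_explicit(lock, memory_order_release);
}

typedef struct
{
    slock_t     lck;
    long        counter;
} ToyShared;

static ToyShared shared_state;

static void *
worker(void *arg)
{
    volatile ToyShared *s = &shared_state;  /* volatile pointer, per the rule */

    (void) arg;
    for (int i = 0; i < 100000; i++)
    {
        SpinLockAcquire(&s->lck);
        s->counter++;           /* short, cannot error, no interrupt checks */
        SpinLockRelease(&s->lck);
    }
    return NULL;
}

int
main(void)
{
    pthread_t   t1, t2;

    SpinLockInit(&shared_state.lck);
    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("counter = %ld (expect 200000)\n", shared_state.counter);
    return 0;
}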