* void S_INIT_LOCK(slock_t *lock)
* Initialize a spinlock (to the unlocked state).
*
- * void S_LOCK(slock_t *lock)
+ * int S_LOCK(slock_t *lock)
* Acquire a spinlock, waiting if necessary.
* Time out and abort() if unable to acquire the lock in a
* "reasonable" amount of time --- typically ~ 1 minute.
+ * Should return number of "delays"; see s_lock.c
*
* void S_UNLOCK(slock_t *lock)
* Unlock a previously acquired lock.
* Note to implementors: there are default implementations for all these
* macros at the bottom of the file.  Check if your platform can use
* these or needs to override them.
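*
* A minimal usage sketch (illustrative only; "mylock" is a hypothetical
* lock variable, not something declared by this header):
*
*		static slock_t mylock;
*
*		S_INIT_LOCK(&mylock);		-- once, before first use
*		...
*		S_LOCK(&mylock);
*		... brief critical section ...
*		S_UNLOCK(&mylock);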
*
- * Usually, S_LOCK() is implemented in terms of an even lower-level macro
- * TAS():
+ * Usually, S_LOCK() is implemented in terms of even lower-level macros
+ * TAS() and TAS_SPIN():
*
* int TAS(slock_t *lock)
* Atomic test-and-set instruction. Attempt to acquire the lock,
* but do *not* wait. Returns 0 if successful, nonzero if unable
* to acquire the lock.
*
- * TAS() is NOT part of the API, and should never be called directly.
+ * int TAS_SPIN(slock_t *lock)
+ * Like TAS(), but this version is used when waiting for a lock
+ * previously found to be contended. By default, this is the
+ * same as TAS(), but on some architectures it's better to poll a
+ * contended lock using an unlocked instruction and retry the
+ * atomic test-and-set only when it appears free.
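+ * On such architectures the definition is typically the unlocked-test
+ * form that appears several times below:
+ *
+ *		#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))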
*
- * CAUTION: on some platforms TAS() may sometimes report failure to acquire
- * a lock even when the lock is not locked. For example, on Alpha TAS()
- * will "fail" if interrupted. Therefore TAS() should always be invoked
- * in a retry loop, even if you are certain the lock is free.
+ * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
+ * directly.
*
- * ANOTHER CAUTION: be sure that TAS() and S_UNLOCK() represent sequence
- * points, ie, loads and stores of other values must not be moved across
- * a lock or unlock. In most cases it suffices to make the operation be
- * done through a "volatile" pointer.
+ * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
+ * failure to acquire a lock even when the lock is not locked. For example,
+ * on Alpha TAS() will "fail" if interrupted. Therefore a retry loop must
+ * always be used, even if you are certain the lock is free.
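+ *
+ * For instance, the out-of-line wait loop in s_lock.c is built around a
+ * retry loop of roughly this shape (a simplified sketch, not the exact
+ * code):
+ *
+ *		while (TAS_SPIN(lock))
+ *		{
+ *			SPIN_DELAY();
+ *			... timeout and sleep logic ...
+ *		}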
+ *
+ * Another caution for users of these macros is that it is the caller's
+ * responsibility to ensure that the compiler doesn't re-order accesses
+ * to shared memory to precede the actual lock acquisition, or follow the
+ * lock release. Typically we handle this by using volatile-qualified
+ * pointers to refer to both the spinlock itself and the shared data
+ * structure being accessed within the spinlocked critical section.
+ * That fixes it because compilers are not allowed to re-order accesses
+ * to volatile objects relative to other such accesses.
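+ *
+ * As an illustration (with hypothetical struct and member names), the
+ * usual coding pattern is
+ *
+ *		volatile MySharedStruct *vptr = shared_ptr;
+ *
+ *		S_LOCK(&vptr->mutex);
+ *		vptr->counter++;
+ *		S_UNLOCK(&vptr->mutex);
+ *
+ * rather than reaching the shared structure through a non-volatile
+ * pointer while the lock is held.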
+ *
+ * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
+ * S_UNLOCK() macros must further include hardware-level memory fence
+ * instructions to prevent similar re-ordering at the hardware level.
+ * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
+ * the macro are not executed until the lock has been obtained. Conversely,
+ * S_UNLOCK() must guarantee that loads and stores issued before the macro
+ * have been executed before the lock is released.
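+ *
+ * In C11-atomics terms (an analogy only; this file predates and does not
+ * use <stdatomic.h>), the required semantics are roughly
+ *
+ *		TAS(lock)      ~ atomic_exchange_explicit(lock, 1, memory_order_acquire)
+ *		S_UNLOCK(lock) ~ atomic_store_explicit(lock, 0, memory_order_release)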
*
* On most supported platforms, TAS() uses a tas() function written
* in assembly language to execute a hardware atomic-test-and-set
* instruction.  Equivalent OS-supplied mutex routines could be used too.
*
*
- * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/s_lock.h,v 1.169 2010/01/02 16:58:08 momjian Exp $
+ * src/include/storage/s_lock.h
*
*-------------------------------------------------------------------------
*/
#ifndef S_LOCK_H
#define S_LOCK_H
-#include "storage/pg_sema.h"
-
#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
-
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
* All the gcc inlines
* Use a non-locking test before asserting the bus lock. Note that the
* extra test appears to be a small loss on some x86 platforms and a small
* win on others; it's by no means clear that we should keep it.
+ *
+ * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
+ * macros. Nowadays it probably would be better to do a non-locking test
+ * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
+ * testing to verify that. Without some empirical evidence, better to
+ * leave it alone.
*/
__asm__ __volatile__(
" cmpb $0,%1 \n"
#define TAS(lock) tas(lock)
+/*
+ * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
+ * but only when spinning.
+ *
+ * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
+ * and IA32, by Michael Chynoweth and Mary R. Lee. As of this writing, it is
+ * available at:
+ * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
+ */
+#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
+
static __inline__ int
tas(volatile slock_t *lock)
{
register slock_t _res = 1;
- /*
- * On Opteron, using a non-locking test before the locking instruction
- * is a huge loss. On EM64T, it appears to be a wash or small loss,
- * so we needn't bother to try to distinguish the sub-architectures.
- */
__asm__ __volatile__(
" lock \n"
" xchgb %0,%1 \n"
#endif /* __x86_64__ */
-#if defined(__ia64__) || defined(__ia64) /* Intel Itanium */
+#if defined(__ia64__) || defined(__ia64)
+/*
+ * Intel Itanium, gcc or Intel's compiler.
+ *
+ * Itanium has weak memory ordering, but we rely on the compiler to enforce
+ * strict ordering of accesses to volatile data. In particular, while the
+ * xchg instruction implicitly acts as a memory barrier with 'acquire'
+ * semantics, we do not have an explicit memory fence instruction in the
+ * S_UNLOCK macro. We use a regular assignment to clear the spinlock, and
+ * trust that the compiler marks the generated store instruction with the
+ * ".rel" opcode.
+ *
+ * Testing shows that assumption to hold on gcc, although I could not find
+ * any explicit statement on that in the gcc manual. In Intel's compiler,
+ * the -m[no-]serialize-volatile option controls that, and testing shows that
+ * it is enabled by default.
+ */
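+/*
+ * For illustration, the "regular assignment" referred to above is the
+ * generic S_UNLOCK near the bottom of this file, which is essentially
+ *
+ *		*((volatile slock_t *) (lock)) = 0
+ */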
#define HAS_TEST_AND_SET
typedef unsigned int slock_t;
#define TAS(lock) tas(lock)
+/* On IA64, it's a win to use a non-locking test before the xchg proper */
+#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
+
#ifndef __INTEL_COMPILER
static __inline__ int
#endif /* __ia64__ || __ia64 */
+/*
+ * On ARM, we use __sync_lock_test_and_set(int *, int) if available, and if
+ * not fall back on the SWPB instruction. SWPB does not work on ARMv6 or
+ * later, so the compiler builtin is preferred if available. Note also that
+ * the int-width variant of the builtin works on more chips than other widths.
+ */
#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET
-typedef unsigned char slock_t;
-
#define TAS(lock) tas(lock)
+#ifdef HAVE_GCC_INT_ATOMICS
+
+typedef int slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+ return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#else /* !HAVE_GCC_INT_ATOMICS */
+
+typedef unsigned char slock_t;
+
static __inline__ int
tas(volatile slock_t *lock)
{
return (int) _res;
}
+#endif /* HAVE_GCC_INT_ATOMICS */
#endif /* __arm__ */
+/*
+ * On ARM64, we use __sync_lock_test_and_set(int *, int) if available.
+ */
+#if defined(__aarch64__) || defined(__aarch64)
+#ifdef HAVE_GCC_INT_ATOMICS
+#define HAS_TEST_AND_SET
+
+#define TAS(lock) tas(lock)
+
+typedef int slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+ return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#endif /* HAVE_GCC_INT_ATOMICS */
+#endif /* __aarch64__ */
+
+
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET
-#if defined(__ppc64__) || defined(__powerpc64__)
-typedef unsigned long slock_t;
-#else
typedef unsigned int slock_t;
-#endif
#define TAS(lock) tas(lock)
+
+/* On PPC, it's a win to use a non-locking test before the lwarx */
+#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
+
/*
* NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
* an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
+ * On newer machines, we can use lwsync instead for better performance.
*/
static __inline__ int
tas(volatile slock_t *lock)
int _res;
__asm__ __volatile__(
+#ifdef USE_PPC_LWARX_MUTEX_HINT
+" lwarx %0,0,%3,1 \n"
+#else
" lwarx %0,0,%3 \n"
+#endif
" cmpwi %0,0 \n"
" bne 1f \n"
" addi %0,%0,1 \n"
"1: li %1,1 \n"
" b 3f \n"
"2: \n"
+#ifdef USE_PPC_LWSYNC
+" lwsync \n"
+#else
" isync \n"
+#endif
" li %1,0 \n"
"3: \n"
return _res;
}
-/* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction */
+/*
+ * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
+ * On newer machines, we can use lwsync instead for better performance.
+ */
+#ifdef USE_PPC_LWSYNC
+#define S_UNLOCK(lock) \
+do \
+{ \
+ __asm__ __volatile__ (" lwsync \n"); \
+ *((volatile slock_t *) (lock)) = 0; \
+} while (0)
+#else
#define S_UNLOCK(lock) \
do \
{ \
__asm__ __volatile__ (" sync \n"); \
*((volatile slock_t *) (lock)) = 0; \
} while (0)
+#endif /* USE_PPC_LWSYNC */
#endif /* powerpc */
#endif /* __vax__ */
-#if defined(__ns32k__) /* National Semiconductor 32K */
-#define HAS_TEST_AND_SET
-
-typedef unsigned char slock_t;
-
-#define TAS(lock) tas(lock)
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- register int _res;
-
- __asm__ __volatile__(
- " sbitb 0, %1 \n"
- " sfsd %0 \n"
-: "=r"(_res), "+m"(*lock)
-:
-: "memory");
- return _res;
-}
-
-#endif /* __ns32k__ */
-
-
-#if defined(__alpha) || defined(__alpha__) /* Alpha */
-/*
- * Correct multi-processor locking methods are explained in section 5.5.3
- * of the Alpha AXP Architecture Handbook, which at this writing can be
- * found at ftp://ftp.netbsd.org/pub/NetBSD/misc/dec-docs/index.html.
- * For gcc we implement the handbook's code directly with inline assembler.
- */
-#define HAS_TEST_AND_SET
-
-typedef unsigned long slock_t;
-
-#define TAS(lock) tas(lock)
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- register slock_t _res;
-
- __asm__ __volatile__(
- " ldq $0, %1 \n"
- " bne $0, 2f \n"
- " ldq_l %0, %1 \n"
- " bne %0, 2f \n"
- " mov 1, $0 \n"
- " stq_c $0, %1 \n"
- " beq $0, 2f \n"
- " mb \n"
- " br 3f \n"
- "2: mov 1, %0 \n"
- "3: \n"
-: "=&r"(_res), "+m"(*lock)
-:
-: "memory", "0");
- return (int) _res;
-}
-
-#define S_UNLOCK(lock) \
-do \
-{\
- __asm__ __volatile__ (" mb \n"); \
- *((volatile slock_t *) (lock)) = 0; \
-} while (0)
-
-#endif /* __alpha || __alpha__ */
-
-
#if defined(__mips__) && !defined(__sgi) /* non-SGI MIPS */
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#endif /* defined(USE_UNIVEL_CC) */
-#if defined(__alpha) || defined(__alpha__) /* Tru64 Unix Alpha compiler */
-/*
- * The Tru64 compiler doesn't support gcc-style inline asm, but it does
- * have some builtin functions that accomplish much the same results.
- * For simplicity, slock_t is defined as long (ie, quadword) on Alpha
- * regardless of the compiler in use. LOCK_LONG and UNLOCK_LONG only
- * operate on an int (ie, longword), but that's OK as long as we define
- * S_INIT_LOCK to zero out the whole quadword.
- */
-#define HAS_TEST_AND_SET
-
-typedef unsigned long slock_t;
-
-#include <alpha/builtins.h>
-#define S_INIT_LOCK(lock) (*(lock) = 0)
-#define TAS(lock) (__LOCK_LONG_RETRY((lock), 1) == 0)
-#define S_UNLOCK(lock) __UNLOCK_LONG(lock)
-
-#endif /* __alpha || __alpha__ */
-
-
#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
/*
* HP's PA-RISC
#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
-
-#define HAS_TEST_AND_SET
-
-typedef unsigned int slock_t;
-
-#include <ia64/sys/inline.h>
-#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
-
-#endif /* HPUX on IA64, non gcc */
-
-
-#if defined(__sgi) /* SGI compiler */
/*
- * SGI IRIX 5
- * slock_t is defined as a unsigned long. We use the standard SGI
- * mutex API.
+ * HP-UX on Itanium, non-gcc compiler
*
- * The following comment is left for historical reasons, but is probably
- * not a good idea since the mutex ABI is supported.
+ * We assume that the compiler enforces strict ordering of loads/stores on
+ * volatile data (see comments on the gcc-version earlier in this file).
+ * Note that this assumption does *not* hold if you use the
+ * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
*
- * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
- * assembly from his NECEWS SVR4 port, but we probably ought to retain this
- * for the R3000 chips out there.
- */
-#define HAS_TEST_AND_SET
-
-typedef unsigned long slock_t;
-
-#include "mutex.h"
-#define TAS(lock) (test_and_set(lock,1))
-#define S_UNLOCK(lock) (test_then_and(lock,0))
-#define S_INIT_LOCK(lock) (test_then_and(lock,0))
-#define S_LOCK_FREE(lock) (test_then_add(lock,0) == 0)
-#endif /* __sgi */
-
-
-#if defined(sinix) /* Sinix */
-/*
- * SINIX / Reliant UNIX
- * slock_t is defined as a struct abilock_t, which has a single unsigned long
- * member. (Basically same as SGI)
+ * See also Implementing Spinlocks on the Intel Itanium Architecture and
+ * PA-RISC, by Tor Ekqvist and David Graves, for more information. As of
+ * this writing, version 1.0 of the manual is available at:
+ * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
*/
#define HAS_TEST_AND_SET
-#include "abi_mutex.h"
-typedef abilock_t slock_t;
+typedef unsigned int slock_t;
-#define TAS(lock) (!acquire_lock(lock))
-#define S_UNLOCK(lock) release_lock(lock)
-#define S_INIT_LOCK(lock) init_lock(lock)
-#define S_LOCK_FREE(lock) (stat_lock(lock) == UNLOCKED)
-#endif /* sinix */
+#include <ia64/sys/inline.h>
+#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
+/* On IA64, it's a win to use a non-locking test before the xchg proper */
+#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
+#endif /* HPUX on IA64, non gcc */
#if defined(_AIX) /* AIX */
/*
#endif /* _AIX */
-#if defined (nextstep) /* Nextstep */
-#define HAS_TEST_AND_SET
-
-typedef struct mutex slock_t;
-
-#define S_LOCK(lock) mutex_lock(lock)
-#define S_UNLOCK(lock) mutex_unlock(lock)
-#define S_INIT_LOCK(lock) mutex_init(lock)
-/* For Mach, we have to delve inside the entrails of `struct mutex'. Ick! */
-#define S_LOCK_FREE(alock) ((alock)->lock == 0)
-#endif /* nextstep */
-
-
/* These are in s_lock.c */
-
-#if defined(sun3) /* Sun3 */
-#define HAS_TEST_AND_SET
-
-typedef unsigned char slock_t;
-#endif
-
-
#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET
#define SPIN_DELAY() spin_delay()
+/*
+ * If using Visual C++ on Win64, inline assembly is unavailable.
+ * Use the _mm_pause intrinsic instead of rep nop.
+ */
+#if defined(_WIN64)
+static __forceinline void
+spin_delay(void)
+{
+ _mm_pause();
+}
+#else
static __forceinline void
spin_delay(void)
{
/* See comment for gcc code. Same code, MASM syntax */
__asm rep nop;
}
+#endif
#endif
-
+
#endif /* !defined(HAS_TEST_AND_SET) */
* to fall foul of kernel limits on number of semaphores, so don't use this
* unless you must! The subroutines appear in spin.c.
*/
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;
extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
#if !defined(S_LOCK)
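+/*
+ * The default S_LOCK is an expression rather than a statement so that it
+ * can yield the "delay" count returned by s_lock(), or zero when the lock
+ * is acquired on the first try; see the API notes at the top of this file.
+ */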
#define S_LOCK(lock) \
- do { \
- if (TAS(lock)) \
- s_lock((lock), __FILE__, __LINE__); \
- } while (0)
+ (TAS(lock) ? s_lock((lock), __FILE__, __LINE__) : 0)
#endif /* S_LOCK */
#if !defined(S_LOCK_FREE)
#define TAS(lock) tas(lock)
#endif /* TAS */
+#if !defined(TAS_SPIN)
+#define TAS_SPIN(lock) TAS(lock)
+#endif /* TAS_SPIN */
+
/*
* Platform-independent out-of-line support routines
*/
-extern void s_lock(volatile slock_t *lock, const char *file, int line);
+extern int s_lock(volatile slock_t *lock, const char *file, int line);
/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY 100