/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the hardware-independent macros in spin.h.
 *
 * The following hardware-dependent macros must be provided for each
 * platform supported by this file:
 *
 * void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 * int S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *		Should return the number of "delays"; see s_lock.c.
 *
 * void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 * bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 * void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
 * Note to implementors: there are default implementations for all these
 * macros at the bottom of the file.  Check if your platform can use
 * these or needs to override them.
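 *
 * For illustration only, here is a minimal sketch of the intended usage
 * pattern (real code should go through the SpinLockInit/SpinLockAcquire/
 * SpinLockRelease wrappers in spin.h; "sharedStruct" and its "mutex" field
 * are hypothetical):
 *
 *		volatile slock_t *lk = &sharedStruct->mutex;
 *
 *		S_INIT_LOCK(lk);	// once, when the shared structure is created
 *		S_LOCK(lk);			// spins (and eventually aborts) until acquired
 *		// ... touch shared data only through volatile-qualified pointers ...
 *		S_UNLOCK(lk);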
 *
 * Usually, S_LOCK() is implemented in terms of even lower-level macros
 * TAS() and TAS_SPIN():
 *
 * int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 * int TAS_SPIN(slock_t *lock)
 *		Like TAS(), but this version is used when waiting for a lock
 *		previously found to be contended.  By default, this is the
 *		same as TAS(), but on some architectures it's better to poll a
 *		contended lock using an unlocked instruction and retry the
 *		atomic test-and-set only when it appears free.
 *
 * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 * directly.
 *
 * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 * failure to acquire a lock even when the lock is not locked.  For example,
 * on Alpha TAS() will "fail" if interrupted.  Therefore a retry loop must
 * always be used, even if you are certain the lock is free.
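 *
 * Consequently, even a lock believed to be free must be acquired with a
 * retry loop of roughly this shape (a sketch; the real loop in s_lock.c
 * adds delays and a timeout):
 *
 *		while (TAS_SPIN(lock))
 *			SPIN_DELAY();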
 *
 * Another caution for users of these macros is that it is the caller's
 * responsibility to ensure that the compiler doesn't re-order accesses
 * to shared memory to precede the actual lock acquisition, or follow the
 * lock release.  Typically we handle this by using volatile-qualified
 * pointers to refer to both the spinlock itself and the shared data
 * structure being accessed within the spinlocked critical section.
 * That fixes it because compilers are not allowed to re-order accesses
 * to volatile objects relative to other such accesses.
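 *
 * A sketch of that convention ("MyShared" and its fields are hypothetical):
 *
 *		volatile MyShared *shared = (volatile MyShared *) sharedPtr;
 *
 *		S_LOCK(&shared->mutex);
 *		shared->counter++;		// volatile access: can't drift past the lock ops
 *		S_UNLOCK(&shared->mutex);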
 *
 * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 * S_UNLOCK() macros must further include hardware-level memory fence
 * instructions to prevent similar re-ordering at the hardware level.
 * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 * the macro are not executed until the lock has been obtained.  Conversely,
 * S_UNLOCK() must guarantee that loads and stores issued before the macro
 * have been executed before the lock is released.
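 *
 * Schematically (pseudo-code; acquire_barrier/release_barrier are not real
 * macros here, just names for the required fence placement):
 *
 *		TAS:		atomically set lock; acquire_barrier();
 *		S_UNLOCK:	release_barrier(); *lock = 0;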
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 * defined), then we fall back on an emulation that uses SysV semaphores
 * (see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 * implementation, because of the cost of a kernel call per lock or unlock.
 * An old report is that Postgres spends around 40% of its time in semop(2)
 * when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#include "storage/pg_sema.h"
#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):
 *
 *	__asm__ __volatile__(
 *		"	instruction	\n"
 *		"	instruction	\n"
 *		"	instruction	\n"
 *:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
 *:		"r"(lock)					// lock pointer, in input register
 *:		"memory", "cc");			// show clobbered registers here
 *
 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */
#ifdef __i386__		/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 *
	 * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
	 * macros.  Nowadays it probably would be better to do a non-locking test
	 * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
	 * testing to verify that.  Without some empirical evidence, better to
	 * leave it alone.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop.  When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline.  The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop.  The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush.  In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ */
#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee.  As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __x86_64__ */
#if defined(__ia64__) || defined(__ia64)
/*
 * Intel Itanium, gcc or Intel's compiler.
 *
 * Itanium has weak memory ordering, but we rely on the compiler to enforce
 * strict ordering of accesses to volatile data.  In particular, while the
 * xchg instruction implicitly acts as a memory barrier with 'acquire'
 * semantics, we do not have an explicit memory fence instruction in the
 * S_UNLOCK macro.  We use a regular assignment to clear the spinlock, and
 * trust that the compiler marks the generated store instruction with the
 * ".rel" opcode.
 *
 * Testing shows that assumption to hold on gcc, although I could not find
 * any explicit statement on that in the gcc manual.  In Intel's compiler,
 * the -m[no-]serialize-volatile option controls that, and testing shows that
 * it is enabled by default.
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long		ret;

	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else /* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int			ret;

	ret = _InterlockedExchange(lock,1);	/* this is a xchg asm macro */

	return ret;
}

#endif	 /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */
/*
 * On ARM, we use __sync_lock_test_and_set(int *, int) if available, and if
 * not fall back on the SWPB instruction.  SWPB does not work on ARMv6 or
 * later, so the compiler builtin is preferred if available.  Note also that
 * the int-width variant of the builtin works on more chips than other widths.
 */
#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

#ifdef HAVE_GCC_INT_ATOMICS

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#else /* !HAVE_GCC_INT_ATOMICS */

typedef unsigned char slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	swpb 	%0, %0, [%2]	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* HAVE_GCC_INT_ATOMICS */
#endif	 /* __arm__ */
/*
 * On ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 */
#if defined(__aarch64__) || defined(__aarch64)
#ifdef HAVE_GCC_INT_ATOMICS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif	 /* HAVE_GCC_INT_ATOMICS */
#endif	 /* __aarch64__ */
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__	__volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */
#if defined(__sparc__)		/* Sparc */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	/*
	 *	See comment in /pg/backend/port/tas/solaris_sparc.s for why this
	 *	uses "ldstub", and that file uses "cas".  gcc currently generates
	 *	sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __sparc__ */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * On newer machines, we can use lwsync instead for better performance.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_t;
	int			_res;

	__asm__ __volatile__(
#ifdef USE_PPC_LWARX_MUTEX_HINT
"	lwarx   %0,0,%3,1	\n"
#else
"	lwarx   %0,0,%3		\n"
#endif
"	cmpwi   %0,0		\n"
"	bne     1f			\n"
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     2f			\n"
"1:	li      %1,1		\n"
"	b		3f			\n"
"2:						\n"
#ifdef USE_PPC_LWSYNC
"	lwsync				\n"
#else
"	isync				\n"
#endif
"	li      %1,0		\n"
"3:						\n"
:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * On newer machines, we can use lwsync instead for better performance.
 */
#ifdef USE_PPC_LWSYNC
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	lwsync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif	 /* USE_PPC_LWSYNC */

#endif	 /* powerpc */
/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__	__volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */
/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl 	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */
#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register volatile slock_t *_l = lock;
	register int _res;
	register int _tmp;

	__asm__ __volatile__(
		"       .set push           \n"
		"       .set mips2          \n"
		"       .set noreorder      \n"
		"       .set nomacro        \n"
		"       ll      %0, %2      \n"
		"       or      %1, %0, 1   \n"
		"       sc      %1, %2      \n"
		"       xori    %1, 1       \n"
		"       or      %0, %0, %1  \n"
		"       sync                \n"
		"       .set pop              "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:		/* no inputs */
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"       .set push           \n" \
		"       .set mips2          \n" \
		"       .set noreorder      \n" \
		"       .set nomacro        \n" \
		"       sync                \n" \
		"       .set pop              " \
:		/* no outputs */ \
:		/* no inputs */	\
:		"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */
#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)	/* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif	 /* __m32r__ */
#if defined(__sh__)		/* Renesas' SuperH */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int _res;

	/*
	 * This asm is coded as if %0 could be any register, but actually SuperH
	 * restricts the target of xor-immediate to be R0.  That's handled by
	 * the "z" constraint on _res.
	 */
	__asm__ __volatile__(
		"	tas.b @%2    \n"
		"	movt  %0     \n"
		"	xor   #1,%0  \n"
:		"=z"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory", "cc");
	return _res;
}

#endif	 /* __sh__ */
/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__) && !defined(__linux__)	/* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#endif	/* defined(__GNUC__) || defined(__INTEL_COMPILER) */
/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */
#if defined(USE_UNIVEL_CC)		/* Unixware compiler */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)	tas(lock)

asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
	pushl %ebx
	movl s_lock, %ebx
	movl $255, %eax
	lock
	xchgb %al, (%ebx)
	popl %ebx
}

#endif	 /* defined(USE_UNIVEL_CC) */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.  Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct.  The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int			sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((uintptr_t) (lock) + 15) & ~15))
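/*
 * For example (illustrative addresses only): for a slock_t at address
 * 0x1004, TAS_ACTIVE_WORD computes (0x1004 + 15) & ~15 = 0x1010, i.e. the
 * first 16-byte-aligned word within the 16-byte struct.
 */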
#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

#endif	 /* __GNUC__ */

#define S_UNLOCK(lock)	(*TAS_ACTIVE_WORD(lock) = -1)
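/*
 * Note the inverted convention on PA-RISC: LDCW{X} reads the word and sets
 * it to zero, so 0 means locked and -1 (the reset value) means free --- the
 * opposite of the other ports above.  S_LOCK_FREE below tests accordingly.
 */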
#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */
#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
 * HP-UX on Itanium, non-gcc compiler
 *
 * We assume that the compiler enforces strict ordering of loads/stores on
 * volatile data (see comments on the gcc-version earlier in this file).
 * Note that this assumption does *not* hold if you use the
 * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
 *
 * See also Implementing Spinlocks on the Intel Itanium Architecture and
 * PA-RISC, by Tor Ekqvist and David Graves, for more information.  As of
 * this writing, version 1.0 of the manual is available at:
 * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#endif	/* HPUX on IA64, non gcc */
#if defined(_AIX)	/* AIX */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)			_check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)		_clear_lock((slock_t *) (lock), 0)

#endif	 /* _AIX */
/* These are in s_lock.c */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
									  slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif
#ifdef WIN32_ONLY_COMPILER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
	_mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code. Same code, MASM syntax */
	__asm rep nop;
}
#endif

#endif	/* WIN32_ONLY_COMPILER */
#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif	 /* HAS_TEST_AND_SET */
#else	/* !HAVE_SPINLOCKS */

/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef PGSemaphoreData slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock)
#define TAS(lock)	tas_sema(lock)
#endif	 /* HAVE_SPINLOCKS */
/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	(TAS(lock) ? s_lock((lock), __FILE__, __LINE__) : 0)
#endif	 /* S_LOCK */
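/*
 * The TAS() test is the fast, uncontended path; only when it fails do we
 * drop into the out-of-line s_lock() wait loop.  Expanded (and ignoring the
 * returned delay count), the default S_LOCK() behaves roughly like:
 *
 *		if (TAS(lock))
 *			s_lock(lock, __FILE__, __LINE__);	// contended case
 */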
#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)		(*((volatile slock_t *) (lock)) = 0)
#endif	 /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */
#if !defined(TAS)
extern int tas(volatile slock_t *lock);		/* in port/.../tas.s, or
											 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)	TAS(lock)
#endif	 /* TAS_SPIN */
/*
 * Platform-independent out-of-line support routines
 */
extern int s_lock(volatile slock_t *lock, const char *file, int line);
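/*
 * A sketch of the shape of s_lock()'s wait loop (the real implementation in
 * s_lock.c adds an adaptive spin count, occasional sleeps, and a
 * stuck-spinlock timeout that ends in abort()):
 *
 *		int		delays = 0;
 *
 *		while (TAS_SPIN(lock))
 *		{
 *			SPIN_DELAY();
 *			// ... every so many spins: sleep, delays++ ...
 *		}
 *		return delays;
 */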
/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);
#endif	 /* S_LOCK_H */