1 /*-------------------------------------------------------------------------
4 * This file contains the implementation (if any) for spinlocks.
6 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/include/storage/s_lock.h,v 1.75 2000/12/03 14:41:42 thomas Exp $
13 *-------------------------------------------------------------------------
18 * The public macros that must be provided are:
20 * void S_INIT_LOCK(slock_t *lock)
22 * void S_LOCK(slock_t *lock)
24 * void S_UNLOCK(slock_t *lock)
26 * void S_LOCK_FREE(slock_t *lock)
27 * Tests if the lock is free. Returns non-zero if free, 0 if locked.
29 * The S_LOCK() macro implements a primitive but still useful random
30 * backoff to avoid hordes of busywaiting lockers chewing CPU.
34 * S_LOCK(slock_t *lock)
38 * // back off the cpu for a semi-random short time
42 * This implementation takes advantage of a tas function written
43 * (in assembly language) on machines that have a native test-and-set
44 * instruction. Alternative mutex implementations may also be used.
45 * This function is hidden under the TAS macro to allow substitutions.
47 * #define TAS(lock) tas(lock)
48 * int tas(slock_t *lock) // True if lock already set
50 * There are default implementations for all these macros at the bottom
51 * of this file. Check if your platform can use these or needs to
55 * If none of this can be done, POSTGRES will default to using
56 * System V semaphores (and take a large performance hit -- around 40%
57 * of its time on a DS5000/240 is spent in semop(3)...).
59 * AIX has a test-and-set but the recommended interface is the cs(3)
60 * system call. This provides an 8-instruction (plus system call
61 * overhead) uninterruptible compare-and-set operation. True
62 * spinlocks might be faster but using cs(3) still speeds up the
63 * regression test suite by about 25%. I don't have an assembler
64 * manual for POWER in any case.
70 #include "storage/ipc.h"
72 extern void s_lock_sleep(unsigned spin);
74 #if defined(HAS_TEST_AND_SET)

/*
 * Platform-specific hardware test-and-set section.  Each platform defines
 * TAS(lock) -> nonzero if the lock was already held, 0 if we acquired it.
 * NOTE(review): this extract is fragmentary -- the embedded numbers are the
 * original file's line numbers and they jump, so braces/returns of these
 * inline functions are on lines not visible here.
 */
78 /*************************************************************************
/*
 * i386 (gcc): "lock; xchgb" atomically swaps the constant 1 into *lock;
 * the previous lock value comes back in _res via the "0" matching
 * constraint, so a nonzero result means the lock was taken.
 */
84 #define TAS(lock) tas(lock)
87 tas(volatile slock_t *lock)
89 	register slock_t _res = 1;
91 	__asm__("lock; xchgb %0,%1": "=q"(_res), "=m"(*lock):"0"(_res));
/*
 * ia64 (gcc): volatile asm with result in "ret" and *lock as an output;
 * the actual instruction (presumably xchg/cmpxchg -- TODO confirm) is on
 * a line missing from this extract.
 */
99 #define TAS(lock) tas(lock)
101 static __inline__ int
102 tas (volatile slock_t *lock)
106 	__asm__ __volatile__(
108 		: "=r"(ret), "=m"(*lock)
114 #endif	 /* __ia64__ */
/*
 * ARM (gcc): SWPB atomically swaps a byte between a register and memory,
 * so the previous lock value lands in _res (nonzero => already locked).
 * Operands: %0/%2 = _res (in/out via "0"), %1 = *lock memory output,
 * %3 = lock pointer.
 *
 * NOTE(review): the guard here was "defined(__arm__) || defined(__arm__)",
 * a tautological duplicate; the second operand was presumably meant to be
 * a different macro (e.g. a Thumb/ARM-variant test -- confirm against the
 * original).  De-duplicated; preprocessor behavior is unchanged.
 */
117 #if defined(__arm__)
118 #define TAS(lock) tas(lock)
120 static __inline__ int
121 tas(volatile slock_t *lock)
123 	register slock_t _res = 1;
125 	__asm__("swpb %0, %0, [%3]": "=r"(_res), "=m"(*lock):"0"(_res), "r" (lock));
/*
 * S/390 (gcc): loads 1 into register 1 ("la 1,1") and, per the visible
 * operands, compares-and-swaps against the lock; the cs instruction itself
 * is on a line missing from this extract.  Result returned in _res.
 * NOTE(review): the output constraint is "=m"(lock) -- the pointer, not
 * *lock -- verify against the original; it may be intentional or a bug.
 */
131 #if defined(__s390__)
135 #define TAS(lock) tas(lock)
138 tas(volatile slock_t *lock)
142 	__asm__ __volatile(" la 1,1\n"
147 	: "=m" (lock), "=d" (_res)
153 #endif	 /* __s390__ */
/*
 * SPARC (gcc): LDSTUB atomically loads the byte at [lock] into _res and
 * stores all-ones into it, so _res != 0 means the lock was already held.
 * (Input operand %2 and the closing of the asm are on lines missing from
 * this extract.)
 */
156 #if defined(__sparc__)
157 #define TAS(lock) tas(lock)
159 static __inline__ int
160 tas(volatile slock_t *lock)
162 	register slock_t _res = 1;
164 	__asm__("ldstub [%2], %0" \
165 	: "=r"(_res), "=m"(*lock) \
170 #endif	 /* __sparc__ */
/*
 * m68k under Linux (gcc): result in "rv" with *lock as a memory output;
 * the instruction (presumably TAS -- TODO confirm) is on a line missing
 * from this extract.
 */
173 #if defined(__mc68000__) && defined(__linux__)
174 #define TAS(lock) tas(lock)
176 static __inline__ int
177 tas(volatile slock_t *lock)
181 	__asm__ __volatile__ (
183 	: "=d" (rv), "=m"(*lock)
189 #endif	 /* defined(__mc68000__) && defined(__linux__) */
/*
 * VAX: BBSSI (branch on bit set and set, interlocked) atomically tests
 * and sets bit 0 of *lock; r0 carries the result back.  slock_t is
 * typedef'd right here since this path supplies its own.
 * NOTE(review): "1 f" in the branch target below looks like a garbled
 * forward-label "1f" -- verify against the original source before use.
 */
192 #if defined(NEED_VAX_TAS_ASM)
194  * VAXen -- even multiprocessor ones
195  * (thanks to Tom Ivar Helbekkmo)
197 #define TAS(lock) tas(lock)
199 typedef unsigned char slock_t;
201 static __inline__ int
202 tas(volatile slock_t *lock)
206 	__asm__(" movl $1, r0 \
207 bbssi $0, (%1), 1 f \
210 : "=r"(_res) /* return value, in register */
211 : "r"(lock) /* argument, 'lock pointer', in register */
212 : "r0"); /* inline code uses this register */
216 #endif	 /* NEED_VAX_TAS_ASM */
/*
 * NS32K: SBITB sets bit 0 of *lock and records its previous state; the
 * remainder of the asm (condition-code capture into _res) is on lines
 * missing from this extract.
 */
220 #if defined(NEED_NS32K_TAS_ASM)
221 #define TAS(lock) tas(lock)
223 static __inline__ int
224 tas(volatile slock_t *lock)
227 	__asm__("sbitb 0, %0 \n\
229 : "=m"(*lock), "=r"(_res));
233 #endif	 /* NEED_NS32K_TAS_ASM */
238 /***************************************************************************
/*
 * QNX: spinlock ops are mapped onto POSIX semaphores.  TAS "fails"
 * (returns true = already locked) exactly when sem_trywait() returns < 0;
 * S_INIT_LOCK creates a process-shared semaphore with initial count 1.
 * S_LOCK_FREE peeks at the sem_t's internal "value" field directly.
 */
246  * Note that slock_t under QNX is sem_t instead of char
248 #define TAS(lock) (sem_trywait((lock)) < 0)
249 #define S_UNLOCK(lock) sem_post((lock))
250 #define S_INIT_LOCK(lock) sem_init((lock), 1, 1)
251 #define S_LOCK_FREE(lock) (lock)->value
/*
 * i386 with non-gcc compilers (e.g. Univel/UnixWare cc): the asm body
 * uses compiler-specific column-1 asm syntax and is on lines missing from
 * this extract.
 */
255 #if defined(NEED_I386_TAS_ASM)
256 /* non gcc i386 based things */
258 #if defined(USE_UNIVEL_CC)
259 #define TAS(lock) tas(lock)
262 tas(volatile slock_t *s_lock)
264 /* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
274 #endif	 /* USE_UNIVEL_CC */
276 #endif	 /* NEED_I386_TAS_ASM */
278 #endif	 /* defined(__GNUC__) */
282 /*************************************************************************
283  * These are the platforms that have common code for gcc and non-gcc
/*
 * Alpha, three variants:
 *  - Tru64 (__osf__): msemaphore API; msem_lock with MSEM_IF_NOWAIT
 *    returns < 0 when the semaphore is unavailable, which maps to TAS's
 *    "already locked" result.
 *  - (VMS builtin path) __INTERLOCKED_TESTBITSS_QUAD does an interlocked
 *    test-and-set of a bit.
 *  - otherwise (gcc asm): LDQ_L/STQ_C load-locked/store-conditional loop
 *    is presumed -- only fragments of the sequence are visible here.
 *    S_UNLOCK issues an "mb" memory barrier before clearing the lock.
 */
293  * Note that slock_t on the Alpha AXP is msemaphore instead of char
294  * (see storage/ipc.h).
296 #include <alpha/builtins.h>
298 #define TAS(lock) (msem_lock((lock), MSEM_IF_NOWAIT) < 0)
299 #define S_UNLOCK(lock) msem_unlock((lock), 0)
300 #define S_INIT_LOCK(lock) msem_init((lock), MSEM_UNLOCKED)
301 #define S_LOCK_FREE(lock) (!(lock)->msem_state)
303 #define TAS(lock) (__INTERLOCKED_TESTBITSS_QUAD((lock),0))
306 #else /* i.e. not __osf__ */
308 #define TAS(lock) tas(lock)
309 #define S_UNLOCK(lock) do { __asm__("mb"); *(lock) = 0; } while (0)
311 static __inline__ int
312 tas(volatile slock_t *lock)
314 	register slock_t _res;
316 	__asm__(" ldq $0, %0 \n\
327 3: bis $0, $0, %1 \n\
328 4: nop ": "=m"(*lock), "=r"(_res): :"0");
/*
 * HP PA-RISC: LDCWS requires a 16-byte-aligned word, so slock_t is a
 * 4-word struct; a *clear* (free) lock has all words set to -1 and a
 * *set* (held) lock has the aligned word zeroed.  S_UNLOCK refills all
 * four words; S_LOCK_FREE tests the 16-byte-aligned word via the
 * ((addr + 15) & ~15) alignment computation.
 */
341  * Note that slock_t on PA-RISC is a structure instead of char
342  * (see include/port/hpux.h).
344  * a "set" slock_t has a single word cleared. a "clear" slock_t has
345  * all words set to non-zero. tas() in tas.s
348 #define S_UNLOCK(lock) \
350 	volatile slock_t *lock_ = (volatile slock_t *) (lock); \
351 	lock_->sema[0] = lock_->sema[1] = lock_->sema[2] = lock_->sema[3] = -1; \
354 #define S_LOCK_FREE(lock) ( *(int *) (((long) (lock) + 15) & ~15) != 0)
/*
 * SGI IRIX: uses the SGI mutex ABI (test_and_set / test_then_and /
 * test_then_add) on an unsigned long.  Unlock and init both clear the
 * word; S_LOCK_FREE reads it non-destructively by adding 0.
 */
362  * slock_t is defined as a unsigned long. We use the standard SGI
365  * The following comment is left for historical reasons, but is probably
366  * not a good idea since the mutex ABI is supported.
368  * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
369  * assembly from his NECEWS SVR4 port, but we probably ought to retain this
370  * for the R3000 chips out there.
373 #define TAS(lock) (test_and_set(lock,1))
374 #define S_UNLOCK(lock) (test_then_and(lock,0))
375 #define S_INIT_LOCK(lock) (test_then_and(lock,0))
376 #define S_LOCK_FREE(lock) (test_then_add(lock,0) == 0)
/*
 * SINIX / Reliant UNIX: abilock(3) primitives.  acquire_lock() returns
 * true on success, hence the negation to get TAS's "already locked"
 * convention.
 */
381  * SINIX / Reliant UNIX
382  * slock_t is defined as a struct abilock_t, which has a single unsigned long
383  * member. (Basically same as SGI)
386 #define TAS(lock) (!acquire_lock(lock))
387 #define S_UNLOCK(lock) release_lock(lock)
388 #define S_INIT_LOCK(lock) init_lock(lock)
389 #define S_LOCK_FREE(lock) (stat_lock(lock) == UNLOCKED)
/*
 * AIX (POWER): cs(3) compare-and-swap system call -- TAS succeeds when
 * it swaps 0 -> 1.  See the rationale in the file header.
 */
397  * Note that slock_t on POWER/POWER2/PowerPC is int instead of char
398  * (see storage/ipc.h).
400 #define TAS(lock) cs((int *) (lock), 0, 1)
/*
 * NeXTSTEP: delegates to Mach mutexes; note S_LOCK here *blocks*
 * (mutex_lock) rather than spinning, so no TAS is defined.
 */
404 #if defined (nextstep)
407  * slock_t is defined as a struct mutex.
410 #define S_LOCK(lock) mutex_lock(lock)
411 #define S_UNLOCK(lock) mutex_unlock(lock)
412 #define S_INIT_LOCK(lock) mutex_init(lock)
413 /* For Mach, we have to delve inside the entrails of `struct mutex'. Ick! */
414 #define S_LOCK_FREE(alock) ((alock)->lock == 0)
415 #endif	 /* nextstep */
420 /****************************************************************************
421  * Default Definitions - override these above as needed.
/*
 * Defaults for anything a platform section above did not define.
 * S_LOCK tries TAS once inline (fast path, lock uncontended) and only
 * calls the out-of-line s_lock() spin/backoff routine on contention,
 * passing __FILE__/__LINE__ for stuck-spinlock error reports.
 * The fallback S_LOCK_FREE/S_UNLOCK/S_INIT_LOCK assume slock_t is a
 * simple scalar where 0 == free.
 */
425 extern void s_lock(volatile slock_t *lock, const char *file, const int line);
427 #define S_LOCK(lock) \
429 		if (TAS((volatile slock_t *) (lock))) \
430 			s_lock((volatile slock_t *) (lock), __FILE__, __LINE__); \
434 #if !defined(S_LOCK_FREE)
435 #define S_LOCK_FREE(lock) (*(lock) == 0)
436 #endif	 /* S_LOCK_FREE */
438 #if !defined(S_UNLOCK)
439 #define S_UNLOCK(lock) (*(lock) = 0)
440 #endif	 /* S_UNLOCK */
442 #if !defined(S_INIT_LOCK)
443 #define S_INIT_LOCK(lock) S_UNLOCK(lock)
444 #endif	 /* S_INIT_LOCK */
/* Default TAS: an external assembly routine (port/.../tas.s). */
447 extern int tas(volatile slock_t *lock); /* port/.../tas.s, or
450 #define TAS(lock) tas((volatile slock_t *) (lock))
454 #else /* !HAS_TEST_AND_SET */
/*
 * No hardware test-and-set available: emulate spinlocks with SysV
 * semaphores.  Each slock_t records the IpcSemaphoreId of its backing
 * semaphore; the s_*_sema() routines (defined elsewhere) do the actual
 * semop() calls.  Slow -- see the performance warning in the file header.
 */
457  * Fake spinlock implementation using SysV semaphores --- slow and prone
458  * to fall foul of kernel limits on number of semaphores, so don't use this
464 /* reference to semaphore used to implement this spinlock */
465 IpcSemaphoreId semId;
469 extern bool s_lock_free_sema(volatile slock_t *lock);
470 extern void s_unlock_sema(volatile slock_t *lock);
471 extern void s_init_lock_sema(volatile slock_t *lock);
472 extern int tas_sema(volatile slock_t *lock);
/* Same fast-path/slow-path split as the TAS version of S_LOCK above. */
474 extern void s_lock(volatile slock_t *lock, const char *file, const int line);
476 #define S_LOCK(lock) \
478 		if (TAS((volatile slock_t *) (lock))) \
479 			s_lock((volatile slock_t *) (lock), __FILE__, __LINE__); \
482 #define S_LOCK_FREE(lock) s_lock_free_sema(lock)
483 #define S_UNLOCK(lock) s_unlock_sema(lock)
484 #define S_INIT_LOCK(lock) s_init_lock_sema(lock)
485 #define TAS(lock) tas_sema(lock)
487 #endif	 /* HAS_TEST_AND_SET */
489 #endif	 /* S_LOCK_H */