/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   This file contains the in-line portion of the implementation
 *	   of spinlocks.
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/include/storage/s_lock.h,v 1.79 2001/01/19 02:58:59 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

/*----------
 * DESCRIPTION
 *	The public macros that must be provided are:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	void S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free.  Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	TAS() is a lower-level part of the API, but is used directly in a
 *	few places that want to do other things while waiting for a lock.
 *	The S_LOCK() macro is equivalent to
 *
 *	void
 *	S_LOCK(slock_t *lock)
 *	{
 *		unsigned	spins = 0;
 *
 *		while (TAS(lock))
 *			S_LOCK_SLEEP(lock, spins++);
 *	}
 *
 *	where S_LOCK_SLEEP() checks for timeout and sleeps for a short
 *	interval.  Callers that want to perform useful work while waiting
 *	can write out this entire loop and insert the "useful work" inside
 *	the loop (see the illustrative sketch just after this comment block).
 *
 *	CAUTION to TAS() callers: on some platforms TAS() may sometimes
 *	report failure to acquire a lock even when the lock is not locked.
 *	For example, on Alpha TAS() will "fail" if interrupted.  Therefore
 *	TAS() must *always* be invoked in a retry loop as depicted, even when
 *	you are certain the lock is free.
 *
 *	On most supported platforms, TAS() uses a tas() function written
 *	in assembly language to execute a hardware atomic-test-and-set
 *	instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 *	If no system-specific TAS() is available (ie, HAS_TEST_AND_SET is not
 *	defined), then we fall back on an emulation that uses SysV semaphores.
 *	This emulation will be MUCH MUCH MUCH slower than a proper TAS()
 *	implementation, because of the cost of a kernel call per lock or unlock.
 *	An old report is that Postgres spends around 40% of its time in semop(2)
 *	when using the SysV semaphore code.
 *
 *	Note to implementors: there are default implementations for all these
 *	macros at the bottom of the file.  Check if your platform can use
 *	these or needs to override them.
 *----------
 */
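/*
 * For illustration only (not part of the API): a caller that wants to do
 * useful work while waiting can open-code the loop sketched above along
 * these lines.  The names my_lock and do_useful_work() are hypothetical
 * placeholders, not anything defined by this header.
 *
 *	void
 *	acquire_doing_work(volatile slock_t *my_lock)
 *	{
 *		unsigned	spins = 0;
 *
 *		while (TAS(my_lock))
 *		{
 *			do_useful_work();
 *			S_LOCK_SLEEP(my_lock, spins++);
 *		}
 *		... touch whatever the lock protects ...
 *		S_UNLOCK(my_lock);
 *	}
 *
 * Per the CAUTION above, TAS() appears only inside the retry loop, so a
 * spurious failure simply costs one more iteration.
 */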
#ifndef S_LOCK_H
#define S_LOCK_H

#include "storage/ipc.h"

/* Platform-independent out-of-line support routines */
extern void s_lock(volatile slock_t *lock,
				   const char *file, const int line);
extern void s_lock_sleep(unsigned spins, int microsec,
						 volatile slock_t *lock,
						 const char *file, const int line);


#if defined(HAS_TEST_AND_SET)


#if defined(__GNUC__)
/*************************************************************************
 * All the gcc inlines
 */

/*
 * Standard __asm__ format:
 *
 *	__asm__(
 *			"command;"
 *			"command;"
 *			"command;"
 *		:	"=r"(_res)			return value, in register
 *		:	"r"(lock)			argument, 'lock pointer', in register
 *		:	"r0");				inline code uses this register
 */
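/*
 * In GCC's extended-asm terminology, the three colon-separated sections
 * after the instruction strings are the output operands, the input
 * operands, and the clobber list (registers or memory the asm modifies
 * behind the compiler's back), in that order.
 */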


#if defined(__i386__)
#define TAS(lock) tas(lock)

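/*
 * xchgb atomically swaps the byte in _res (initialized to 1) with the lock
 * byte, so _res ends up holding the lock's previous value: 0 only if the
 * lock was free and has now been taken.  xchg with a memory operand locks
 * the bus implicitly, so the explicit "lock" prefix is merely belt and
 * suspenders.
 */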
static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__(
		"lock;"
		"xchgb %0,%1;"
		: "=q"(_res), "=m"(*lock)
		: "0"(_res));
	return (int) _res;
}

#endif	 /* __i386__ */


#ifdef __ia64__
#define TAS(lock) tas(lock)

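/*
 * xchg4 atomically exchanges the 4-byte lock word with the constant 1;
 * ret receives the word's previous contents, so a zero result means the
 * lock was free and is now held.
 */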
static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"xchg4 %0=%1,%2;"
		: "=r"(ret), "=m"(*lock)
		: "r"(1), "1"(*lock)
		: "memory");

	return (int) ret;
}

#endif	 /* __ia64__ */


#if defined(__arm__)
#define TAS(lock) tas(lock)

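/*
 * swpb atomically swaps a byte between a register and memory, so _res
 * (initialized to 1) receives the lock byte's previous value: 0 if the
 * lock was free.
 */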
static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__(
		"swpb %0, %0, [%3];"
		: "=r"(_res), "=m"(*lock)
		: "0"(_res), "r"(lock));
	return (int) _res;
}

#endif	 /* __arm__ */

#if defined(__s390__)
/*
 * S/390 Linux
 */
#define TAS(lock)	   tas(lock)

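/*
 * CS (compare and swap) stores 1 into the lock word only if it still
 * contains 0; the word's old value is left in register 0 and copied into
 * _res, so once again 0 means the lock was acquired.
 */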
static inline int
tas(volatile slock_t *lock)
{
	int			_res;

	__asm__ __volatile(
		"la 1,1;"
		"l 2,%2;"
		"slr 0,0;"
		"cs 0,1,0(2);"
		"lr %1,0;"
		: "=m"(lock), "=d"(_res)
		: "m"(lock)
		: "0", "1", "2");

	return (_res);
}

#endif	 /* __s390__ */


#if defined(__sparc__)
#define TAS(lock) tas(lock)

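/*
 * ldstub atomically loads the addressed lock byte into _res and stores
 * 0xFF into it, so a result of 0 means the lock was free and is now held.
 */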
static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__(
		"ldstub [%2], %0;"
		: "=r"(_res), "=m"(*lock)
		: "r"(lock));
	return (int) _res;
}

#endif	 /* __sparc__ */


#if defined(__mc68000__) && defined(__linux__)
#define TAS(lock) tas(lock)

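/*
 * tas atomically tests the lock byte (setting the condition codes) and
 * sets its high bit; sne then records in rv whether the byte was already
 * nonzero, i.e. whether the lock was already held.
 */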
static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__ __volatile__(
		"tas %1;"
		"sne %0;"
		: "=d"(rv), "=m"(*lock)
		: "1"(*lock)
		: "cc");

	return rv;
}

#endif	 /* defined(__mc68000__) && defined(__linux__) */


#if defined(NEED_VAX_TAS_ASM)
/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#define TAS(lock) tas(lock)

typedef unsigned char slock_t;

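/*
 * bbssi (branch on bit set and set, interlocked) atomically tests and sets
 * bit 0 of the lock.  r0 starts out as 1 (assume failure), stays 1 if the
 * bit was already set, and is cleared to 0 when the lock was free, which
 * matches TAS()'s return convention.
 */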
static __inline__ int
tas(volatile slock_t *lock)
{
	register	_res;

	__asm__(
		"movl $1, r0;"
		"bbssi $0, (%1), 1f;"
		"clrl r0;"
		"1: movl r0, %0;"
		: "=r"(_res)
		: "r"(lock)
		: "r0");
	return (int) _res;
}

#endif	 /* NEED_VAX_TAS_ASM */


#if defined(NEED_NS32K_TAS_ASM)
#define TAS(lock) tas(lock)

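/*
 * sbitb tests and sets bit 0 of the lock byte, recording the bit's old
 * value in the F flag; sfsd then stores that flag into _res, so a nonzero
 * result should mean the lock was already held.
 */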
static __inline__ int
tas(volatile slock_t *lock)
{
	register	_res;

	__asm__(
		"sbitb 0, %0;"
		"sfsd %1;"
		: "=m"(*lock), "=r"(_res));
	return (int) _res;
}

#endif	 /* NEED_NS32K_TAS_ASM */



#else							/* !__GNUC__ */

/***************************************************************************
 * All non-gcc inlines
 */

#if defined(NEED_I386_TAS_ASM) && defined(USE_UNIVEL_CC)
#define TAS(lock)	tas(lock)

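/*
 * This is the same xchgb technique as the gcc i386 case above, written in
 * the Univel compiler's asm-function syntax: 255 is loaded into %eax,
 * exchanged with the lock byte, and the byte's old value becomes the
 * function result in %eax, so 0 means the lock was free.
 */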
asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
	pushl %ebx
	movl s_lock, %ebx
	movl $255, %eax
	lock
	xchgb %al, (%ebx)
	popl %ebx
}

#endif	 /* defined(NEED_I386_TAS_ASM) && defined(USE_UNIVEL_CC) */

#endif	 /* defined(__GNUC__) */



/*************************************************************************
 * These are the platforms that do not use inline assembler (and hence
 * have common code for gcc and non-gcc compilers, if both are available).
 */


#if defined(__alpha)

/*
 * Correct multi-processor locking methods are explained in section 5.5.3
 * of the Alpha AXP Architecture Handbook, which at this writing can be
 * found at ftp://ftp.netbsd.org/pub/NetBSD/misc/dec-docs/index.html.
 * For gcc we implement the handbook's code directly with inline assembler.
 */
#if defined(__GNUC__)

#define TAS(lock)  tas(lock)
#define S_UNLOCK(lock)	do { __asm__ volatile ("mb"); *(lock) = 0; } while (0)

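/*
 * ldq_l/stq_c are Alpha's load-locked/store-conditional pair: the stq_c of
 * 1 succeeds only if the lock word has not been written since the ldq_l,
 * otherwise the code branches to the failure label and reports the lock as
 * busy.  The "mb" memory barriers here and in S_UNLOCK keep accesses to the
 * protected data from being reordered across the lock operations.
 */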
static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	__asm__ volatile(
		"ldq   $0, %0;"
		"bne   $0, 2f;"
		"ldq_l %1, %0;"
		"bne   %1, 2f;"
		"mov   1, $0;"
		"stq_c $0, %0;"
		"beq   $0, 2f;"
		"mb;"
		"br 3f;"
		"2: mov   1, %1;"
		"3:"
		: "=m"(*lock), "=r"(_res)
		:
		: "0");

	return (int) _res;
}

#else							/* !defined(__GNUC__) */

/*
 * The Tru64 compiler doesn't support gcc-style inline asm, but it does
 * have some builtin functions that accomplish much the same results.
 * For simplicity, slock_t is defined as long (ie, quadword) on Alpha
 * regardless of the compiler in use.  LOCK_LONG and UNLOCK_LONG only
 * operate on an int (ie, longword), but that's OK as long as we define
 * S_INIT_LOCK to zero out the whole quadword.
 */

#include <alpha/builtins.h>

#define S_INIT_LOCK(lock)  (*(lock) = 0)
#define TAS(lock)		   (__LOCK_LONG_RETRY((lock), 1) == 0)
#define S_UNLOCK(lock)	   __UNLOCK_LONG(lock)

#endif	 /* defined(__GNUC__) */

#endif	 /* __alpha */


#if defined(__hpux)
/*
 * HP-UX (PA-RISC)
 *
 * Note that slock_t on PA-RISC is a structure instead of char
 * (see include/port/hpux.h).
 *
 * A "set" slock_t has a single word cleared.  A "clear" slock_t has
 * all words set to non-zero.  tas() is in tas.s.
 */

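/*
 * The PA-RISC load-and-clear instruction used by tas() operates on a
 * 16-byte-aligned word, which is presumably why slock_t is a four-word
 * struct and why S_LOCK_FREE below inspects the aligned word obtained by
 * rounding the lock address up to a 16-byte boundary.
 */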
#define S_UNLOCK(lock) \
do { \
	volatile slock_t *lock_ = (volatile slock_t *) (lock); \
	lock_->sema[0] = lock_->sema[1] = lock_->sema[2] = lock_->sema[3] = -1; \
} while (0)

#define S_LOCK_FREE(lock)	( *(int *) (((long) (lock) + 15) & ~15) != 0)

#endif	 /* __hpux */


#if defined(__QNX__)
/*
 * QNX 4
 *
 * Note that slock_t under QNX is sem_t instead of char
 */
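/*
 * sem_trywait() returns -1 without blocking when the semaphore is already
 * taken, so TAS() is nonzero exactly when the lock is unavailable;
 * sem_init() with an initial count of 1 leaves the lock free, and
 * sem_post() releases it.
 */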
#define TAS(lock)		(sem_trywait((lock)) < 0)
#define S_UNLOCK(lock)	sem_post((lock))
#define S_INIT_LOCK(lock)		sem_init((lock), 1, 1)
#define S_LOCK_FREE(lock)		((lock)->value)
#endif	 /* __QNX__ */


#if defined(__sgi)
/*
 * SGI IRIX 5
 * slock_t is defined as an unsigned long.  We use the standard SGI
 * mutex API.
 *
 * The following comment is kept for historical reasons, but following its
 * advice is probably not a good idea now that the mutex ABI is supported.
 *
 * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
 * assembly from his NECEWS SVR4 port, but we probably ought to retain this
 * for the R3000 chips out there.
 */
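/*
 * The SGI mutex primitives return the operand's previous value, so TAS()
 * is nonzero when the lock was already set, test_then_and(lock, 0)
 * atomically clears the word, and test_then_add(lock, 0) simply reads it.
 */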
#include "mutex.h"
#define TAS(lock)		(test_and_set(lock,1))
#define S_UNLOCK(lock)	(test_then_and(lock,0))
#define S_INIT_LOCK(lock)		(test_then_and(lock,0))
#define S_LOCK_FREE(lock)		(test_then_add(lock,0) == 0)
#endif	 /* __sgi */

#if defined(sinix)
/*
 * SINIX / Reliant UNIX
 * slock_t is defined as a struct abilock_t, which has a single unsigned long
 * member.  (Basically the same as SGI.)
 */
#define TAS(lock)		(!acquire_lock(lock))
#define S_UNLOCK(lock)	release_lock(lock)
#define S_INIT_LOCK(lock)		init_lock(lock)
#define S_LOCK_FREE(lock)		(stat_lock(lock) == UNLOCKED)
#endif	 /* sinix */


#if defined(_AIX)
/*
 * AIX (POWER)
 *
 * Note that slock_t on POWER/POWER2/PowerPC is int instead of char
 * (see storage/ipc.h).
 */
#define TAS(lock)		cs((int *) (lock), 0, 1)
#endif	 /* _AIX */


#if defined (nextstep)
/*
 * NEXTSTEP (mach)
 * slock_t is defined as a struct mutex.
 */

#define S_LOCK(lock)	mutex_lock(lock)
#define S_UNLOCK(lock)	mutex_unlock(lock)
#define S_INIT_LOCK(lock)		mutex_init(lock)
/* For Mach, we have to delve inside the entrails of `struct mutex'.  Ick! */
#define S_LOCK_FREE(alock)		((alock)->lock == 0)
#endif	 /* nextstep */



#else							/* !HAS_TEST_AND_SET */

/*
 * Fake spinlock implementation using SysV semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!
 */

typedef struct
{
	/* reference to semaphore used to implement this spinlock */
	IpcSemaphoreId semId;
	int			sem;
} slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock)
#define TAS(lock)		tas_sema(lock)

#endif	 /* HAS_TEST_AND_SET */



/****************************************************************************
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	do { \
		if (TAS(lock)) \
			s_lock((lock), __FILE__, __LINE__); \
	} while (0)
#endif	 /* S_LOCK */
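/*
 * The default S_LOCK() inlines only the initial TAS() attempt; the
 * out-of-line s_lock() routine is entered only on contention and handles
 * the spin, sleep, and eventual-timeout logic, so the common uncontended
 * acquisition stays cheap.
 */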

#if !defined(S_LOCK_SLEEP)
#define S_LOCK_SLEEP(lock,spins) \
	s_lock_sleep((spins), 0, (lock), __FILE__, __LINE__)
#endif	 /* S_LOCK_SLEEP */

#if !defined(S_LOCK_SLEEP_INTERVAL)
#define S_LOCK_SLEEP_INTERVAL(lock,spins,microsec) \
	s_lock_sleep((spins), (microsec), (lock), __FILE__, __LINE__)
#endif	 /* S_LOCK_SLEEP_INTERVAL */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)		(*(lock) = 0)
#endif	 /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);		/* in port/.../tas.s, or
												 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */


#endif	 /* S_LOCK_H */