/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a new platform/compiler, at the very least
 * implementations of the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 *
 * There exist generic, hardware-independent implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks (or
 * even OS-provided semaphores) will be used to implement the API.
 *
 * Implement the _u64 variants if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * common compilers on your platform provide intrinsics. Intrinsics are much
 * easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || \
	defined(__aarch64__) || defined(__aarch64)
#	include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#	include "port/atomics/arch-x86.h"
#elif defined(__ia64__) || defined(__ia64)
#	include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#	include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#	include "port/atomics/arch-hppa.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/* gcc or compatible, including clang and icc */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#	include "port/atomics/generic-gcc.h"
#elif defined(WIN32_ONLY_COMPILER)
#	include "port/atomics/generic-msvc.h"
#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
#	include "port/atomics/generic-acc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#	include "port/atomics/generic-sunpro.h"
#elif (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__)
#	include "port/atomics/generic-xlc.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
 * pg_atomic_*_u32 APIs for platforms without sufficient spinlock and/or
 * atomics support. In the case of spinlock-backed atomics the emulation is
 * expected to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"

/*
 * Provide declarations for all functions here - on most platforms static
 * inlines are used and these aren't necessary, but when static inline is
 * unsupported these will be external functions.
 */
STATIC_IF_INLINE_DECLARE void pg_atomic_init_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr);

STATIC_IF_INLINE_DECLARE void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
							   uint32 *expected, uint32 newval);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);

#ifdef PG_HAVE_ATOMIC_U64_SUPPORT

STATIC_IF_INLINE_DECLARE void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
							   uint64 *expected, uint64 newval);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);

#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */


/*
 * pg_compiler_barrier - prevent the compiler from moving code across the
 * barrier
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier.  However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier()	pg_compiler_barrier_impl()

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier.  Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier()	pg_memory_barrier_impl()

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier.  Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores.  Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier.  In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier()	pg_read_barrier_impl()
#define pg_write_barrier()	pg_write_barrier_impl()
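
/*
 * Illustrative usage sketch (not part of this header's API): a common use of
 * the read/write barriers is publishing data through a shared flag, pairing a
 * write barrier on the producing side with a read barrier on the consuming
 * side.  The names shared_payload, payload_ready and consume_payload() below
 * are hypothetical.
 *
 *	Writer:
 *		shared_payload = 42;
 *		pg_write_barrier();		-- payload must be visible before the flag
 *		pg_atomic_write_u32(&payload_ready, 1);
 *
 *	Reader:
 *		if (pg_atomic_read_u32(&payload_ready) == 1)
 *		{
 *			pg_read_barrier();	-- order the flag read before the payload read
 *			consume_payload(shared_payload);
 *		}
 *
 * See src/backend/storage/lmgr/README.barrier for a fuller discussion.
 */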

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay()	pg_spin_delay_impl()

/*
 * The following functions are wrappers around the platform specific
 * implementations of the atomic operations, performing common checks.
 */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE_DECLARE void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
STATIC_IF_INLINE_DECLARE bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - check if the flag is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE_DECLARE bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
STATIC_IF_INLINE_DECLARE void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_clear_flag_impl(ptr);
}
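
/*
 * Illustrative usage sketch (not part of this header's API): taken together,
 * the flag operations can serve as a minimal test-and-set lock.  my_flag and
 * the critical-section placeholder are hypothetical; real code should prefer
 * the higher level locking facilities mentioned at the top of this file.
 *
 *	static pg_atomic_flag my_flag;
 *
 *	pg_atomic_init_flag(&my_flag);		-- once, before any concurrent use
 *
 *	while (!pg_atomic_test_set_flag(&my_flag))
 *		pg_spin_delay();		-- busy-wait until we manage to set the flag
 *	... critical section ...
 *	pg_atomic_clear_flag(&my_flag);		-- release, with write barrier
 */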


/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE_DECLARE void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value as it has been written by this or
 * another process at some point in the past. There is, however, no cache
 * coherency interaction guaranteeing the value hasn't since been written to
 * again.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE_DECLARE void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);

	return pg_atomic_exchange_u32_impl(ptr, newval);
}

/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
							   uint32 *expected, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);
	AssertPointerAlignment(expected, 4);

	return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}
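
/*
 * Illustrative usage sketch (not part of this header's API): a typical
 * compare-and-exchange retry loop, here incrementing a counter but saturating
 * at a cap.  The names counter and cap are hypothetical.
 *
 *	uint32		expected = pg_atomic_read_u32(&counter);
 *
 *	do
 *	{
 *		if (expected >= cap)
 *			break;		-- already at the cap, give up
 *	} while (!pg_atomic_compare_exchange_u32(&counter, &expected, expected + 1));
 *
 * A failed compare-exchange leaves the current value in 'expected', so the
 * loop can retry without an extra read.
 */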

/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_add_u32_impl(ptr, add_);
}
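
/*
 * Illustrative usage sketch (not part of this header's API): because the
 * pre-operation value is returned, fetch_add can hand out distinct ticket
 * numbers to concurrent backends.  The name next_ticket is hypothetical.
 *
 *	uint32		my_ticket = pg_atomic_fetch_add_u32(&next_ticket, 1);
 */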

/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_or_u32_impl(ptr, or_);
}

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}
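
/*
 * Illustrative usage sketch (not part of this header's API): since
 * add_fetch/sub_fetch return the post-operation value, they are convenient
 * for reference counting.  refcount and release_resource() are hypothetical.
 *
 *	pg_atomic_add_fetch_u32(&refcount, 1);			-- take a reference
 *
 *	if (pg_atomic_sub_fetch_u32(&refcount, 1) == 0)		-- drop the reference
 *		release_resource();				-- we were the last holder
 */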

/* ----
 * The 64 bit operations have the same semantics as their 32 bit counterparts,
 * if they are available. Check the corresponding 32 bit function for
 * documentation.
 * ----
 */
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT

STATIC_IF_INLINE_DECLARE void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	AssertPointerAlignment(ptr, 8);

	pg_atomic_init_u64_impl(ptr, val);
}

STATIC_IF_INLINE uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_read_u64_impl(ptr);
}

STATIC_IF_INLINE void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	AssertPointerAlignment(ptr, 8);
	pg_atomic_write_u64_impl(ptr, val);
}

STATIC_IF_INLINE uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
	AssertPointerAlignment(ptr, 8);

	return pg_atomic_exchange_u64_impl(ptr, newval);
}

STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
							   uint64 *expected, uint64 newval)
{
	AssertPointerAlignment(ptr, 8);
	AssertPointerAlignment(expected, 8);
	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	AssertPointerAlignment(ptr, 8);
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

STATIC_IF_INLINE uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

STATIC_IF_INLINE uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	AssertPointerAlignment(ptr, 8);
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}

#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */

#endif   /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */

#undef INSIDE_ATOMICS_H

#endif   /* ATOMICS_H */