/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a platform/compiler, at the very least
 * implementations for the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * (or even OS provided semaphores) will be used to implement the API.
 *
 * Implement the _u64 variants if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#define INSIDE_ATOMICS_H

#include <limits.h>
/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * common implementations of your platform provide intrinsics. Intrinsics are
 * much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || \
	defined(__aarch64__) || defined(__aarch64)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ia64__) || defined(__ia64)
#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#include "port/atomics/arch-hppa.h"
#endif
/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/* gcc or compatible, including clang and icc */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#include "port/atomics/generic-gcc.h"
#elif defined(WIN32_ONLY_COMPILER)
#include "port/atomics/generic-msvc.h"
#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
#include "port/atomics/generic-acc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#elif (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__)
#include "port/atomics/generic-xlc.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif
/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic**_flag and
 * pg_atomic_*_u32 APIs for platforms without sufficient spinlock and/or
 * atomics support. In the case of spinlock backed atomics the emulation is
 * expected to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"
/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"
/*
 * Provide declarations for all functions here - on most platforms static
 * inlines are used and these aren't necessary, but when static inline is
 * unsupported these will be external functions.
 */
STATIC_IF_INLINE_DECLARE void pg_atomic_init_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr);

STATIC_IF_INLINE_DECLARE void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
								uint32 *expected, uint32 newval);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT

STATIC_IF_INLINE_DECLARE void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr);
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
								uint64 *expected, uint64 newval);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);

#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */
/*
 * pg_compiler_barrier - prevent the compiler from moving code across the
 * barrier.
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()
/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instructions.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()
/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()
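
/*
 * Illustrative sketch only (see README.barrier for the authoritative
 * discussion): the usual pairing of a write barrier with a read barrier,
 * where "msg" and "ready" are hypothetical variables in shared memory.
 *
 * Producer:
 *		msg = 42;
 *		pg_write_barrier();
 *		ready = true;
 *
 * Consumer:
 *		if (ready)
 *		{
 *			pg_read_barrier();
 *			Assert(msg == 42);
 *		}
 *
 * The write barrier keeps the store to "msg" from being reordered after the
 * store to "ready"; the read barrier keeps the load of "msg" from being
 * reordered before the load of "ready". Dropping either barrier would allow
 * the consumer to see "ready" set while "msg" is still stale.
 */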
/*
 * Spinloop delay - allow the CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()
/*
 * The following functions are wrappers around the platform specific
 * implementations of the atomic operations, performing common sanity checks.
 */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_init_flag_impl(ptr);
}
/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
STATIC_IF_INLINE bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_test_set_flag_impl(ptr);
}
/*
 * pg_atomic_unlocked_test_flag - check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_unlocked_test_flag_impl(ptr);
}
/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
STATIC_IF_INLINE void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_clear_flag_impl(ptr);
}
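
/*
 * Taken together the flag operations form a minimal test-and-set lock. A
 * sketch of that usage, assuming a shared "pg_atomic_flag lock" that some
 * backend has initialized with pg_atomic_init_flag(&lock):
 *
 *		while (!pg_atomic_test_set_flag(&lock))
 *		{
 *			while (!pg_atomic_unlocked_test_flag(&lock))
 *				pg_spin_delay();
 *		}
 *		... short critical section ...
 *		pg_atomic_clear_flag(&lock);
 *
 * Spinning on pg_atomic_unlocked_test_flag() avoids hammering the cache
 * line with atomic writes while the lock is held. The acquire semantics of
 * pg_atomic_test_set_flag() and the release semantics of
 * pg_atomic_clear_flag() confine the critical section's memory accesses to
 * the locked region; as the preamble of this file says, real code should
 * prefer the higher level spinlock or lwlock facilities.
 */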
/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_init_u32_impl(ptr, val);
}
/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value as it has been written by this or
 * another process at some point in the past. There's however no cache
 * coherency interaction guaranteeing the value hasn't since been written to
 * again.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader.
 *
 * No barrier semantics.
 */
STATIC_IF_INLINE void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_write_u32_impl(ptr, val);
}
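
/*
 * Since neither pg_atomic_read_u32() nor pg_atomic_write_u32() has barrier
 * semantics, ordering against other memory accesses has to be established
 * explicitly where it matters. A sketch, with a hypothetical plain shared
 * variable "data" published via a pg_atomic_uint32 "flag":
 *
 *		data = expensive_computation();
 *		pg_write_barrier();
 *		pg_atomic_write_u32(&flag, 1);
 *
 * A reader observing pg_atomic_read_u32(&flag) == 1 must issue
 * pg_read_barrier() before loading "data".
 */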
/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);

	return pg_atomic_exchange_u32_impl(ptr, newval);
}
/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Returns true if the values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
							   uint32 *expected, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);
	AssertPointerAlignment(expected, 4);

	return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}
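
/*
 * The typical use is a read-modify-write loop. A sketch that atomically
 * raises a hypothetical "pg_atomic_uint32 var" to at least "newmax":
 *
 *		uint32		old = pg_atomic_read_u32(&var);
 *
 *		while (old < newmax)
 *		{
 *			if (pg_atomic_compare_exchange_u32(&var, &old, newmax))
 *				break;
 *		}
 *
 * No explicit re-read is needed on failure: the failed CAS has already
 * refreshed "old" with the current value of the variable.
 */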
/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_add_u32_impl(ptr, add_);
}
/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}
/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_and_u32_impl(ptr, and_);
}
/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_or_u32_impl(ptr, or_);
}
/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_add_fetch_u32_impl(ptr, add_);
}
/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
STATIC_IF_INLINE uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}
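
/*
 * The fetch_* variants above return the value the variable held before the
 * operation, the *_fetch variants the value it holds afterwards. A sketch
 * with a hypothetical "pg_atomic_uint32 nactive" reference count:
 *
 *		uint32		before = pg_atomic_fetch_add_u32(&nactive, 1);
 *
 *		... use the shared resource ...
 *
 *		if (pg_atomic_sub_fetch_u32(&nactive, 1) == 0)
 *			cleanup_resource();
 *
 * "before" reports the count prior to this increment, while the decrement
 * uses sub_fetch so that exactly one process, the one bringing the count to
 * zero, runs the (hypothetical) cleanup_resource().
 */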
/*
 * The 64 bit operations have the same semantics as their 32bit counterparts
 * if they are available. Check the corresponding 32bit function for
 * documentation.
 */
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
STATIC_IF_INLINE void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	AssertPointerAlignment(ptr, 8);

	pg_atomic_init_u64_impl(ptr, val);
}

STATIC_IF_INLINE uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_read_u64_impl(ptr);
}

STATIC_IF_INLINE void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	AssertPointerAlignment(ptr, 8);
	pg_atomic_write_u64_impl(ptr, val);
}

STATIC_IF_INLINE uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
	AssertPointerAlignment(ptr, 8);

	return pg_atomic_exchange_u64_impl(ptr, newval);
}

STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
							   uint64 *expected, uint64 newval)
{
	AssertPointerAlignment(ptr, 8);
	AssertPointerAlignment(expected, 8);
	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	AssertPointerAlignment(ptr, 8);
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

STATIC_IF_INLINE uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

STATIC_IF_INLINE uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	AssertPointerAlignment(ptr, 8);
	return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

STATIC_IF_INLINE uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	AssertPointerAlignment(ptr, 8);
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}
#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */

#endif   /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */

#undef INSIDE_ATOMICS_H

#endif   /* ATOMICS_H */