From 257c082cb6e2f3ad8408ffc8dfd4ad7758fea25f Mon Sep 17 00:00:00 2001
From: ivmai
Date: Wed, 16 Sep 2009 10:47:31 +0000
Subject: [PATCH] 2009-09-16  Ivan Maidanski

* ChangeLog: Remove trailing spaces at EOLn.
* doc/README.txt: Expand all tabs to spaces; remove trailing
  spaces at EOLn; remove multiple trailing blank lines.
* src/atomic_ops.c: Ditto.
* src/atomic_ops.h: Ditto.
* src/atomic_ops/generalize-small.h: Ditto.
* src/atomic_ops/generalize.h: Ditto.
* src/atomic_ops/sysdeps/acquire_release_volatile.h: Ditto.
* src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/all_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/ao_t_is_int.h: Ditto.
* src/atomic_ops/sysdeps/armcc/arm_v6.h: Ditto.
* src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/char_acquire_release_volatile.h: Ditto.
* src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/gcc/alpha.h: Ditto.
* src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
* src/atomic_ops/sysdeps/gcc/hppa.h: Ditto.
* src/atomic_ops/sysdeps/gcc/ia64.h: Ditto.
* src/atomic_ops/sysdeps/gcc/m68k.h: Ditto.
* src/atomic_ops/sysdeps/gcc/mips.h: Ditto.
* src/atomic_ops/sysdeps/gcc/powerpc.h: Ditto.
* src/atomic_ops/sysdeps/gcc/s390.h: Ditto.
* src/atomic_ops/sysdeps/gcc/sparc.h: Ditto.
* src/atomic_ops/sysdeps/gcc/x86.h: Ditto.
* src/atomic_ops/sysdeps/gcc/x86_64.h: Ditto.
* src/atomic_ops/sysdeps/generic_pthread.h: Ditto.
* src/atomic_ops/sysdeps/hpc/hppa.h: Ditto.
* src/atomic_ops/sysdeps/hpc/ia64.h: Ditto.
* src/atomic_ops/sysdeps/ibmc/powerpc.h: Ditto.
* src/atomic_ops/sysdeps/icc/ia64.h: Ditto.
* src/atomic_ops/sysdeps/int_acquire_release_volatile.h: Ditto.
* src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/msftc/arm.h: Ditto.
* src/atomic_ops/sysdeps/msftc/common32_defs.h: Ditto.
* src/atomic_ops/sysdeps/msftc/x86.h: Ditto.
* src/atomic_ops/sysdeps/msftc/x86_64.h: Ditto.
* src/atomic_ops/sysdeps/ordered.h: Ditto.
* src/atomic_ops/sysdeps/ordered_except_wr.h: Ditto.
* src/atomic_ops/sysdeps/read_ordered.h: Ditto.
* src/atomic_ops/sysdeps/short_acquire_release_volatile.h: Ditto.
* src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/short_atomic_load_store.h: Ditto.
* src/atomic_ops/sysdeps/standard_ao_double_t.h: Ditto.
* src/atomic_ops/sysdeps/sunc/x86.h: Ditto.
* src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
* src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h: Ditto.
* src/atomic_ops_stack.c: Ditto.
* src/atomic_ops_stack.h: Ditto.
* src/atomic_ops/sysdeps/gcc/arm.h: Replace non-ASCII quotes in a
  comment.
* src/atomic_ops/sysdeps/gcc/mips.h: Use Unix-style EOLn.
---
 ChangeLog                                     |  104 +++-
 doc/README.txt                                |   78 +--
 src/atomic_ops.c                              |   68 +--
 src/atomic_ops.h                              |  262 +++++-----
 src/atomic_ops/generalize-small.h             |  451 +++++++++---------
 src/atomic_ops/generalize.h                   |  356 +++++-------
 .../sysdeps/acquire_release_volatile.h        |   14 +-
 .../sysdeps/aligned_atomic_load_store.h       |   16 +-
 .../sysdeps/all_aligned_atomic_load_store.h   |    8 +-
 .../sysdeps/all_atomic_load_store.h           |    8 +-
 src/atomic_ops/sysdeps/ao_t_is_int.h          |   47 +-
 src/atomic_ops/sysdeps/armcc/arm_v6.h         |  184 +++---
 src/atomic_ops/sysdeps/atomic_load_store.h    |   16 +-
 .../sysdeps/char_acquire_release_volatile.h   |   14 +-
 .../sysdeps/char_atomic_load_store.h          |   16 +-
 src/atomic_ops/sysdeps/gcc/alpha.h            |   18 +-
 src/atomic_ops/sysdeps/gcc/arm.h              |  272 +++++------
 src/atomic_ops/sysdeps/gcc/hppa.h             |   73 ++-
 src/atomic_ops/sysdeps/gcc/ia64.h             |  140 +++---
 src/atomic_ops/sysdeps/gcc/m68k.h             |   36 +-
 src/atomic_ops/sysdeps/gcc/mips.h             |  214 ++++-----
 src/atomic_ops/sysdeps/gcc/powerpc.h          |   95 ++--
 src/atomic_ops/sysdeps/gcc/s390.h             |   40 +-
 src/atomic_ops/sysdeps/gcc/sparc.h            |   52 +-
 src/atomic_ops/sysdeps/gcc/x86.h              |   80 ++--
 src/atomic_ops/sysdeps/gcc/x86_64.h           |   88 ++--
 src/atomic_ops/sysdeps/generic_pthread.h      |   47 +-
 src/atomic_ops/sysdeps/hpc/hppa.h             |   71 ++-
 src/atomic_ops/sysdeps/hpc/ia64.h             |   41 +-
 src/atomic_ops/sysdeps/ibmc/powerpc.h         |   42 +-
 src/atomic_ops/sysdeps/icc/ia64.h             |   37 +-
 .../sysdeps/int_acquire_release_volatile.h    |   14 +-
 .../sysdeps/int_aligned_atomic_load_store.h   |   16 +-
 .../sysdeps/int_atomic_load_store.h           |   16 +-
 src/atomic_ops/sysdeps/msftc/arm.h            |   40 +-
 src/atomic_ops/sysdeps/msftc/common32_defs.h  |   38 +-
 src/atomic_ops/sysdeps/msftc/x86.h            |   58 +--
 src/atomic_ops/sysdeps/msftc/x86_64.h         |   78 +--
 src/atomic_ops/sysdeps/ordered.h              |   12 +-
 src/atomic_ops/sysdeps/ordered_except_wr.h    |   15 +-
 src/atomic_ops/sysdeps/read_ordered.h         |   12 +-
 .../sysdeps/short_acquire_release_volatile.h  |   14 +-
 .../sysdeps/short_aligned_atomic_load_store.h |   16 +-
 .../sysdeps/short_atomic_load_store.h         |   16 +-
 src/atomic_ops/sysdeps/standard_ao_double_t.h |    6 +-
 src/atomic_ops/sysdeps/sunc/x86.h             |   80 ++--
 src/atomic_ops/sysdeps/sunc/x86_64.h          |   90 ++--
 .../sysdeps/test_and_set_t_is_ao_t.h          |   12 +-
 src/atomic_ops_stack.c                        |  166 +++---
 src/atomic_ops_stack.h                        |   62 +--
 50 files changed, 1884 insertions(+), 1865 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 0b6e54c..e9a3791 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,60 @@
+2009-09-16  Ivan Maidanski
+        * ChangeLog: Remove trailing spaces at EOLn.
+        * doc/README.txt: Expand all tabs to spaces; remove trailing
+        spaces at EOLn; remove multiple trailing blank lines.
+        * src/atomic_ops.c: Ditto.
+        * src/atomic_ops.h: Ditto.
+        * src/atomic_ops/generalize-small.h: Ditto.
+        * src/atomic_ops/generalize.h: Ditto.
+        * src/atomic_ops/sysdeps/acquire_release_volatile.h: Ditto.
+        * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/all_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/ao_t_is_int.h: Ditto.
+        * src/atomic_ops/sysdeps/armcc/arm_v6.h: Ditto.
+        * src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/char_acquire_release_volatile.h: Ditto.
+        * src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/alpha.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/hppa.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/ia64.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/m68k.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/mips.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/powerpc.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/s390.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/sparc.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/x86.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/x86_64.h: Ditto.
+        * src/atomic_ops/sysdeps/generic_pthread.h: Ditto.
+        * src/atomic_ops/sysdeps/hpc/hppa.h: Ditto.
+        * src/atomic_ops/sysdeps/hpc/ia64.h: Ditto.
+        * src/atomic_ops/sysdeps/ibmc/powerpc.h: Ditto.
+        * src/atomic_ops/sysdeps/icc/ia64.h: Ditto.
+        * src/atomic_ops/sysdeps/int_acquire_release_volatile.h: Ditto.
+        * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/msftc/arm.h: Ditto.
+        * src/atomic_ops/sysdeps/msftc/common32_defs.h: Ditto.
+        * src/atomic_ops/sysdeps/msftc/x86.h: Ditto.
+        * src/atomic_ops/sysdeps/msftc/x86_64.h: Ditto.
+        * src/atomic_ops/sysdeps/ordered.h: Ditto.
+        * src/atomic_ops/sysdeps/ordered_except_wr.h: Ditto.
+        * src/atomic_ops/sysdeps/read_ordered.h: Ditto.
+        * src/atomic_ops/sysdeps/short_acquire_release_volatile.h: Ditto.
+        * src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/short_atomic_load_store.h: Ditto.
+        * src/atomic_ops/sysdeps/standard_ao_double_t.h: Ditto.
+        * src/atomic_ops/sysdeps/sunc/x86.h: Ditto.
+        * src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
+        * src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h: Ditto.
+        * src/atomic_ops_stack.c: Ditto.
+        * src/atomic_ops_stack.h: Ditto.
+        * src/atomic_ops/sysdeps/gcc/arm.h: Replace non-ASCII quotes in a
+        comment.
+        * src/atomic_ops/sysdeps/gcc/mips.h: Use Unix-style EOLn.
+
 2009-09-10  Ivan Maidanski (ivmai123.diff)
 
         * src/atomic_ops/sysdeps/msftc/arm.h: Add FIXME for InterlockedOps
@@ -156,23 +212,23 @@ AO_TS_VAL_t for "oldval" (for 64-bit support).
         * src/atomic_ops/sysdeps/gcc/sparc.h (AO_compare_and_swap_full):
         New function implemented.
-
+
 2009-08-12  Hans Boehm (Really Ivan Maidanski)
         (diff107_cvs, resembling diff78 and diff88_cvs)
         * src/atomic_ops/sysdeps/sunc/x86.h: New file.
-        * src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
-        * src/atomic_ops.h (AO_INLINE): Support inlining for DigitalMars,
-        Watcom, Sun C.
-        * src/atomic_ops.h (AO_compiler_barrier): Use intrinsic-based
-        implementation for VC++ v8+ (include before it unless
-        WinCE target); use asm-based barrier implementation for Borland,
-        DigitalMars and Watcom.
-        * src/atomic_ops.h: Fix comment (for x86_64).
-        * src/atomic_ops.h: Include specialized x86.h and x86_64.h arch
-        headers for Sun C (if not AO_USE_PTHREAD_DEFS).
-        * src/atomic_ops.h: Include VC-specific arch headers for Borland,
-        DigitalMars and Watcom (Win32 target only).
+        * src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
+        * src/atomic_ops.h (AO_INLINE): Support inlining for DigitalMars,
+        Watcom, Sun C.
+        * src/atomic_ops.h (AO_compiler_barrier): Use intrinsic-based
+        implementation for VC++ v8+ (include before it unless
+        WinCE target); use asm-based barrier implementation for Borland,
+        DigitalMars and Watcom.
+        * src/atomic_ops.h: Fix comment (for x86_64).
+        * src/atomic_ops.h: Include specialized x86.h and x86_64.h arch
+        headers for Sun C (if not AO_USE_PTHREAD_DEFS).
+        * src/atomic_ops.h: Include VC-specific arch headers for Borland,
+        DigitalMars and Watcom (Win32 target only).
 
 2009-05-27  Hans Boehm (Really Ivan Maidanski)
         (diff87_cvs, resembling diff29, diff68, diff78 partly)
@@ -195,7 +251,7 @@
         * src/atomic_ops/sysdeps/read_ordered.h (AO_nop_read): Ditto.
         * src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h (AO_TS_val):
         Fix comment.
-
+
 2009-02-24  Hans Boehm (Really primarily Earl Chew)
         * src/atomic_ops/sysdeps/gcc/powerpc.h: Add index, update
         modifiers to asms, refine clobbers to "cr0", use
@@ -232,7 +288,7 @@
         * src/atomic_ops.h: Fix comments.
         * src/atomic_ops_stack.c: Fix comments.
         * src/atomic_ops_stack.h: Fix comments.
-
+
 2008-10-20  Hans Boehm (really Andrew Agno)
         * src/atomic_ops/sysdeps/gcc/x86_64.h (AO_int_fetch_and_add_full):
         fix return type.
@@ -240,7 +296,7 @@
 2008-08-21  Hans Boehm
         * config.guess, config.sub, configure: Regenerate/replace.
         Use autoconf 2.61, automake 1.9.6.
-
+
 2008-08-19  Hans Boehm (really Thiemo Seufer)
         * src/atomic_ops/sysdeps/gcc/powerpc.h: Add %U1 (update) to
         lwz instruction.
@@ -283,7 +339,7 @@
 2008-07-10  Hans Boehm
         * src/atomic_ops/sysdeps/gcc/m68k.h: Remove SMP-unsafe
         AO_or_full, and let it be autogenerated instead.
-
+
 2008-07-03  Hans Boehm (Really Thiemo Seufer)
         * src/atomic_ops/sysdeps/gcc/mips.h: Really add mips support,
         fixing a merge accident.
@@ -349,11 +405,11 @@ powerpc.h.
 
 2007-06-26  Hans Boehm (really Luca Barbato)
-        * src/atomic_ops/sysdeps/gcc/powerpc.h (AO_load_acquire): Add
+        * src/atomic_ops/sysdeps/gcc/powerpc.h (AO_load_acquire): Add
         64-bit version.
 
 2007-06-13  Hans Boehm
-        * src/atomic_ops.h: include stddef.h
+        * src/atomic_ops.h: include stddef.h
 
 2007-06-06  Hans Boehm
         * src/atomic_ops/sysdeps/msftc/x86_64.h: New file.
@@ -387,7 +443,7 @@
 2006-07-11  Hans Boehm
         * src/atomic_ops/sysdeps/hpc/ia64.h: Fix typos.
-
+
 2006-03-28  Earl Chew (Agilent)
         * src/atomic_ops/sysdeps/gcc/powerpc.h: Remove unused variable cr.
         * src/atomic_ops/sysdeps/msftc/x86.h:
@@ -395,7 +451,7 @@
         Use inline assembler to generate mfence and byte sized xchg
         Use correct prototype for InterlockedCompareExchange.
         * src/atomic_ops.h: Add test for __PPC__ .
-        * tests/run_parallel.inc: Add simple VxWorks support.
+        * tests/run_parallel.inc: Add simple VxWorks support.
         * tests/test_atomic.c, tests/test_atomic_include.h:
         Add prototypes to silence compiler warnings.
@@ -442,7 +498,7 @@
 2005-03  Hans Boehm
         Fixes for recently introduced bugs.  Update x86 and x86-64 assembly
         syntax to deal with complaints by some recent gcc versions.
-
+
 2005-02  Hans Boehm
         Added libatomic_ops_gpl library with support for mostly lock-free
         stack and malloc().
@@ -455,7 +511,7 @@
 2005-01  Hans Boehm
         * test_and_set_t_is_ao_t.h, test_and_set_t_is_char.h, others:
         Change most platforms to use byte-wide test-and-set locations.
-
+
 2005-01  Hans Boehm
         * ao_t_is_int.h: Add to trivially support int-wide operations
         on platforms with int-sized pointers.
@@ -503,7 +559,7 @@
 2003-12-08  Carlos O'Donell
 
-        * ao_sysdeps/gcc/hppa.h: Define AO_CLEAR macro. Change
+        * ao_sysdeps/gcc/hppa.h: Define AO_CLEAR macro. Change
         AO_pa_clearable_loc type. Add __ldcw, and __ldcw_align
         helper macros. AO_test_and_set_full uses helper macros.

diff --git a/doc/README.txt b/doc/README.txt
index 29c8597..d66905d 100644
--- a/doc/README.txt
+++ b/doc/README.txt
@@ -79,33 +79,33 @@ one of the following, where the corresponding argument and result types
 are also specified:
 
 void nop()
-        No atomic operation.  The barrier may still be useful.
+        No atomic operation.  The barrier may still be useful.
 AO_t load(const volatile AO_t * addr)
-        Atomic load of *addr.
+        Atomic load of *addr.
 void store(volatile AO_t * addr, AO_t new_val)
-        Atomically store new_val to *addr.
+        Atomically store new_val to *addr.
 AO_t fetch_and_add(volatile AO_t *addr, AO_t incr)
-        Atomically add incr to *addr, and return the original value of *addr.
+        Atomically add incr to *addr, and return the original value of *addr.
 AO_t fetch_and_add1(volatile AO_t *addr)
-        Equivalent to AO_fetch_and_add(addr, 1).
+        Equivalent to AO_fetch_and_add(addr, 1).
 AO_t fetch_and_sub1(volatile AO_t *addr)
-        Equivalent to AO_fetch_and_add(addr, (AO_t)(-1)).
+        Equivalent to AO_fetch_and_add(addr, (AO_t)(-1)).
 void or(volatile AO_t *addr, AO_t incr)
-        Atomically or incr into *addr.
+        Atomically or incr into *addr.
 int compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val)
-        Atomically compare *addr to old_val, and replace *addr by new_val
-        if the first comparison succeeds.  Returns nonzero if the comparison
-        succeeded and *addr was updated.
+        Atomically compare *addr to old_val, and replace *addr by new_val
+        if the first comparison succeeds.  Returns nonzero if the comparison
+        succeeded and *addr was updated.
 AO_TS_VAL_t test_and_set(volatile AO_TS_t * addr)
-        Atomically read the binary value at *addr, and set it.  AO_TS_VAL_t
-        is an enumeration type which includes the two values AO_TS_SET and
-        and AO_TS_CLEAR.  An AO_TS_t location is capable of holding an
-        AO_TS_VAL_t, but may be much larger, as dictated by hardware
-        constraints.  Test_and_set logically sets the value to AO_TS_SET.
-        It may be reset to AO_TS_CLEAR with the AO_CLEAR(AO_TS_t *) macro.
-        AO_TS_t locations should be initialized to AO_TS_INITIALIZER.
-        The values of AO_TS_SET and AO_TS_CLEAR are hardware dependent.
-        (On PA-RISC, AO_TS_SET is zero!)
+        Atomically read the binary value at *addr, and set it.  AO_TS_VAL_t
+        is an enumeration type which includes the two values AO_TS_SET and
+        and AO_TS_CLEAR.  An AO_TS_t location is capable of holding an
+        AO_TS_VAL_t, but may be much larger, as dictated by hardware
+        constraints.  Test_and_set logically sets the value to AO_TS_SET.
+        It may be reset to AO_TS_CLEAR with the AO_CLEAR(AO_TS_t *) macro.
+        AO_TS_t locations should be initialized to AO_TS_INITIALIZER.
+        The values of AO_TS_SET and AO_TS_CLEAR are hardware dependent.
+        (On PA-RISC, AO_TS_SET is zero!)
 
 Test_and_set is a more limited version of compare_and_swap.  Its only
 advantage is that it is more easily implementable on some hardware.  It
@@ -121,12 +121,12 @@ where the second replaces
 a double-width replacement, but performs a single-width comparison:
 
 int compare_double_and_swap_double(volatile AO_double_t * addr,
-                                   AO_t old_val1, AO_t old_val2,
-                                   AO_t new_val1, AO_t new_val2);
+                                   AO_t old_val1, AO_t old_val2,
+                                   AO_t new_val1, AO_t new_val2);
 
 int compare_and_swap_double(volatile AO_double_t * addr,
-                            AO_t old_val1,
-                            AO_t new_val1, AO_t new_val2);
+                            AO_t old_val1,
+                            AO_t new_val1, AO_t new_val2);
 
 where AO_double_t is a structure containing AO_val1 and AO_val2 fields,
 both of type AO_t.  For compare_and_swap_double, we compare against
@@ -147,7 +147,7 @@ Ordering suffixes are one of the following:
 
 : No memory barrier.  A plain AO_nop() really does nothing.
 _release: Earlier operations must become visible to other threads
-          before the atomic operation.
+          before the atomic operation.
 _acquire: Later operations must become visible after this operation.
 _read: Subsequent reads must become visible after reads included in
        the atomic operation or preceding it.  Rarely useful for clients?
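[Editorial illustration, not part of the patch: a minimal sketch of how the
operations described in the hunk above are typically combined, assuming the
plain AO_fetch_and_add1/AO_compare_and_swap variants and AO_test_and_set_full
are available on the target (i.e. the corresponding AO_HAVE_ macros are
defined); critical_section is a hypothetical client function.]

    #include "atomic_ops.h"

    static volatile AO_t counter = 0;
    static AO_TS_t lock = AO_TS_INITIALIZER;
    extern void critical_section(void);   /* hypothetical */

    /* Increment the counter, returning its previous value. */
    AO_t bump(void)
    {
      return AO_fetch_and_add1(&counter); /* == AO_fetch_and_add(&counter, 1) */
    }

    /* Atomically double the counter with a compare_and_swap retry loop. */
    void double_counter(void)
    {
      AO_t old;
      do {
        old = AO_load(&counter);
      } while (!AO_compare_and_swap(&counter, old, old * 2));
    }

    /* Spin lock built from test_and_set.  Note the comparison against   */
    /* AO_TS_SET rather than a numeric constant: the values are hardware */
    /* dependent (on PA-RISC, AO_TS_SET is zero).                        */
    void locked(void)
    {
      while (AO_test_and_set_full(&lock) == AO_TS_SET)
        ;                                 /* spin: someone else holds it */
      critical_section();
      AO_CLEAR(&lock);                    /* reset location to AO_TS_CLEAR */
    }

[The _full suffix on the test-and-set supplies the ordering needed for lock
acquisition; the counter examples use plain variants since they need none.]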
@@ -157,23 +157,23 @@ _full: Ordered with respect to both earlier and later memops.
 AO_store_full or AO_nop_full are the normal ways to force a store
 to be ordered with respect to a later load.
 _release_write: Ordered with respect to earlier writes.  This is
-                normally implemented as either a _write or _release
-                barrier.
+                normally implemented as either a _write or _release
+                barrier.
 _dd_acquire_read: Ordered with respect to later reads that are data
-                  dependent on this one.  This is needed on
-                  a pointer read, which is later dereferenced to read a
-                  second value, with the expectation that the second
-                  read is ordered after the first one.  On most architectures,
-                  this is equivalent to no barrier.  (This is very
-                  hard to define precisely.  It should probably be avoided.
-                  A major problem is that optimizers tend to try to
-                  eliminate dependencies from the generated code, since
-                  dependencies force the hardware to execute the code
-                  serially.)
+                  dependent on this one.  This is needed on
+                  a pointer read, which is later dereferenced to read a
+                  second value, with the expectation that the second
+                  read is ordered after the first one.  On most architectures,
+                  this is equivalent to no barrier.  (This is very
+                  hard to define precisely.  It should probably be avoided.
+                  A major problem is that optimizers tend to try to
+                  eliminate dependencies from the generated code, since
+                  dependencies force the hardware to execute the code
+                  serially.)
 _release_read: Ordered with respect to earlier reads.  Useful for
-               implementing read locks.  Can be implemented as _release,
-               but not as _read, since _read groups the current operation
-               with the earlier ones.
+               implementing read locks.  Can be implemented as _release,
+               but not as _read, since _read groups the current operation
+               with the earlier ones.
 
 We assume that if a store is data-dependent on an a previous load, then
 the two are always implicitly ordered.
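[Editorial illustration, not part of the patch: the _release/_acquire pairing
described above, in the initialization-flag idiom that atomic_ops.h itself
recommends (AO_store_release_write paired with an acquiring load); use() is a
hypothetical consumer.]

    #include "atomic_ops.h"

    extern void use(int x, int y);        /* hypothetical */

    static int data_x, data_y;
    static volatile AO_t data_is_initialized = 0;

    void publish(void)
    {
      data_x = 42;
      data_y = 17;
      /* _release_write: the plain stores above must become visible */
      /* to other threads before the flag store does.               */
      AO_store_release_write(&data_is_initialized, 1);
    }

    void consume(void)
    {
      /* The acquiring load keeps the reads below from being        */
      /* reordered ahead of the flag test, so the data is seen      */
      /* fully initialized whenever the test succeeds.              */
      if (AO_load_acquire_read(&data_is_initialized))
        use(data_x, data_y);
    }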
diff --git a/src/atomic_ops.c b/src/atomic_ops.c
index 8f2e491..61f3a42 100644
--- a/src/atomic_ops.c
+++ b/src/atomic_ops.c
@@ -1,23 +1,23 @@
 /*
  * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
- *
+ *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
- *
+ *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
- *
+ *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * SOFTWARE.
 */
@@ -64,7 +64,7 @@ pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /*
 * Out of line compare-and-swap emulation based on test and set.
- *
+ *
 * We use a small table of locks for different compare_and_swap locations.
 * Before we update perform a compare-and-swap, we grab the corresponding
 * lock.  Different locations may hash to the same lock, but since we
@@ -79,14 +79,14 @@ pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
 #define AO_HASH(x) (((unsigned long)(x) >> 12) & (AO_HASH_SIZE-1))
 
 AO_TS_t AO_locks[AO_HASH_SIZE] = {
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
-  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+  AO_TS_INITIALIZER, AO_TS_INITIALIZER,
 };
 
 static AO_T dummy = 1;
@@ -116,10 +116,10 @@ void AO_pause(int n)
 # else
     struct timeval tv;
 
-    /* Short async-signal-safe sleep. */
-    tv.tv_sec = 0;
-    tv.tv_usec = (n > 28? 100000 : (1 << (n - 12)));
-    select(0, 0, 0, 0, &tv);
+    /* Short async-signal-safe sleep. */
+    tv.tv_sec = 0;
+    tv.tv_usec = (n > 28? 100000 : (1 << (n - 12)));
+    select(0, 0, 0, 0, &tv);
 # endif
   }
 }
@@ -151,7 +151,7 @@ AO_INLINE void unlock(volatile AO_TS_t *l)
 static volatile AO_TS_t init_lock = AO_TS_INITIALIZER;
 
 int AO_compare_and_swap_emulation(volatile AO_t *addr, AO_t old,
-                                  AO_t new_val)
+                                  AO_t new_val)
 {
   AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
   int result;
@@ -166,13 +166,13 @@ int AO_compare_and_swap_emulation(volatile AO_t *addr, AO_t old,
       AO_store_release(&initialized, 1);
     }
   sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
-        /* Neither sigprocmask nor pthread_sigmask is 100%     */
-        /* guaranteed to work here.  Sigprocmask is not        */
-        /* guaranteed be thread safe, and pthread_sigmask      */
-        /* is not async-signal-safe.  Under linuxthreads,      */
-        /* sigprocmask may block some pthreads-internal        */
-        /* signals.  So long as we do that for short periods,  */
-        /* we should be OK.                                    */
+        /* Neither sigprocmask nor pthread_sigmask is 100%     */
+        /* guaranteed to work here.  Sigprocmask is not        */
+        /* guaranteed be thread safe, and pthread_sigmask      */
+        /* is not async-signal-safe.  Under linuxthreads,      */
+        /* sigprocmask may block some pthreads-internal        */
+        /* signals.  So long as we do that for short periods,  */
+        /* we should be OK.                                    */
 # endif
   lock(my_lock);
   if (*addr == old)
@@ -190,8 +190,8 @@ int AO_compare_and_swap_emulation(volatile AO_t *addr, AO_t old,
 }
 
 int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
-                                                AO_t old_val1, AO_t old_val2,
-                                                AO_t new_val1, AO_t new_val2)
+                                                AO_t old_val1, AO_t old_val2,
+                                                AO_t new_val1, AO_t new_val2)
 {
   AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
   int result;
@@ -206,13 +206,13 @@ int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
       AO_store_release(&initialized, 1);
     }
   sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
-        /* Neither sigprocmask nor pthread_sigmask is 100%     */
-        /* guaranteed to work here.  Sigprocmask is not        */
-        /* guaranteed be thread safe, and pthread_sigmask      */
-        /* is not async-signal-safe.  Under linuxthreads,      */
-        /* sigprocmask may block some pthreads-internal        */
-        /* signals.  So long as we do that for short periods,  */
-        /* we should be OK.                                    */
+        /* Neither sigprocmask nor pthread_sigmask is 100%     */
+        /* guaranteed to work here.  Sigprocmask is not        */
+        /* guaranteed be thread safe, and pthread_sigmask      */
+        /* is not async-signal-safe.  Under linuxthreads,      */
+        /* sigprocmask may block some pthreads-internal        */
+        /* signals.  So long as we do that for short periods,  */
+        /* we should be OK.                                    */
 # endif
   lock(my_lock);
   if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2)
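[Editorial illustration, not part of the patch: a condensed sketch of the
emulation strategy shown above, with the signal blocking and one-time
initialization elided.  my_cas is a hypothetical name; the real entry point
is AO_compare_and_swap_emulation.]

    #include "atomic_ops.h"

    #define MY_HASH_SIZE 16
    #define MY_HASH(x) (((unsigned long)(x) >> 12) & (MY_HASH_SIZE-1))

    static AO_TS_t my_locks[MY_HASH_SIZE] = {
      AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
      AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
      AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
      AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
    };

    int my_cas(volatile AO_t *addr, AO_t old, AO_t new_val)
    {
      /* Addresses are hashed page-wise onto a small table of        */
      /* test-and-set locks; colliding locations simply share one.   */
      AO_TS_t *my_lock = my_locks + MY_HASH(addr);
      int result;

      while (AO_test_and_set_acquire(my_lock) == AO_TS_SET)
        ;                               /* spin for the bucket lock */
      if (*addr == old) {
        *addr = new_val;
        result = 1;
      } else {
        result = 0;
      }
      AO_CLEAR(my_lock);                /* release (an AO_store_release) */
      return result;
    }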
diff --git a/src/atomic_ops.h b/src/atomic_ops.h
index d2a3399..5edcf1a 100755
--- a/src/atomic_ops.h
+++ b/src/atomic_ops.h
@@ -1,24 +1,24 @@
 /*
  * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
- *
+ *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
- *
+ *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
- *
+ *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+ * SOFTWARE.
+ */
 
 #ifndef ATOMIC_OPS_H
@@ -27,128 +27,128 @@
 #include
 #include
 
-/* We define various atomic operations on memory in a           */
-/* machine-specific way.  Unfortunately, this is complicated    */
-/* by the fact that these may or may not be combined with       */
-/* various memory barriers.  Thus the actual operations we      */
-/* define have the form AO__, for all                           */
-/* plausible combinations of and .                              */
-/* This of course results in a mild combinatorial explosion.    */
-/* To deal with it, we try to generate derived                  */
-/* definitions for as many of the combinations as we can, as    */
-/* automatically as possible.                                   */
-/*                                                              */
-/* Our assumption throughout is that the programmer will        */
-/* specify the least demanding operation and memory barrier     */
-/* that will guarantee correctness for the implementation.      */
-/* Our job is to find the least expensive way to implement it   */
-/* on the applicable hardware.  In many cases that will         */
-/* involve, for example, a stronger memory barrier, or a        */
-/* combination of hardware primitives.                          */
-/*                                                              */
-/* Conventions:                                                 */
-/* "plain" atomic operations are not guaranteed to include      */
-/* a barrier.  The suffix in the name specifies the barrier     */
-/* type.  Suffixes are:                                         */
-/* _release: Earlier operations may not be delayed past it.     */
-/* _acquire: Later operations may not move ahead of it.         */
-/* _read: Subsequent reads must follow this operation and       */
-/*        preceding reads.                                      */
-/* _write: Earlier writes precede both this operation and       */
-/*        later writes.                                         */
+/* We define various atomic operations on memory in a           */
+/* machine-specific way.  Unfortunately, this is complicated    */
+/* by the fact that these may or may not be combined with       */
+/* various memory barriers.  Thus the actual operations we      */
+/* define have the form AO__, for all                           */
+/* plausible combinations of and .                              */
+/* This of course results in a mild combinatorial explosion.    */
+/* To deal with it, we try to generate derived                  */
+/* definitions for as many of the combinations as we can, as    */
+/* automatically as possible.                                   */
+/*                                                              */
+/* Our assumption throughout is that the programmer will        */
+/* specify the least demanding operation and memory barrier     */
+/* that will guarantee correctness for the implementation.      */
+/* Our job is to find the least expensive way to implement it   */
+/* on the applicable hardware.  In many cases that will         */
+/* involve, for example, a stronger memory barrier, or a        */
+/* combination of hardware primitives.                          */
+/*                                                              */
+/* Conventions:                                                 */
+/* "plain" atomic operations are not guaranteed to include      */
+/* a barrier.  The suffix in the name specifies the barrier     */
+/* type.  Suffixes are:                                         */
+/* _release: Earlier operations may not be delayed past it.     */
+/* _acquire: Later operations may not move ahead of it.         */
+/* _read: Subsequent reads must follow this operation and       */
+/*        preceding reads.                                      */
+/* _write: Earlier writes precede both this operation and       */
+/*        later writes.                                         */
 /* _full: Ordered with respect to both earlier and later memops.*/
-/* _release_write: Ordered with respect to earlier writes.      */
-/* _acquire_read: Ordered with respect to later reads.          */
-/*                                                              */
-/* Currently we try to define the following atomic memory       */
-/* operations, in combination with the above barriers:          */
-/* AO_nop                                                       */
-/* AO_load                                                      */
-/* AO_store                                                     */
-/* AO_test_and_set (binary)                                     */
-/* AO_fetch_and_add                                             */
-/* AO_fetch_and_add1                                            */
-/* AO_fetch_and_sub1                                            */
-/* AO_or                                                        */
-/* AO_compare_and_swap                                          */
-/*                                                              */
-/* Note that atomicity guarantees are valid only if both        */
-/* readers and writers use AO_ operations to access the         */
-/* shared value, while ordering constraints are intended to     */
-/* apply all memory operations.  If a location can potentially  */
-/* be accessed simultaneously from multiple threads, and one of */
-/* those accesses may be a write access, then all such          */
-/* accesses to that location should be through AO_ primitives.  */
-/* However if AO_ operations enforce sufficient ordering to     */
-/* ensure that a location x cannot be accessed concurrently,    */
-/* or can only be read concurrently, then x can be accessed     */
-/* via ordinary references and assignments.                     */
-/*                                                              */
-/* Compare_and_exchange takes an address and an expected old    */
-/* value and a new value, and returns an int.  Nonzero          */
-/* indicates that it succeeded.                                 */
-/* Test_and_set takes an address, atomically replaces it by     */
-/* AO_TS_SET, and returns the prior value.                      */
-/* An AO_TS_t location can be reset with the                    */
-/* AO_CLEAR macro, which normally uses AO_store_release.        */
-/* AO_fetch_and_add takes an address and an AO_t increment      */
-/* value.  The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
-/* are provided, since they allow faster implementations on     */
-/* some hardware. AO_or atomically ors an AO_t value into a     */
+/* _release_write: Ordered with respect to earlier writes.      */
+/* _acquire_read: Ordered with respect to later reads.          */
+/*                                                              */
+/* Currently we try to define the following atomic memory       */
+/* operations, in combination with the above barriers:          */
+/* AO_nop                                                       */
+/* AO_load                                                      */
+/* AO_store                                                     */
+/* AO_test_and_set (binary)                                     */
+/* AO_fetch_and_add                                             */
+/* AO_fetch_and_add1                                            */
+/* AO_fetch_and_sub1                                            */
+/* AO_or                                                        */
+/* AO_compare_and_swap                                          */
+/*                                                              */
+/* Note that atomicity guarantees are valid only if both        */
+/* readers and writers use AO_ operations to access the         */
+/* shared value, while ordering constraints are intended to     */
+/* apply all memory operations.  If a location can potentially  */
+/* be accessed simultaneously from multiple threads, and one of */
+/* those accesses may be a write access, then all such          */
+/* accesses to that location should be through AO_ primitives.  */
+/* However if AO_ operations enforce sufficient ordering to     */
+/* ensure that a location x cannot be accessed concurrently,    */
+/* or can only be read concurrently, then x can be accessed     */
+/* via ordinary references and assignments.                     */
+/*                                                              */
+/* Compare_and_exchange takes an address and an expected old    */
+/* value and a new value, and returns an int.  Nonzero          */
+/* indicates that it succeeded.                                 */
+/* Test_and_set takes an address, atomically replaces it by     */
+/* AO_TS_SET, and returns the prior value.                      */
+/* An AO_TS_t location can be reset with the                    */
+/* AO_CLEAR macro, which normally uses AO_store_release.        */
+/* AO_fetch_and_add takes an address and an AO_t increment      */
+/* value.  The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
+/* are provided, since they allow faster implementations on     */
+/* some hardware. AO_or atomically ors an AO_t value into a     */
 /* memory location, but does not provide access to the original.*/
-/*                                                              */
-/* We expect this list to grow slowly over time.                */
-/*                                                              */
-/* Note that AO_nop_full is a full memory barrier.              */
-/*                                                              */
-/* Note that if some data is initialized with                   */
-/* data.x = ...; data.y = ...; ...                              */
-/* AO_store_release_write(&data_is_initialized, 1)              */
-/* then data is guaranteed to be initialized after the test     */
-/* if (AO_load_release_read(&data_is_initialized)) ...          */
-/* succeeds.  Furthermore, this should generate near-optimal    */
-/* code on all common platforms.                                */
-/*                                                              */
-/* All operations operate on unsigned AO_t, which               */
-/* is the natural word size, and usually unsigned long.         */
-/* It is possible to check whether a particular operation op    */
-/* is available on a particular platform by checking whether    */
-/* AO_HAVE_op is defined.  We make heavy use of these macros    */
-/* internally.                                                  */
+/*                                                              */
+/* We expect this list to grow slowly over time.                */
+/*                                                              */
+/* Note that AO_nop_full is a full memory barrier.              */
+/*                                                              */
+/* Note that if some data is initialized with                   */
+/* data.x = ...; data.y = ...; ...                              */
+/* AO_store_release_write(&data_is_initialized, 1)              */
+/* then data is guaranteed to be initialized after the test     */
+/* if (AO_load_release_read(&data_is_initialized)) ...          */
+/* succeeds.  Furthermore, this should generate near-optimal    */
+/* code on all common platforms.                                */
+/*                                                              */
+/* All operations operate on unsigned AO_t, which               */
+/* is the natural word size, and usually unsigned long.         */
+/* It is possible to check whether a particular operation op    */
+/* is available on a particular platform by checking whether    */
+/* AO_HAVE_op is defined.  We make heavy use of these macros    */
+/* internally.                                                  */
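[Editorial illustration, not part of the patch: the AO_HAVE_op convention
described above lends itself to client-side feature tests like the following
sketch.  The CAS fallback loop is an assumption about what a client might
write, not library code.]

    #include "atomic_ops.h"

    AO_t increment(volatile AO_t *p)
    {
    #if defined(AO_HAVE_fetch_and_add1_full)
      return AO_fetch_and_add1_full(p);   /* native primitive available */
    #elif defined(AO_HAVE_compare_and_swap_full)
      AO_t old;                           /* synthesize via a CAS loop */
      do {
        old = *p;
      } while (!AO_compare_and_swap_full(p, old, old + 1));
      return old;
    #else
    # error no usable atomic increment on this platform
    #endif
    }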
 
-/* The rest of this file basically has three sections:         */
-/*                                                              */
-/* Some utility and default definitions.                        */
-/*                                                              */
-/* The architecture dependent section:                          */
-/* This defines atomic operations that have direct hardware     */
-/* support on a particular platform, mostly by including the    */
-/* appropriate compiler- and hardware-dependent file.           */
-/*                                                              */
-/* The synthesis section:                                       */
-/* This tries to define other atomic operations in terms of     */
-/* those that are explicitly available on the platform.         */
-/* This section is hardware independent.                        */
-/* We make no attempt to synthesize operations in ways that     */
-/* effectively introduce locks, except for the debugging/demo   */
-/* pthread-based implementation at the beginning.  A more       */
-/* realistic implementation that falls back to locks could be   */
-/* added as a higher layer.  But that would sacrifice           */
-/* usability from signal handlers.                              */
-/* The synthesis section is implemented almost entirely in      */
-/* atomic_ops_generalize.h.                                     */
+/* The rest of this file basically has three sections:         */
+/*                                                              */
+/* Some utility and default definitions.                        */
+/*                                                              */
+/* The architecture dependent section:                          */
+/* This defines atomic operations that have direct hardware     */
+/* support on a particular platform, mostly by including the    */
+/* appropriate compiler- and hardware-dependent file.           */
+/*                                                              */
+/* The synthesis section:                                       */
+/* This tries to define other atomic operations in terms of     */
+/* those that are explicitly available on the platform.         */
+/* This section is hardware independent.                        */
+/* We make no attempt to synthesize operations in ways that     */
+/* effectively introduce locks, except for the debugging/demo   */
+/* pthread-based implementation at the beginning.  A more       */
+/* realistic implementation that falls back to locks could be   */
+/* added as a higher layer.  But that would sacrifice           */
+/* usability from signal handlers.                              */
+/* The synthesis section is implemented almost entirely in      */
+/* atomic_ops_generalize.h.                                     */
 
-/* Some common defaults.  Overridden for some architectures.    */
+/* Some common defaults.  Overridden for some architectures.    */
 #define AO_t size_t
 
-/* The test_and_set primitive returns an AO_TS_VAL_t value.     */
-/* AO_TS_t is the type of an in-memory test-and-set location.   */
+/* The test_and_set primitive returns an AO_TS_VAL_t value.     */
+/* AO_TS_t is the type of an in-memory test-and-set location.   */
 
 #define AO_TS_INITIALIZER (AO_t)AO_TS_CLEAR
 
-/* Platform-dependent stuff:                                    */
+/* Platform-dependent stuff:                                    */
 #if defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER) \
-        || defined(__DMC__) || defined(__WATCOMC__)
+    || defined(__DMC__) || defined(__WATCOMC__)
 # define AO_INLINE static __inline
 #elif defined(__sun)
 # define AO_INLINE static inline
@@ -159,7 +159,7 @@
 #if defined(__GNUC__) && !defined(__INTEL_COMPILER)
 # define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
 #elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
-        || defined(__WATCOMC__)
+      || defined(__WATCOMC__)
 # if defined(_AMD64_) || _MSC_VER >= 1400
 #   if defined(_WIN32_WCE)
 /* #   include */
@@ -168,12 +168,12 @@
 #   endif
 #   pragma intrinsic(_ReadWriteBarrier)
 #   define AO_compiler_barrier() _ReadWriteBarrier()
-        /* We assume this does not generate a fence instruction.       */
-        /* The documentation is a bit unclear.                         */
+        /* We assume this does not generate a fence instruction.       */
+        /* The documentation is a bit unclear.                         */
 # else
 #   define AO_compiler_barrier() __asm { }
-        /* The preceding implementation may be preferable here too.    */
-        /* But the documentation warns about VC++ 2003 and earlier.    */
+        /* The preceding implementation may be preferable here too.    */
+        /* But the documentation warns about VC++ 2003 and earlier.    */
 # endif
 #elif defined(__INTEL_COMPILER)
 # define AO_compiler_barrier() __memory_barrier() /* Too strong? IA64-only? */
@@ -182,14 +182,14 @@
 #   include
 #   define AO_compiler_barrier() _Asm_sched_fence()
 # else
-    /* FIXME - We dont know how to do this.  This is a guess.   */
-    /* And probably a bad one.                                  */
+    /* FIXME - We dont know how to do this.  This is a guess.   */
+    /* And probably a bad one.                                  */
    static volatile int AO_barrier_dummy;
 #  define AO_compiler_barrier() AO_barrier_dummy = AO_barrier_dummy
 # endif
 #else
-    /* We conjecture that the following usually gives us the right     */
-    /* semantics or an error.                                          */
+    /* We conjecture that the following usually gives us the right     */
+    /* semantics or an error.                                          */
 # define AO_compiler_barrier() asm("")
 #endif
@@ -277,7 +277,7 @@
 #endif
 
 #if defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
-        || (defined(__WATCOMC__) && defined(__NT__))
+    || (defined(__WATCOMC__) && defined(__NT__))
 # if defined(_AMD64_)
 #   include "atomic_ops/sysdeps/msftc/x86_64.h"
 # elif defined(_M_IX86) || defined(x86)
@@ -295,10 +295,10 @@
 # else
 #   error Cannot implement AO_compare_and_swap_full on this architecture.
 # endif
-#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
+#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
 
-/* The most common way to clear a test-and-set location         */
-/* at the end of a critical section.                            */
+/* The most common way to clear a test-and-set location         */
+/* at the end of a critical section.                            */
 #if AO_AO_TS_T && !defined(AO_CLEAR)
 # define AO_CLEAR(addr) AO_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
 #endif
@@ -317,7 +317,7 @@
 # include "atomic_ops/generalize.h"
 #endif
 
-/* For compatibility with version 0.4 and earlier       */
+/* For compatibility with version 0.4 and earlier       */
 #define AO_TS_T AO_TS_t
 #define AO_T AO_t
 #define AO_TS_VAL AO_TS_VAL_t
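[Editorial illustration, not part of the patch: making AO_compiler_barrier
concrete.  As the comments above note, it is expected to emit no fence
instruction; it only keeps the compiler from reordering or caching memory
accesses across it, which suffices for same-processor ordering (e.g. against
a signal handler) but not for cross-processor ordering, where AO_nop_full
would be needed.  The names below are hypothetical.]

    #include "atomic_ops.h"

    static int request_arg;             /* consumed by a signal handler */
    static volatile AO_t request_ready;

    void post_request(int arg)
    {
      request_arg = arg;
      AO_compiler_barrier();            /* compiler may not sink the arg  */
                                        /* store past the flag store      */
      request_ready = 1;
    }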
diff --git a/src/atomic_ops/generalize-small.h b/src/atomic_ops/generalize-small.h
index 2b989a6..067b0f0 100644
--- a/src/atomic_ops/generalize-small.h
+++ b/src/atomic_ops/generalize-small.h
@@ -26,7 +26,7 @@
 {
   unsigned char result = AO_char_load(addr);
   /* Acquire barrier would be useless, since the load could be delayed */
-  /* beyond it.                                                        */
+  /* beyond it.                                                        */
   AO_nop_full();
   return result;
 }
@@ -40,7 +40,7 @@
 {
   unsigned char result = AO_char_load(addr);
   /* Acquire barrier would be useless, since the load could be delayed */
-  /* beyond it.                                                        */
+  /* beyond it.                                                        */
   AO_nop_read();
   return result;
 }
@@ -52,7 +52,7 @@
 # define AO_char_load_full(addr) (AO_nop_full(), AO_char_load_acquire(addr))
 # define AO_HAVE_char_load_full
 #endif
-
+
 #if !defined(AO_HAVE_char_load_acquire_read) && defined(AO_HAVE_char_load_read)
 # define AO_char_load_acquire_read(addr) AO_char_load_read(addr)
 # define AO_HAVE_char_load_acquire_read
@@ -66,13 +66,13 @@
 #ifdef AO_NO_DD_ORDERING
 # if defined(AO_HAVE_char_load_acquire_read)
 #   define AO_char_load_dd_acquire_read(addr) \
-        AO_char_load_acquire_read(addr)
+        AO_char_load_acquire_read(addr)
 #   define AO_HAVE_char_load_dd_acquire_read
 # endif
 #else
 # if defined(AO_HAVE_char_load)
 #   define AO_char_load_dd_acquire_read(addr) \
-        AO_char_load(addr)
+        AO_char_load(addr)
 #   define AO_HAVE_char_load_dd_acquire_read
 # endif
 #endif
@@ -96,9 +96,9 @@
 #endif
 
 #if defined(AO_HAVE_char_store_release) && \
-    !defined(AO_HAVE_char_store_release_write)
+    !defined(AO_HAVE_char_store_release_write)
 # define AO_char_store_release_write(addr, val) \
-        AO_char_store_release(addr,val)
+        AO_char_store_release(addr,val)
 # define AO_HAVE_char_store_release_write
 #endif
@@ -110,14 +110,14 @@
 #if defined(AO_HAVE_char_store) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_store_release)
 # define AO_char_store_release(addr,val) \
-        (AO_nop_full(), AO_char_store(addr,val))
+        (AO_nop_full(), AO_char_store(addr,val))
 # define AO_HAVE_char_store_release
 #endif
 
 #if defined(AO_HAVE_nop_write) && defined(AO_HAVE_char_store) && \
     !defined(AO_HAVE_char_store_write)
 # define AO_char_store_write(addr, val) \
-        (AO_nop_write(), AO_char_store(addr,val))
+        (AO_nop_write(), AO_char_store(addr,val))
 # define AO_HAVE_char_store_write
 #endif
@@ -130,7 +130,7 @@
 #if defined(AO_HAVE_char_store_release) && defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_store_full)
 # define AO_char_store_full(addr, val) \
-        (AO_char_store_release(addr, val), AO_nop_full())
+        (AO_char_store_release(addr, val), AO_nop_full())
 # define AO_HAVE_char_store_full
 #endif
@@ -140,7 +140,7 @@
     !defined(AO_HAVE_char_fetch_and_add_full)
 AO_INLINE AO_t
 AO_char_fetch_and_add_full(volatile unsigned char *addr,
-                           unsigned char incr)
+                           unsigned char incr)
 {
   unsigned char old;
   do
@@ -157,7 +157,7 @@
     !defined(AO_HAVE_char_fetch_and_add_acquire)
 AO_INLINE AO_t
 AO_char_fetch_and_add_acquire(volatile unsigned char *addr,
-                              unsigned char incr)
+                              unsigned char incr)
 {
   unsigned char old;
   do
@@ -174,7 +174,7 @@
     !defined(AO_HAVE_char_fetch_and_add_release)
 AO_INLINE AO_t
 AO_char_fetch_and_add_release(volatile unsigned char *addr,
-                              unsigned char incr)
+                              unsigned char incr)
 {
   unsigned char old;
   do
@@ -190,22 +190,22 @@
 #if defined(AO_HAVE_char_fetch_and_add_full)
 # if !defined(AO_HAVE_char_fetch_and_add_release)
 #   define AO_char_fetch_and_add_release(addr, val) \
-        AO_char_fetch_and_add_full(addr, val)
+        AO_char_fetch_and_add_full(addr, val)
 #   define AO_HAVE_char_fetch_and_add_release
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add_acquire)
 #   define AO_char_fetch_and_add_acquire(addr, val) \
-        AO_char_fetch_and_add_full(addr, val)
+        AO_char_fetch_and_add_full(addr, val)
 #   define AO_HAVE_char_fetch_and_add_acquire
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add_write)
 #   define AO_char_fetch_and_add_write(addr, val) \
-        AO_char_fetch_and_add_full(addr, val)
+        AO_char_fetch_and_add_full(addr, val)
 #   define AO_HAVE_char_fetch_and_add_write
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add_read)
 #   define AO_char_fetch_and_add_read(addr, val) \
-        AO_char_fetch_and_add_full(addr, val)
+        AO_char_fetch_and_add_full(addr, val)
 #   define AO_HAVE_char_fetch_and_add_read
 # endif
 #endif /* AO_HAVE_char_fetch_and_add_full */
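[Editorial illustration, not part of the patch: for the word-sized type the
synthesis mirrors the char functions in the hunks above.  A sketch of the
pattern, with a my_ prefix so as not to suggest this is the library's
verbatim code:]

    #include "atomic_ops.h"

    #if defined(AO_HAVE_compare_and_swap_full) && \
        !defined(AO_HAVE_fetch_and_add_full)
    AO_INLINE AO_t
    my_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
    {
      AO_t old;
      do {
        old = *addr;                    /* re-read on each CAS failure */
      } while (!AO_compare_and_swap_full(addr, old, old + incr));
      return old;                       /* original value, per the API */
    }
    #endif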
@@ -213,25 +213,25 @@
 #if !defined(AO_HAVE_char_fetch_and_add) && \
    defined(AO_HAVE_char_fetch_and_add_release)
 # define AO_char_fetch_and_add(addr, val) \
-        AO_char_fetch_and_add_release(addr, val)
+        AO_char_fetch_and_add_release(addr, val)
 # define AO_HAVE_char_fetch_and_add
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add) && \
    defined(AO_HAVE_char_fetch_and_add_acquire)
 # define AO_char_fetch_and_add(addr, val) \
-        AO_char_fetch_and_add_acquire(addr, val)
+        AO_char_fetch_and_add_acquire(addr, val)
 # define AO_HAVE_char_fetch_and_add
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add) && \
    defined(AO_HAVE_char_fetch_and_add_write)
 # define AO_char_fetch_and_add(addr, val) \
-        AO_char_fetch_and_add_write(addr, val)
+        AO_char_fetch_and_add_write(addr, val)
 # define AO_HAVE_char_fetch_and_add
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add) && \
    defined(AO_HAVE_char_fetch_and_add_read)
 # define AO_char_fetch_and_add(addr, val) \
-        AO_char_fetch_and_add_read(addr, val)
+        AO_char_fetch_and_add_read(addr, val)
 # define AO_HAVE_char_fetch_and_add
 #endif
 
@@ -239,118 +239,118 @@
     defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_fetch_and_add_full)
 # define AO_char_fetch_and_add_full(addr, val) \
-        (AO_nop_full(), AO_char_fetch_and_add_acquire(addr, val))
+        (AO_nop_full(), AO_char_fetch_and_add_acquire(addr, val))
 #endif
 
 #if !defined(AO_HAVE_char_fetch_and_add_release_write) && \
    defined(AO_HAVE_char_fetch_and_add_write)
 # define AO_char_fetch_and_add_release_write(addr, val) \
-        AO_char_fetch_and_add_write(addr, val)
+        AO_char_fetch_and_add_write(addr, val)
 # define AO_HAVE_char_fetch_and_add_release_write
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add_release_write) && \
    defined(AO_HAVE_char_fetch_and_add_release)
 # define AO_char_fetch_and_add_release_write(addr, val) \
-        AO_char_fetch_and_add_release(addr, val)
+        AO_char_fetch_and_add_release(addr, val)
 # define AO_HAVE_char_fetch_and_add_release_write
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add_acquire_read) && \
    defined(AO_HAVE_char_fetch_and_add_read)
 # define AO_char_fetch_and_add_acquire_read(addr, val) \
-        AO_char_fetch_and_add_read(addr, val)
+        AO_char_fetch_and_add_read(addr, val)
 # define AO_HAVE_char_fetch_and_add_acquire_read
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add_acquire_read) && \
    defined(AO_HAVE_char_fetch_and_add_acquire)
 # define AO_char_fetch_and_add_acquire_read(addr, val) \
-        AO_char_fetch_and_add_acquire(addr, val)
+        AO_char_fetch_and_add_acquire(addr, val)
 # define AO_HAVE_char_fetch_and_add_acquire_read
 #endif
 
 #ifdef AO_NO_DD_ORDERING
 # if defined(AO_HAVE_char_fetch_and_add_acquire_read)
 #   define AO_char_fetch_and_add_dd_acquire_read(addr, val) \
-        AO_char_fetch_and_add_acquire_read(addr, val)
+        AO_char_fetch_and_add_acquire_read(addr, val)
 #   define AO_HAVE_char_fetch_and_add_dd_acquire_read
 # endif
 #else
 # if defined(AO_HAVE_char_fetch_and_add)
 #   define AO_char_fetch_and_add_dd_acquire_read(addr, val) \
-        AO_char_fetch_and_add(addr, val)
+        AO_char_fetch_and_add(addr, val)
 #   define AO_HAVE_char_fetch_and_add_dd_acquire_read
 # endif
 #endif
-
+
 /* char_fetch_and_add1 */
 
 #if defined(AO_HAVE_char_fetch_and_add_full) &&\
    !defined(AO_HAVE_char_fetch_and_add1_full)
 # define AO_char_fetch_and_add1_full(addr) \
-        AO_char_fetch_and_add_full(addr,1)
+        AO_char_fetch_and_add_full(addr,1)
 # define AO_HAVE_char_fetch_and_add1_full
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_release) &&\
    !defined(AO_HAVE_char_fetch_and_add1_release)
 # define AO_char_fetch_and_add1_release(addr) \
-        AO_char_fetch_and_add_release(addr,1)
+        AO_char_fetch_and_add_release(addr,1)
 # define AO_HAVE_char_fetch_and_add1_release
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_acquire) &&\
    !defined(AO_HAVE_char_fetch_and_add1_acquire)
 # define AO_char_fetch_and_add1_acquire(addr) \
-        AO_char_fetch_and_add_acquire(addr,1)
+        AO_char_fetch_and_add_acquire(addr,1)
 # define AO_HAVE_char_fetch_and_add1_acquire
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_write) &&\
    !defined(AO_HAVE_char_fetch_and_add1_write)
 # define AO_char_fetch_and_add1_write(addr) \
-        AO_char_fetch_and_add_write(addr,1)
+        AO_char_fetch_and_add_write(addr,1)
 # define AO_HAVE_char_fetch_and_add1_write
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_read) &&\
    !defined(AO_HAVE_char_fetch_and_add1_read)
 # define AO_char_fetch_and_add1_read(addr) \
-        AO_char_fetch_and_add_read(addr,1)
+        AO_char_fetch_and_add_read(addr,1)
 # define AO_HAVE_char_fetch_and_add1_read
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_release_write) &&\
    !defined(AO_HAVE_char_fetch_and_add1_release_write)
 # define AO_char_fetch_and_add1_release_write(addr) \
-        AO_char_fetch_and_add_release_write(addr,1)
+        AO_char_fetch_and_add_release_write(addr,1)
 # define AO_HAVE_char_fetch_and_add1_release_write
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_acquire_read) &&\
    !defined(AO_HAVE_char_fetch_and_add1_acquire_read)
 # define AO_char_fetch_and_add1_acquire_read(addr) \
-        AO_char_fetch_and_add_acquire_read(addr,1)
+        AO_char_fetch_and_add_acquire_read(addr,1)
 # define AO_HAVE_char_fetch_and_add1_acquire_read
 #endif
 #if defined(AO_HAVE_char_fetch_and_add) &&\
    !defined(AO_HAVE_char_fetch_and_add1)
 # define AO_char_fetch_and_add1(addr) \
-        AO_char_fetch_and_add(addr,1)
+        AO_char_fetch_and_add(addr,1)
 # define AO_HAVE_char_fetch_and_add1
 #endif
 
 #if defined(AO_HAVE_char_fetch_and_add1_full)
 # if !defined(AO_HAVE_char_fetch_and_add1_release)
 #   define AO_char_fetch_and_add1_release(addr) \
-        AO_char_fetch_and_add1_full(addr)
+        AO_char_fetch_and_add1_full(addr)
 #   define AO_HAVE_char_fetch_and_add1_release
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add1_acquire)
 #   define AO_char_fetch_and_add1_acquire(addr) \
-        AO_char_fetch_and_add1_full(addr)
+        AO_char_fetch_and_add1_full(addr)
 #   define AO_HAVE_char_fetch_and_add1_acquire
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add1_write)
 #   define AO_char_fetch_and_add1_write(addr) \
-        AO_char_fetch_and_add1_full(addr)
+        AO_char_fetch_and_add1_full(addr)
 #   define AO_HAVE_char_fetch_and_add1_write
 # endif
 # if !defined(AO_HAVE_char_fetch_and_add1_read)
 #   define AO_char_fetch_and_add1_read(addr) \
-        AO_char_fetch_and_add1_full(addr)
+        AO_char_fetch_and_add1_full(addr)
 #   define AO_HAVE_char_fetch_and_add1_read
 # endif
 #endif /* AO_HAVE_char_fetch_and_add1_full */
 
@@ -358,25 +358,25 @@
 #if !defined(AO_HAVE_char_fetch_and_add1) && \
    defined(AO_HAVE_char_fetch_and_add1_release)
 # define AO_char_fetch_and_add1(addr) \
-        AO_char_fetch_and_add1_release(addr)
+        AO_char_fetch_and_add1_release(addr)
 # define AO_HAVE_char_fetch_and_add1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1) && \
    defined(AO_HAVE_char_fetch_and_add1_acquire)
 # define AO_char_fetch_and_add1(addr) \
-        AO_char_fetch_and_add1_acquire(addr)
+        AO_char_fetch_and_add1_acquire(addr)
 # define AO_HAVE_char_fetch_and_add1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1) && \
    defined(AO_HAVE_char_fetch_and_add1_write)
 # define AO_char_fetch_and_add1(addr) \
-        AO_char_fetch_and_add1_write(addr)
+        AO_char_fetch_and_add1_write(addr)
 # define AO_HAVE_char_fetch_and_add1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1) && \
    defined(AO_HAVE_char_fetch_and_add1_read)
 # define AO_char_fetch_and_add1(addr) \
-        AO_char_fetch_and_add1_read(addr)
+        AO_char_fetch_and_add1_read(addr)
 # define AO_HAVE_char_fetch_and_add1
 #endif
 
@@ -384,45 +384,45 @@
     defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_fetch_and_add1_full)
 # define AO_char_fetch_and_add1_full(addr) \
-        (AO_nop_full(), AO_char_fetch_and_add1_acquire(addr))
+        (AO_nop_full(), AO_char_fetch_and_add1_acquire(addr))
 # define AO_HAVE_char_fetch_and_add1_full
 #endif
 
 #if !defined(AO_HAVE_char_fetch_and_add1_release_write) && \
    defined(AO_HAVE_char_fetch_and_add1_write)
 # define AO_char_fetch_and_add1_release_write(addr) \
-        AO_char_fetch_and_add1_write(addr)
+        AO_char_fetch_and_add1_write(addr)
 # define AO_HAVE_char_fetch_and_add1_release_write
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1_release_write) && \
    defined(AO_HAVE_char_fetch_and_add1_release)
 # define AO_char_fetch_and_add1_release_write(addr) \
-        AO_char_fetch_and_add1_release(addr)
+        AO_char_fetch_and_add1_release(addr)
 # define AO_HAVE_char_fetch_and_add1_release_write
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) && \
    defined(AO_HAVE_char_fetch_and_add1_read)
 # define AO_char_fetch_and_add1_acquire_read(addr) \
-        AO_char_fetch_and_add1_read(addr)
+        AO_char_fetch_and_add1_read(addr)
 # define AO_HAVE_char_fetch_and_add1_acquire_read
 #endif
 #if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) && \
    defined(AO_HAVE_char_fetch_and_add1_acquire)
 # define AO_char_fetch_and_add1_acquire_read(addr) \
-        AO_char_fetch_and_add1_acquire(addr)
+        AO_char_fetch_and_add1_acquire(addr)
 # define AO_HAVE_char_fetch_and_add1_acquire_read
 #endif
 
 #ifdef AO_NO_DD_ORDERING
 # if defined(AO_HAVE_char_fetch_and_add1_acquire_read)
 #   define AO_char_fetch_and_add1_dd_acquire_read(addr) \
-        AO_char_fetch_and_add1_acquire_read(addr)
+        AO_char_fetch_and_add1_acquire_read(addr)
 #   define AO_HAVE_char_fetch_and_add1_dd_acquire_read
 # endif
 #else
 # if defined(AO_HAVE_char_fetch_and_add1)
 #   define AO_char_fetch_and_add1_dd_acquire_read(addr) \
-        AO_char_fetch_and_add1(addr)
+        AO_char_fetch_and_add1(addr)
 #   define AO_HAVE_char_fetch_and_add1_dd_acquire_read
 # endif
 #endif
 
@@ -432,71 +432,71 @@
 #if defined(AO_HAVE_char_fetch_and_add_full) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_full)
 # define AO_char_fetch_and_sub1_full(addr) \
-        AO_char_fetch_and_add_full(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_full(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_full
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_release) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_release)
 # define AO_char_fetch_and_sub1_release(addr) \
-        AO_char_fetch_and_add_release(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_release(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_release
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_acquire) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_acquire)
 # define AO_char_fetch_and_sub1_acquire(addr) \
-        AO_char_fetch_and_add_acquire(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_acquire(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_acquire
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_write) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_write)
 # define AO_char_fetch_and_sub1_write(addr) \
-        AO_char_fetch_and_add_write(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_write(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_write
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_read) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_read)
 # define AO_char_fetch_and_sub1_read(addr) \
-        AO_char_fetch_and_add_read(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_read(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_read
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_release_write) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_release_write)
 # define AO_char_fetch_and_sub1_release_write(addr) \
-        AO_char_fetch_and_add_release_write(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_release_write(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_release_write
 #endif
 #if defined(AO_HAVE_char_fetch_and_add_acquire_read) &&\
    !defined(AO_HAVE_char_fetch_and_sub1_acquire_read)
 # define AO_char_fetch_and_sub1_acquire_read(addr) \
-        AO_char_fetch_and_add_acquire_read(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add_acquire_read(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1_acquire_read
 #endif
 #if defined(AO_HAVE_char_fetch_and_add) &&\
    !defined(AO_HAVE_char_fetch_and_sub1)
 # define AO_char_fetch_and_sub1(addr) \
-        AO_char_fetch_and_add(addr,(unsigned char)(-1))
+        AO_char_fetch_and_add(addr,(unsigned char)(-1))
 # define AO_HAVE_char_fetch_and_sub1
 #endif
 
 #if defined(AO_HAVE_char_fetch_and_sub1_full)
 # if !defined(AO_HAVE_char_fetch_and_sub1_release)
 #   define AO_char_fetch_and_sub1_release(addr) \
-        AO_char_fetch_and_sub1_full(addr)
+        AO_char_fetch_and_sub1_full(addr)
 #   define AO_HAVE_char_fetch_and_sub1_release
 # endif
 # if !defined(AO_HAVE_char_fetch_and_sub1_acquire)
 #   define AO_char_fetch_and_sub1_acquire(addr) \
-        AO_char_fetch_and_sub1_full(addr)
+        AO_char_fetch_and_sub1_full(addr)
 #   define AO_HAVE_char_fetch_and_sub1_acquire
 # endif
 # if !defined(AO_HAVE_char_fetch_and_sub1_write)
 #   define AO_char_fetch_and_sub1_write(addr) \
-        AO_char_fetch_and_sub1_full(addr)
+        AO_char_fetch_and_sub1_full(addr)
 #   define AO_HAVE_char_fetch_and_sub1_write
 # endif
 # if !defined(AO_HAVE_char_fetch_and_sub1_read)
 #   define AO_char_fetch_and_sub1_read(addr) \
-        AO_char_fetch_and_sub1_full(addr)
+        AO_char_fetch_and_sub1_full(addr)
 #   define AO_HAVE_char_fetch_and_sub1_read
 # endif
 #endif /* AO_HAVE_char_fetch_and_sub1_full */
 
@@ -504,25 +504,25 @@
 #if !defined(AO_HAVE_char_fetch_and_sub1) && \
    defined(AO_HAVE_char_fetch_and_sub1_release)
 # define AO_char_fetch_and_sub1(addr) \
-        AO_char_fetch_and_sub1_release(addr)
+        AO_char_fetch_and_sub1_release(addr)
 # define AO_HAVE_char_fetch_and_sub1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_sub1) && \
    defined(AO_HAVE_char_fetch_and_sub1_acquire)
 # define AO_char_fetch_and_sub1(addr) \
-        AO_char_fetch_and_sub1_acquire(addr)
+        AO_char_fetch_and_sub1_acquire(addr)
 # define AO_HAVE_char_fetch_and_sub1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_sub1) && \
    defined(AO_HAVE_char_fetch_and_sub1_write)
 # define AO_char_fetch_and_sub1(addr) \
-        AO_char_fetch_and_sub1_write(addr)
+        AO_char_fetch_and_sub1_write(addr)
 # define AO_HAVE_char_fetch_and_sub1
 #endif
 #if !defined(AO_HAVE_char_fetch_and_sub1) && \
    defined(AO_HAVE_char_fetch_and_sub1_read)
 # define AO_char_fetch_and_sub1(addr) \
-        AO_char_fetch_and_sub1_read(addr)
+        AO_char_fetch_and_sub1_read(addr)
 # define AO_HAVE_char_fetch_and_sub1
 #endif
 
@@ -530,45 +530,45 @@
     defined(AO_HAVE_nop_full) && \
     !defined(AO_HAVE_char_fetch_and_sub1_full)
 # define AO_char_fetch_and_sub1_full(addr) \
-        (AO_nop_full(), AO_char_fetch_and_sub1_acquire(addr))
+        (AO_nop_full(), AO_char_fetch_and_sub1_acquire(addr))
 # define AO_HAVE_char_fetch_and_sub1_full
#endif #if !defined(AO_HAVE_char_fetch_and_sub1_release_write) && \ defined(AO_HAVE_char_fetch_and_sub1_write) # define AO_char_fetch_and_sub1_release_write(addr) \ - AO_char_fetch_and_sub1_write(addr) + AO_char_fetch_and_sub1_write(addr) # define AO_HAVE_char_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_char_fetch_and_sub1_release_write) && \ defined(AO_HAVE_char_fetch_and_sub1_release) # define AO_char_fetch_and_sub1_release_write(addr) \ - AO_char_fetch_and_sub1_release(addr) + AO_char_fetch_and_sub1_release(addr) # define AO_HAVE_char_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_char_fetch_and_sub1_read) # define AO_char_fetch_and_sub1_acquire_read(addr) \ - AO_char_fetch_and_sub1_read(addr) + AO_char_fetch_and_sub1_read(addr) # define AO_HAVE_char_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_char_fetch_and_sub1_acquire) # define AO_char_fetch_and_sub1_acquire_read(addr) \ - AO_char_fetch_and_sub1_acquire(addr) + AO_char_fetch_and_sub1_acquire(addr) # define AO_HAVE_char_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_fetch_and_sub1_acquire_read) # define AO_char_fetch_and_sub1_dd_acquire_read(addr) \ - AO_char_fetch_and_sub1_acquire_read(addr) + AO_char_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_char_fetch_and_sub1_dd_acquire_read # endif #else # if defined(AO_HAVE_char_fetch_and_sub1) # define AO_char_fetch_and_sub1_dd_acquire_read(addr) \ - AO_char_fetch_and_sub1(addr) + AO_char_fetch_and_sub1(addr) # define AO_HAVE_char_fetch_and_sub1_dd_acquire_read # endif #endif @@ -601,7 +601,7 @@ { unsigned short result = AO_short_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. */ AO_nop_full(); return result; } @@ -615,7 +615,7 @@ { unsigned short result = AO_short_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. 
*/ AO_nop_read(); return result; } @@ -627,7 +627,7 @@ # define AO_short_load_full(addr) (AO_nop_full(), AO_short_load_acquire(addr)) # define AO_HAVE_short_load_full #endif - + #if !defined(AO_HAVE_short_load_acquire_read) && defined(AO_HAVE_short_load_read) # define AO_short_load_acquire_read(addr) AO_short_load_read(addr) # define AO_HAVE_short_load_acquire_read @@ -641,13 +641,13 @@ #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_load_acquire_read) # define AO_short_load_dd_acquire_read(addr) \ - AO_short_load_acquire_read(addr) + AO_short_load_acquire_read(addr) # define AO_HAVE_short_load_dd_acquire_read # endif #else # if defined(AO_HAVE_short_load) # define AO_short_load_dd_acquire_read(addr) \ - AO_short_load(addr) + AO_short_load(addr) # define AO_HAVE_short_load_dd_acquire_read # endif #endif @@ -671,9 +671,9 @@ #endif #if defined(AO_HAVE_short_store_release) && \ - !defined(AO_HAVE_short_store_release_write) + !defined(AO_HAVE_short_store_release_write) # define AO_short_store_release_write(addr, val) \ - AO_short_store_release(addr,val) + AO_short_store_release(addr,val) # define AO_HAVE_short_store_release_write #endif @@ -685,14 +685,14 @@ #if defined(AO_HAVE_short_store) && defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_short_store_release) # define AO_short_store_release(addr,val) \ - (AO_nop_full(), AO_short_store(addr,val)) + (AO_nop_full(), AO_short_store(addr,val)) # define AO_HAVE_short_store_release #endif #if defined(AO_HAVE_nop_write) && defined(AO_HAVE_short_store) && \ !defined(AO_HAVE_short_store_write) # define AO_short_store_write(addr, val) \ - (AO_nop_write(), AO_short_store(addr,val)) + (AO_nop_write(), AO_short_store(addr,val)) # define AO_HAVE_short_store_write #endif @@ -705,7 +705,7 @@ #if defined(AO_HAVE_short_store_release) && defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_short_store_full) # define AO_short_store_full(addr, val) \ - (AO_short_store_release(addr, val), AO_nop_full()) + (AO_short_store_release(addr, val), AO_nop_full()) # define AO_HAVE_short_store_full #endif @@ -715,7 +715,7 @@ !defined(AO_HAVE_short_fetch_and_add_full) AO_INLINE AO_t AO_short_fetch_and_add_full(volatile unsigned short *addr, - unsigned short incr) + unsigned short incr) { unsigned short old; do @@ -732,7 +732,7 @@ !defined(AO_HAVE_short_fetch_and_add_acquire) AO_INLINE AO_t AO_short_fetch_and_add_acquire(volatile unsigned short *addr, - unsigned short incr) + unsigned short incr) { unsigned short old; do @@ -749,7 +749,7 @@ !defined(AO_HAVE_short_fetch_and_add_release) AO_INLINE AO_t AO_short_fetch_and_add_release(volatile unsigned short *addr, - unsigned short incr) + unsigned short incr) { unsigned short old; do @@ -765,22 +765,22 @@ #if defined(AO_HAVE_short_fetch_and_add_full) # if !defined(AO_HAVE_short_fetch_and_add_release) # define AO_short_fetch_and_add_release(addr, val) \ - AO_short_fetch_and_add_full(addr, val) + AO_short_fetch_and_add_full(addr, val) # define AO_HAVE_short_fetch_and_add_release # endif # if !defined(AO_HAVE_short_fetch_and_add_acquire) # define AO_short_fetch_and_add_acquire(addr, val) \ - AO_short_fetch_and_add_full(addr, val) + AO_short_fetch_and_add_full(addr, val) # define AO_HAVE_short_fetch_and_add_acquire # endif # if !defined(AO_HAVE_short_fetch_and_add_write) # define AO_short_fetch_and_add_write(addr, val) \ - AO_short_fetch_and_add_full(addr, val) + AO_short_fetch_and_add_full(addr, val) # define AO_HAVE_short_fetch_and_add_write # endif # if !defined(AO_HAVE_short_fetch_and_add_read) # define 
AO_short_fetch_and_add_read(addr, val) \ - AO_short_fetch_and_add_full(addr, val) + AO_short_fetch_and_add_full(addr, val) # define AO_HAVE_short_fetch_and_add_read # endif #endif /* AO_HAVE_short_fetch_and_add_full */ @@ -788,25 +788,25 @@ #if !defined(AO_HAVE_short_fetch_and_add) && \ defined(AO_HAVE_short_fetch_and_add_release) # define AO_short_fetch_and_add(addr, val) \ - AO_short_fetch_and_add_release(addr, val) + AO_short_fetch_and_add_release(addr, val) # define AO_HAVE_short_fetch_and_add #endif #if !defined(AO_HAVE_short_fetch_and_add) && \ defined(AO_HAVE_short_fetch_and_add_acquire) # define AO_short_fetch_and_add(addr, val) \ - AO_short_fetch_and_add_acquire(addr, val) + AO_short_fetch_and_add_acquire(addr, val) # define AO_HAVE_short_fetch_and_add #endif #if !defined(AO_HAVE_short_fetch_and_add) && \ defined(AO_HAVE_short_fetch_and_add_write) # define AO_short_fetch_and_add(addr, val) \ - AO_short_fetch_and_add_write(addr, val) + AO_short_fetch_and_add_write(addr, val) # define AO_HAVE_short_fetch_and_add #endif #if !defined(AO_HAVE_short_fetch_and_add) && \ defined(AO_HAVE_short_fetch_and_add_read) # define AO_short_fetch_and_add(addr, val) \ - AO_short_fetch_and_add_read(addr, val) + AO_short_fetch_and_add_read(addr, val) # define AO_HAVE_short_fetch_and_add #endif @@ -814,118 +814,118 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_short_fetch_and_add_full) # define AO_short_fetch_and_add_full(addr, val) \ - (AO_nop_full(), AO_short_fetch_and_add_acquire(addr, val)) + (AO_nop_full(), AO_short_fetch_and_add_acquire(addr, val)) #endif #if !defined(AO_HAVE_short_fetch_and_add_release_write) && \ defined(AO_HAVE_short_fetch_and_add_write) # define AO_short_fetch_and_add_release_write(addr, val) \ - AO_short_fetch_and_add_write(addr, val) + AO_short_fetch_and_add_write(addr, val) # define AO_HAVE_short_fetch_and_add_release_write #endif #if !defined(AO_HAVE_short_fetch_and_add_release_write) && \ defined(AO_HAVE_short_fetch_and_add_release) # define AO_short_fetch_and_add_release_write(addr, val) \ - AO_short_fetch_and_add_release(addr, val) + AO_short_fetch_and_add_release(addr, val) # define AO_HAVE_short_fetch_and_add_release_write #endif #if !defined(AO_HAVE_short_fetch_and_add_acquire_read) && \ defined(AO_HAVE_short_fetch_and_add_read) # define AO_short_fetch_and_add_acquire_read(addr, val) \ - AO_short_fetch_and_add_read(addr, val) + AO_short_fetch_and_add_read(addr, val) # define AO_HAVE_short_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_short_fetch_and_add_acquire_read) && \ defined(AO_HAVE_short_fetch_and_add_acquire) # define AO_short_fetch_and_add_acquire_read(addr, val) \ - AO_short_fetch_and_add_acquire(addr, val) + AO_short_fetch_and_add_acquire(addr, val) # define AO_HAVE_short_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_fetch_and_add_acquire_read) # define AO_short_fetch_and_add_dd_acquire_read(addr, val) \ - AO_short_fetch_and_add_acquire_read(addr, val) + AO_short_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_short_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_short_fetch_and_add) # define AO_short_fetch_and_add_dd_acquire_read(addr, val) \ - AO_short_fetch_and_add(addr, val) + AO_short_fetch_and_add(addr, val) # define AO_HAVE_short_fetch_and_add_dd_acquire_read # endif #endif - + /* short_fetch_and_add1 */ #if defined(AO_HAVE_short_fetch_and_add_full) &&\ !defined(AO_HAVE_short_fetch_and_add1_full) # define AO_short_fetch_and_add1_full(addr) \ - 
AO_short_fetch_and_add_full(addr,1) + AO_short_fetch_and_add_full(addr,1) # define AO_HAVE_short_fetch_and_add1_full #endif #if defined(AO_HAVE_short_fetch_and_add_release) &&\ !defined(AO_HAVE_short_fetch_and_add1_release) # define AO_short_fetch_and_add1_release(addr) \ - AO_short_fetch_and_add_release(addr,1) + AO_short_fetch_and_add_release(addr,1) # define AO_HAVE_short_fetch_and_add1_release #endif #if defined(AO_HAVE_short_fetch_and_add_acquire) &&\ !defined(AO_HAVE_short_fetch_and_add1_acquire) # define AO_short_fetch_and_add1_acquire(addr) \ - AO_short_fetch_and_add_acquire(addr,1) + AO_short_fetch_and_add_acquire(addr,1) # define AO_HAVE_short_fetch_and_add1_acquire #endif #if defined(AO_HAVE_short_fetch_and_add_write) &&\ !defined(AO_HAVE_short_fetch_and_add1_write) # define AO_short_fetch_and_add1_write(addr) \ - AO_short_fetch_and_add_write(addr,1) + AO_short_fetch_and_add_write(addr,1) # define AO_HAVE_short_fetch_and_add1_write #endif #if defined(AO_HAVE_short_fetch_and_add_read) &&\ !defined(AO_HAVE_short_fetch_and_add1_read) # define AO_short_fetch_and_add1_read(addr) \ - AO_short_fetch_and_add_read(addr,1) + AO_short_fetch_and_add_read(addr,1) # define AO_HAVE_short_fetch_and_add1_read #endif #if defined(AO_HAVE_short_fetch_and_add_release_write) &&\ !defined(AO_HAVE_short_fetch_and_add1_release_write) # define AO_short_fetch_and_add1_release_write(addr) \ - AO_short_fetch_and_add_release_write(addr,1) + AO_short_fetch_and_add_release_write(addr,1) # define AO_HAVE_short_fetch_and_add1_release_write #endif #if defined(AO_HAVE_short_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_short_fetch_and_add1_acquire_read) # define AO_short_fetch_and_add1_acquire_read(addr) \ - AO_short_fetch_and_add_acquire_read(addr,1) + AO_short_fetch_and_add_acquire_read(addr,1) # define AO_HAVE_short_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_short_fetch_and_add) &&\ !defined(AO_HAVE_short_fetch_and_add1) # define AO_short_fetch_and_add1(addr) \ - AO_short_fetch_and_add(addr,1) + AO_short_fetch_and_add(addr,1) # define AO_HAVE_short_fetch_and_add1 #endif #if defined(AO_HAVE_short_fetch_and_add1_full) # if !defined(AO_HAVE_short_fetch_and_add1_release) # define AO_short_fetch_and_add1_release(addr) \ - AO_short_fetch_and_add1_full(addr) + AO_short_fetch_and_add1_full(addr) # define AO_HAVE_short_fetch_and_add1_release # endif # if !defined(AO_HAVE_short_fetch_and_add1_acquire) # define AO_short_fetch_and_add1_acquire(addr) \ - AO_short_fetch_and_add1_full(addr) + AO_short_fetch_and_add1_full(addr) # define AO_HAVE_short_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_short_fetch_and_add1_write) # define AO_short_fetch_and_add1_write(addr) \ - AO_short_fetch_and_add1_full(addr) + AO_short_fetch_and_add1_full(addr) # define AO_HAVE_short_fetch_and_add1_write # endif # if !defined(AO_HAVE_short_fetch_and_add1_read) # define AO_short_fetch_and_add1_read(addr) \ - AO_short_fetch_and_add1_full(addr) + AO_short_fetch_and_add1_full(addr) # define AO_HAVE_short_fetch_and_add1_read # endif #endif /* AO_HAVE_short_fetch_and_add1_full */ @@ -933,25 +933,25 @@ #if !defined(AO_HAVE_short_fetch_and_add1) && \ defined(AO_HAVE_short_fetch_and_add1_release) # define AO_short_fetch_and_add1(addr) \ - AO_short_fetch_and_add1_release(addr) + AO_short_fetch_and_add1_release(addr) # define AO_HAVE_short_fetch_and_add1 #endif #if !defined(AO_HAVE_short_fetch_and_add1) && \ defined(AO_HAVE_short_fetch_and_add1_acquire) # define AO_short_fetch_and_add1(addr) \ - AO_short_fetch_and_add1_acquire(addr) + 
AO_short_fetch_and_add1_acquire(addr) # define AO_HAVE_short_fetch_and_add1 #endif #if !defined(AO_HAVE_short_fetch_and_add1) && \ defined(AO_HAVE_short_fetch_and_add1_write) # define AO_short_fetch_and_add1(addr) \ - AO_short_fetch_and_add1_write(addr) + AO_short_fetch_and_add1_write(addr) # define AO_HAVE_short_fetch_and_add1 #endif #if !defined(AO_HAVE_short_fetch_and_add1) && \ defined(AO_HAVE_short_fetch_and_add1_read) # define AO_short_fetch_and_add1(addr) \ - AO_short_fetch_and_add1_read(addr) + AO_short_fetch_and_add1_read(addr) # define AO_HAVE_short_fetch_and_add1 #endif @@ -959,45 +959,45 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_short_fetch_and_add1_full) # define AO_short_fetch_and_add1_full(addr) \ - (AO_nop_full(), AO_short_fetch_and_add1_acquire(addr)) + (AO_nop_full(), AO_short_fetch_and_add1_acquire(addr)) # define AO_HAVE_short_fetch_and_add1_full #endif #if !defined(AO_HAVE_short_fetch_and_add1_release_write) && \ defined(AO_HAVE_short_fetch_and_add1_write) # define AO_short_fetch_and_add1_release_write(addr) \ - AO_short_fetch_and_add1_write(addr) + AO_short_fetch_and_add1_write(addr) # define AO_HAVE_short_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_short_fetch_and_add1_release_write) && \ defined(AO_HAVE_short_fetch_and_add1_release) # define AO_short_fetch_and_add1_release_write(addr) \ - AO_short_fetch_and_add1_release(addr) + AO_short_fetch_and_add1_release(addr) # define AO_HAVE_short_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_short_fetch_and_add1_read) # define AO_short_fetch_and_add1_acquire_read(addr) \ - AO_short_fetch_and_add1_read(addr) + AO_short_fetch_and_add1_read(addr) # define AO_HAVE_short_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_short_fetch_and_add1_acquire) # define AO_short_fetch_and_add1_acquire_read(addr) \ - AO_short_fetch_and_add1_acquire(addr) + AO_short_fetch_and_add1_acquire(addr) # define AO_HAVE_short_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_fetch_and_add1_acquire_read) # define AO_short_fetch_and_add1_dd_acquire_read(addr) \ - AO_short_fetch_and_add1_acquire_read(addr) + AO_short_fetch_and_add1_acquire_read(addr) # define AO_HAVE_short_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_short_fetch_and_add1) # define AO_short_fetch_and_add1_dd_acquire_read(addr) \ - AO_short_fetch_and_add1(addr) + AO_short_fetch_and_add1(addr) # define AO_HAVE_short_fetch_and_add1_dd_acquire_read # endif #endif @@ -1007,71 +1007,71 @@ #if defined(AO_HAVE_short_fetch_and_add_full) &&\ !defined(AO_HAVE_short_fetch_and_sub1_full) # define AO_short_fetch_and_sub1_full(addr) \ - AO_short_fetch_and_add_full(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_full(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_full #endif #if defined(AO_HAVE_short_fetch_and_add_release) &&\ !defined(AO_HAVE_short_fetch_and_sub1_release) # define AO_short_fetch_and_sub1_release(addr) \ - AO_short_fetch_and_add_release(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_release(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_release #endif #if defined(AO_HAVE_short_fetch_and_add_acquire) &&\ !defined(AO_HAVE_short_fetch_and_sub1_acquire) # define AO_short_fetch_and_sub1_acquire(addr) \ - AO_short_fetch_and_add_acquire(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_acquire(addr,(unsigned short)(-1)) # define 
AO_HAVE_short_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_short_fetch_and_add_write) &&\ !defined(AO_HAVE_short_fetch_and_sub1_write) # define AO_short_fetch_and_sub1_write(addr) \ - AO_short_fetch_and_add_write(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_write(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_write #endif #if defined(AO_HAVE_short_fetch_and_add_read) &&\ !defined(AO_HAVE_short_fetch_and_sub1_read) # define AO_short_fetch_and_sub1_read(addr) \ - AO_short_fetch_and_add_read(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_read(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_read #endif #if defined(AO_HAVE_short_fetch_and_add_release_write) &&\ !defined(AO_HAVE_short_fetch_and_sub1_release_write) # define AO_short_fetch_and_sub1_release_write(addr) \ - AO_short_fetch_and_add_release_write(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_release_write(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_short_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) # define AO_short_fetch_and_sub1_acquire_read(addr) \ - AO_short_fetch_and_add_acquire_read(addr,(unsigned short)(-1)) + AO_short_fetch_and_add_acquire_read(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_short_fetch_and_add) &&\ !defined(AO_HAVE_short_fetch_and_sub1) # define AO_short_fetch_and_sub1(addr) \ - AO_short_fetch_and_add(addr,(unsigned short)(-1)) + AO_short_fetch_and_add(addr,(unsigned short)(-1)) # define AO_HAVE_short_fetch_and_sub1 #endif #if defined(AO_HAVE_short_fetch_and_sub1_full) # if !defined(AO_HAVE_short_fetch_and_sub1_release) # define AO_short_fetch_and_sub1_release(addr) \ - AO_short_fetch_and_sub1_full(addr) + AO_short_fetch_and_sub1_full(addr) # define AO_HAVE_short_fetch_and_sub1_release # endif # if !defined(AO_HAVE_short_fetch_and_sub1_acquire) # define AO_short_fetch_and_sub1_acquire(addr) \ - AO_short_fetch_and_sub1_full(addr) + AO_short_fetch_and_sub1_full(addr) # define AO_HAVE_short_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_short_fetch_and_sub1_write) # define AO_short_fetch_and_sub1_write(addr) \ - AO_short_fetch_and_sub1_full(addr) + AO_short_fetch_and_sub1_full(addr) # define AO_HAVE_short_fetch_and_sub1_write # endif # if !defined(AO_HAVE_short_fetch_and_sub1_read) # define AO_short_fetch_and_sub1_read(addr) \ - AO_short_fetch_and_sub1_full(addr) + AO_short_fetch_and_sub1_full(addr) # define AO_HAVE_short_fetch_and_sub1_read # endif #endif /* AO_HAVE_short_fetch_and_sub1_full */ @@ -1079,25 +1079,25 @@ #if !defined(AO_HAVE_short_fetch_and_sub1) && \ defined(AO_HAVE_short_fetch_and_sub1_release) # define AO_short_fetch_and_sub1(addr) \ - AO_short_fetch_and_sub1_release(addr) + AO_short_fetch_and_sub1_release(addr) # define AO_HAVE_short_fetch_and_sub1 #endif #if !defined(AO_HAVE_short_fetch_and_sub1) && \ defined(AO_HAVE_short_fetch_and_sub1_acquire) # define AO_short_fetch_and_sub1(addr) \ - AO_short_fetch_and_sub1_acquire(addr) + AO_short_fetch_and_sub1_acquire(addr) # define AO_HAVE_short_fetch_and_sub1 #endif #if !defined(AO_HAVE_short_fetch_and_sub1) && \ defined(AO_HAVE_short_fetch_and_sub1_write) # define AO_short_fetch_and_sub1(addr) \ - AO_short_fetch_and_sub1_write(addr) + AO_short_fetch_and_sub1_write(addr) # define AO_HAVE_short_fetch_and_sub1 #endif #if !defined(AO_HAVE_short_fetch_and_sub1) && \ defined(AO_HAVE_short_fetch_and_sub1_read) # define 
AO_short_fetch_and_sub1(addr) \ - AO_short_fetch_and_sub1_read(addr) + AO_short_fetch_and_sub1_read(addr) # define AO_HAVE_short_fetch_and_sub1 #endif @@ -1105,45 +1105,45 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_short_fetch_and_sub1_full) # define AO_short_fetch_and_sub1_full(addr) \ - (AO_nop_full(), AO_short_fetch_and_sub1_acquire(addr)) + (AO_nop_full(), AO_short_fetch_and_sub1_acquire(addr)) # define AO_HAVE_short_fetch_and_sub1_full #endif #if !defined(AO_HAVE_short_fetch_and_sub1_release_write) && \ defined(AO_HAVE_short_fetch_and_sub1_write) # define AO_short_fetch_and_sub1_release_write(addr) \ - AO_short_fetch_and_sub1_write(addr) + AO_short_fetch_and_sub1_write(addr) # define AO_HAVE_short_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_short_fetch_and_sub1_release_write) && \ defined(AO_HAVE_short_fetch_and_sub1_release) # define AO_short_fetch_and_sub1_release_write(addr) \ - AO_short_fetch_and_sub1_release(addr) + AO_short_fetch_and_sub1_release(addr) # define AO_HAVE_short_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_short_fetch_and_sub1_read) # define AO_short_fetch_and_sub1_acquire_read(addr) \ - AO_short_fetch_and_sub1_read(addr) + AO_short_fetch_and_sub1_read(addr) # define AO_HAVE_short_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_short_fetch_and_sub1_acquire) # define AO_short_fetch_and_sub1_acquire_read(addr) \ - AO_short_fetch_and_sub1_acquire(addr) + AO_short_fetch_and_sub1_acquire(addr) # define AO_HAVE_short_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_fetch_and_sub1_acquire_read) # define AO_short_fetch_and_sub1_dd_acquire_read(addr) \ - AO_short_fetch_and_sub1_acquire_read(addr) + AO_short_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_short_fetch_and_sub1_dd_acquire_read # endif #else # if defined(AO_HAVE_short_fetch_and_sub1) # define AO_short_fetch_and_sub1_dd_acquire_read(addr) \ - AO_short_fetch_and_sub1(addr) + AO_short_fetch_and_sub1(addr) # define AO_HAVE_short_fetch_and_sub1_dd_acquire_read # endif #endif @@ -1176,7 +1176,7 @@ { unsigned int result = AO_int_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. */ AO_nop_full(); return result; } @@ -1190,7 +1190,7 @@ { unsigned int result = AO_int_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. 
*/ AO_nop_read(); return result; } @@ -1202,7 +1202,7 @@ # define AO_int_load_full(addr) (AO_nop_full(), AO_int_load_acquire(addr)) # define AO_HAVE_int_load_full #endif - + #if !defined(AO_HAVE_int_load_acquire_read) && defined(AO_HAVE_int_load_read) # define AO_int_load_acquire_read(addr) AO_int_load_read(addr) # define AO_HAVE_int_load_acquire_read @@ -1216,13 +1216,13 @@ #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_load_acquire_read) # define AO_int_load_dd_acquire_read(addr) \ - AO_int_load_acquire_read(addr) + AO_int_load_acquire_read(addr) # define AO_HAVE_int_load_dd_acquire_read # endif #else # if defined(AO_HAVE_int_load) # define AO_int_load_dd_acquire_read(addr) \ - AO_int_load(addr) + AO_int_load(addr) # define AO_HAVE_int_load_dd_acquire_read # endif #endif @@ -1246,9 +1246,9 @@ #endif #if defined(AO_HAVE_int_store_release) && \ - !defined(AO_HAVE_int_store_release_write) + !defined(AO_HAVE_int_store_release_write) # define AO_int_store_release_write(addr, val) \ - AO_int_store_release(addr,val) + AO_int_store_release(addr,val) # define AO_HAVE_int_store_release_write #endif @@ -1260,14 +1260,14 @@ #if defined(AO_HAVE_int_store) && defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_int_store_release) # define AO_int_store_release(addr,val) \ - (AO_nop_full(), AO_int_store(addr,val)) + (AO_nop_full(), AO_int_store(addr,val)) # define AO_HAVE_int_store_release #endif #if defined(AO_HAVE_nop_write) && defined(AO_HAVE_int_store) && \ !defined(AO_HAVE_int_store_write) # define AO_int_store_write(addr, val) \ - (AO_nop_write(), AO_int_store(addr,val)) + (AO_nop_write(), AO_int_store(addr,val)) # define AO_HAVE_int_store_write #endif @@ -1280,7 +1280,7 @@ #if defined(AO_HAVE_int_store_release) && defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_int_store_full) # define AO_int_store_full(addr, val) \ - (AO_int_store_release(addr, val), AO_nop_full()) + (AO_int_store_release(addr, val), AO_nop_full()) # define AO_HAVE_int_store_full #endif @@ -1290,7 +1290,7 @@ !defined(AO_HAVE_int_fetch_and_add_full) AO_INLINE AO_t AO_int_fetch_and_add_full(volatile unsigned int *addr, - unsigned int incr) + unsigned int incr) { unsigned int old; do @@ -1307,7 +1307,7 @@ !defined(AO_HAVE_int_fetch_and_add_acquire) AO_INLINE AO_t AO_int_fetch_and_add_acquire(volatile unsigned int *addr, - unsigned int incr) + unsigned int incr) { unsigned int old; do @@ -1324,7 +1324,7 @@ !defined(AO_HAVE_int_fetch_and_add_release) AO_INLINE AO_t AO_int_fetch_and_add_release(volatile unsigned int *addr, - unsigned int incr) + unsigned int incr) { unsigned int old; do @@ -1340,22 +1340,22 @@ #if defined(AO_HAVE_int_fetch_and_add_full) # if !defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release(addr, val) \ - AO_int_fetch_and_add_full(addr, val) + AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_release # endif # if !defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add_acquire(addr, val) \ - AO_int_fetch_and_add_full(addr, val) + AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_acquire # endif # if !defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add_write(addr, val) \ - AO_int_fetch_and_add_full(addr, val) + AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_write # endif # if !defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add_read(addr, val) \ - AO_int_fetch_and_add_full(addr, val) + AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_read # endif 
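(The int_load and int_store hunks repeat the fence-placement rule used at every size: an acquire load puts its barrier after the load, since -- as the comment in the hunk notes -- a barrier issued before the load could not stop the load itself from being delayed past it; symmetrically, a release store puts its barrier before the store. A sketch of both, assuming only AO_nop_full() and plain loads and stores are available; the demo_* names are illustrative, not part of the library:

    /* Acquire load: plain load first, then a fence, so that later      */
    /* accesses cannot be performed before the load itself.             */
    AO_INLINE unsigned int
    demo_int_load_acquire(const volatile unsigned int *addr)
    {
      unsigned int result = *addr;  /* stands in for AO_int_load(addr) */
      AO_nop_full();                /* fence after the load            */
      return result;
    }

    /* Release store: fence first, then the plain store, so that        */
    /* earlier accesses are visible before the store is.                */
    AO_INLINE void
    demo_int_store_release(volatile unsigned int *addr, unsigned int val)
    {
      AO_nop_full();                /* fence before the store */
      *addr = val;
    }

The _full variants compose the same pieces: store_full is store_release followed by AO_nop_full(), and load_full is AO_nop_full() followed by load_acquire.)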
#endif /* AO_HAVE_int_fetch_and_add_full */ @@ -1363,25 +1363,25 @@ #if !defined(AO_HAVE_int_fetch_and_add) && \ defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add(addr, val) \ - AO_int_fetch_and_add_release(addr, val) + AO_int_fetch_and_add_release(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) && \ defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add(addr, val) \ - AO_int_fetch_and_add_acquire(addr, val) + AO_int_fetch_and_add_acquire(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) && \ defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add(addr, val) \ - AO_int_fetch_and_add_write(addr, val) + AO_int_fetch_and_add_write(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) && \ defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add(addr, val) \ - AO_int_fetch_and_add_read(addr, val) + AO_int_fetch_and_add_read(addr, val) # define AO_HAVE_int_fetch_and_add #endif @@ -1389,118 +1389,118 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_int_fetch_and_add_full) # define AO_int_fetch_and_add_full(addr, val) \ - (AO_nop_full(), AO_int_fetch_and_add_acquire(addr, val)) + (AO_nop_full(), AO_int_fetch_and_add_acquire(addr, val)) #endif #if !defined(AO_HAVE_int_fetch_and_add_release_write) && \ defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add_release_write(addr, val) \ - AO_int_fetch_and_add_write(addr, val) + AO_int_fetch_and_add_write(addr, val) # define AO_HAVE_int_fetch_and_add_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add_release_write) && \ defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release_write(addr, val) \ - AO_int_fetch_and_add_release(addr, val) + AO_int_fetch_and_add_release(addr, val) # define AO_HAVE_int_fetch_and_add_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add_acquire_read) && \ defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add_acquire_read(addr, val) \ - AO_int_fetch_and_add_read(addr, val) + AO_int_fetch_and_add_read(addr, val) # define AO_HAVE_int_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_add_acquire_read) && \ defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add_acquire_read(addr, val) \ - AO_int_fetch_and_add_acquire(addr, val) + AO_int_fetch_and_add_acquire(addr, val) # define AO_HAVE_int_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_add_acquire_read) # define AO_int_fetch_and_add_dd_acquire_read(addr, val) \ - AO_int_fetch_and_add_acquire_read(addr, val) + AO_int_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_int_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_and_add) # define AO_int_fetch_and_add_dd_acquire_read(addr, val) \ - AO_int_fetch_and_add(addr, val) + AO_int_fetch_and_add(addr, val) # define AO_HAVE_int_fetch_and_add_dd_acquire_read # endif #endif - + /* int_fetch_and_add1 */ #if defined(AO_HAVE_int_fetch_and_add_full) &&\ !defined(AO_HAVE_int_fetch_and_add1_full) # define AO_int_fetch_and_add1_full(addr) \ - AO_int_fetch_and_add_full(addr,1) + AO_int_fetch_and_add_full(addr,1) # define AO_HAVE_int_fetch_and_add1_full #endif #if defined(AO_HAVE_int_fetch_and_add_release) &&\ !defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release(addr) \ - AO_int_fetch_and_add_release(addr,1) + 
AO_int_fetch_and_add_release(addr,1) # define AO_HAVE_int_fetch_and_add1_release #endif #if defined(AO_HAVE_int_fetch_and_add_acquire) &&\ !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ - AO_int_fetch_and_add_acquire(addr,1) + AO_int_fetch_and_add_acquire(addr,1) # define AO_HAVE_int_fetch_and_add1_acquire #endif #if defined(AO_HAVE_int_fetch_and_add_write) &&\ !defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_write(addr) \ - AO_int_fetch_and_add_write(addr,1) + AO_int_fetch_and_add_write(addr,1) # define AO_HAVE_int_fetch_and_add1_write #endif #if defined(AO_HAVE_int_fetch_and_add_read) &&\ !defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_read(addr) \ - AO_int_fetch_and_add_read(addr,1) + AO_int_fetch_and_add_read(addr,1) # define AO_HAVE_int_fetch_and_add1_read #endif #if defined(AO_HAVE_int_fetch_and_add_release_write) &&\ !defined(AO_HAVE_int_fetch_and_add1_release_write) # define AO_int_fetch_and_add1_release_write(addr) \ - AO_int_fetch_and_add_release_write(addr,1) + AO_int_fetch_and_add_release_write(addr,1) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if defined(AO_HAVE_int_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_int_fetch_and_add1_acquire_read) # define AO_int_fetch_and_add1_acquire_read(addr) \ - AO_int_fetch_and_add_acquire_read(addr,1) + AO_int_fetch_and_add_acquire_read(addr,1) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_int_fetch_and_add) &&\ !defined(AO_HAVE_int_fetch_and_add1) # define AO_int_fetch_and_add1(addr) \ - AO_int_fetch_and_add(addr,1) + AO_int_fetch_and_add(addr,1) # define AO_HAVE_int_fetch_and_add1 #endif #if defined(AO_HAVE_int_fetch_and_add1_full) # if !defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release(addr) \ - AO_int_fetch_and_add1_full(addr) + AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_release # endif # if !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ - AO_int_fetch_and_add1_full(addr) + AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_write(addr) \ - AO_int_fetch_and_add1_full(addr) + AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_write # endif # if !defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_read(addr) \ - AO_int_fetch_and_add1_full(addr) + AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_read # endif #endif /* AO_HAVE_int_fetch_and_add1_full */ @@ -1508,25 +1508,25 @@ #if !defined(AO_HAVE_int_fetch_and_add1) && \ defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1(addr) \ - AO_int_fetch_and_add1_release(addr) + AO_int_fetch_and_add1_release(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) && \ defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1(addr) \ - AO_int_fetch_and_add1_acquire(addr) + AO_int_fetch_and_add1_acquire(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) && \ defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1(addr) \ - AO_int_fetch_and_add1_write(addr) + AO_int_fetch_and_add1_write(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) && \ defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1(addr) \ - 
AO_int_fetch_and_add1_read(addr) + AO_int_fetch_and_add1_read(addr) # define AO_HAVE_int_fetch_and_add1 #endif @@ -1534,45 +1534,45 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_int_fetch_and_add1_full) # define AO_int_fetch_and_add1_full(addr) \ - (AO_nop_full(), AO_int_fetch_and_add1_acquire(addr)) + (AO_nop_full(), AO_int_fetch_and_add1_acquire(addr)) # define AO_HAVE_int_fetch_and_add1_full #endif #if !defined(AO_HAVE_int_fetch_and_add1_release_write) && \ defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_release_write(addr) \ - AO_int_fetch_and_add1_write(addr) + AO_int_fetch_and_add1_write(addr) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add1_release_write) && \ defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release_write(addr) \ - AO_int_fetch_and_add1_release(addr) + AO_int_fetch_and_add1_release(addr) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_acquire_read(addr) \ - AO_int_fetch_and_add1_read(addr) + AO_int_fetch_and_add1_read(addr) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire_read(addr) \ - AO_int_fetch_and_add1_acquire(addr) + AO_int_fetch_and_add1_acquire(addr) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_add1_acquire_read) # define AO_int_fetch_and_add1_dd_acquire_read(addr) \ - AO_int_fetch_and_add1_acquire_read(addr) + AO_int_fetch_and_add1_acquire_read(addr) # define AO_HAVE_int_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_and_add1) # define AO_int_fetch_and_add1_dd_acquire_read(addr) \ - AO_int_fetch_and_add1(addr) + AO_int_fetch_and_add1(addr) # define AO_HAVE_int_fetch_and_add1_dd_acquire_read # endif #endif @@ -1582,71 +1582,71 @@ #if defined(AO_HAVE_int_fetch_and_add_full) &&\ !defined(AO_HAVE_int_fetch_and_sub1_full) # define AO_int_fetch_and_sub1_full(addr) \ - AO_int_fetch_and_add_full(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_full(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_full #endif #if defined(AO_HAVE_int_fetch_and_add_release) &&\ !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ - AO_int_fetch_and_add_release(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_release(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_release #endif #if defined(AO_HAVE_int_fetch_and_add_acquire) &&\ !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ - AO_int_fetch_and_add_acquire(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_acquire(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_int_fetch_and_add_write) &&\ !defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_write(addr) \ - AO_int_fetch_and_add_write(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_write(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_write #endif #if defined(AO_HAVE_int_fetch_and_add_read) &&\ !defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_read(addr) \ - AO_int_fetch_and_add_read(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_read(addr,(unsigned int)(-1)) # define 
AO_HAVE_int_fetch_and_sub1_read #endif #if defined(AO_HAVE_int_fetch_and_add_release_write) &&\ !defined(AO_HAVE_int_fetch_and_sub1_release_write) # define AO_int_fetch_and_sub1_release_write(addr) \ - AO_int_fetch_and_add_release_write(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_release_write(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_int_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) # define AO_int_fetch_and_sub1_acquire_read(addr) \ - AO_int_fetch_and_add_acquire_read(addr,(unsigned int)(-1)) + AO_int_fetch_and_add_acquire_read(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_int_fetch_and_add) &&\ !defined(AO_HAVE_int_fetch_and_sub1) # define AO_int_fetch_and_sub1(addr) \ - AO_int_fetch_and_add(addr,(unsigned int)(-1)) + AO_int_fetch_and_add(addr,(unsigned int)(-1)) # define AO_HAVE_int_fetch_and_sub1 #endif #if defined(AO_HAVE_int_fetch_and_sub1_full) # if !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ - AO_int_fetch_and_sub1_full(addr) + AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_release # endif # if !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ - AO_int_fetch_and_sub1_full(addr) + AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_write(addr) \ - AO_int_fetch_and_sub1_full(addr) + AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_write # endif # if !defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_read(addr) \ - AO_int_fetch_and_sub1_full(addr) + AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_read # endif #endif /* AO_HAVE_int_fetch_and_sub1_full */ @@ -1654,25 +1654,25 @@ #if !defined(AO_HAVE_int_fetch_and_sub1) && \ defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1(addr) \ - AO_int_fetch_and_sub1_release(addr) + AO_int_fetch_and_sub1_release(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) && \ defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1(addr) \ - AO_int_fetch_and_sub1_acquire(addr) + AO_int_fetch_and_sub1_acquire(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) && \ defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1(addr) \ - AO_int_fetch_and_sub1_write(addr) + AO_int_fetch_and_sub1_write(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) && \ defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1(addr) \ - AO_int_fetch_and_sub1_read(addr) + AO_int_fetch_and_sub1_read(addr) # define AO_HAVE_int_fetch_and_sub1 #endif @@ -1680,46 +1680,45 @@ defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_int_fetch_and_sub1_full) # define AO_int_fetch_and_sub1_full(addr) \ - (AO_nop_full(), AO_int_fetch_and_sub1_acquire(addr)) + (AO_nop_full(), AO_int_fetch_and_sub1_acquire(addr)) # define AO_HAVE_int_fetch_and_sub1_full #endif #if !defined(AO_HAVE_int_fetch_and_sub1_release_write) && \ defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_release_write(addr) \ - AO_int_fetch_and_sub1_write(addr) + AO_int_fetch_and_sub1_write(addr) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if 
!defined(AO_HAVE_int_fetch_and_sub1_release_write) && \ defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release_write(addr) \ - AO_int_fetch_and_sub1_release(addr) + AO_int_fetch_and_sub1_release(addr) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_acquire_read(addr) \ - AO_int_fetch_and_sub1_read(addr) + AO_int_fetch_and_sub1_read(addr) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire_read(addr) \ - AO_int_fetch_and_sub1_acquire(addr) + AO_int_fetch_and_sub1_acquire(addr) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_sub1_acquire_read) # define AO_int_fetch_and_sub1_dd_acquire_read(addr) \ - AO_int_fetch_and_sub1_acquire_read(addr) + AO_int_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_int_fetch_and_sub1_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_and_sub1) # define AO_int_fetch_and_sub1_dd_acquire_read(addr) \ - AO_int_fetch_and_sub1(addr) + AO_int_fetch_and_sub1(addr) # define AO_HAVE_int_fetch_and_sub1_dd_acquire_read # endif #endif - diff --git a/src/atomic_ops/generalize.h b/src/atomic_ops/generalize.h index 45950c7..05dc508 100644 --- a/src/atomic_ops/generalize.h +++ b/src/atomic_ops/generalize.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Generalize atomic operations for atomic_ops.h. 
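(The generalize.h hunks that follow carry the comment explaining the strategy for fetch_and_add: each variant is built from the matching compare_and_swap variant where possible, to avoid introducing extra barriers. The fragments visible in the hunks ("AO_t old; do ...") belong to the classic compare-and-swap retry loop; reconstructed as a sketch, with the demo_ name illustrative and the loop shape as in the surrounding hunks:

    AO_INLINE AO_t
    demo_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
    {
      AO_t old;
      do
        {
          old = *addr;              /* snapshot the current value */
        }
      while (!AO_compare_and_swap_full(addr, old, old + incr));
      /* A failed CAS means another thread updated *addr in the         */
      /* meantime; loop and retry with a fresh snapshot.                */
      return old;                   /* return the pre-increment value */
    }

The same loop is emitted per barrier variant (_acquire, _release, ...), each calling the correspondingly ordered compare_and_swap.)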
@@ -49,27 +49,27 @@ #if AO_CHAR_TS_T # define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) \ - AO_char_compare_and_swap_full(a,o,n) + AO_char_compare_and_swap_full(a,o,n) # define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \ - AO_char_compare_and_swap_acquire(a,o,n) + AO_char_compare_and_swap_acquire(a,o,n) # define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \ - AO_char_compare_and_swap_release(a,o,n) + AO_char_compare_and_swap_release(a,o,n) # define AO_TS_COMPARE_AND_SWAP(a,o,n) \ - AO_char_compare_and_swap(a,o,n) + AO_char_compare_and_swap(a,o,n) #endif #if AO_AO_TS_T # define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) \ - AO_compare_and_swap_full(a,o,n) + AO_compare_and_swap_full(a,o,n) # define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \ - AO_compare_and_swap_acquire(a,o,n) + AO_compare_and_swap_acquire(a,o,n) # define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \ - AO_compare_and_swap_release(a,o,n) + AO_compare_and_swap_release(a,o,n) # define AO_TS_COMPARE_AND_SWAP(a,o,n) \ - AO_compare_and_swap(a,o,n) + AO_compare_and_swap(a,o,n) #endif -/* Generate test_and_set_full, if necessary and possible. */ +/* Generate test_and_set_full, if necessary and possible. */ #if !defined(AO_HAVE_test_and_set) && \ !defined(AO_HAVE_test_and_set_release) && \ !defined(AO_HAVE_test_and_set_acquire) && \ @@ -202,7 +202,7 @@ { AO_t result = AO_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. */ AO_nop_full(); return result; } @@ -216,7 +216,7 @@ { AO_t result = AO_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ - /* beyond it. */ + /* beyond it. */ AO_nop_read(); return result; } @@ -228,7 +228,7 @@ # define AO_load_full(addr) (AO_nop_full(), AO_load_acquire(addr)) # define AO_HAVE_load_full #endif - + #if !defined(AO_HAVE_load_acquire_read) && defined(AO_HAVE_load_read) # define AO_load_acquire_read(addr) AO_load_read(addr) # define AO_HAVE_load_acquire_read @@ -304,31 +304,31 @@ /* NEC LE-IT: Test and set */ #if defined(AO_HAVE_test_and_set) && \ - defined(AO_HAVE_nop_full) && \ + defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_test_and_set_release) -# define AO_test_and_set_release(addr) \ - (AO_nop_full(), AO_test_and_set(addr)) +# define AO_test_and_set_release(addr) \ + (AO_nop_full(), AO_test_and_set(addr)) # define AO_HAVE_test_and_set_release #endif #if defined(AO_HAVE_test_and_set) && \ - defined(AO_HAVE_nop_full) && \ + defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_test_and_set_acquire) AO_INLINE AO_TS_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { - AO_TS_t res = AO_test_and_set(addr); - AO_nop_full(); - return res; -} + AO_TS_t res = AO_test_and_set(addr); + AO_nop_full(); + return res; +} # define AO_HAVE_test_and_set_acquire #endif - + /* Fetch_and_add */ -/* We first try to implement fetch_and_add variants in terms */ -/* of the corresponding compare_and_swap variants to minimize */ -/* adding barriers. */ +/* We first try to implement fetch_and_add variants in terms */ +/* of the corresponding compare_and_swap variants to minimize */ +/* adding barriers. 
*/ #if defined(AO_HAVE_compare_and_swap_full) && \ !defined(AO_HAVE_fetch_and_add_full) AO_INLINE AO_t @@ -396,22 +396,22 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if defined(AO_HAVE_fetch_and_add_full) # if !defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add_release(addr, val) \ - AO_fetch_and_add_full(addr, val) + AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_release # endif # if !defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add_acquire(addr, val) \ - AO_fetch_and_add_full(addr, val) + AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_acquire # endif # if !defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add_write(addr, val) \ - AO_fetch_and_add_full(addr, val) + AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_write # endif # if !defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add_read(addr, val) \ - AO_fetch_and_add_full(addr, val) + AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_read # endif #endif /* AO_HAVE_fetch_and_add_full */ @@ -419,25 +419,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_fetch_and_add) && \ defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add(addr, val) \ - AO_fetch_and_add_release(addr, val) + AO_fetch_and_add_release(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) && \ defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add(addr, val) \ - AO_fetch_and_add_acquire(addr, val) + AO_fetch_and_add_acquire(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) && \ defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add(addr, val) \ - AO_fetch_and_add_write(addr, val) + AO_fetch_and_add_write(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) && \ defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add(addr, val) \ - AO_fetch_and_add_read(addr, val) + AO_fetch_and_add_read(addr, val) # define AO_HAVE_fetch_and_add #endif @@ -445,49 +445,49 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_fetch_and_add_full) # define AO_fetch_and_add_full(addr, val) \ - (AO_nop_full(), AO_fetch_and_add_acquire(addr, val)) + (AO_nop_full(), AO_fetch_and_add_acquire(addr, val)) # define AO_HAVE_fetch_and_add_full #endif #if !defined(AO_HAVE_fetch_and_add_release_write) && \ defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add_release_write(addr, val) \ - AO_fetch_and_add_write(addr, val) + AO_fetch_and_add_write(addr, val) # define AO_HAVE_fetch_and_add_release_write #endif #if !defined(AO_HAVE_fetch_and_add_release_write) && \ defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add_release_write(addr, val) \ - AO_fetch_and_add_release(addr, val) + AO_fetch_and_add_release(addr, val) # define AO_HAVE_fetch_and_add_release_write #endif #if !defined(AO_HAVE_fetch_and_add_acquire_read) && \ defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add_acquire_read(addr, val) \ - AO_fetch_and_add_read(addr, val) + AO_fetch_and_add_read(addr, val) # define AO_HAVE_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_fetch_and_add_acquire_read) && \ defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add_acquire_read(addr, val) \ - AO_fetch_and_add_acquire(addr, val) + AO_fetch_and_add_acquire(addr, val) # define AO_HAVE_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_add_acquire_read) # 
define AO_fetch_and_add_dd_acquire_read(addr, val) \ - AO_fetch_and_add_acquire_read(addr, val) + AO_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_fetch_and_add) # define AO_fetch_and_add_dd_acquire_read(addr, val) \ - AO_fetch_and_add(addr, val) + AO_fetch_and_add(addr, val) # define AO_HAVE_fetch_and_add_dd_acquire_read # endif #endif - + /* Fetch_and_add1 */ #if defined(AO_HAVE_fetch_and_add_full) &&\ @@ -518,41 +518,41 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if defined(AO_HAVE_fetch_and_add_release_write) &&\ !defined(AO_HAVE_fetch_and_add1_release_write) # define AO_fetch_and_add1_release_write(addr) \ - AO_fetch_and_add_release_write(addr,1) + AO_fetch_and_add_release_write(addr,1) # define AO_HAVE_fetch_and_add1_release_write #endif #if defined(AO_HAVE_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_fetch_and_add1_acquire_read) # define AO_fetch_and_add1_acquire_read(addr) \ - AO_fetch_and_add_acquire_read(addr,1) + AO_fetch_and_add_acquire_read(addr,1) # define AO_HAVE_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_fetch_and_add) &&\ !defined(AO_HAVE_fetch_and_add1) # define AO_fetch_and_add1(addr) \ - AO_fetch_and_add(addr,1) + AO_fetch_and_add(addr,1) # define AO_HAVE_fetch_and_add1 #endif #if defined(AO_HAVE_fetch_and_add1_full) # if !defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1_release(addr) \ - AO_fetch_and_add1_full(addr) + AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_release # endif # if !defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1_acquire(addr) \ - AO_fetch_and_add1_full(addr) + AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1_write(addr) \ - AO_fetch_and_add1_full(addr) + AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_write # endif # if !defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1_read(addr) \ - AO_fetch_and_add1_full(addr) + AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_read # endif #endif /* AO_HAVE_fetch_and_add1_full */ @@ -560,25 +560,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_fetch_and_add1) && \ defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1(addr) \ - AO_fetch_and_add1_release(addr) + AO_fetch_and_add1_release(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) && \ defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1(addr) \ - AO_fetch_and_add1_acquire(addr) + AO_fetch_and_add1_acquire(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) && \ defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1(addr) \ - AO_fetch_and_add1_write(addr) + AO_fetch_and_add1_write(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) && \ defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1(addr) \ - AO_fetch_and_add1_read(addr) + AO_fetch_and_add1_read(addr) # define AO_HAVE_fetch_and_add1 #endif @@ -586,39 +586,39 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_fetch_and_add1_full) # define AO_fetch_and_add1_full(addr) \ - (AO_nop_full(), AO_fetch_and_add1_acquire(addr)) + (AO_nop_full(), AO_fetch_and_add1_acquire(addr)) # define AO_HAVE_fetch_and_add1_full #endif #if !defined(AO_HAVE_fetch_and_add1_release_write) && \ defined(AO_HAVE_fetch_and_add1_write) # 
define AO_fetch_and_add1_release_write(addr) \ - AO_fetch_and_add1_write(addr) + AO_fetch_and_add1_write(addr) # define AO_HAVE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_fetch_and_add1_release_write) && \ defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1_release_write(addr) \ - AO_fetch_and_add1_release(addr) + AO_fetch_and_add1_release(addr) # define AO_HAVE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1_acquire_read(addr) \ - AO_fetch_and_add1_read(addr) + AO_fetch_and_add1_read(addr) # define AO_HAVE_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_fetch_and_add1_acquire_read) && \ defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1_acquire_read(addr) \ - AO_fetch_and_add1_acquire(addr) + AO_fetch_and_add1_acquire(addr) # define AO_HAVE_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_add1_acquire_read) # define AO_fetch_and_add1_dd_acquire_read(addr) \ - AO_fetch_and_add1_acquire_read(addr) + AO_fetch_and_add1_acquire_read(addr) # define AO_HAVE_fetch_and_add1_dd_acquire_read # endif #else @@ -638,65 +638,65 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if defined(AO_HAVE_fetch_and_add_release) &&\ !defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release(addr) \ - AO_fetch_and_add_release(addr,(AO_t)(-1)) + AO_fetch_and_add_release(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_release #endif #if defined(AO_HAVE_fetch_and_add_acquire) &&\ !defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire(addr) \ - AO_fetch_and_add_acquire(addr,(AO_t)(-1)) + AO_fetch_and_add_acquire(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_fetch_and_add_write) &&\ !defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_write(addr) \ - AO_fetch_and_add_write(addr,(AO_t)(-1)) + AO_fetch_and_add_write(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_write #endif #if defined(AO_HAVE_fetch_and_add_read) &&\ !defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1_read(addr) \ - AO_fetch_and_add_read(addr,(AO_t)(-1)) + AO_fetch_and_add_read(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_read #endif #if defined(AO_HAVE_fetch_and_add_release_write) &&\ !defined(AO_HAVE_fetch_and_sub1_release_write) # define AO_fetch_and_sub1_release_write(addr) \ - AO_fetch_and_add_release_write(addr,(AO_t)(-1)) + AO_fetch_and_add_release_write(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_fetch_and_add_acquire_read) &&\ !defined(AO_HAVE_fetch_and_sub1_acquire_read) # define AO_fetch_and_sub1_acquire_read(addr) \ - AO_fetch_and_add_acquire_read(addr,(AO_t)(-1)) + AO_fetch_and_add_acquire_read(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_fetch_and_add) &&\ !defined(AO_HAVE_fetch_and_sub1) # define AO_fetch_and_sub1(addr) \ - AO_fetch_and_add(addr,(AO_t)(-1)) + AO_fetch_and_add(addr,(AO_t)(-1)) # define AO_HAVE_fetch_and_sub1 #endif #if defined(AO_HAVE_fetch_and_sub1_full) # if !defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release(addr) \ - AO_fetch_and_sub1_full(addr) + AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_release # endif # if !defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire(addr) \ - AO_fetch_and_sub1_full(addr) + AO_fetch_and_sub1_full(addr) # define 
AO_HAVE_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_write(addr) \ - AO_fetch_and_sub1_full(addr) + AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_write # endif # if !defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1_read(addr) \ - AO_fetch_and_sub1_full(addr) + AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_read # endif #endif /* AO_HAVE_fetch_and_sub1_full */ @@ -704,25 +704,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_fetch_and_sub1) && \ defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1(addr) \ - AO_fetch_and_sub1_release(addr) + AO_fetch_and_sub1_release(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) && \ defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1(addr) \ - AO_fetch_and_sub1_acquire(addr) + AO_fetch_and_sub1_acquire(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) && \ defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1(addr) \ - AO_fetch_and_sub1_write(addr) + AO_fetch_and_sub1_write(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) && \ defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1(addr) \ - AO_fetch_and_sub1_read(addr) + AO_fetch_and_sub1_read(addr) # define AO_HAVE_fetch_and_sub1 #endif @@ -730,39 +730,39 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_fetch_and_sub1_full) # define AO_fetch_and_sub1_full(addr) \ - (AO_nop_full(), AO_fetch_and_sub1_acquire(addr)) + (AO_nop_full(), AO_fetch_and_sub1_acquire(addr)) # define AO_HAVE_fetch_and_sub1_full #endif #if !defined(AO_HAVE_fetch_and_sub1_release_write) && \ defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_release_write(addr) \ - AO_fetch_and_sub1_write(addr) + AO_fetch_and_sub1_write(addr) # define AO_HAVE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_fetch_and_sub1_release_write) && \ defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release_write(addr) \ - AO_fetch_and_sub1_release(addr) + AO_fetch_and_sub1_release(addr) # define AO_HAVE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1_acquire_read(addr) \ - AO_fetch_and_sub1_read(addr) + AO_fetch_and_sub1_read(addr) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_fetch_and_sub1_acquire_read) && \ defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire_read(addr) \ - AO_fetch_and_sub1_acquire(addr) + AO_fetch_and_sub1_acquire(addr) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_sub1_acquire_read) # define AO_fetch_and_sub1_dd_acquire_read(addr) \ - AO_fetch_and_sub1_acquire_read(addr) + AO_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_fetch_and_sub1_dd_acquire_read # endif #else @@ -791,22 +791,22 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if defined(AO_HAVE_or_full) # if !defined(AO_HAVE_or_release) # define AO_or_release(addr, val) \ - AO_or_full(addr, val) + AO_or_full(addr, val) # define AO_HAVE_or_release # endif # if !defined(AO_HAVE_or_acquire) # define AO_or_acquire(addr, val) \ - AO_or_full(addr, val) + AO_or_full(addr, val) # define AO_HAVE_or_acquire # endif # if !defined(AO_HAVE_or_write) # define AO_or_write(addr, val) \ - AO_or_full(addr, val) + 
AO_or_full(addr, val) # define AO_HAVE_or_write # endif # if !defined(AO_HAVE_or_read) # define AO_or_read(addr, val) \ - AO_or_full(addr, val) + AO_or_full(addr, val) # define AO_HAVE_or_read # endif #endif /* AO_HAVE_or_full */ @@ -814,25 +814,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_or) && \ defined(AO_HAVE_or_release) # define AO_or(addr, val) \ - AO_or_release(addr, val) + AO_or_release(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && \ defined(AO_HAVE_or_acquire) # define AO_or(addr, val) \ - AO_or_acquire(addr, val) + AO_or_acquire(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && \ defined(AO_HAVE_or_write) # define AO_or(addr, val) \ - AO_or_write(addr, val) + AO_or_write(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && \ defined(AO_HAVE_or_read) # define AO_or(addr, val) \ - AO_or_read(addr, val) + AO_or_read(addr, val) # define AO_HAVE_or #endif @@ -840,57 +840,57 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_or_full) # define AO_or_full(addr, val) \ - (AO_nop_full(), AO_or_acquire(addr, val)) + (AO_nop_full(), AO_or_acquire(addr, val)) #endif #if !defined(AO_HAVE_or_release_write) && \ defined(AO_HAVE_or_write) # define AO_or_release_write(addr, val) \ - AO_or_write(addr, val) + AO_or_write(addr, val) # define AO_HAVE_or_release_write #endif #if !defined(AO_HAVE_or_release_write) && \ defined(AO_HAVE_or_release) # define AO_or_release_write(addr, val) \ - AO_or_release(addr, val) + AO_or_release(addr, val) # define AO_HAVE_or_release_write #endif #if !defined(AO_HAVE_or_acquire_read) && \ defined(AO_HAVE_or_read) # define AO_or_acquire_read(addr, val) \ - AO_or_read(addr, val) + AO_or_read(addr, val) # define AO_HAVE_or_acquire_read #endif #if !defined(AO_HAVE_or_acquire_read) && \ defined(AO_HAVE_or_acquire) # define AO_or_acquire_read(addr, val) \ - AO_or_acquire(addr, val) + AO_or_acquire(addr, val) # define AO_HAVE_or_acquire_read #endif -/* dd_aquire_read is meaningless. */ - +/* dd_aquire_read is meaningless. 
*/ + /* Test_and_set */ - + #if defined(AO_HAVE_test_and_set_full) # if !defined(AO_HAVE_test_and_set_release) # define AO_test_and_set_release(addr) \ - AO_test_and_set_full(addr) + AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_release # endif # if !defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set_acquire(addr) \ - AO_test_and_set_full(addr) + AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_acquire # endif # if !defined(AO_HAVE_test_and_set_write) # define AO_test_and_set_write(addr) \ - AO_test_and_set_full(addr) + AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_write # endif # if !defined(AO_HAVE_test_and_set_read) # define AO_test_and_set_read(addr) \ - AO_test_and_set_full(addr) + AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_read # endif #endif /* AO_HAVE_test_and_set_full */ @@ -898,25 +898,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_test_and_set) && \ defined(AO_HAVE_test_and_set_release) # define AO_test_and_set(addr) \ - AO_test_and_set_release(addr) + AO_test_and_set_release(addr) # define AO_HAVE_test_and_set #endif #if !defined(AO_HAVE_test_and_set) && \ defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set(addr) \ - AO_test_and_set_acquire(addr) + AO_test_and_set_acquire(addr) # define AO_HAVE_test_and_set #endif #if !defined(AO_HAVE_test_and_set) && \ defined(AO_HAVE_test_and_set_write) # define AO_test_and_set(addr) \ - AO_test_and_set_write(addr) + AO_test_and_set_write(addr) # define AO_HAVE_test_and_set #endif #if !defined(AO_HAVE_test_and_set) && \ defined(AO_HAVE_test_and_set_read) # define AO_test_and_set(addr) \ - AO_test_and_set_read(addr) + AO_test_and_set_read(addr) # define AO_HAVE_test_and_set #endif @@ -924,39 +924,39 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_test_and_set_full) # define AO_test_and_set_full(addr) \ - (AO_nop_full(), AO_test_and_set_acquire(addr)) + (AO_nop_full(), AO_test_and_set_acquire(addr)) # define AO_HAVE_test_and_set_full #endif #if !defined(AO_HAVE_test_and_set_release_write) && \ defined(AO_HAVE_test_and_set_write) # define AO_test_and_set_release_write(addr) \ - AO_test_and_set_write(addr) + AO_test_and_set_write(addr) # define AO_HAVE_test_and_set_release_write #endif #if !defined(AO_HAVE_test_and_set_release_write) && \ defined(AO_HAVE_test_and_set_release) # define AO_test_and_set_release_write(addr) \ - AO_test_and_set_release(addr) + AO_test_and_set_release(addr) # define AO_HAVE_test_and_set_release_write #endif #if !defined(AO_HAVE_test_and_set_acquire_read) && \ defined(AO_HAVE_test_and_set_read) # define AO_test_and_set_acquire_read(addr) \ - AO_test_and_set_read(addr) + AO_test_and_set_read(addr) # define AO_HAVE_test_and_set_acquire_read #endif #if !defined(AO_HAVE_test_and_set_acquire_read) && \ defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set_acquire_read(addr) \ - AO_test_and_set_acquire(addr) + AO_test_and_set_acquire(addr) # define AO_HAVE_test_and_set_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_test_and_set_acquire_read) # define AO_test_and_set_dd_acquire_read(addr) \ - AO_test_and_set_acquire_read(addr) + AO_test_and_set_acquire_read(addr) # define AO_HAVE_test_and_set_dd_acquire_read # endif #else @@ -981,28 +981,28 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full)\ && !defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap_release(addr, old, 
new_val) \ - (AO_nop_full(), AO_compare_and_swap(addr, old, new_val)) + (AO_nop_full(), AO_compare_and_swap(addr, old, new_val)) # define AO_HAVE_compare_and_swap_release #endif #if defined(AO_HAVE_compare_and_swap_full) # if !defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap_release(addr, old, new_val) \ - AO_compare_and_swap_full(addr, old, new_val) + AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_release # endif # if !defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap_acquire(addr, old, new_val) \ - AO_compare_and_swap_full(addr, old, new_val) + AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire # endif # if !defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap_write(addr, old, new_val) \ - AO_compare_and_swap_full(addr, old, new_val) + AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_write # endif # if !defined(AO_HAVE_compare_and_swap_read) # define AO_compare_and_swap_read(addr, old, new_val) \ - AO_compare_and_swap_full(addr, old, new_val) + AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_read # endif #endif /* AO_HAVE_compare_and_swap_full */ @@ -1010,25 +1010,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_compare_and_swap) && \ defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap(addr, old, new_val) \ - AO_compare_and_swap_release(addr, old, new_val) + AO_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) && \ defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap(addr, old, new_val) \ - AO_compare_and_swap_acquire(addr, old, new_val) + AO_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) && \ defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap(addr, old, new_val) \ - AO_compare_and_swap_write(addr, old, new_val) + AO_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) && \ defined(AO_HAVE_compare_and_swap_read) # define AO_compare_and_swap(addr, old, new_val) \ - AO_compare_and_swap_read(addr, old, new_val) + AO_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif @@ -1036,45 +1036,45 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_compare_and_swap_full) # define AO_compare_and_swap_full(addr, old, new_val) \ - (AO_nop_full(), AO_compare_and_swap_acquire(addr, old, new_val)) + (AO_nop_full(), AO_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_compare_and_swap_full #endif #if !defined(AO_HAVE_compare_and_swap_release_write) && \ defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap_release_write(addr, old, new_val) \ - AO_compare_and_swap_write(addr, old, new_val) + AO_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_compare_and_swap_release_write) && \ defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap_release_write(addr, old, new_val) \ - AO_compare_and_swap_release(addr, old, new_val) + AO_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_compare_and_swap_acquire_read) && \ defined(AO_HAVE_compare_and_swap_read) # define 
AO_compare_and_swap_acquire_read(addr, old, new_val) \ - AO_compare_and_swap_read(addr, old, new_val) + AO_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_compare_and_swap_acquire_read) && \ defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap_acquire_read(addr, old, new_val) \ - AO_compare_and_swap_acquire(addr, old, new_val) + AO_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_and_swap_acquire_read) # define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \ - AO_compare_and_swap_acquire_read(addr, old, new_val) + AO_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_and_swap) # define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \ - AO_compare_and_swap(addr, old, new_val) + AO_compare_and_swap(addr, old, new_val) # define AO_HAVE_compare_and_swap_dd_acquire_read # endif #endif @@ -1086,8 +1086,8 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) && !defined(AO_HAVE_compare_double_and_swap_double_acquire) AO_INLINE int AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr, - AO_t o1, AO_t o2, - AO_t n1, AO_t n2) + AO_t o1, AO_t o2, + AO_t n1, AO_t n2) { int result = AO_compare_double_and_swap_double(addr, o1, o2, n1, n2); AO_nop_full(); @@ -1099,28 +1099,28 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) && defined(AO_HAVE_nop_full)\ && !defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) \ - (AO_nop_full(), AO_compare_double_and_swap_double(addr, o1, o2, n1, n2)) + (AO_nop_full(), AO_compare_double_and_swap_double(addr, o1, o2, n1, n2)) # define AO_HAVE_compare_double_and_swap_double_release #endif #if defined(AO_HAVE_compare_double_and_swap_double_full) # if !defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_release # endif # if !defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_acquire # endif # if !defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double_write(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_write # endif # if !defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double_read(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_read # endif #endif /* AO_HAVE_compare_double_and_swap_double_full */ @@ -1128,25 +1128,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_compare_double_and_swap_double) && \ defined(AO_HAVE_compare_double_and_swap_double_release) # define 
AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) && \ defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) && \ defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_write(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_write(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) && \ defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_read(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_read(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double #endif @@ -1154,45 +1154,45 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_compare_double_and_swap_double_full) # define AO_compare_double_and_swap_double_full(addr, o1, o2, n1, n2) \ - (AO_nop_full(), AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2)) + (AO_nop_full(), AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2)) # define AO_HAVE_compare_double_and_swap_double_full #endif #if !defined(AO_HAVE_compare_double_and_swap_double_release_write) && \ defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double_release_write(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_write(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_write(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_double_and_swap_double_release_write) && \ defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release_write(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_release(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) && \ defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double_acquire_read(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_read(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_read(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_acquire_read #endif #if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) && \ defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double_acquire_read(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_acquire(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_double_and_swap_double_acquire_read) # define 
AO_compare_double_and_swap_double_dd_acquire_read(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double_acquire_read(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double_acquire_read(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_double_and_swap_double) # define AO_compare_double_and_swap_double_dd_acquire_read(addr, o1, o2, n1, n2) \ - AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) + AO_compare_double_and_swap_double(addr, o1, o2, n1, n2) # define AO_HAVE_compare_double_and_swap_double_dd_acquire_read # endif #endif @@ -1202,8 +1202,8 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) && !defined(AO_HAVE_compare_and_swap_double_acquire) AO_INLINE int AO_compare_and_swap_double_acquire(volatile AO_double_t *addr, - AO_t o1, - AO_t n1, AO_t n2) + AO_t o1, + AO_t n1, AO_t n2) { int result = AO_compare_and_swap_double(addr, o1, n1, n2); AO_nop_full(); @@ -1215,28 +1215,28 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) && defined(AO_HAVE_nop_full)\ && !defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release(addr, o1, n1, n2) \ - (AO_nop_full(), AO_compare_and_swap_double(addr, o1, n1, n2)) + (AO_nop_full(), AO_compare_and_swap_double(addr, o1, n1, n2)) # define AO_HAVE_compare_and_swap_double_release #endif #if defined(AO_HAVE_compare_and_swap_double_full) # if !defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release(addr, o1, n1, n2) \ - AO_compare_and_swap_double_full(addr, o1, n1, n2) + AO_compare_and_swap_double_full(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_release # endif # if !defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double_acquire(addr, o1, n1, n2) \ - AO_compare_and_swap_double_full(addr, o1, n1, n2) + AO_compare_and_swap_double_full(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_acquire # endif # if !defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double_write(addr, o1, n1, n2) \ - AO_compare_and_swap_double_full(addr, o1, n1, n2) + AO_compare_and_swap_double_full(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_write # endif # if !defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double_read(addr, o1, n1, n2) \ - AO_compare_and_swap_double_full(addr, o1, n1, n2) + AO_compare_and_swap_double_full(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_read # endif #endif /* AO_HAVE_compare_and_swap_double_full */ @@ -1244,25 +1244,25 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) #if !defined(AO_HAVE_compare_and_swap_double) && \ defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double(addr, o1, n1, n2) \ - AO_compare_and_swap_double_release(addr, o1, n1, n2) + AO_compare_and_swap_double_release(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) && \ defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double(addr, o1, n1, n2) \ - AO_compare_and_swap_double_acquire(addr, o1, n1, n2) + AO_compare_and_swap_double_acquire(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) && \ defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double(addr, o1, n1, n2) \ - AO_compare_and_swap_double_write(addr, o1, n1, n2) + AO_compare_and_swap_double_write(addr, o1, n1, n2) # define 
AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) && \ defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double(addr, o1, n1, n2) \ - AO_compare_and_swap_double_read(addr, o1, n1, n2) + AO_compare_and_swap_double_read(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double #endif @@ -1270,60 +1270,60 @@ AO_test_and_set_acquire(volatile AO_TS_t *addr) defined(AO_HAVE_nop_full) && \ !defined(AO_HAVE_compare_and_swap_double_full) # define AO_compare_and_swap_double_full(addr, o1, n1, n2) \ - (AO_nop_full(), AO_compare_and_swap_double_acquire(addr, o1, n1, n2)) + (AO_nop_full(), AO_compare_and_swap_double_acquire(addr, o1, n1, n2)) # define AO_HAVE_compare_and_swap_double_full #endif #if !defined(AO_HAVE_compare_and_swap_double_release_write) && \ defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double_release_write(addr, o1, n1, n2) \ - AO_compare_and_swap_double_write(addr, o1, n1, n2) + AO_compare_and_swap_double_write(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_and_swap_double_release_write) && \ defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release_write(addr, o1, n1, n2) \ - AO_compare_and_swap_double_release(addr, o1, n1, n2) + AO_compare_and_swap_double_release(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_and_swap_double_acquire_read) && \ defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double_acquire_read(addr, o1, n1, n2) \ - AO_compare_and_swap_double_read(addr, o1, n1, n2) + AO_compare_and_swap_double_read(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_acquire_read #endif #if !defined(AO_HAVE_compare_and_swap_double_acquire_read) && \ defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double_acquire_read(addr, o1, n1, n2) \ - AO_compare_and_swap_double_acquire(addr, o1, n1, n2) + AO_compare_and_swap_double_acquire(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_and_swap_double_acquire_read) # define AO_compare_and_swap_double_dd_acquire_read(addr, o1, n1, n2) \ - AO_compare_and_swap_double_acquire_read(addr, o1, n1, n2) + AO_compare_and_swap_double_acquire_read(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_and_swap_double) # define AO_compare_and_swap_double_dd_acquire_read(addr, o1, n1, n2) \ - AO_compare_and_swap_double(addr, o1, n1, n2) + AO_compare_and_swap_double(addr, o1, n1, n2) # define AO_HAVE_compare_and_swap_double_dd_acquire_read # endif #endif /* NEC LE-IT: Convenience functions for AO_double compare and swap which */ -/* types and reads easier in code */ +/* types and reads easier in code */ #if defined(AO_HAVE_compare_double_and_swap_double_release) && \ !defined(AO_HAVE_double_compare_and_swap_release) AO_INLINE int AO_double_compare_and_swap_release(volatile AO_double_t *addr, - AO_double_t old_val, AO_double_t new_val) + AO_double_t old_val, AO_double_t new_val) { - return AO_compare_double_and_swap_double_release(addr, - old_val.AO_val1, old_val.AO_val2, - new_val.AO_val1, new_val.AO_val2); + return AO_compare_double_and_swap_double_release(addr, + old_val.AO_val1, old_val.AO_val2, + new_val.AO_val1, new_val.AO_val2); } #define AO_HAVE_double_compare_and_swap_release #endif @@ -1332,11 
+1332,11 @@ AO_double_compare_and_swap_release(volatile AO_double_t *addr, !defined(AO_HAVE_double_compare_and_swap_acquire) AO_INLINE int AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, - AO_double_t old_val, AO_double_t new_val) + AO_double_t old_val, AO_double_t new_val) { - return AO_compare_double_and_swap_double_acquire(addr, - old_val.AO_val1, old_val.AO_val2, - new_val.AO_val1, new_val.AO_val2); + return AO_compare_double_and_swap_double_acquire(addr, + old_val.AO_val1, old_val.AO_val2, + new_val.AO_val1, new_val.AO_val2); } #define AO_HAVE_double_compare_and_swap_acquire #endif @@ -1345,11 +1345,11 @@ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, !defined(AO_HAVE_double_compare_and_swap_full) AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, - AO_double_t old_val, AO_double_t new_val) + AO_double_t old_val, AO_double_t new_val) { - return AO_compare_double_and_swap_double_full(addr, - old_val.AO_val1, old_val.AO_val2, - new_val.AO_val1, new_val.AO_val2); + return AO_compare_double_and_swap_double_full(addr, + old_val.AO_val1, old_val.AO_val2, + new_val.AO_val1, new_val.AO_val2); } #define AO_HAVE_double_compare_and_swap_full #endif diff --git a/src/atomic_ops/sysdeps/acquire_release_volatile.h b/src/atomic_ops/sysdeps/acquire_release_volatile.h index b84dc82..6d54af9 100644 --- a/src/atomic_ops/sysdeps/acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/acquire_release_volatile.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* @@ -46,7 +46,7 @@ AO_INLINE AO_t AO_load_acquire(const volatile AO_t *p) { AO_t result = *p; - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ AO_GCC_BARRIER(); return result; } @@ -56,9 +56,7 @@ AO_INLINE void AO_store_release(volatile AO_t *p, AO_t val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ + /* A normal volatile store generates an st.rel */ *p = val; } #define AO_HAVE_store_release - - diff --git a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h index 13a6241..071bea0 100644 --- a/src/atomic_ops/sysdeps/aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/aligned_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of AO_t are @@ -29,8 +29,8 @@ AO_INLINE AO_t AO_load(const volatile AO_t *addr) { assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0); - /* Cast away the volatile for architectures where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures where */ + /* volatile adds barrier semantics. */ return *(AO_t *)addr; } @@ -44,5 +44,3 @@ AO_store(volatile AO_t *addr, AO_t new_val) } #define AO_HAVE_store - - diff --git a/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h index ef96e6e..db258df 100644 --- a/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* diff --git a/src/atomic_ops/sysdeps/all_atomic_load_store.h b/src/atomic_ops/sysdeps/all_atomic_load_store.h index e6d0e77..248d9a6 100644 --- a/src/atomic_ops/sysdeps/all_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/all_atomic_load_store.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* diff --git a/src/atomic_ops/sysdeps/ao_t_is_int.h b/src/atomic_ops/sysdeps/ao_t_is_int.h index c7803c5..8e57bb4 100644 --- a/src/atomic_ops/sysdeps/ao_t_is_int.h +++ b/src/atomic_ops/sysdeps/ao_t_is_int.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. 
*/ /* @@ -31,97 +31,96 @@ #if defined(AO_HAVE_compare_and_swap_full) && \ !defined(AO_HAVE_int_compare_and_swap_full) # define AO_int_compare_and_swap_full(addr, old, new_val) \ - AO_compare_and_swap_full((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap_full((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_full # endif #if defined(AO_HAVE_compare_and_swap_acquire) && \ !defined(AO_HAVE_int_compare_and_swap_acquire) # define AO_int_compare_and_swap_acquire(addr, old, new_val) \ - AO_compare_and_swap_acquire((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap_acquire((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_acquire # endif #if defined(AO_HAVE_compare_and_swap_release) && \ !defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap_release(addr, old, new_val) \ - AO_compare_and_swap_release((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap_release((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_release # endif #if defined(AO_HAVE_compare_and_swap_write) && \ !defined(AO_HAVE_int_compare_and_swap_write) # define AO_int_compare_and_swap_write(addr, old, new_val) \ - AO_compare_and_swap_write((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap_write((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_write # endif #if defined(AO_HAVE_compare_and_swap_read) && \ !defined(AO_HAVE_int_compare_and_swap_read) # define AO_int_compare_and_swap_read(addr, old, new_val) \ - AO_compare_and_swap_read((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap_read((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_read # endif #if defined(AO_HAVE_compare_and_swap) && \ !defined(AO_HAVE_int_compare_and_swap) # define AO_int_compare_and_swap(addr, old, new_val) \ - AO_compare_and_swap((volatile AO_t *)(addr), \ - (AO_t)(old), (AO_t)(new_val)) + AO_compare_and_swap((volatile AO_t *)(addr), \ + (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap # endif #if defined(AO_HAVE_load_acquire) && \ !defined(AO_HAVE_int_load_acquire) # define AO_int_load_acquire(addr) \ - (int)AO_load_acquire((const volatile AO_t *)(addr)) + (int)AO_load_acquire((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_acquire # endif #if defined(AO_HAVE_store_release) && \ !defined(AO_HAVE_int_store_release) # define AO_int_store_release(addr, val) \ - AO_store_release((volatile AO_t *)(addr), (AO_t)(val)) + AO_store_release((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_release # endif #if defined(AO_HAVE_fetch_and_add_full) && \ !defined(AO_HAVE_int_fetch_and_add_full) # define AO_int_fetch_and_add_full(addr, incr) \ - (int)AO_fetch_and_add_full((volatile AO_t *)(addr), (AO_t)(incr)) + (int)AO_fetch_and_add_full((volatile AO_t *)(addr), (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_full # endif #if defined(AO_HAVE_fetch_and_add1_acquire) && \ !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ - (int)AO_fetch_and_add1_acquire((volatile AO_t *)(addr)) + (int)AO_fetch_and_add1_acquire((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_acquire # endif #if defined(AO_HAVE_fetch_and_add1_release) && \ !defined(AO_HAVE_int_fetch_and_add1_release) # 
define AO_int_fetch_and_add1_release(addr) \ - (int)AO_fetch_and_add1_release((volatile AO_t *)(addr)) + (int)AO_fetch_and_add1_release((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_release # endif #if defined(AO_HAVE_fetch_and_sub1_acquire) && \ !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ - (int)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr)) + (int)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_acquire # endif #if defined(AO_HAVE_fetch_and_sub1_release) && \ !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ - (int)AO_fetch_and_sub1_release((volatile AO_t *)(addr)) + (int)AO_fetch_and_sub1_release((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_release # endif - diff --git a/src/atomic_ops/sysdeps/armcc/arm_v6.h b/src/atomic_ops/sysdeps/armcc/arm_v6.h index 199ead8..f7e186c 100644 --- a/src/atomic_ops/sysdeps/armcc/arm_v6.h +++ b/src/atomic_ops/sysdeps/armcc/arm_v6.h @@ -1,12 +1,12 @@ -/* - * Copyright (c) 2007 by NEC LE-IT: All rights reserved. +/* + * Copyright (c) 2007 by NEC LE-IT: All rights reserved. * A transcription of ARMv6 atomic operations for the ARM Realview Toolchain. * This code works with armcc from RVDS 3.1 * This is based on work in gcc/arm.h by * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. - * + * * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED @@ -29,9 +29,9 @@ Dont use with ARM instruction sets lower than v6 #include "../standard_ao_double_t.h" /* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC - * A data memory barrier must be raised via CP15 command (see documentation). + * A data memory barrier must be raised via CP15 command (see documentation). * - * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a + * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a * memory barrier (DMB). Raising it via CP15 should still work as told me by the * support engineers. If it turns out to be much quicker than we should implement * custom code for ARMv7 using the asm { dmb } command. 
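For illustration (not part of the patch hunks): a minimal spinlock sketch showing how the primitives this header supplies compose in client code. It assumes the generalized AO_test_and_set_full, which generalize.h synthesizes from AO_test_and_set plus AO_nop_full exactly as in the cascade shown earlier, and the AO_CLEAR release helper layered on top by atomic_ops.h:

    #include "atomic_ops.h"

    static AO_TS_t lock = AO_TS_INITIALIZER;   /* starts clear */

    void with_lock(void (*fn)(void))
    {
      /* Acquire: loop until the previous value was clear. The _full   */
      /* variant implies a memory barrier (on ARMv6, the CP15 DMB      */
      /* issued by AO_nop_full above), so the critical section cannot  */
      /* be reordered before the lock acquisition.                     */
      while (AO_test_and_set_full(&lock) == AO_TS_SET)
        ;                                      /* spin */
      fn();                                    /* critical section */
      AO_CLEAR(&lock);                         /* release: store AO_TS_CLEAR */
    }

This is a usage sketch under those assumptions, not code from the library itself; real callers would typically add a backoff in the spin loop.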
@@ -44,10 +44,10 @@ AO_INLINE void AO_nop_full(void) { #ifndef AO_UNIPROCESSOR - unsigned int dest=0; - /* issue an data memory barrier (keeps ordering of memory transactions */ - /* before and after this operation) */ - __asm { mcr p15,0,dest,c7,c10,5 } ; + unsigned int dest=0; + /* issue an data memory barrier (keeps ordering of memory transactions */ + /* before and after this operation) */ + __asm { mcr p15,0,dest,c7,c10,5 } ; #endif } @@ -56,8 +56,8 @@ AO_nop_full(void) AO_INLINE AO_t AO_load(const volatile AO_t *addr) { - /* Cast away the volatile in case it adds fence semantics */ - return (*(const AO_t *)addr); + /* Cast away the volatile in case it adds fence semantics */ + return (*(const AO_t *)addr); } #define AO_HAVE_load @@ -72,44 +72,44 @@ AO_load(const volatile AO_t *addr) */ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value) { - unsigned long tmp; - + unsigned long tmp; + retry: -__asm { - ldrex tmp, [addr] - strex tmp, value, [addr] - teq tmp, #0 - bne retry - }; +__asm { + ldrex tmp, [addr] + strex tmp, value, [addr] + teq tmp, #0 + bne retry + }; } #define AO_HAVE_store /* NEC LE-IT: replace the SWAP as recommended by ARM: "Applies to: ARM11 Cores - Though the SWP instruction will still work with ARM V6 cores, it is recommended - to use the new V6 synchronization instructions. The SWP instruction produces - locked read and write accesses which are atomic, i.e. another operation cannot - be done between these locked accesses which ties up external bus (AHB,AXI) - bandwidth and can increase worst case interrupt latencies. LDREX,STREX are - more flexible, other instructions can be done between the LDREX and STREX accesses. + Though the SWP instruction will still work with ARM V6 cores, it is recommended + to use the new V6 synchronization instructions. The SWP instruction produces + locked read and write accesses which are atomic, i.e. another operation cannot + be done between these locked accesses which ties up external bus (AHB,AXI) + bandwidth and can increase worst case interrupt latencies. LDREX,STREX are + more flexible, other instructions can be done between the LDREX and STREX accesses. 
" */ AO_INLINE AO_TS_t AO_test_and_set(volatile AO_TS_t *addr) { - - AO_TS_t oldval; - unsigned long tmp; - unsigned long one = 1; + + AO_TS_t oldval; + unsigned long tmp; + unsigned long one = 1; retry: -__asm { - ldrex oldval, [addr] - strex tmp, one, [addr] - teq tmp, #0 - bne retry - } - - return oldval; +__asm { + ldrex oldval, [addr] + strex tmp, one, [addr] + teq tmp, #0 + bne retry + } + + return oldval; } #define AO_HAVE_test_and_set @@ -118,18 +118,18 @@ __asm { AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *p, AO_t incr) { - unsigned long tmp,tmp2; - AO_t result; + unsigned long tmp,tmp2; + AO_t result; retry: __asm { - ldrex result, [p] - add tmp, incr, result - strex tmp2, tmp, [p] - teq tmp2, #0 - bne retry } + ldrex result, [p] + add tmp, incr, result + strex tmp2, tmp, [p] + teq tmp2, #0 + bne retry } - return result; + return result; } #define AO_HAVE_fetch_and_add @@ -138,19 +138,19 @@ __asm { AO_INLINE AO_t AO_fetch_and_add1(volatile AO_t *p) { - unsigned long tmp,tmp2; - AO_t result; + unsigned long tmp,tmp2; + AO_t result; retry: __asm { - ldrex result, [p] - add tmp, result, #1 - strex tmp2, tmp, [p] - teq tmp2, #0 - bne retry - } - - return result; + ldrex result, [p] + add tmp, result, #1 + strex tmp2, tmp, [p] + teq tmp2, #0 + bne retry + } + + return result; } #define AO_HAVE_fetch_and_add1 @@ -159,19 +159,19 @@ __asm { AO_INLINE AO_t AO_fetch_and_sub1(volatile AO_t *p) { - unsigned long tmp,tmp2; - AO_t result; + unsigned long tmp,tmp2; + AO_t result; retry: __asm { - ldrex result, [p] - sub tmp, result, #1 - strex tmp2, tmp, [p] - teq tmp2, #0 - bne retry - } - - return result; + ldrex result, [p] + sub tmp, result, #1 + strex tmp2, tmp, [p] + teq tmp2, #0 + bne retry + } + + return result; } #define AO_HAVE_fetch_and_sub1 @@ -180,52 +180,52 @@ __asm { /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, - AO_t old_val, AO_t new_val) + AO_t old_val, AO_t new_val) { - AO_t result,tmp; + AO_t result,tmp; retry: __asm__ { - mov result, #2 - ldrex tmp, [addr] - teq tmp, old_val - strexeq result, new_val, [addr] - teq result, #1 - beq retry - } - - return !(result&2); + mov result, #2 + ldrex tmp, [addr] + teq tmp, old_val + strexeq result, new_val, [addr] + teq result, #1 + beq retry + } + + return !(result&2); } #define AO_HAVE_compare_and_swap /* helper functions for the Realview compiler: LDREXD is not usable - * with inline assembler, so use the "embedded" assembler as + * with inline assembler, so use the "embedded" assembler as * suggested by ARM Dev. support (June 2008). 
*/ __asm inline double_ptr_storage load_ex(volatile AO_double_t *addr) { - LDREXD r0,r1,[r0] + LDREXD r0,r1,[r0] } __asm inline int store_ex(AO_t val1, AO_t val2, volatile AO_double_t *addr) { - STREXD r3,r0,r1,[r2] - MOV r0,r3 + STREXD r3,r0,r1,[r2] + MOV r0,r3 } AO_INLINE int AO_compare_double_and_swap_double(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { - double_ptr_storage old_val = ((double_ptr_storage)old_val2 << 32) | old_val1; - + double_ptr_storage old_val = ((double_ptr_storage)old_val2 << 32) | old_val1; + double_ptr_storage tmp; - int result; - - while(1) { - tmp = load_ex(addr); - if(tmp != old_val) return 0; - result = store_ex(new_val1, new_val2, addr); - if(!result) return 1; - } + int result; + + while(1) { + tmp = load_ex(addr); + if(tmp != old_val) return 0; + result = store_ex(new_val1, new_val2, addr); + if(!result) return 1; + } } #define AO_HAVE_compare_double_and_swap_double diff --git a/src/atomic_ops/sysdeps/atomic_load_store.h b/src/atomic_ops/sysdeps/atomic_load_store.h index c69c97a..e4bf103 100644 --- a/src/atomic_ops/sysdeps/atomic_load_store.h +++ b/src/atomic_ops/sysdeps/atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of AO_t are @@ -28,8 +28,8 @@ AO_INLINE AO_t AO_load(const volatile AO_t *addr) { - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(const AO_t *)addr); } @@ -42,5 +42,3 @@ AO_store(volatile AO_t *addr, AO_t new_val) } #define AO_HAVE_store - - diff --git a/src/atomic_ops/sysdeps/char_acquire_release_volatile.h b/src/atomic_ops/sysdeps/char_acquire_release_volatile.h index a227c1b..c988488 100644 --- a/src/atomic_ops/sysdeps/char_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/char_acquire_release_volatile.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* @@ -37,7 +37,7 @@ AO_INLINE unsigned char AO_char_load_acquire(const volatile unsigned char *p) { unsigned char result = *p; - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ AO_GCC_BARRIER(); return result; } @@ -47,9 +47,7 @@ AO_INLINE void AO_char_store_release(volatile unsigned char *p, unsigned char val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ + /* A normal volatile store generates an st.rel */ *p = val; } #define AO_HAVE_char_store_release - - diff --git a/src/atomic_ops/sysdeps/char_atomic_load_store.h b/src/atomic_ops/sysdeps/char_atomic_load_store.h index caac927..ca12541 100644 --- a/src/atomic_ops/sysdeps/char_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/char_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of unsigned char are @@ -28,8 +28,8 @@ AO_INLINE unsigned char AO_char_load(const volatile unsigned char *addr) { - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. 
*/ return (*(const unsigned char *)addr); } @@ -42,5 +42,3 @@ AO_char_store(volatile unsigned char *addr, unsigned char new_val) } #define AO_HAVE_char_store - - diff --git a/src/atomic_ops/sysdeps/gcc/alpha.h b/src/atomic_ops/sysdeps/gcc/alpha.h index 4e26d47..ea6293c 100644 --- a/src/atomic_ops/sysdeps/gcc/alpha.h +++ b/src/atomic_ops/sysdeps/gcc/alpha.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -20,7 +20,7 @@ #include "../test_and_set_t_is_ao_t.h" #define AO_NO_DD_ORDERING - /* Data dependence does not imply read ordering. */ + /* Data dependence does not imply read ordering. */ AO_INLINE void AO_nop_full(void) @@ -38,13 +38,13 @@ AO_nop_write(void) #define AO_HAVE_nop_write -/* mb should be used for AO_nop_read(). That's the default. */ +/* mb should be used for AO_nop_read(). That's the default. */ -/* We believe that ldq_l ... stq_c does not imply any memory barrier. */ -/* We should add an explicit fetch_and_add definition. */ +/* We believe that ldq_l ... stq_c does not imply any memory barrier. */ +/* We should add an explicit fetch_and_add definition. */ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { unsigned long was_equal; unsigned long temp; @@ -52,17 +52,15 @@ AO_compare_and_swap(volatile AO_t *addr, __asm__ __volatile__( "1: ldq_l %0,%1\n" " cmpeq %0,%4,%2\n" - " mov %3,%0\n" + " mov %3,%0\n" " beq %2,2f\n" " stq_c %0,%1\n" " beq %0,1b\n" "2:\n" :"=&r" (temp), "=m" (*addr), "=&r" (was_equal) : "r" (new_val), "Ir" (old) - :"memory"); + :"memory"); return was_equal; } #define AO_HAVE_compare_and_swap - - diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h index e961369..41fc3e4 100644 --- a/src/atomic_ops/sysdeps/gcc/arm.h +++ b/src/atomic_ops/sysdeps/gcc/arm.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -20,23 +20,23 @@ #include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */ /* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC - * A data memory barrier must be raised via CP15 command (see documentation). - * - * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a + * A data memory barrier must be raised via CP15 command (see documentation). + * + * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a * memory barrier (DMB). Raising it via CP15 should still work as told me by the * support engineers. If it turns out to be much quicker than we should implement - * custom code for ARMv7 using the asm { dmb } command. + * custom code for ARMv7 using the asm { dmb } command. 
* * If only a single processor is used, we can define AO_UNIPROCESSOR - * and do not need to access CP15 for ensuring a DMB + * and do not need to access CP15 for ensuring a DMB */ /* NEC LE-IT: gcc has no way to easily check the arm architecture - * but defines only one of __ARM_ARCH_x__ to be true */ + * but defines only one of __ARM_ARCH_x__ to be true */ #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__) \ - || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__) \ + || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__) #include "../standard_ao_double_t.h" @@ -44,10 +44,10 @@ AO_INLINE void AO_nop_full(void) { #ifndef AO_UNIPROCESSOR - /* issue an data memory barrier (keeps ordering of memory transactions */ - /* before and after this operation) */ - unsigned int dest=0; - __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory"); + /* issue an data memory barrier (keeps ordering of memory transactions */ + /* before and after this operation) */ + unsigned int dest=0; + __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory"); #endif } @@ -57,8 +57,8 @@ AO_nop_full(void) AO_INLINE AO_t AO_load(const volatile AO_t *addr) { - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(const AO_t *)addr); } #define AO_HAVE_load @@ -67,7 +67,7 @@ AO_load(const volatile AO_t *addr) * the only safe way to set variables also used in LL/SC environment. * A direct write won't be recognized by the LL/SC construct on the _same_ CPU. * Support engineers response for behaviour of ARMv6: - * + * Core1 Core2 SUCCESS =================================== LDREX(x) @@ -82,7 +82,7 @@ AO_load(const volatile AO_t *addr) STREX(x) Yes ----------------------------------- - * ARMv7 behaves similar, see documentation CortexA8 TRM, point 8.5 + * ARMv7 behaves similar, see documentation CortexA8 TRM, point 8.5 * * HB: I think this is only a problem if interrupt handlers do not clear * the reservation, as they almost certainly should. Probably change this back @@ -90,47 +90,47 @@ AO_load(const volatile AO_t *addr) */ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value) { - AO_t flag; - - __asm__ __volatile__("@AO_store\n" -"1: ldrex %0, [%2]\n" -" strex %0, %3, [%2]\n" -" teq %0, #0\n" -" bne 1b" - : "=&r"(flag), "+m"(*addr) - : "r" (addr), "r"(value) - : "cc"); + AO_t flag; + + __asm__ __volatile__("@AO_store\n" +"1: ldrex %0, [%2]\n" +" strex %0, %3, [%2]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r"(flag), "+m"(*addr) + : "r" (addr), "r"(value) + : "cc"); } #define AO_HAVE_store /* NEC LE-IT: replace the SWAP as recommended by ARM: "Applies to: ARM11 Cores - Though the SWP instruction will still work with ARM V6 cores, it is - recommended to use the new V6 synchronization instructions. The SWP - instruction produces ‘locked’ read and write accesses which are atomic, - i.e. another operation cannot be done between these locked accesses which - ties up external bus (AHB,AXI) bandwidth and can increase worst case - interrupt latencies. LDREX,STREX are more flexible, other instructions can - be done between the LDREX and STREX accesses. 
+ Though the SWP instruction will still work with ARM V6 cores, it is + recommended to use the new V6 synchronization instructions. The SWP + instruction produces 'locked' read and write accesses which are atomic, + i.e. another operation cannot be done between these locked accesses which + ties up external bus (AHB,AXI) bandwidth and can increase worst case + interrupt latencies. LDREX,STREX are more flexible, other instructions can + be done between the LDREX and STREX accesses. " */ AO_INLINE AO_TS_t AO_test_and_set(volatile AO_TS_t *addr) { - - AO_TS_t oldval; - unsigned long flag; - - __asm__ __volatile__("@AO_test_and_set\n" -"1: ldrex %0, [%3]\n" -" strex %1, %4, [%3]\n" -" teq %1, #0\n" -" bne 1b\n" - : "=&r"(oldval),"=&r"(flag), "+m"(*addr) - : "r"(addr), "r"(1) - : "cc"); - - return oldval; + + AO_TS_t oldval; + unsigned long flag; + + __asm__ __volatile__("@AO_test_and_set\n" +"1: ldrex %0, [%3]\n" +" strex %1, %4, [%3]\n" +" teq %1, #0\n" +" bne 1b\n" + : "=&r"(oldval),"=&r"(flag), "+m"(*addr) + : "r"(addr), "r"(1) + : "cc"); + + return oldval; } #define AO_HAVE_test_and_set @@ -139,20 +139,20 @@ AO_test_and_set(volatile AO_TS_t *addr) { AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *p, AO_t incr) { - unsigned long flag,tmp; - AO_t result; - - __asm__ __volatile__("@AO_fetch_and_add\n" -"1: ldrex %0, [%5]\n" /* get original */ -" add %2, %0, %4\n" /* sum up in incr */ -" strex %1, %2, [%5]\n" /* store them */ -" teq %1, #0\n" -" bne 1b\n" - : "=&r"(result),"=&r"(flag),"=&r"(tmp),"+m"(*p) /* 0..3 */ - : "r"(incr), "r"(p) /* 4..5 */ - : "cc"); - - return result; + unsigned long flag,tmp; + AO_t result; + + __asm__ __volatile__("@AO_fetch_and_add\n" +"1: ldrex %0, [%5]\n" /* get original */ +" add %2, %0, %4\n" /* sum up in incr */ +" strex %1, %2, [%5]\n" /* store them */ +" teq %1, #0\n" +" bne 1b\n" + : "=&r"(result),"=&r"(flag),"=&r"(tmp),"+m"(*p) /* 0..3 */ + : "r"(incr), "r"(p) /* 4..5 */ + : "cc"); + + return result; } #define AO_HAVE_fetch_and_add @@ -161,20 +161,20 @@ AO_fetch_and_add(volatile AO_t *p, AO_t incr) AO_INLINE AO_t AO_fetch_and_add1(volatile AO_t *p) { - unsigned long flag,tmp; - AO_t result; - - __asm__ __volatile__("@AO_fetch_and_add1\n" -"1: ldrex %0, [%4]\n" /* get original */ -" add %1, %0, #1\n" /* increment */ -" strex %2, %1, [%4]\n" /* store them */ -" teq %2, #0\n" -" bne 1b\n" - : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p) - : "r"(p) - : "cc"); - - return result; + unsigned long flag,tmp; + AO_t result; + + __asm__ __volatile__("@AO_fetch_and_add1\n" +"1: ldrex %0, [%4]\n" /* get original */ +" add %1, %0, #1\n" /* increment */ +" strex %2, %1, [%4]\n" /* store them */ +" teq %2, #0\n" +" bne 1b\n" + : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p) + : "r"(p) + : "cc"); + + return result; } #define AO_HAVE_fetch_and_add1 @@ -183,20 +183,20 @@ AO_fetch_and_add1(volatile AO_t *p) AO_INLINE AO_t AO_fetch_and_sub1(volatile AO_t *p) { - unsigned long flag,tmp; - AO_t result; - - __asm__ __volatile__("@AO_fetch_and_sub1\n" -"1: ldrex %0, [%4]\n" /* get original */ -" sub %1, %0, #1\n" /* decrement */ -" strex %2, %1, [%4]\n" /* store them */ -" teq %2, #0\n" -" bne 1b\n" - : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p) - : "r"(p) - : "cc"); - - return result; + unsigned long flag,tmp; + AO_t result; + + __asm__ __volatile__("@AO_fetch_and_sub1\n" +"1: ldrex %0, [%4]\n" /* get original */ +" sub %1, %0, #1\n" /* decrement */ +" strex %2, %1, [%4]\n" /* store them */ +" teq %2, #0\n" +" bne 1b\n" + : "=&r"(result), "=&r"(tmp), "=&r"(flag), 
"+m"(*p) + : "r"(p) + : "cc"); + + return result; } #define AO_HAVE_fetch_and_sub1 @@ -205,73 +205,73 @@ AO_fetch_and_sub1(volatile AO_t *p) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, - AO_t old_val, AO_t new_val) + AO_t old_val, AO_t new_val) { - AO_t result,tmp; - - __asm__ __volatile__("@ AO_compare_and_swap\n" -"1: mov %0, #2\n" /* store a flag */ -" ldrex %1, [%3]\n" /* get original */ -" teq %1, %4\n" /* see if match */ -" strexeq %0, %5, [%3]\n" /* store new one if matched */ -" teq %0, #1\n" -" beq 1b\n" /* if update failed, repeat */ - : "=&r"(result), "=&r"(tmp), "+m"(*addr) - : "r"(addr), "r"(old_val), "r"(new_val) - : "cc"); - - return !(result&2); /* if succeded, return 1, else 0 */ + AO_t result,tmp; + + __asm__ __volatile__("@ AO_compare_and_swap\n" +"1: mov %0, #2\n" /* store a flag */ +" ldrex %1, [%3]\n" /* get original */ +" teq %1, %4\n" /* see if match */ +" strexeq %0, %5, [%3]\n" /* store new one if matched */ +" teq %0, #1\n" +" beq 1b\n" /* if update failed, repeat */ + : "=&r"(result), "=&r"(tmp), "+m"(*addr) + : "r"(addr), "r"(old_val), "r"(new_val) + : "cc"); + + return !(result&2); /* if succeded, return 1, else 0 */ } #define AO_HAVE_compare_and_swap AO_INLINE int AO_compare_double_and_swap_double(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { - double_ptr_storage old_val = ((double_ptr_storage)old_val2 << 32) | old_val1; - double_ptr_storage new_val = ((double_ptr_storage)new_val2 << 32) | new_val1; - + double_ptr_storage old_val = ((double_ptr_storage)old_val2 << 32) | old_val1; + double_ptr_storage new_val = ((double_ptr_storage)new_val2 << 32) | new_val1; + double_ptr_storage tmp; - int result; - - while(1) { - __asm__ __volatile__("@ AO_compare_and_swap_double\n" - " ldrexd %0, [%1]\n" /* get original to r1&r2*/ - : "=&r"(tmp) - : "r"(addr) - : "cc"); - if(tmp != old_val) return 0; - __asm__ __volatile__( - " strexd %0, %2, [%3]\n" /* store new one if matched */ - : "=&r"(result),"+m"(*addr) - : "r"(new_val), "r"(addr) - : "cc"); - if(!result) return 1; - } + int result; + + while(1) { + __asm__ __volatile__("@ AO_compare_and_swap_double\n" + " ldrexd %0, [%1]\n" /* get original to r1&r2*/ + : "=&r"(tmp) + : "r"(addr) + : "cc"); + if(tmp != old_val) return 0; + __asm__ __volatile__( + " strexd %0, %2, [%3]\n" /* store new one if matched */ + : "=&r"(result),"+m"(*addr) + : "r"(new_val), "r"(addr) + : "cc"); + if(!result) return 1; + } } #define AO_HAVE_compare_double_and_swap_double #else /* pre ARMv6 architecures ... */ - -/* I found a slide set that, if I read it correctly, claims that */ -/* Loads followed by either a Load or Store are ordered, but nothing */ -/* else is. */ -/* It appears that SWP is the only simple memory barrier. */ + +/* I found a slide set that, if I read it correctly, claims that */ +/* Loads followed by either a Load or Store are ordered, but nothing */ +/* else is. */ +/* It appears that SWP is the only simple memory barrier. */ #include "../all_atomic_load_store.h" AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; - /* SWP on ARM is very similar to XCHG on x86. */ - /* The first operand is the result, the second the value */ - /* to be stored. Both registers must be different from addr. */ + /* SWP on ARM is very similar to XCHG on x86. 
*/ + /* The first operand is the result, the second the value */ + /* to be stored. Both registers must be different from addr. */ /* Make the address operand an early clobber output so it */ /* doesn't overlap with the other operands. The early clobber*/ /* on oldval is necessary to prevent the compiler allocating */ - /* them to the same register if they are both unused. */ + /* them to the same register if they are both unused. */ __asm__ __volatile__("swp %0, %2, [%3]" : "=&r"(oldval), "=&r"(addr) : "r"(1), "1"(addr) diff --git a/src/atomic_ops/sysdeps/gcc/hppa.h b/src/atomic_ops/sysdeps/gcc/hppa.h index f303d7f..663f6b6 100644 --- a/src/atomic_ops/sysdeps/gcc/hppa.h +++ b/src/atomic_ops/sysdeps/gcc/hppa.h @@ -1,40 +1,40 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. * * Modified by Carlos O'Donell , 2003 - * - Added self-aligning lock. - * - */ + * - Added self-aligning lock. + * + */ #include "../all_atomic_load_store.h" -/* Some architecture set descriptions include special "ordered" memory */ -/* operations. As far as we can tell, no existing processors actually */ -/* require those. Nor does it appear likely that future processors */ -/* will. */ +/* Some architecture set descriptions include special "ordered" memory */ +/* operations. As far as we can tell, no existing processors actually */ +/* require those. Nor does it appear likely that future processors */ +/* will. */ #include "../ordered.h" -/* GCC will not guarantee the alignment we need, use four lock words */ -/* and select the correctly aligned datum. See the glibc 2.3.2 */ -/* linuxthread port for the original implementation. */ +/* GCC will not guarantee the alignment we need, use four lock words */ +/* and select the correctly aligned datum. See the glibc 2.3.2 */ +/* linuxthread port for the original implementation. */ struct AO_pa_clearable_loc { int data[4]; }; @@ -42,38 +42,38 @@ struct AO_pa_clearable_loc { #undef AO_TS_INITIALIZER #define AO_TS_t struct AO_pa_clearable_loc #define AO_TS_INITIALIZER {1,1,1,1} -/* Switch meaning of set and clear, since we only have an atomic clear */ -/* instruction. */ +/* Switch meaning of set and clear, since we only have an atomic clear */ +/* instruction. 
*/ typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val; #define AO_TS_VAL_t AO_PA_TS_val #define AO_TS_CLEAR AO_PA_TS_clear #define AO_TS_SET AO_PA_TS_set -/* The hppa only has one atomic read and modify memory operation, */ -/* load and clear, so hppa spinlocks must use zero to signify that */ -/* someone is holding the lock. The address used for the ldcw */ -/* semaphore must be 16-byte aligned. */ +/* The hppa only has one atomic read and modify memory operation, */ +/* load and clear, so hppa spinlocks must use zero to signify that */ +/* someone is holding the lock. The address used for the ldcw */ +/* semaphore must be 16-byte aligned. */ #define __ldcw(a) ({ \ - volatile unsigned int __ret; \ - __asm__ __volatile__("ldcw 0(%2),%0" \ - : "=r" (__ret), "=m" (*(a)) : "r" (a)); \ - __ret; \ + volatile unsigned int __ret; \ + __asm__ __volatile__("ldcw 0(%2),%0" \ + : "=r" (__ret), "=m" (*(a)) : "r" (a)); \ + __ret; \ }) -/* Because malloc only guarantees 8-byte alignment for malloc'd data, */ -/* and GCC only guarantees 8-byte alignment for stack locals, we can't */ -/* be assured of 16-byte alignment for atomic lock data even if we */ -/* specify "__attribute ((aligned(16)))" in the type declaration. So, */ -/* we use a struct containing an array of four ints for the atomic lock */ -/* type and dynamically select the 16-byte aligned int from the array */ -/* for the semaphore. */ +/* Because malloc only guarantees 8-byte alignment for malloc'd data, */ +/* and GCC only guarantees 8-byte alignment for stack locals, we can't */ +/* be assured of 16-byte alignment for atomic lock data even if we */ +/* specify "__attribute ((aligned(16)))" in the type declaration. So, */ +/* we use a struct containing an array of four ints for the atomic lock */ +/* type and dynamically select the 16-byte aligned int from the array */ +/* for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 #define __ldcw_align(a) ({ \ - unsigned long __ret = (unsigned long) a; \ - __ret += __PA_LDCW_ALIGNMENT - 1; \ - __ret &= ~(__PA_LDCW_ALIGNMENT - 1); \ - (volatile unsigned int *) __ret; \ + unsigned long __ret = (unsigned long) a; \ + __ret += __PA_LDCW_ALIGNMENT - 1; \ + __ret &= ~(__PA_LDCW_ALIGNMENT - 1); \ + (volatile unsigned int *) __ret; \ }) /* Works on PA 1.1 and PA 2.0 systems */ @@ -82,7 +82,7 @@ AO_test_and_set_full(volatile AO_TS_t * addr) { volatile unsigned int *a = __ldcw_align (addr); return (AO_TS_VAL_t) __ldcw (a); -} +} AO_INLINE void AO_pa_clear(volatile AO_TS_t * addr) @@ -94,4 +94,3 @@ AO_pa_clear(volatile AO_TS_t * addr) #define AO_CLEAR(addr) AO_pa_clear(addr) #define AO_HAVE_test_and_set_full - diff --git a/src/atomic_ops/sysdeps/gcc/ia64.h b/src/atomic_ops/sysdeps/gcc/ia64.h index bd93f70..37f2f6a 100644 --- a/src/atomic_ops/sysdeps/gcc/ia64.h +++ b/src/atomic_ops/sysdeps/gcc/ia64.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
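/* Illustration (not part of the patch): how the hppa self-aligning   */
/* lock in the hppa.h hunks above picks its semaphore word. A struct  */
/* of four ints is 16 bytes, and with malloc's/the stack's 8-byte     */
/* alignment, rounding its address up to a multiple of 16 always      */
/* lands on an int inside the struct. Minimal plain-C sketch, no      */
/* ldcw involved; pa_loc and pa_align are hypothetical names.         */
#include <assert.h>
#include <stdio.h>

struct pa_loc { int data[4]; };

static volatile unsigned int *pa_align(struct pa_loc *a)
{
  unsigned long p = (unsigned long)a;
  p = (p + 15) & ~15UL;                 /* round up to 16 bytes */
  return (volatile unsigned int *)p;
}

int main(void)
{
  struct pa_loc lock = {{1, 1, 1, 1}};  /* AO_TS_INITIALIZER pattern */
  volatile unsigned int *sem = pa_align(&lock);
  assert(((unsigned long)sem & 15) == 0);
  printf("semaphore word at %p\n", (void *)sem);
  return 0;
}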
- * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ #include "../all_atomic_load_store.h" @@ -27,21 +27,21 @@ #include "../test_and_set_t_is_char.h" #ifdef _ILP32 - /* 32-bit HP/UX code. */ - /* This requires pointer "swizzling". Pointers need to be expanded */ - /* to 64 bits using the addp4 instruction before use. This makes it */ - /* hard to share code, but we try anyway. */ + /* 32-bit HP/UX code. */ + /* This requires pointer "swizzling". Pointers need to be expanded */ + /* to 64 bits using the addp4 instruction before use. This makes it */ + /* hard to share code, but we try anyway. */ # define AO_LEN "4" - /* We assume that addr always appears in argument position 1 in asm */ - /* code. If it is clobbered due to swizzling, we also need it in */ - /* second position. Any later arguments are referenced symbolically, */ + /* We assume that addr always appears in argument position 1 in asm */ + /* code. If it is clobbered due to swizzling, we also need it in */ + /* second position. Any later arguments are referenced symbolically, */ /* so that we don't have to worry about their position. This requires*/ - /* gcc 3.1, but you shouldn't be using anything older than that on */ - /* IA64 anyway. */ - /* The AO_MASK macro is a workaround for the fact that HP/UX gcc */ - /* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */ - /* doesn't appear to clear high bits in a pointer value we pass into */ - /* assembly code, even if it is supposedly of type AO_t. */ + /* gcc 3.1, but you shouldn't be using anything older than that on */ + /* IA64 anyway. */ + /* The AO_MASK macro is a workaround for the fact that HP/UX gcc */ + /* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */ + /* doesn't appear to clear high bits in a pointer value we pass into */ + /* assembly code, even if it is supposedly of type AO_t. 
*/ # define AO_IN_ADDR "1"(addr) # define AO_OUT_ADDR , "=r"(addr) # define AO_SWIZZLE "addp4 %1=0,%1;;\n" @@ -67,8 +67,8 @@ AO_fetch_and_add1_acquire (volatile AO_t *addr) AO_t result; __asm__ __volatile__ (AO_SWIZZLE - "fetchadd" AO_LEN ".acq %0=[%1],1": - "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); + "fetchadd" AO_LEN ".acq %0=[%1],1": + "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_fetch_and_add1_acquire @@ -79,8 +79,8 @@ AO_fetch_and_add1_release (volatile AO_t *addr) AO_t result; __asm__ __volatile__ (AO_SWIZZLE - "fetchadd" AO_LEN ".rel %0=[%1],1": - "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); + "fetchadd" AO_LEN ".rel %0=[%1],1": + "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } @@ -92,8 +92,8 @@ AO_fetch_and_sub1_acquire (volatile AO_t *addr) AO_t result; __asm__ __volatile__ (AO_SWIZZLE - "fetchadd" AO_LEN ".acq %0=[%1],-1": - "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); + "fetchadd" AO_LEN ".acq %0=[%1],-1": + "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } @@ -105,8 +105,8 @@ AO_fetch_and_sub1_release (volatile AO_t *addr) AO_t result; __asm__ __volatile__ (AO_SWIZZLE - "fetchadd" AO_LEN ".rel %0=[%1],-1": - "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); + "fetchadd" AO_LEN ".rel %0=[%1],-1": + "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } @@ -120,7 +120,7 @@ AO_int_fetch_and_add1_acquire (volatile unsigned int *addr) unsigned int result; __asm__ __volatile__ ("fetchadd4.acq %0=[%1],1": - "=r" (result): AO_IN_ADDR :"memory"); + "=r" (result): AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_int_fetch_and_add1_acquire @@ -131,7 +131,7 @@ AO_int_fetch_and_add1_release (volatile unsigned int *addr) unsigned int result; __asm__ __volatile__ ("fetchadd4.rel %0=[%1],1": - "=r" (result): AO_IN_ADDR :"memory"); + "=r" (result): AO_IN_ADDR :"memory"); return result; } @@ -143,7 +143,7 @@ AO_int_fetch_and_sub1_acquire (volatile unsigned int *addr) unsigned int result; __asm__ __volatile__ ("fetchadd4.acq %0=[%1],-1": - "=r" (result): AO_IN_ADDR :"memory"); + "=r" (result): AO_IN_ADDR :"memory"); return result; } @@ -155,7 +155,7 @@ AO_int_fetch_and_sub1_release (volatile unsigned int *addr) unsigned int result; __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1": - "=r" (result): AO_IN_ADDR :"memory"); + "=r" (result): AO_IN_ADDR :"memory"); return result; } @@ -165,16 +165,16 @@ AO_int_fetch_and_sub1_release (volatile unsigned int *addr) AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; AO_MASK(old); __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN - ".acq %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN + ".acq %0=[%1],%[new_val],ar.ccv" + : "=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) + : "memory"); return (oldval == old); } @@ -182,16 +182,16 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; AO_MASK(old); __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN - ".rel %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN + ".rel %0=[%1],%[new_val],ar.ccv" + : 
"=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) + : "memory"); return (oldval == old); } @@ -199,14 +199,14 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv" + : "=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) + : "memory"); return (oldval == old); } @@ -214,14 +214,14 @@ AO_char_compare_and_swap_acquire(volatile unsigned char *addr, AO_INLINE int AO_char_compare_and_swap_release(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv" + : "=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) + : "memory"); return (oldval == old); } @@ -229,14 +229,14 @@ AO_char_compare_and_swap_release(volatile unsigned char *addr, AO_INLINE int AO_short_compare_and_swap_acquire(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv" + : "=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) + : "memory"); return (oldval == old); } @@ -244,14 +244,14 @@ AO_short_compare_and_swap_acquire(volatile unsigned short *addr, AO_INLINE int AO_short_compare_and_swap_release(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; __asm__ __volatile__(AO_SWIZZLE - "mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv" - : "=r"(oldval) AO_OUT_ADDR - : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) - : "memory"); + "mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv" + : "=r"(oldval) AO_OUT_ADDR + : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) + : "memory"); return (oldval == old); } @@ -261,12 +261,12 @@ AO_short_compare_and_swap_release(volatile unsigned short *addr, AO_INLINE int AO_int_compare_and_swap_acquire(volatile unsigned int *addr, - unsigned int old, unsigned int new_val) + unsigned int old, unsigned int new_val) { unsigned int oldval; __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv" - : "=r"(oldval) - : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); + : "=r"(oldval) + : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); return (oldval == old); } @@ -274,12 +274,12 @@ AO_int_compare_and_swap_acquire(volatile unsigned int *addr, AO_INLINE int AO_int_compare_and_swap_release(volatile unsigned int *addr, - unsigned int old, unsigned int new_val) + unsigned int old, unsigned int new_val) { unsigned 
int oldval; __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv" - : "=r"(oldval) - : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); + : "=r"(oldval) + : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); return (oldval == old); } @@ -287,10 +287,10 @@ AO_int_compare_and_swap_release(volatile unsigned int *addr, #endif /* !_ILP32 */ -/* FIXME: Add compare_and_swap_double as soon as there is widely */ -/* available hardware that implements it. */ +/* FIXME: Add compare_and_swap_double as soon as there is widely */ +/* available hardware that implements it. */ -/* FIXME: Add compare_double_and_swap_double for the _ILP32 case. */ +/* FIXME: Add compare_double_and_swap_double for the _ILP32 case. */ #ifdef _ILP32 # include "../ao_t_is_int.h" diff --git a/src/atomic_ops/sysdeps/gcc/m68k.h b/src/atomic_ops/sysdeps/gcc/m68k.h index 4bec437..a815d81 100644 --- a/src/atomic_ops/sysdeps/gcc/m68k.h +++ b/src/atomic_ops/sysdeps/gcc/m68k.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -15,16 +15,16 @@ * */ -/* The cas instruction causes an emulation trap for the */ -/* 060 with a misaligned pointer, so let's avoid this. */ +/* The cas instruction causes an emulation trap for the */ +/* 060 with a misaligned pointer, so let's avoid this. */ #undef AO_t typedef unsigned long AO_t __attribute__ ((aligned (4))); /* FIXME. Very incomplete. */ #include "../all_aligned_atomic_load_store.h" -/* Are there any m68k multiprocessors still around? */ -/* AFAIK, Alliants were sequentially consistent. */ +/* Are there any m68k multiprocessors still around? */ +/* AFAIK, Alliants were sequentially consistent. */ #include "../ordered.h" #include "../test_and_set_t_is_char.h" @@ -34,15 +34,15 @@ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_t oldval; - /* The value at addr is semi-phony. */ - /* 'tas' sets bit 7 while the return */ - /* value pretends all bits were set, */ - /* which at least matches AO_TS_SET. */ + /* The value at addr is semi-phony. */ + /* 'tas' sets bit 7 while the return */ + /* value pretends all bits were set, */ + /* which at least matches AO_TS_SET. */ __asm__ __volatile__( - "tas %1; sne %0" - : "=d" (oldval), "=m" (*addr) - : "m" (*addr) - : "memory"); + "tas %1; sne %0" + : "=d" (oldval), "=m" (*addr) + : "m" (*addr) + : "memory"); return oldval; } @@ -51,15 +51,15 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { char result; __asm__ __volatile__( - "cas.l %3,%4,%1; seq %0" - : "=d" (result), "=m" (*addr) - : "m" (*addr), "d" (old), "d" (new_val) - : "memory"); + "cas.l %3,%4,%1; seq %0" + : "=d" (result), "=m" (*addr) + : "m" (*addr), "d" (old), "d" (new_val) + : "memory"); return -result; } diff --git a/src/atomic_ops/sysdeps/gcc/mips.h b/src/atomic_ops/sysdeps/gcc/mips.h index c6fd2c1..bc2a236 100644 --- a/src/atomic_ops/sysdeps/gcc/mips.h +++ b/src/atomic_ops/sysdeps/gcc/mips.h @@ -1,107 +1,107 @@ -/* - * Copyright (c) 2005,2007 Thiemo Seufer - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
- * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - */ - -/* - * FIXME: This should probably make finer distinctions. SGI MIPS is - * much more strongly ordered, and in fact closer to sequentially - * consistent. This is really aimed at modern embedded implementations. - * It looks to me like this assumes a 32-bit ABI. -HB - */ - -#include "../all_aligned_atomic_load_store.h" -#include "../acquire_release_volatile.h" -#include "../test_and_set_t_is_ao_t.h" -#include "../standard_ao_double_t.h" - -/* Data dependence does not imply read ordering. */ -#define AO_NO_DD_ORDERING - -AO_INLINE void -AO_nop_full(void) -{ - __asm__ __volatile__( - " .set push \n" - " .set mips2 \n" - " .set noreorder \n" - " .set nomacro \n" - " sync \n" - " .set pop " - : : : "memory"); -} - -#define AO_HAVE_nop_full - -AO_INLINE int -AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) -{ - register int was_equal = 0; - register int temp; - - __asm__ __volatile__( - " .set push \n" - " .set mips2 \n" - " .set noreorder \n" - " .set nomacro \n" - "1: ll %0, %1 \n" - " bne %0, %4, 2f \n" - " move %0, %3 \n" - " sc %0, %1 \n" - " .set pop \n" - " beqz %0, 1b \n" - " li %2, 1 \n" - "2: " - : "=&r" (temp), "+R" (*addr), "+r" (was_equal) - : "r" (new_val), "r" (old) - : "memory"); - return was_equal; -} - -#define AO_HAVE_compare_and_swap - -/* FIXME: I think the implementations below should be automatically */ -/* generated if we omit them. - HB */ - -AO_INLINE int -AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) { - int result = AO_compare_and_swap(addr, old, new_val); - AO_nop_full(); - return result; -} - -#define AO_HAVE_compare_and_swap_acquire - -AO_INLINE int -AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) { - AO_nop_full(); - return AO_compare_and_swap(addr, old, new_val); -} - -#define AO_HAVE_compare_and_swap_release - -AO_INLINE int -AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { - AO_t result; - AO_nop_full(); - result = AO_compare_and_swap(addr, old, new_val); - AO_nop_full(); - return result; -} - -#define AO_HAVE_compare_and_swap_full - -/* - * FIXME: We should also implement fetch_and_add and or primitives - * directly. - */ - -#include "../ao_t_is_int.h" +/* + * Copyright (c) 2005,2007 Thiemo Seufer + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +/* + * FIXME: This should probably make finer distinctions. SGI MIPS is + * much more strongly ordered, and in fact closer to sequentially + * consistent. This is really aimed at modern embedded implementations. + * It looks to me like this assumes a 32-bit ABI. 
-HB + */ + +#include "../all_aligned_atomic_load_store.h" +#include "../acquire_release_volatile.h" +#include "../test_and_set_t_is_ao_t.h" +#include "../standard_ao_double_t.h" + +/* Data dependence does not imply read ordering. */ +#define AO_NO_DD_ORDERING + +AO_INLINE void +AO_nop_full(void) +{ + __asm__ __volatile__( + " .set push \n" + " .set mips2 \n" + " .set noreorder \n" + " .set nomacro \n" + " sync \n" + " .set pop " + : : : "memory"); +} + +#define AO_HAVE_nop_full + +AO_INLINE int +AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) +{ + register int was_equal = 0; + register int temp; + + __asm__ __volatile__( + " .set push \n" + " .set mips2 \n" + " .set noreorder \n" + " .set nomacro \n" + "1: ll %0, %1 \n" + " bne %0, %4, 2f \n" + " move %0, %3 \n" + " sc %0, %1 \n" + " .set pop \n" + " beqz %0, 1b \n" + " li %2, 1 \n" + "2: " + : "=&r" (temp), "+R" (*addr), "+r" (was_equal) + : "r" (new_val), "r" (old) + : "memory"); + return was_equal; +} + +#define AO_HAVE_compare_and_swap + +/* FIXME: I think the implementations below should be automatically */ +/* generated if we omit them. - HB */ + +AO_INLINE int +AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) { + int result = AO_compare_and_swap(addr, old, new_val); + AO_nop_full(); + return result; +} + +#define AO_HAVE_compare_and_swap_acquire + +AO_INLINE int +AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) { + AO_nop_full(); + return AO_compare_and_swap(addr, old, new_val); +} + +#define AO_HAVE_compare_and_swap_release + +AO_INLINE int +AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { + AO_t result; + AO_nop_full(); + result = AO_compare_and_swap(addr, old, new_val); + AO_nop_full(); + return result; +} + +#define AO_HAVE_compare_and_swap_full + +/* + * FIXME: We should also implement fetch_and_add and or primitives + * directly. + */ + +#include "../ao_t_is_int.h" diff --git a/src/atomic_ops/sysdeps/gcc/powerpc.h b/src/atomic_ops/sysdeps/gcc/powerpc.h index 38cd4fe..5d2f386 100644 --- a/src/atomic_ops/sysdeps/gcc/powerpc.h +++ b/src/atomic_ops/sysdeps/gcc/powerpc.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. @@ -15,26 +15,26 @@ * */ -/* Memory model documented at http://www-106.ibm.com/developerworks/ */ -/* eserver/articles/archguide.html and (clearer) */ +/* Memory model documented at http://www-106.ibm.com/developerworks/ */ +/* eserver/articles/archguide.html and (clearer) */ /* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */ -/* There appears to be no implicit ordering between any kind of */ -/* independent memory references. */ -/* Architecture enforces some ordering based on control dependence. */ -/* I don't know if that could help. */ -/* Data-dependent loads are always ordered. */ -/* Based on the above references, eieio is intended for use on */ -/* uncached memory, which we don't support. It does not order loads */ -/* from cached memory. */ -/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */ -/* track some of this down and correcting my misunderstandings. -HB */ -/* Earl Chew subsequently contributed further fixes & additions. */ +/* There appears to be no implicit ordering between any kind of */ +/* independent memory references. 
*/ +/* Architecture enforces some ordering based on control dependence. */ +/* I don't know if that could help. */ +/* Data-dependent loads are always ordered. */ +/* Based on the above references, eieio is intended for use on */ +/* uncached memory, which we don't support. It does not order loads */ +/* from cached memory. */ +/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */ +/* track some of this down and correcting my misunderstandings. -HB */ +/* Earl Chew subsequently contributed further fixes & additions. */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" - /* There seems to be no byte equivalent of lwarx, so this */ - /* may really be what we want, at least in the 32-bit case. */ + /* There seems to be no byte equivalent of lwarx, so this */ + /* may really be what we want, at least in the 32-bit case. */ AO_INLINE void AO_nop_full(void) @@ -44,7 +44,7 @@ AO_nop_full(void) #define AO_HAVE_nop_full -/* lwsync apparently works for everything but a StoreLoad barrier. */ +/* lwsync apparently works for everything but a StoreLoad barrier. */ AO_INLINE void AO_lwsync(void) { @@ -61,12 +61,12 @@ AO_lwsync(void) #define AO_nop_read() AO_lwsync() #define AO_HAVE_nop_read -/* We explicitly specify load_acquire, since it is important, and can */ -/* be implemented relatively cheaply. It could be implemented */ -/* with an ordinary load followed by a lwsync. But the general wisdom */ -/* seems to be that a data dependent branch followed by an isync is */ -/* cheaper. And the documentation is fairly explicit that this also */ -/* has acquire semantics. */ +/* We explicitly specify load_acquire, since it is important, and can */ +/* be implemented relatively cheaply. It could be implemented */ +/* with an ordinary load followed by a lwsync. But the general wisdom */ +/* seems to be that a data dependent branch followed by an isync is */ +/* cheaper. And the documentation is fairly explicit that this also */ +/* has acquire semantics. */ /* ppc64 uses ld not lwz */ #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) AO_INLINE AO_t @@ -89,9 +89,9 @@ AO_load_acquire(const volatile AO_t *addr) { AO_t result; - /* FIXME: We should get gcc to allocate one of the condition */ - /* registers. I always got "impossible constraint" when I */ - /* tried the "y" constraint. */ + /* FIXME: We should get gcc to allocate one of the condition */ + /* registers. I always got "impossible constraint" when I */ + /* tried the "y" constraint. */ __asm__ __volatile__ ( "lwz%U1%X1 %0,%1\n" "cmpw %0,%0\n" @@ -104,8 +104,8 @@ AO_load_acquire(const volatile AO_t *addr) #endif #define AO_HAVE_load_acquire -/* We explicitly specify store_release, since it relies */ -/* on the fact that lwsync is also a LoadStore barrier. */ +/* We explicitly specify store_release, since it relies */ +/* on the fact that lwsync is also a LoadStore barrier. */ AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t value) { @@ -115,9 +115,9 @@ AO_store_release(volatile AO_t *addr, AO_t value) #define AO_HAVE_load_acquire -/* This is similar to the code in the garbage collector. Deleting */ -/* this and having it synthesized from compare_and_swap would probably */ -/* only cost us a load immediate instruction. */ +/* This is similar to the code in the garbage collector. Deleting */ +/* this and having it synthesized from compare_and_swap would probably */ +/* only cost us a load immediate instruction. 
*/ #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) /* Completely untested. And we should be using smaller objects anyway. */ AO_INLINE AO_TS_VAL_t @@ -193,7 +193,7 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { #define AO_HAVE_test_and_set_full #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) -/* FIXME: Completely untested. */ +/* FIXME: Completely untested. */ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_t oldval; @@ -201,11 +201,11 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { __asm__ __volatile__( "1:ldarx %0,0,%2\n" /* load and reserve */ - "cmpd %0, %4\n" /* if load is not equal to */ - "bne 2f\n" /* old, fail */ + "cmpd %0, %4\n" /* if load is not equal to */ + "bne 2f\n" /* old, fail */ "stdcx. %3,0,%2\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ - "li %1,1\n" /* result = 1; */ + "li %1,1\n" /* result = 1; */ "2:\n" : "=&r"(oldval), "=&r"(result) : "r"(addr), "r"(new_val), "r"(old), "1"(result) @@ -223,11 +223,11 @@ AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { __asm__ __volatile__( "1:lwarx %0,0,%2\n" /* load and reserve */ - "cmpw %0, %4\n" /* if load is not equal to */ - "bne 2f\n" /* old, fail */ + "cmpw %0, %4\n" /* if load is not equal to */ + "bne 2f\n" /* old, fail */ "stwcx. %3,0,%2\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ - "li %1,1\n" /* result = 1; */ + "li %1,1\n" /* result = 1; */ "2:\n" : "=&r"(oldval), "=&r"(result) : "r"(addr), "r"(new_val), "r"(old), "1"(result) @@ -268,7 +268,7 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { #define AO_HAVE_compare_and_swap_full #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) -/* FIXME: Completely untested. */ +/* FIXME: Completely untested. */ AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { @@ -276,10 +276,10 @@ AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { AO_t newval; __asm__ __volatile__( - "1:ldarx %0,0,%2\n" /* load and reserve */ - "add %1,%0,%3\n" /* increment */ - "stdcx. %1,0,%2\n" /* store conditional */ - "bne- 1b\n" /* retry if lost reservation */ + "1:ldarx %0,0,%2\n" /* load and reserve */ + "add %1,%0,%3\n" /* increment */ + "stdcx. %1,0,%2\n" /* store conditional */ + "bne- 1b\n" /* retry if lost reservation */ : "=&r"(oldval), "=&r"(newval) : "r"(addr), "r"(incr) : "memory", "cr0"); @@ -297,10 +297,10 @@ AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { AO_t newval; __asm__ __volatile__( - "1:lwarx %0,0,%2\n" /* load and reserve */ - "add %1,%0,%3\n" /* increment */ - "stwcx. %1,0,%2\n" /* store conditional */ - "bne- 1b\n" /* retry if lost reservation */ + "1:lwarx %0,0,%2\n" /* load and reserve */ + "add %1,%0,%3\n" /* increment */ + "stwcx. %1,0,%2\n" /* store conditional */ + "bne- 1b\n" /* retry if lost reservation */ : "=&r"(oldval), "=&r"(newval) : "r"(addr), "r"(incr) : "memory", "cr0"); @@ -344,4 +344,3 @@ AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) { #else # include "../ao_t_is_int.h" #endif - diff --git a/src/atomic_ops/sysdeps/gcc/s390.h b/src/atomic_ops/sysdeps/gcc/s390.h index 410f2f6..c9facf6 100644 --- a/src/atomic_ops/sysdeps/gcc/s390.h +++ b/src/atomic_ops/sysdeps/gcc/s390.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. 
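/* Illustration (not part of the patch): the lwarx/stwcx. retry loops */
/* in the powerpc.h hunks above implement the same pattern a portable */
/* fallback would build from compare-and-swap. Minimal sketch using   */
/* the documented AO_load and AO_compare_and_swap primitives;         */
/* my_fetch_and_add is a hypothetical name, not a libatomic_ops one.  */
#include "atomic_ops.h"

AO_t my_fetch_and_add(volatile AO_t *p, AO_t incr)
{
  AO_t old;
  do {
    old = AO_load(p);                 /* read the current value       */
  } while (!AO_compare_and_swap(p, old, old + incr));
                                      /* retry if someone raced us    */
  return old;                         /* value before the addition    */
}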
@@ -15,30 +15,30 @@ * */ -/* FIXME: untested. */ -/* The relevant documentation appears to be at */ -/* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */ -/* around page 5-96. Apparently: */ -/* - Memory references in general are atomic only for a single */ -/* byte. But it appears that the most common load/store */ -/* instructions also guarantee atomicity for aligned */ -/* operands of standard types. WE FOOLISHLY ASSUME that */ -/* compilers only generate those. If that turns out to be */ -/* wrong, we need inline assembly code for AO_load and */ -/* AO_store. */ -/* - A store followed by a load is unordered since the store */ -/* may be delayed. Otherwise everything is ordered. */ -/* - There is a hardware compare-and-swap (CS) instruction. */ +/* FIXME: untested. */ +/* The relevant documentation appears to be at */ +/* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */ +/* around page 5-96. Apparently: */ +/* - Memory references in general are atomic only for a single */ +/* byte. But it appears that the most common load/store */ +/* instructions also guarantee atomicity for aligned */ +/* operands of standard types. WE FOOLISHLY ASSUME that */ +/* compilers only generate those. If that turns out to be */ +/* wrong, we need inline assembly code for AO_load and */ +/* AO_store. */ +/* - A store followed by a load is unordered since the store */ +/* may be delayed. Otherwise everything is ordered. */ +/* - There is a hardware compare-and-swap (CS) instruction. */ #include "../ordered_except_wr.h" #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" -/* FIXME: Is there a way to do byte-sized test-and-set? */ +/* FIXME: Is there a way to do byte-sized test-and-set? */ -/* FIXME: AO_nop_full should probably be implemented directly. */ -/* It appears that certain BCR instructions have that effect. */ -/* Presumably they're cheaper than CS? */ +/* FIXME: AO_nop_full should probably be implemented directly. */ +/* It appears that certain BCR instructions have that effect. */ +/* Presumably they're cheaper than CS? */ AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) @@ -60,4 +60,4 @@ AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr, #define AO_HAVE_compare_and_swap_full -/* FIXME: Add double-wide compare-and-swap for 32-bit executables. */ +/* FIXME: Add double-wide compare-and-swap for 32-bit executables. */ diff --git a/src/atomic_ops/sysdeps/gcc/sparc.h b/src/atomic_ops/sysdeps/gcc/sparc.h index 5ee7a90..b264b0d 100644 --- a/src/atomic_ops/sysdeps/gcc/sparc.h +++ b/src/atomic_ops/sysdeps/gcc/sparc.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -15,13 +15,13 @@ * */ -/* FIXME. Very incomplete. No support for sparc64. */ -/* Non-ancient SPARCs provide compare-and-swap (casa). */ -/* We should make that available. */ +/* FIXME. Very incomplete. No support for sparc64. */ +/* Non-ancient SPARCs provide compare-and-swap (casa). */ +/* We should make that available. */ #include "../all_atomic_load_store.h" -/* Real SPARC code uses TSO: */ +/* Real SPARC code uses TSO: */ #include "../ordered_except_wr.h" /* Test_and_set location is just a byte. 
*/ @@ -32,8 +32,8 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; __asm__ __volatile__("ldstub %1,%0" - : "=r"(oldval), "=m"(*addr) - : "m"(*addr) : "memory"); + : "=r"(oldval), "=m"(*addr) + : "m"(*addr) : "memory"); return oldval; } @@ -45,28 +45,28 @@ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { char ret; __asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t" -# if defined(__arch64__) - "casx [%2],%0,%1\n\t" -# else - "cas [%2],%0,%1\n\t" /* 32-bit version */ -# endif - "membar #StoreLoad | #StoreStore\n\t" - "cmp %0,%1\n\t" - "be,a 0f\n\t" - "mov 1,%0\n\t"/* one insn after branch always executed */ - "clr %0\n\t" - "0:\n\t" - : "=r" (ret), "+r" (new_val) - : "r" (addr), "0" (old) - : "memory", "cc"); +# if defined(__arch64__) + "casx [%2],%0,%1\n\t" +# else + "cas [%2],%0,%1\n\t" /* 32-bit version */ +# endif + "membar #StoreLoad | #StoreStore\n\t" + "cmp %0,%1\n\t" + "be,a 0f\n\t" + "mov 1,%0\n\t"/* one insn after branch always executed */ + "clr %0\n\t" + "0:\n\t" + : "=r" (ret), "+r" (new_val) + : "r" (addr), "0" (old) + : "memory", "cc"); return (int)ret; } #define AO_HAVE_compare_and_swap_full #endif /* AO_NO_SPARC_V9 */ -/* FIXME: This needs to be extended for SPARC v8 and v9. */ -/* SPARC V8 also has swap. V9 has CAS. */ -/* There are barriers like membar #LoadStore. */ -/* CASA (32-bit) and CASXA(64-bit) instructions were */ -/* added in V9. */ +/* FIXME: This needs to be extended for SPARC v8 and v9. */ +/* SPARC V8 also has swap. V9 has CAS. */ +/* There are barriers like membar #LoadStore. */ +/* CASA (32-bit) and CASXA(64-bit) instructions were */ +/* added in V9. */ diff --git a/src/atomic_ops/sysdeps/gcc/x86.h b/src/atomic_ops/sysdeps/gcc/x86.h index 488cd20..10ef02f 100644 --- a/src/atomic_ops/sysdeps/gcc/x86.h +++ b/src/atomic_ops/sysdeps/gcc/x86.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -16,19 +16,19 @@ * Some of the machine specific code was borrowed from our GC distribution. */ -/* The following really assume we have a 486 or better. Unfortunately */ -/* gcc doesn't define a suitable feature test macro based on command */ -/* line options. */ -/* We should perhaps test dynamically. */ +/* The following really assume we have a 486 or better. Unfortunately */ +/* gcc doesn't define a suitable feature test macro based on command */ +/* line options. */ +/* We should perhaps test dynamically. */ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations, except for some old WinChips, appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore both the WinChips, and the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations, except for some old WinChips, appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. */ +/* We ignore both the WinChips, and the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). 
*/ #include "../ordered_except_wr.h" @@ -47,14 +47,14 @@ AO_nop_full(void) #else -/* We could use the cpuid instruction. But that seems to be slower */ -/* than the default implementation based on test_and_set_full. Thus */ -/* we omit that bit of misinformation here. */ +/* We could use the cpuid instruction. But that seems to be slower */ +/* than the default implementation based on test_and_set_full. Thus */ +/* we omit that bit of misinformation here. */ #endif -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. */ /* Really only works for 486 and later */ AO_INLINE AO_t @@ -63,8 +63,8 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) AO_t result; __asm__ __volatile__ ("lock; xaddl %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -76,8 +76,8 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : - "=q" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=q" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -89,8 +89,8 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -101,7 +101,7 @@ AO_INLINE void AO_or_full (volatile AO_t *p, AO_t incr) { __asm__ __volatile__ ("lock; orl %1, %0" : - "=m" (*p) : "r" (incr), "m" (*p) : "memory"); + "=m" (*p) : "r" (incr), "m" (*p) : "memory"); } #define AO_HAVE_or_full @@ -112,8 +112,8 @@ AO_test_and_set_full(volatile AO_TS_t *addr) unsigned char oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ __asm__ __volatile__("xchgb %0, %1" - : "=q"(oldval), "=m"(*addr) - : "0"(0xff), "m"(*addr) : "memory"); + : "=q"(oldval), "=m"(*addr) + : "0"(0xff), "m"(*addr) : "memory"); return (AO_TS_VAL_t)oldval; } @@ -122,23 +122,23 @@ AO_test_and_set_full(volatile AO_TS_t *addr) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { char result; __asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1" - : "=m"(*addr), "=q"(result) - : "m"(*addr), "r" (new_val), "a"(old) : "memory"); + : "=m"(*addr), "=q"(result) + : "m"(*addr), "r" (new_val), "a"(old) : "memory"); return (int) result; } #define AO_HAVE_compare_and_swap_full /* Returns nonzero if the comparison succeeded. */ -/* Really requires at least a Pentium. */ +/* Really requires at least a Pentium. */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { char result; #if __PIC__ @@ -146,21 +146,21 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, GOT pointer. 
We can save and restore %ebx because GCC won't be using it for anything else (such as any of the m operands) */ __asm__ __volatile__("pushl %%ebx;" /* save ebx used for PIC GOT ptr */ - "movl %6,%%ebx;" /* move new_val2 to %ebx */ - "lock; cmpxchg8b %0; setz %1;" - "pop %%ebx;" /* restore %ebx */ - : "=m"(*addr), "=q"(result) - : "m"(*addr), "d" (old_val2), "a" (old_val1), - "c" (new_val2), "m" (new_val1) : "memory"); + "movl %6,%%ebx;" /* move new_val2 to %ebx */ + "lock; cmpxchg8b %0; setz %1;" + "pop %%ebx;" /* restore %ebx */ + : "=m"(*addr), "=q"(result) + : "m"(*addr), "d" (old_val2), "a" (old_val1), + "c" (new_val2), "m" (new_val1) : "memory"); #else /* We can't just do the same thing in non-PIC mode, because GCC * might be using %ebx as the memory operand. We could have ifdef'd * in a clobber, but there's no point doing the push/pop if we don't * have to. */ __asm__ __volatile__("lock; cmpxchg8b %0; setz %1;" - : "=m"(*addr), "=q"(result) - : "m"(*addr), "d" (old_val2), "a" (old_val1), - "c" (new_val2), "b" (new_val1) : "memory"); + : "=m"(*addr), "=q"(result) + : "m"(*addr), "d" (old_val2), "a" (old_val1), + "c" (new_val2), "b" (new_val1) : "memory"); #endif return (int) result; } diff --git a/src/atomic_ops/sysdeps/gcc/x86_64.h b/src/atomic_ops/sysdeps/gcc/x86_64.h index 36085c4..78a4a0f 100644 --- a/src/atomic_ops/sysdeps/gcc/x86_64.h +++ b/src/atomic_ops/sysdeps/gcc/x86_64.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -18,12 +18,12 @@ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. */ +/* We ignore the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" @@ -42,14 +42,14 @@ AO_nop_full(void) #else -/* We could use the cpuid instruction. But that seems to be slower */ -/* than the default implementation based on test_and_set_full. Thus */ -/* we omit that bit of misinformation here. */ +/* We could use the cpuid instruction. But that seems to be slower */ +/* than the default implementation based on test_and_set_full. Thus */ +/* we omit that bit of misinformation here. */ #endif -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. 
*/ AO_INLINE AO_t AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) @@ -57,8 +57,8 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) AO_t result; __asm__ __volatile__ ("lock; xaddq %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -70,8 +70,8 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : - "=q" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=q" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -83,8 +83,8 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -96,8 +96,8 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr) unsigned int result; __asm__ __volatile__ ("lock; xaddl %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr), "m" (*p) + : "memory"); return result; } @@ -107,7 +107,7 @@ AO_INLINE void AO_or_full (volatile AO_t *p, AO_t incr) { __asm__ __volatile__ ("lock; orq %1, %0" : - "=m" (*p) : "r" (incr), "m" (*p) : "memory"); + "=m" (*p) : "r" (incr), "m" (*p) : "memory"); } #define AO_HAVE_or_full @@ -118,8 +118,8 @@ AO_test_and_set_full(volatile AO_TS_t *addr) unsigned char oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ __asm__ __volatile__("xchgb %0, %1" - : "=q"(oldval), "=m"(*addr) - : "0"(0xff), "m"(*addr) : "memory"); + : "=q"(oldval), "=m"(*addr) + : "0"(0xff), "m"(*addr) : "memory"); return (AO_TS_VAL_t)oldval; } @@ -128,12 +128,12 @@ AO_test_and_set_full(volatile AO_TS_t *addr) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { char result; __asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1" - : "=m"(*addr), "=q"(result) - : "m"(*addr), "r" (new_val), "a"(old) : "memory"); + : "=m"(*addr), "=q"(result) + : "m"(*addr), "r" (new_val), "a"(old) : "memory"); return (int) result; } @@ -143,7 +143,7 @@ AO_compare_and_swap_full(volatile AO_t *addr, /* NEC LE-IT: older AMD Opterons are missing this instruction. * On these machines SIGILL will be thrown. * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated - * (lock based) version available */ + * (lock based) version available */ /* HB: Changed this to not define either by default. There are * enough machines and tool chains around on which cmpxchg16b * doesn't work. And the emulation is unsafe by our usual rules. 
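/* Illustration (not part of the patch): cmpxchg16b is missing on the */
/* older AMD Opterons mentioned above, so a program could probe for   */
/* it at startup instead of risking SIGILL. Sketch assuming a GCC     */
/* version whose <cpuid.h> provides __get_cpuid and bit_CMPXCHG16B.   */
#include <cpuid.h>

static int have_cmpxchg16b(void)
{
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;                           /* CPUID leaf 1 unsupported   */
  return (ecx & bit_CMPXCHG16B) != 0;   /* feature flag: ECX bit 13   */
}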
@@ -151,38 +151,38 @@ AO_compare_and_swap_full(volatile AO_t *addr, */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { char result; __asm__ __volatile__("lock; cmpxchg16b %0; setz %1" - : "=m"(*addr), "=q"(result) - : "m"(*addr), - "d" (old_val2), - "a" (old_val1), - "c" (new_val2), - "b" (new_val1) : "memory"); + : "=m"(*addr), "=q"(result) + : "m"(*addr), + "d" (old_val2), + "a" (old_val1), + "c" (new_val2), + "b" (new_val1) : "memory"); return (int) result; } #define AO_HAVE_compare_double_and_swap_double_full #else -/* this one provides spinlock based emulation of CAS implemented in */ +/* this one provides spinlock based emulation of CAS implemented in */ /* atomic_ops.c. We probably do not want to do this here, since it is */ /* not atomic with respect to other kinds of updates of *addr. On the */ -/* other hand, this may be a useful facility on occasion. */ +/* other hand, this may be a useful facility on occasion. */ #ifdef AO_WEAK_DOUBLE_CAS_EMULATION int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2); + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2); AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { - return AO_compare_double_and_swap_double_emulation(addr, - old_val1, old_val2, - new_val1, new_val2); + return AO_compare_double_and_swap_double_emulation(addr, + old_val1, old_val2, + new_val1, new_val2); } #define AO_HAVE_compare_double_and_swap_double_full #endif /* AO_WEAK_DOUBLE_CAS_EMULATION */ diff --git a/src/atomic_ops/sysdeps/generic_pthread.h b/src/atomic_ops/sysdeps/generic_pthread.h index b38367f..90bf117 100644 --- a/src/atomic_ops/sysdeps/generic_pthread.h +++ b/src/atomic_ops/sysdeps/generic_pthread.h @@ -1,40 +1,40 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ -/* The following is useful primarily for debugging and documentation. */ -/* We define various atomic operations by acquiring a global pthread */ -/* lock. The resulting implementation will perform poorly, but should */ -/* be correct unless it is used from signal handlers. 
*/ -/* We assume that all pthread operations act like full memory barriers. */ -/* (We believe that is the intent of the specification.) */ +/* The following is useful primarily for debugging and documentation. */ +/* We define various atomic operations by acquiring a global pthread */ +/* lock. The resulting implementation will perform poorly, but should */ +/* be correct unless it is used from signal handlers. */ +/* We assume that all pthread operations act like full memory barriers. */ +/* (We believe that is the intent of the specification.) */ #include #include "test_and_set_t_is_ao_t.h" - /* This is not necessarily compatible with the native */ - /* implementation. But those can't be safely mixed anyway. */ + /* This is not necessarily compatible with the native */ + /* implementation. But those can't be safely mixed anyway. */ -/* We define only the full barrier variants, and count on the */ -/* generalization section below to fill in the rest. */ +/* We define only the full barrier variants, and count on the */ +/* generalization section below to fill in the rest. */ extern pthread_mutex_t AO_pt_lock; AO_INLINE void @@ -219,7 +219,7 @@ AO_or_full(volatile AO_t *p, AO_t incr) AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { pthread_mutex_lock(&AO_pt_lock); if (*addr == old) @@ -238,16 +238,16 @@ AO_compare_and_swap_full(volatile AO_t *addr, /* Unlike real architectures, we define both double-width CAS variants. */ typedef struct { - AO_t AO_val1; - AO_t AO_val2; + AO_t AO_val1; + AO_t AO_val2; } AO_double_t; #define AO_HAVE_double_t AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old1, AO_t old2, - AO_t new1, AO_t new2) + AO_t old1, AO_t old2, + AO_t new1, AO_t new2) { pthread_mutex_lock(&AO_pt_lock); if (addr -> AO_val1 == old1 && addr -> AO_val2 == old2) @@ -266,8 +266,8 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_INLINE int AO_compare_and_swap_double_full(volatile AO_double_t *addr, - AO_t old1, - AO_t new1, AO_t new2) + AO_t old1, + AO_t new1, AO_t new2) { pthread_mutex_lock(&AO_pt_lock); if (addr -> AO_val1 == old1) @@ -284,6 +284,5 @@ AO_compare_and_swap_double_full(volatile AO_double_t *addr, #define AO_HAVE_compare_and_swap_double_full -/* We can't use hardware loads and stores, since they don't */ -/* interact correctly with atomic updates. */ - +/* We can't use hardware loads and stores, since they don't */ +/* interact correctly with atomic updates. */ diff --git a/src/atomic_ops/sysdeps/hpc/hppa.h b/src/atomic_ops/sysdeps/hpc/hppa.h index 35edef6..fe11fec 100644 --- a/src/atomic_ops/sysdeps/hpc/hppa.h +++ b/src/atomic_ops/sysdeps/hpc/hppa.h @@ -1,45 +1,45 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
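/* Illustration (not part of the patch): the generic_pthread.h hunks  */
/* above wrap every operation in one global mutex. A minimal sketch   */
/* of the same pattern for fetch-and-add; my_pt_lock and              */
/* my_fetch_and_add_full are hypothetical names.                      */
#include <pthread.h>

static pthread_mutex_t my_pt_lock = PTHREAD_MUTEX_INITIALIZER;

unsigned long my_fetch_and_add_full(volatile unsigned long *p,
                                    unsigned long incr)
{
  unsigned long old;
  pthread_mutex_lock(&my_pt_lock);   /* assumed to act as a full barrier */
  old = *p;
  *p = old + incr;
  pthread_mutex_unlock(&my_pt_lock);
  return old;
}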
- * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * + * SOFTWARE. + * * Derived from the corresponding header file for gcc. - * - */ + * + */ #include "../atomic_load_store.h" -/* Some architecture set descriptions include special "ordered" memory */ -/* operations. As far as we can tell, no existing processors actually */ -/* require those. Nor does it appear likely that future processors */ -/* will. */ -/* FIXME: */ -/* The PA emulator on Itanium may obey weaker restrictions. */ -/* There should be a mode in which we don't assume sequential */ -/* consistency here. */ +/* Some architecture set descriptions include special "ordered" memory */ +/* operations. As far as we can tell, no existing processors actually */ +/* require those. Nor does it appear likely that future processors */ +/* will. */ +/* FIXME: */ +/* The PA emulator on Itanium may obey weaker restrictions. */ +/* There should be a mode in which we don't assume sequential */ +/* consistency here. */ #include "../ordered.h" #include -/* GCC will not guarantee the alignment we need, use four lock words */ -/* and select the correctly aligned datum. See the glibc 2.3.2 */ -/* linuxthread port for the original implementation. */ +/* GCC will not guarantee the alignment we need, use four lock words */ +/* and select the correctly aligned datum. See the glibc 2.3.2 */ +/* linuxthread port for the original implementation. */ struct AO_pa_clearable_loc { int data[4]; }; @@ -47,34 +47,34 @@ struct AO_pa_clearable_loc { #undef AO_TS_INITIALIZER #define AO_TS_t struct AO_pa_clearable_loc #define AO_TS_INITIALIZER {1,1,1,1} -/* Switch meaning of set and clear, since we only have an atomic clear */ -/* instruction. */ +/* Switch meaning of set and clear, since we only have an atomic clear */ +/* instruction. */ typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val; #define AO_TS_VAL_t AO_PA_TS_val #define AO_TS_CLEAR AO_PA_TS_clear #define AO_TS_SET AO_PA_TS_set -/* The hppa only has one atomic read and modify memory operation, */ -/* load and clear, so hppa spinlocks must use zero to signify that */ -/* someone is holding the lock. The address used for the ldcw */ -/* semaphore must be 16-byte aligned. */ +/* The hppa only has one atomic read and modify memory operation, */ +/* load and clear, so hppa spinlocks must use zero to signify that */ +/* someone is holding the lock. The address used for the ldcw */ +/* semaphore must be 16-byte aligned. */ #define __ldcw(a, ret) \ _LDCWX(0 /* index */, 0 /* s */, a /* base */, ret); -/* Because malloc only guarantees 8-byte alignment for malloc'd data, */ -/* and GCC only guarantees 8-byte alignment for stack locals, we can't */ -/* be assured of 16-byte alignment for atomic lock data even if we */ -/* specify "__attribute ((aligned(16)))" in the type declaration. So, */ -/* we use a struct containing an array of four ints for the atomic lock */ -/* type and dynamically select the 16-byte aligned int from the array */ -/* for the semaphore. 
*/ +/* Because malloc only guarantees 8-byte alignment for malloc'd data, */ +/* and GCC only guarantees 8-byte alignment for stack locals, we can't */ +/* be assured of 16-byte alignment for atomic lock data even if we */ +/* specify "__attribute ((aligned(16)))" in the type declaration. So, */ +/* we use a struct containing an array of four ints for the atomic lock */ +/* type and dynamically select the 16-byte aligned int from the array */ +/* for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 #define __ldcw_align(a, ret) { \ - ret = (unsigned long) a; \ - ret += __PA_LDCW_ALIGNMENT - 1; \ - ret &= ~(__PA_LDCW_ALIGNMENT - 1); \ + ret = (unsigned long) a; \ + ret += __PA_LDCW_ALIGNMENT - 1; \ + ret &= ~(__PA_LDCW_ALIGNMENT - 1); \ } /* Works on PA 1.1 and PA 2.0 systems */ @@ -86,7 +86,7 @@ AO_test_and_set_full(volatile AO_TS_t * addr) __ldcw_align (addr, a); __ldcw (a, ret); return ret; -} +} AO_INLINE void AO_pa_clear(volatile AO_TS_t * addr) @@ -99,4 +99,3 @@ AO_pa_clear(volatile AO_TS_t * addr) #define AO_CLEAR(addr) AO_pa_clear(addr) #define AO_HAVE_test_and_set_full - diff --git a/src/atomic_ops/sysdeps/hpc/ia64.h b/src/atomic_ops/sysdeps/hpc/ia64.h index 3fbcc4d..1cfec1c 100644 --- a/src/atomic_ops/sysdeps/hpc/ia64.h +++ b/src/atomic_ops/sysdeps/hpc/ia64.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. 
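[Illustrative aside, not part of this patch: a spinlock over the PA-RISC primitives above. It is written against the portable names, so the inverted encoding (AO_TS_SET is 0 on this target) stays hidden behind the macros; pa_lock/pa_unlock are hypothetical names:

void pa_lock(volatile AO_TS_t *l)
{
  while (AO_test_and_set_full(l) == AO_TS_SET)
    ;                        /* previous value was "set": lock already held */
}

void pa_unlock(volatile AO_TS_t *l)
{
  AO_CLEAR(l);               /* plain store of the "clear" value via AO_pa_clear */
}]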
*/ /* @@ -53,7 +53,7 @@ AO_INLINE AO_t AO_fetch_and_add1_acquire (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, 1, - _LDHINT_NONE, _DOWN_MEM_FENCE); + _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_fetch_and_add1_acquire @@ -61,7 +61,7 @@ AO_INLINE AO_t AO_fetch_and_add1_release (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1, - _LDHINT_NONE, _UP_MEM_FENCE); + _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_fetch_and_add1_release @@ -70,7 +70,7 @@ AO_INLINE AO_t AO_fetch_and_sub1_acquire (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1, - _LDHINT_NONE, _DOWN_MEM_FENCE); + _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_fetch_and_sub1_acquire @@ -79,20 +79,20 @@ AO_INLINE AO_t AO_fetch_and_sub1_release (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1, - _LDHINT_NONE, _UP_MEM_FENCE); + _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_fetch_and_sub1_release AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; _Asm_mov_to_ar(_AREG_CCV, old, _DOWN_MEM_FENCE); oldval = _Asm_cmpxchg(AO_T_SIZE, _SEM_ACQ, addr, - new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); + new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); return (oldval == old); } @@ -100,12 +100,12 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; _Asm_mov_to_ar(_AREG_CCV, old, _UP_MEM_FENCE); oldval = _Asm_cmpxchg(AO_T_SIZE, _SEM_REL, addr, - new_val, _LDHINT_NONE, _UP_MEM_FENCE); + new_val, _LDHINT_NONE, _UP_MEM_FENCE); /* Hopefully the compiler knows not to reorder the above two? */ return (oldval == old); } @@ -114,13 +114,13 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; _Asm_mov_to_ar(_AREG_CCV, old, _DOWN_MEM_FENCE); oldval = _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr, - new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); + new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); return (oldval == old); } @@ -128,12 +128,12 @@ AO_char_compare_and_swap_acquire(volatile unsigned char *addr, AO_INLINE int AO_char_compare_and_swap_release(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; _Asm_mov_to_ar(_AREG_CCV, old, _UP_MEM_FENCE); oldval = _Asm_cmpxchg(_SZ_B, _SEM_REL, addr, - new_val, _LDHINT_NONE, _UP_MEM_FENCE); + new_val, _LDHINT_NONE, _UP_MEM_FENCE); /* Hopefully the compiler knows not to reorder the above two? 
*/ return (oldval == old); } @@ -142,13 +142,13 @@ AO_char_compare_and_swap_release(volatile unsigned char *addr, AO_INLINE int AO_short_compare_and_swap_acquire(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; _Asm_mov_to_ar(_AREG_CCV, old, _DOWN_MEM_FENCE); oldval = _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr, - new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); + new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); return (oldval == old); } @@ -156,12 +156,12 @@ AO_short_compare_and_swap_acquire(volatile unsigned short *addr, AO_INLINE int AO_short_compare_and_swap_release(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; _Asm_mov_to_ar(_AREG_CCV, old, _UP_MEM_FENCE); oldval = _Asm_cmpxchg(_SZ_B, _SEM_REL, addr, - new_val, _LDHINT_NONE, _UP_MEM_FENCE); + new_val, _LDHINT_NONE, _UP_MEM_FENCE); /* Hopefully the compiler knows not to reorder the above two? */ return (oldval == old); } @@ -171,4 +171,3 @@ AO_short_compare_and_swap_release(volatile unsigned short *addr, #ifndef __LP64__ # include "../ao_t_is_int.h" #endif - diff --git a/src/atomic_ops/sysdeps/ibmc/powerpc.h b/src/atomic_ops/sysdeps/ibmc/powerpc.h index 6b858e5..ee0123c 100644 --- a/src/atomic_ops/sysdeps/ibmc/powerpc.h +++ b/src/atomic_ops/sysdeps/ibmc/powerpc.h @@ -1,18 +1,18 @@ -/* FIXME. This is only a placeholder for the AIX compiler. */ -/* It doesn't work. Please send a patch. */ -/* Memory model documented at http://www-106.ibm.com/developerworks/ */ -/* eserver/articles/archguide.html and (clearer) */ +/* FIXME. This is only a placeholder for the AIX compiler. */ +/* It doesn't work. Please send a patch. */ +/* Memory model documented at http://www-106.ibm.com/developerworks/ */ +/* eserver/articles/archguide.html and (clearer) */ /* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */ -/* There appears to be no implicit ordering between any kind of */ -/* independent memory references. */ -/* Architecture enforces some ordering based on control dependence. */ -/* I don't know if that could help. */ -/* Data-dependent loads are always ordered. */ -/* Based on the above references, eieio is intended for use on */ -/* uncached memory, which we don't support. It does not order loads */ -/* from cached memory. */ -/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */ -/* track some of this down and correcting my misunderstandings. -HB */ +/* There appears to be no implicit ordering between any kind of */ +/* independent memory references. */ +/* Architecture enforces some ordering based on control dependence. */ +/* I don't know if that could help. */ +/* Data-dependent loads are always ordered. */ +/* Based on the above references, eieio is intended for use on */ +/* uncached memory, which we don't support. It does not order loads */ +/* from cached memory. */ +/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */ +/* track some of this down and correcting my misunderstandings. -HB */ #include "../all_aligned_atomic_load_store.h" @@ -28,8 +28,8 @@ void AO_lwsync(void); #define AO_nop_read() AO_lwsync() #define AO_HAVE_nop_read -/* We explicitly specify load_acquire and store_release, since these */ -/* rely on the fact that lwsync is also a LoadStore barrier. */ +/* We explicitly specify load_acquire and store_release, since these */ +/* rely on the fact that lwsync is also a LoadStore barrier. 
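[Illustrative aside, not part of this patch: the message-passing idiom that load_acquire/store_release exist for, and which the LoadStore property of lwsync noted below makes safe. The payload/ready variables and function names are hypothetical:

AO_t payload;                /* written before publication */
volatile AO_t ready;         /* publication flag, initially 0 */

void publish(AO_t v)
{
  payload = v;
  AO_store_release(&ready, 1);  /* lwsync keeps the payload write first */
}

int try_consume(AO_t *out)
{
  if (!AO_load_acquire(&ready))
    return 0;
  *out = payload;            /* lwsync after the load orders this read */
  return 1;
}]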
*/ AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { @@ -49,9 +49,9 @@ AO_store_release(volatile AO_t *addr, AO_t value) #define AO_HAVE_load_acquire -/* This is similar to the code in the garbage collector. Deleting */ -/* this and having it synthesized from compare_and_swap would probably */ -/* only cost us a load immediate instruction. */ +/* This is similar to the code in the garbage collector. Deleting */ +/* this and having it synthesized from compare_and_swap would probably */ +/* only cost us a load immediate instruction. */ /*AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { # error FIXME Implement me @@ -122,5 +122,5 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { #define AO_HAVE_compare_and_swap_full -/* FIXME: We should also implement fetch_and_add and or primitives */ -/* directly. */ +/* FIXME: We should also implement fetch_and_add and or primitives */ +/* directly. */ diff --git a/src/atomic_ops/sysdeps/icc/ia64.h b/src/atomic_ops/sysdeps/icc/ia64.h index 0278f8b..d0ca804 100644 --- a/src/atomic_ops/sysdeps/icc/ia64.h +++ b/src/atomic_ops/sysdeps/icc/ia64.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* @@ -32,9 +32,9 @@ #include -/* The acquire release semantics of volatile can be turned off. And volatile */ -/* operations in icc9 don't imply ordering with respect to other nonvolatile */ -/* operations. */ +/* The acquire release semantics of volatile can be turned off. And volatile */ +/* operations in icc9 don't imply ordering with respect to other nonvolatile */ +/* operations. 
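[Illustrative aside, not part of this patch: how the acquire/release CAS variants these IA-64 ports define are paired in practice, e.g. around a simple lock word (0 free, 1 held); spin_lock/spin_unlock are hypothetical names:

void spin_lock(volatile AO_t *l)
{
  while (!AO_compare_and_swap_acquire(l, 0, 1))
    ;                        /* retry until the word was observed free */
}

void spin_unlock(volatile AO_t *l)
{
  (void)AO_compare_and_swap_release(l, 1, 0);
      /* release semantics: critical-section writes become visible first */
}

A release store would serve equally well for the unlock; the CAS form merely reuses the primitives shown here.]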
*/ #define AO_INTEL_PTR_t void * @@ -55,7 +55,7 @@ AO_store_release(volatile AO_t *p, AO_t val) AO_INLINE unsigned char AO_char_load_acquire(const volatile unsigned char *p) { - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ return (__ld1_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_char_load_acquire @@ -70,7 +70,7 @@ AO_char_store_release(volatile unsigned char *p, unsigned char val) AO_INLINE unsigned short AO_short_load_acquire(const volatile unsigned short *p) { - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ return (__ld2_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_short_load_acquire @@ -85,7 +85,7 @@ AO_short_store_release(volatile unsigned short *p, unsigned short val) AO_INLINE unsigned int AO_int_load_acquire(const volatile unsigned int *p) { - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ return (__ld4_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_int_load_acquire @@ -137,7 +137,7 @@ AO_fetch_and_sub1_release (volatile AO_t *p) AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; oldval = _InterlockedCompareExchange64_acq(addr, new_val, old); @@ -148,7 +148,7 @@ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { AO_t oldval; oldval = _InterlockedCompareExchange64_rel(addr, new_val, old); @@ -159,7 +159,7 @@ AO_compare_and_swap_release(volatile AO_t *addr, AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; oldval = _InterlockedCompareExchange8_acq(addr, new_val, old); @@ -170,7 +170,7 @@ AO_char_compare_and_swap_acquire(volatile unsigned char *addr, AO_INLINE int AO_char_compare_and_swap_release(volatile unsigned char *addr, - unsigned char old, unsigned char new_val) + unsigned char old, unsigned char new_val) { unsigned char oldval; oldval = _InterlockedCompareExchange8_rel(addr, new_val, old); @@ -181,7 +181,7 @@ AO_char_compare_and_swap_release(volatile unsigned char *addr, AO_INLINE int AO_short_compare_and_swap_acquire(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; oldval = _InterlockedCompareExchange16_acq(addr, new_val, old); @@ -192,7 +192,7 @@ AO_short_compare_and_swap_acquire(volatile unsigned short *addr, AO_INLINE int AO_short_compare_and_swap_release(volatile unsigned short *addr, - unsigned short old, unsigned short new_val) + unsigned short old, unsigned short new_val) { unsigned short oldval; oldval = _InterlockedCompareExchange16_rel(addr, new_val, old); @@ -203,7 +203,7 @@ AO_short_compare_and_swap_release(volatile unsigned short *addr, AO_INLINE int AO_int_compare_and_swap_acquire(volatile unsigned int *addr, - unsigned int old, unsigned int new_val) + unsigned int old, unsigned int new_val) { unsigned int oldval; oldval = _InterlockedCompareExchange_acq(addr, new_val, old); @@ -214,7 +214,7 @@ AO_int_compare_and_swap_acquire(volatile unsigned int *addr, AO_INLINE int AO_int_compare_and_swap_release(volatile unsigned int *addr, - unsigned int old, unsigned int new_val) + unsigned int old, unsigned int new_val) { unsigned int oldval; oldval = _InterlockedCompareExchange_rel(addr, 
new_val, old); @@ -222,4 +222,3 @@ AO_int_compare_and_swap_release(volatile unsigned int *addr, } #define AO_HAVE_int_compare_and_swap_release - diff --git a/src/atomic_ops/sysdeps/int_acquire_release_volatile.h b/src/atomic_ops/sysdeps/int_acquire_release_volatile.h index 44d0453..01037a2 100644 --- a/src/atomic_ops/sysdeps/int_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/int_acquire_release_volatile.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* @@ -37,7 +37,7 @@ AO_INLINE unsigned int AO_int_load_acquire(const volatile unsigned int *p) { unsigned int result = *p; - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ AO_GCC_BARRIER(); return result; } @@ -47,9 +47,7 @@ AO_INLINE void AO_int_store_release(volatile unsigned int *p, unsigned int val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ + /* A normal volatile store generates an st.rel */ *p = val; } #define AO_HAVE_int_store_release - - diff --git a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h index e49ecd3..e1c373c 100644 --- a/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of unsigned int are @@ -29,8 +29,8 @@ AO_INLINE unsigned int AO_int_load(const volatile unsigned int *addr) { assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0); - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(unsigned int *)addr); } @@ -44,5 +44,3 @@ AO_int_store(volatile unsigned int *addr, unsigned int new_val) } #define AO_HAVE_int_store - - diff --git a/src/atomic_ops/sysdeps/int_atomic_load_store.h b/src/atomic_ops/sysdeps/int_atomic_load_store.h index 570a850..8892692 100644 --- a/src/atomic_ops/sysdeps/int_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/int_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of unsigned int are @@ -28,8 +28,8 @@ AO_INLINE unsigned int AO_int_load(const volatile unsigned int *addr) { - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(const unsigned int *)addr); } @@ -42,5 +42,3 @@ AO_int_store(volatile unsigned int *addr, unsigned int new_val) } #define AO_HAVE_int_store - - diff --git a/src/atomic_ops/sysdeps/msftc/arm.h b/src/atomic_ops/sysdeps/msftc/arm.h index 507b70a..36e45f0 100755 --- a/src/atomic_ops/sysdeps/msftc/arm.h +++ b/src/atomic_ops/sysdeps/msftc/arm.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ #include "../read_ordered.h" @@ -27,25 +27,25 @@ # define AO_ASSUME_WINDOWS98 #endif #include "common32_defs.h" -/* FIXME: Do _InterlockedOps really have a full memory barrier? */ -/* (MSDN WinCE docs say nothing about it.) */ +/* FIXME: Do _InterlockedOps really have a full memory barrier? */ +/* (MSDN WinCE docs say nothing about it.) */ #if _M_ARM >= 6 -/* ARMv6 is the first architecture providing support for simple LL/SC. */ +/* ARMv6 is the first architecture providing support for simple LL/SC. */ #include "../standard_ao_double_t.h" -/* If only a single processor is used, we can define AO_UNIPROCESSOR */ -/* and do not need to access CP15 for ensuring a DMB at all. */ +/* If only a single processor is used, we can define AO_UNIPROCESSOR */ +/* and do not need to access CP15 for ensuring a DMB at all. */ #ifdef AO_UNIPROCESSOR AO_INLINE void AO_nop_full(void) {} # define AO_HAVE_nop_full #else -/* AO_nop_full() is emulated using AO_test_and_set_full(). */ +/* AO_nop_full() is emulated using AO_test_and_set_full(). */ #endif #include "../test_and_set_t_is_ao_t.h" -/* AO_test_and_set() is emulated using CAS. */ +/* AO_test_and_set() is emulated using CAS. */ AO_INLINE AO_t AO_load(const volatile AO_t *addr) @@ -58,18 +58,18 @@ AO_load(const volatile AO_t *addr) AO_INLINE void AO_store_full(volatile AO_t *addr, AO_t value) { - /* Emulate atomic store using CAS. */ + /* Emulate atomic store using CAS. */ AO_t old = AO_load(addr); AO_t current; # ifdef AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE while ((current = (AO_t)_InterlockedCompareExchange( - (PVOID AO_INTERLOCKED_VOLATILE *)addr, - (PVOID)value, (PVOID)old)) != old) + (PVOID AO_INTERLOCKED_VOLATILE *)addr, + (PVOID)value, (PVOID)old)) != old) old = current; # else while ((current = (AO_t)_InterlockedCompareExchange( - (LONG AO_INTERLOCKED_VOLATILE *)addr, - (LONG)value, (LONG)old)) != old) + (LONG AO_INTERLOCKED_VOLATILE *)addr, + (LONG)value, (LONG)old)) != old) old = current; # endif } @@ -79,12 +79,12 @@ AO_store_full(volatile AO_t *addr, AO_t value) #else /* _M_ARM < 6 */ -/* Some slide set, if it has been red correctly, claims that Loads */ -/* followed by either a Load or a Store are ordered, but nothing */ -/* else is. It appears that SWP is the only simple memory barrier. */ +/* Some slide set, if it has been red correctly, claims that Loads */ +/* followed by either a Load or a Store are ordered, but nothing */ +/* else is. 
It appears that SWP is the only simple memory barrier. */ #include "../all_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" -/* AO_test_and_set_full() is emulated using CAS. */ +/* AO_test_and_set_full() is emulated using CAS. */ #endif /* _M_ARM < 6 */ diff --git a/src/atomic_ops/sysdeps/msftc/common32_defs.h b/src/atomic_ops/sysdeps/msftc/common32_defs.h index 052647e..96adba2 100755 --- a/src/atomic_ops/sysdeps/msftc/common32_defs.h +++ b/src/atomic_ops/sysdeps/msftc/common32_defs.h @@ -1,37 +1,37 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ -/* This file contains AO primitives based on VC++ built-in intrinsic */ -/* functions commonly available across 32-bit architectures. */ +/* This file contains AO primitives based on VC++ built-in intrinsic */ +/* functions commonly available across 32-bit architectures. */ -/* This file should be included from arch-specific header files. */ -/* Define AO_USE_INTERLOCKED_INTRINSICS if _Interlocked primitives */ -/* (used below) are available as intrinsic ones for a target arch */ -/* (otherwise "Interlocked" functions family is used instead). */ -/* Define AO_ASSUME_WINDOWS98 if CAS is available. */ +/* This file should be included from arch-specific header files. */ +/* Define AO_USE_INTERLOCKED_INTRINSICS if _Interlocked primitives */ +/* (used below) are available as intrinsic ones for a target arch */ +/* (otherwise "Interlocked" functions family is used instead). */ +/* Define AO_ASSUME_WINDOWS98 if CAS is available. */ #include - /* Seems like over-kill, but that's what MSDN recommends. */ - /* And apparently winbase.h is not always self-contained. */ + /* Seems like over-kill, but that's what MSDN recommends. */ + /* And apparently winbase.h is not always self-contained. 
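[Illustrative aside, not part of this patch: once CAS is available (AO_ASSUME_WINDOWS98), the remaining read-modify-write operations can be synthesized by a retry loop — essentially what the generalization headers do. A sketch under that assumption; the _sketch name is hypothetical:

AO_INLINE AO_t
AO_fetch_and_or_full_sketch(volatile AO_t *p, AO_t mask)
{
  AO_t old;
  do {
    old = *p;                /* racy read; the CAS below revalidates it */
  } while (!AO_compare_and_swap_full(p, old, old | mask));
  return old;
}]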
*/ #if _MSC_VER < 1310 || !defined(AO_USE_INTERLOCKED_INTRINSICS) @@ -61,7 +61,7 @@ LONG __cdecl _InterlockedExchangeAdd(LONG volatile *, LONG); LONG __cdecl _InterlockedExchange(LONG volatile *, LONG); LONG __cdecl _InterlockedCompareExchange(LONG volatile *, - LONG /* Exchange */, LONG /* Comp */); + LONG /* Exchange */, LONG /* Comp */); # ifdef __cplusplus } # endif @@ -81,7 +81,7 @@ AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *p, AO_t incr) { return _InterlockedExchangeAdd((LONG AO_INTERLOCKED_VOLATILE *)p, - (LONG)incr); + (LONG)incr); } #define AO_HAVE_fetch_and_add_full @@ -109,12 +109,12 @@ AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { # ifdef AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE return _InterlockedCompareExchange((PVOID AO_INTERLOCKED_VOLATILE *)addr, - (PVOID)new_val, (PVOID)old) - == (PVOID)old; + (PVOID)new_val, (PVOID)old) + == (PVOID)old; # else return _InterlockedCompareExchange((LONG AO_INTERLOCKED_VOLATILE *)addr, - (LONG)new_val, (LONG)old) - == (LONG)old; + (LONG)new_val, (LONG)old) + == (LONG)old; # endif } diff --git a/src/atomic_ops/sysdeps/msftc/x86.h b/src/atomic_ops/sysdeps/msftc/x86.h index 28b1424..347b66c 100644 --- a/src/atomic_ops/sysdeps/msftc/x86.h +++ b/src/atomic_ops/sysdeps/msftc/x86.h @@ -1,54 +1,54 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ -/* If AO_ASSUME_WINDOWS98 is defined, we assume Windows 98 or newer. */ -/* If AO_ASSUME_VISTA is defined, we assume Windows Server 2003, Vista */ -/* or later. */ +/* If AO_ASSUME_WINDOWS98 is defined, we assume Windows 98 or newer. */ +/* If AO_ASSUME_VISTA is defined, we assume Windows Server 2003, Vista */ +/* or later. */ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations, except for some old WinChips, appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore both the WinChips, and the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations, except for some old WinChips, appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. 
*/ +/* We ignore both the WinChips, and the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" #include "../test_and_set_t_is_char.h" #ifndef AO_USE_INTERLOCKED_INTRINSICS - /* _Interlocked primitives (Inc, Dec, Xchg, Add) are always available */ + /* _Interlocked primitives (Inc, Dec, Xchg, Add) are always available */ # define AO_USE_INTERLOCKED_INTRINSICS #endif #include "common32_defs.h" -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. */ -/* Unfortunately mfence doesn't exist everywhere. */ -/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */ -/* probably a conservative test for it? */ +/* Unfortunately mfence doesn't exist everywhere. */ +/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */ +/* probably a conservative test for it? */ #if defined(AO_USE_PENTIUM4_INSTRS) @@ -62,9 +62,9 @@ AO_nop_full(void) #else -/* We could use the cpuid instruction. But that seems to be slower */ -/* than the default implementation based on test_and_set_full. Thus */ -/* we omit that bit of misinformation here. */ +/* We could use the cpuid instruction. But that seems to be slower */ +/* than the default implementation based on test_and_set_full. Thus */ +/* we omit that bit of misinformation here. */ #endif @@ -73,9 +73,9 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { __asm { - mov eax,0xff ; /* AO_TS_SET */ - mov ebx,addr ; - xchg byte ptr [ebx],al ; + mov eax,0xff ; /* AO_TS_SET */ + mov ebx,addr ; + xchg byte ptr [ebx],al ; } /* Ignore possible "missing return value" warning here. */ } @@ -95,8 +95,8 @@ AO_test_and_set_full(volatile AO_TS_t *addr) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { __int64 oldv = (__int64)old_val1 | ((__int64)old_val2 << 32); __int64 newv = (__int64)new_val1 | ((__int64)new_val2 << 32); @@ -108,10 +108,10 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, #ifdef __cplusplus AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, - AO_double_t old_val, AO_double_t new_val) + AO_double_t old_val, AO_double_t new_val) { return _InterlockedCompareExchange64((__int64 volatile *)addr, - new_val.AO_whole, old_val.AO_whole) == old_val.AO_whole; + new_val.AO_whole, old_val.AO_whole) == old_val.AO_whole; } #define AO_HAVE_double_compare_and_swap_full #endif /* __cplusplus */ diff --git a/src/atomic_ops/sysdeps/msftc/x86_64.h b/src/atomic_ops/sysdeps/msftc/x86_64.h index fb0b0ee..150a97f 100644 --- a/src/atomic_ops/sysdeps/msftc/x86_64.h +++ b/src/atomic_ops/sysdeps/msftc/x86_64.h @@ -1,33 +1,33 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. */ +/* We ignore the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" @@ -40,8 +40,8 @@ #include "../standard_ao_double_t.h" #include - /* Seems like over-kill, but that's what MSDN recommends. */ - /* And apparently winbase.h is not always self-contained. */ + /* Seems like over-kill, but that's what MSDN recommends. */ + /* And apparently winbase.h is not always self-contained. */ /* Assume _MSC_VER >= 1400 */ #include @@ -80,21 +80,21 @@ AO_fetch_and_sub1_full (volatile AO_t *p) AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { return _InterlockedCompareExchange64((LONGLONG volatile *)addr, (LONGLONG)new_val, (LONGLONG)old) - == (LONGLONG)old; + == (LONGLONG)old; } #define AO_HAVE_compare_and_swap_full -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. */ -/* Unfortunately mfence doesn't exist everywhere. */ -/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */ -/* probably a conservative test for it? */ +/* Unfortunately mfence doesn't exist everywhere. */ +/* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */ +/* probably a conservative test for it? */ #if defined(AO_USE_PENTIUM4_INSTRS) @@ -108,9 +108,9 @@ AO_nop_full(void) #else -/* We could use the cpuid instruction. But that seems to be slower */ -/* than the default implementation based on test_and_set_full. Thus */ -/* we omit that bit of misinformation here. */ +/* We could use the cpuid instruction. But that seems to be slower */ +/* than the default implementation based on test_and_set_full. 
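[Illustrative aside, not part of this patch: why AO_nop_full still matters under the strong x86/x86_64 ordering assumed in these headers. The one permitted reordering — a later read passing an earlier write — breaks Dekker-style flag handshakes unless a full fence separates the two. Names are hypothetical:

volatile AO_t flag0, flag1;  /* one flag per thread, initially 0 */

int thread0_may_enter(void)
{
  AO_store(&flag0, 1);
  AO_nop_full();             /* mfence: the load below must not pass the store */
  return AO_load(&flag1) == 0;
}]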
Thus */ +/* we omit that bit of misinformation here. */ #endif @@ -121,9 +121,9 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { __asm { - mov rax,AO_TS_SET ; - mov rbx,addr ; - xchg byte ptr [rbx],al ; + mov rax,AO_TS_SET ; + mov rbx,addr ; + xchg byte ptr [rbx],al ; } } @@ -143,14 +143,14 @@ AO_test_and_set_full(volatile AO_TS_t *addr) AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { __int64 comparandResult[2]; comparandResult[0] = old_val1; /* low */ comparandResult[1] = old_val2; /* high */ return _InterlockedCompareExchange128((volatile __int64 *)addr, - new_val2 /* high */, new_val1 /* low */, comparandResult); + new_val2 /* high */, new_val1 /* low */, comparandResult); } # define AO_HAVE_compare_double_and_swap_double_full @@ -163,18 +163,18 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { - __asm - { - mov rdx,QWORD PTR [old_val2] ; - mov rax,QWORD PTR [old_val1] ; - mov rcx,QWORD PTR [new_val2] ; - mov rbx,QWORD PTR [new_val1] ; - lock cmpxchg16b [addr] ; - setz rax ; - } + __asm + { + mov rdx,QWORD PTR [old_val2] ; + mov rax,QWORD PTR [old_val1] ; + mov rcx,QWORD PTR [new_val2] ; + mov rbx,QWORD PTR [new_val1] ; + lock cmpxchg16b [addr] ; + setz rax ; + } } # define AO_HAVE_compare_double_and_swap_double_full diff --git a/src/atomic_ops/sysdeps/ordered.h b/src/atomic_ops/sysdeps/ordered.h index cdd2d8e..da77b63 100644 --- a/src/atomic_ops/sysdeps/ordered.h +++ b/src/atomic_ops/sysdeps/ordered.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * These are common definitions for architectures that provide processor @@ -34,5 +34,3 @@ AO_nop_full(void) } #define AO_HAVE_nop_full - - diff --git a/src/atomic_ops/sysdeps/ordered_except_wr.h b/src/atomic_ops/sysdeps/ordered_except_wr.h index 3e700b1..ee51aff 100644 --- a/src/atomic_ops/sysdeps/ordered_except_wr.h +++ b/src/atomic_ops/sysdeps/ordered_except_wr.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
- * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * These are common definitions for architectures that provide processor @@ -33,8 +33,8 @@ AO_INLINE void AO_nop_write(void) { AO_compiler_barrier(); - /* sfence according to Intel docs. Pentium 3 and up. */ - /* Unnecessary for cached accesses? */ + /* sfence according to Intel docs. Pentium 3 and up. */ + /* Unnecessary for cached accesses? */ } #define AO_HAVE_NOP_WRITE @@ -98,4 +98,3 @@ AO_int_store_write(volatile unsigned int *addr, unsigned int val) # define AO_HAVE_int_store_release #endif /* AO_HAVE_int_store */ - diff --git a/src/atomic_ops/sysdeps/read_ordered.h b/src/atomic_ops/sysdeps/read_ordered.h index 922f5ea..1589e5c 100644 --- a/src/atomic_ops/sysdeps/read_ordered.h +++ b/src/atomic_ops/sysdeps/read_ordered.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. 
+ */ /* * These are common definitions for architectures that provide processor @@ -98,5 +98,3 @@ AO_int_load_read(const volatile unsigned int *addr) #define AO_HAVE_int_load_acquire #endif /* AO_HAVE_int_load */ - - diff --git a/src/atomic_ops/sysdeps/short_acquire_release_volatile.h b/src/atomic_ops/sysdeps/short_acquire_release_volatile.h index 56db599..dcf3c04 100644 --- a/src/atomic_ops/sysdeps/short_acquire_release_volatile.h +++ b/src/atomic_ops/sysdeps/short_acquire_release_volatile.h @@ -1,23 +1,23 @@ /* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ /* @@ -37,7 +37,7 @@ AO_INLINE unsigned short AO_short_load_acquire(const volatile unsigned short *p) { unsigned short result = *p; - /* A normal volatile load generates an ld.acq */ + /* A normal volatile load generates an ld.acq */ AO_GCC_BARRIER(); return result; } @@ -47,9 +47,7 @@ AO_INLINE void AO_short_store_release(volatile unsigned short *p, unsigned short val) { AO_GCC_BARRIER(); - /* A normal volatile store generates an st.rel */ + /* A normal volatile store generates an st.rel */ *p = val; } #define AO_HAVE_short_store_release - - diff --git a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h index 675766b..164ba97 100644 --- a/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of unsigned short @@ -29,8 +29,8 @@ AO_INLINE unsigned short AO_short_load(const volatile unsigned short *addr) { assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0); - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(unsigned short *)addr); } @@ -44,5 +44,3 @@ AO_short_store(volatile unsigned short *addr, unsigned short new_val) } #define AO_HAVE_short_store - - diff --git a/src/atomic_ops/sysdeps/short_atomic_load_store.h b/src/atomic_ops/sysdeps/short_atomic_load_store.h index 9ec9cf4..9a88db5 100644 --- a/src/atomic_ops/sysdeps/short_atomic_load_store.h +++ b/src/atomic_ops/sysdeps/short_atomic_load_store.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * Definitions for architectures on which loads and stores of unsigned short @@ -28,8 +28,8 @@ AO_INLINE unsigned short AO_short_load(const volatile unsigned short *addr) { - /* Cast away the volatile for architectures like IA64 where */ - /* volatile adds barrier semantics. */ + /* Cast away the volatile for architectures like IA64 where */ + /* volatile adds barrier semantics. */ return (*(const unsigned short *)addr); } @@ -42,5 +42,3 @@ AO_short_store(volatile unsigned short *addr, unsigned short new_val) } #define AO_HAVE_short_store - - diff --git a/src/atomic_ops/sysdeps/standard_ao_double_t.h b/src/atomic_ops/sysdeps/standard_ao_double_t.h index 1b52d2d..e92424d 100644 --- a/src/atomic_ops/sysdeps/standard_ao_double_t.h +++ b/src/atomic_ops/sysdeps/standard_ao_double_t.h @@ -1,8 +1,8 @@ /* NEC LE-IT: For 64Bit OS we extend the double type to hold two int64's -* +* * x86-64: __m128 serves as placeholder which also requires the compiler -* to align it on 16 byte boundary (as required by cmpxchg16. -* Similar things could be done for PowerPC 64bit using a VMX data type... */ +* to align it on 16 byte boundary (as required by cmpxchg16. 
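[Illustrative aside, an assumption rather than anything from this patch: with gcc the 16-byte alignment that cmpxchg16b requires could also be requested directly via an attribute; the __m128 placeholder described below achieves the same guarantee without one. Sketch (hypothetical type name):

typedef struct {
  AO_t AO_v1;
  AO_t AO_v2;
} __attribute__((__aligned__(16))) AO_double_sketch_t;]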
+* Similar things could be done for PowerPC 64bit using a VMX data type... */ #if (defined(__x86_64__) && defined(__GNUC__)) || defined(_WIN64) # include diff --git a/src/atomic_ops/sysdeps/sunc/x86.h b/src/atomic_ops/sysdeps/sunc/x86.h index 8cf797a..be83b44 100644 --- a/src/atomic_ops/sysdeps/sunc/x86.h +++ b/src/atomic_ops/sysdeps/sunc/x86.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -16,16 +16,16 @@ * Some of the machine specific code was borrowed from our GC distribution. */ -/* The following really assume we have a 486 or better. */ +/* The following really assume we have a 486 or better. */ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations, except for some old WinChips, appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore both the WinChips, and the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations, except for some old WinChips, appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. */ +/* We ignore both the WinChips, and the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" @@ -44,14 +44,14 @@ AO_nop_full(void) #else -/* We could use the cpuid instruction. But that seems to be slower */ -/* than the default implementation based on test_and_set_full. Thus */ -/* we omit that bit of misinformation here. */ +/* We could use the cpuid instruction. But that seems to be slower */ +/* than the default implementation based on test_and_set_full. Thus */ +/* we omit that bit of misinformation here. */ #endif -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. 
*/ /* Really only works for 486 and later */ AO_INLINE AO_t @@ -60,8 +60,8 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) AO_t result; __asm__ __volatile__ ("lock; xaddl %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -73,8 +73,8 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : - "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -86,8 +86,8 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -98,8 +98,8 @@ AO_INLINE void AO_or_full (volatile AO_t *p, AO_t incr) { __asm__ __volatile__ ("lock; orl %1, %0" : - "=m" (*p) : "r" (incr) /* , "m" (*p) */ - : "memory"); + "=m" (*p) : "r" (incr) /* , "m" (*p) */ + : "memory"); } #define AO_HAVE_or_full @@ -109,11 +109,11 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_t oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ - /* Note 2: "xchgb" is not recognized by Sun CC assembler yet. */ + /* Note 2: "xchgb" is not recognized by Sun CC assembler yet. */ __asm__ __volatile__("xchgl %0, %1" - : "=q"(oldval), "=m"(*addr) - : "0"(0xff) /* , "m"(*addr) */ - : "memory"); + : "=q"(oldval), "=m"(*addr) + : "0"(0xff) /* , "m"(*addr) */ + : "memory"); return (AO_TS_VAL_t)oldval; } @@ -122,23 +122,23 @@ AO_test_and_set_full(volatile AO_TS_t *addr) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { char result; __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1" - : "=m"(*addr), "=q"(result) - : "r" (new_val), "a"(old) : "memory"); + : "=m"(*addr), "=q"(result) + : "r" (new_val), "a"(old) : "memory"); return (int) result; } #define AO_HAVE_compare_and_swap_full /* Returns nonzero if the comparison succeeded. */ -/* Really requires at least a Pentium. */ +/* Really requires at least a Pentium. */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { char result; /* FIXME: not tested */ @@ -147,21 +147,21 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, GOT pointer. We can save and restore %ebx because GCC won't be using it for anything else (such as any of the m operands) */ __asm__ __volatile__("pushl %%ebx;" /* save ebx used for PIC GOT ptr */ - "movl %6,%%ebx;" /* move new_val2 to %ebx */ - "lock; cmpxchg8b %0; setz %1;" - "pop %%ebx;" /* restore %ebx */ - : "=m"(*addr), "=q"(result) - : "m"(*addr), "d" (old_val2), "a" (old_val1), - "c" (new_val2), "m" (new_val1) : "memory"); + "movl %6,%%ebx;" /* move new_val2 to %ebx */ + "lock; cmpxchg8b %0; setz %1;" + "pop %%ebx;" /* restore %ebx */ + : "=m"(*addr), "=q"(result) + : "m"(*addr), "d" (old_val2), "a" (old_val1), + "c" (new_val2), "m" (new_val1) : "memory"); #else /* We can't just do the same thing in non-PIC mode, because GCC * might be using %ebx as the memory operand. 
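The fetch-and-add bodies above all use the same "lock; xadd" idiom, differing only in operand width and register constraints. A standalone sketch using the modern "+" read-write constraint form instead of the header's matching-constraint style (an illustration, not the library's code):

static unsigned long
fetch_and_add(volatile unsigned long *p, unsigned long incr)
{
  unsigned long old = incr;
  /* xadd exchanges the register with *p and stores the sum in *p, */
  /* so the register ends up holding the pre-increment value.      */
  __asm__ __volatile__("lock; xadd %0, %1"
                       : "+r" (old), "+m" (*p)
                       :
                       : "memory");
  return old;
}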
We could have ifdef'd * in a clobber, but there's no point doing the push/pop if we don't * have to. */ __asm__ __volatile__("lock; cmpxchg8b %0; setz %1;" - : "=m"(*addr), "=q"(result) - : /* "m"(*addr), */ "d" (old_val2), "a" (old_val1), - "c" (new_val2), "b" (new_val1) : "memory"); + : "=m"(*addr), "=q"(result) + : /* "m"(*addr), */ "d" (old_val2), "a" (old_val1), + "c" (new_val2), "b" (new_val1) : "memory"); #endif return (int) result; } diff --git a/src/atomic_ops/sysdeps/sunc/x86_64.h b/src/atomic_ops/sysdeps/sunc/x86_64.h index 72dd185..9822833 100644 --- a/src/atomic_ops/sysdeps/sunc/x86_64.h +++ b/src/atomic_ops/sysdeps/sunc/x86_64.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. @@ -18,12 +18,12 @@ #include "../all_aligned_atomic_load_store.h" -/* Real X86 implementations, appear */ -/* to enforce ordering between memory operations, EXCEPT that a later */ -/* read can pass earlier writes, presumably due to the visible */ -/* presence of store buffers. */ -/* We ignore the fact that the official specs */ -/* seem to be much weaker (and arguably too weak to be usable). */ +/* Real X86 implementations, appear */ +/* to enforce ordering between memory operations, EXCEPT that a later */ +/* read can pass earlier writes, presumably due to the visible */ +/* presence of store buffers. */ +/* We ignore the fact that the official specs */ +/* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" @@ -34,14 +34,14 @@ AO_INLINE void AO_nop_full(void) { - /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */ + /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */ __asm__ __volatile__("mfence" : : : "memory"); } #define AO_HAVE_nop_full -/* As far as we can tell, the lfence and sfence instructions are not */ -/* currently needed or useful for cached memory accesses. */ +/* As far as we can tell, the lfence and sfence instructions are not */ +/* currently needed or useful for cached memory accesses. 
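Clients usually wrap the boolean-result compare-and-swap shown above in a retry loop that recomputes from a fresh snapshot. A sketch, with ao_cas standing in (hypothetically) for AO_compare_and_swap_full:

static void
atomic_store_max(volatile AO_t *p, AO_t v)
{
  AO_t old;
  do {
    old = *p;
    if (old >= v) return;        /* nothing to do */
  } while (!ao_cas(p, old, v));  /* failed: *p changed, take a new snapshot */
}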
*/ AO_INLINE AO_t AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) @@ -49,8 +49,8 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) AO_t result; __asm__ __volatile__ ("lock; xaddq %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -62,8 +62,8 @@ AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : - "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -75,8 +75,8 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -88,8 +88,8 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr) unsigned int result; __asm__ __volatile__ ("lock; xaddl %0, %1" : - "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ - : "memory"); + "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */ + : "memory"); return result; } @@ -99,8 +99,8 @@ AO_INLINE void AO_or_full (volatile AO_t *p, AO_t incr) { __asm__ __volatile__ ("lock; orq %1, %0" : - "=m" (*p) : "r" (incr) /* , "m" (*p) */ - : "memory"); + "=m" (*p) : "r" (incr) /* , "m" (*p) */ + : "memory"); } #define AO_HAVE_or_full @@ -110,11 +110,11 @@ AO_test_and_set_full(volatile AO_TS_t *addr) { unsigned int oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ - /* Note 2: "xchgb" is not recognized by Sun CC assembler yet. */ + /* Note 2: "xchgb" is not recognized by Sun CC assembler yet. */ __asm__ __volatile__("xchgl %0, %1" - : "=q"(oldval), "=m"(*addr) - : "0"(0xff) /* , "m"(*addr) */ - : "memory"); + : "=q"(oldval), "=m"(*addr) + : "0"(0xff) /* , "m"(*addr) */ + : "memory"); return (AO_TS_VAL_t)oldval; } @@ -123,12 +123,12 @@ AO_test_and_set_full(volatile AO_TS_t *addr) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, - AO_t old, AO_t new_val) + AO_t old, AO_t new_val) { char result; __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1" - : "=m"(*addr), "=q"(result) - : "r" (new_val), "a"(old) : "memory"); + : "=m"(*addr), "=q"(result) + : "r" (new_val), "a"(old) : "memory"); return (int) result; } @@ -138,7 +138,7 @@ AO_compare_and_swap_full(volatile AO_t *addr, /* NEC LE-IT: older AMD Opterons are missing this instruction. * On these machines SIGILL will be thrown. * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated - * (lock based) version available */ + * (lock based) version available */ /* HB: Changed this to not define either by default. There are * enough machines and tool chains around on which cmpxchg16b * doesn't work. And the emulation is unsafe by our usual rules. 
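One way to avoid the SIGILL that the comment above warns about on early Opterons is to test the CX16 feature bit (CPUID leaf 1, ECX bit 13) once at startup and only then take the cmpxchg16b path. A sketch using GCC's <cpuid.h> (assumed available; the bit position is from the Intel/AMD manuals):

#include <cpuid.h>

static int
have_cmpxchg16b(void)
{
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;               /* leaf 1 unsupported: assume no cx16 */
  return (ecx >> 13) & 1;   /* CX16 feature flag */
}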
@@ -146,38 +146,38 @@ AO_compare_and_swap_full(volatile AO_t *addr, */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { char result; __asm__ __volatile__("lock; cmpxchg16b %0; setz %1" - : "=m"(*addr), "=q"(result) - : "m"(*addr), - "d" (old_val2), - "a" (old_val1), - "c" (new_val2), - "b" (new_val1) : "memory"); + : "=m"(*addr), "=q"(result) + : "m"(*addr), + "d" (old_val2), + "a" (old_val1), + "c" (new_val2), + "b" (new_val1) : "memory"); return (int) result; } #define AO_HAVE_compare_double_and_swap_double_full #else -/* this one provides spinlock based emulation of CAS implemented in */ +/* this one provides spinlock based emulation of CAS implemented in */ /* atomic_ops.c. We probably do not want to do this here, since it is */ /* not atomic with respect to other kinds of updates of *addr. On the */ -/* other hand, this may be a useful facility on occasion. */ +/* other hand, this may be a useful facility on occasion. */ #ifdef AO_WEAK_DOUBLE_CAS_EMULATION int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2); + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2); AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, - AO_t old_val1, AO_t old_val2, - AO_t new_val1, AO_t new_val2) + AO_t old_val1, AO_t old_val2, + AO_t new_val1, AO_t new_val2) { - return AO_compare_double_and_swap_double_emulation(addr, - old_val1, old_val2, - new_val1, new_val2); + return AO_compare_double_and_swap_double_emulation(addr, + old_val1, old_val2, + new_val1, new_val2); } #define AO_HAVE_compare_double_and_swap_double_full #endif /* AO_WEAK_DOUBLE_CAS_EMULATION */ diff --git a/src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h b/src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h index 53c2c5c..606f7ac 100644 --- a/src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h +++ b/src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h @@ -1,24 +1,24 @@ /* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ + * SOFTWARE. + */ /* * These are common definitions for architectures on which test_and_set @@ -26,7 +26,7 @@ * all zeroes, and the "set" value contains only one lowest bit set. * This can be used if test_and_set is synthesized from compare_and_swap. 
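The header below covers exactly the case its comment describes: test_and_set synthesized from compare_and_swap, with "clear" all zeroes and "set" the lowest bit. A sketch of that synthesis, again with a hypothetical boolean-result ao_cas():

static AO_TS_val
tas_from_cas(volatile AO_t *addr)
{
  if (ao_cas(addr, (AO_t)AO_TS_clear, (AO_t)AO_TS_set))
    return AO_TS_clear;   /* we installed "set"; it was clear */
  return AO_TS_set;       /* already set (or contended, which is */
                          /* safe to report as set)              */
}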
*/ -typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val; +typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val; #define AO_TS_VAL_t AO_TS_val #define AO_TS_CLEAR AO_TS_clear #define AO_TS_SET AO_TS_set diff --git a/src/atomic_ops_stack.c b/src/atomic_ops_stack.c index 6cf43fd..6de7a57 100644 --- a/src/atomic_ops_stack.c +++ b/src/atomic_ops_stack.c @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * Original Author: Hans Boehm * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. - * + * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the @@ -25,7 +25,7 @@ #if defined(_MSC_VER) \ || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) /* AO_pause not defined elsewhere */ - /* FIXME: At least AO_spin should be factored out. */ + /* FIXME: At least AO_spin should be factored out. */ #include <windows.h> AO_t dummy; @@ -52,13 +52,13 @@ void AO_pause(int n) { DWORD msecs; - /* Short async-signal-safe sleep. */ - msecs = (n > 18? 100 : (1 << (n - 12))); - Sleep(msecs); + /* Short async-signal-safe sleep. */ + msecs = (n > 18? 100 : (1 << (n - 12))); + Sleep(msecs); } } -#else +#else /* AO_pause is available elsewhere */ @@ -68,36 +68,36 @@ extern void AO_pause(int); #ifdef AO_USE_ALMOST_LOCK_FREE -/* LIFO linked lists based on compare-and-swap. We need to avoid */ -/* the case of a node deletion and reinsertion while I'm deleting */ -/* it, since that may cause my CAS to succeed eventhough the next */ -/* pointer is now wrong. Our solution is not fully lock-free, but it */ -/* is good enough for signal handlers, provided we have a suitably low */ -/* bound on the number of recursive signal handler reentries. */ -/* A list consists of a first pointer and a blacklist */ -/* of pointer values that are currently being removed. No list element */ -/* on the blacklist may be inserted. If we would otherwise do so, we */ -/* are allowed to insert a variant that differs only in the least */ -/* significant, ignored, bits. If the list is full, we wait. */ - -/* Crucial observation: A particular padded pointer x (i.e. pointer */ -/* plus arbitrary low order bits) can never be newly inserted into */ -/* a list while it's in the corresponding auxiliary data structure. */ - -/* The second argument is a pointer to the link field of the element */ -/* to be inserted. */ -/* Both list headers and link fields contain "perturbed" pointers, i.e. */ -/* pointers with extra bits "or"ed into the low order bits. */ +/* LIFO linked lists based on compare-and-swap. We need to avoid */ +/* the case of a node deletion and reinsertion while I'm deleting */ +/* it, since that may cause my CAS to succeed eventhough the next */ +/* pointer is now wrong. Our solution is not fully lock-free, but it */ +/* is good enough for signal handlers, provided we have a suitably low */ +/* bound on the number of recursive signal handler reentries. */ +/* A list consists of a first pointer and a blacklist */ +/* of pointer values that are currently being removed. No list element */ +/* on the blacklist may be inserted. If we would otherwise do so, we */ +/* are allowed to insert a variant that differs only in the least */ +/* significant, ignored, bits.
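For non-Windows builds this file only declares extern void AO_pause(int); a POSIX counterpart under the same policy as the Windows version above (spin while n is small, then sleep roughly 2^(n-12) ms, capped at 100 ms) might look like the following. A hedged sketch; the library's real definition lives elsewhere:

#include <time.h>

static void
pause_sketch(int n)             /* hypothetical name */
{
  if (n < 12) {
    volatile unsigned long junk = 0;
    int i;
    for (i = 0; i < 1000; ++i) junk += i;   /* brief spin */
  } else {
    struct timespec ts;
    long msecs = (n > 18 ? 100 : (1L << (n - 12)));
    ts.tv_sec = 0;
    ts.tv_nsec = msecs * 1000000L;
    nanosleep(&ts, 0);   /* short sleep; signal-safety is platform-dependent */
  }
}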
If the list is full, we wait. */ + +/* Crucial observation: A particular padded pointer x (i.e. pointer */ +/* plus arbitrary low order bits) can never be newly inserted into */ +/* a list while it's in the corresponding auxiliary data structure. */ + +/* The second argument is a pointer to the link field of the element */ +/* to be inserted. */ +/* Both list headers and link fields contain "perturbed" pointers, i.e. */ +/* pointers with extra bits "or"ed into the low order bits. */ void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, - AO_stack_aux *a) + AO_stack_aux *a) { int i; AO_t x_bits = (AO_t)x; AO_t next; - - /* No deletions of x can start here, since x is not currently in the */ - /* list. */ + + /* No deletions of x can start here, since x is not currently in the */ + /* list. */ retry: # if AO_BL_SIZE == 2 { @@ -106,13 +106,13 @@ AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, AO_t entry2 = AO_load(a -> AO_stack_bl + 1); if (entry1 == x_bits || entry2 == x_bits) { - /* Entry is currently being removed. Change it a little. */ - ++x_bits; - if ((x_bits & AO_BIT_MASK) == 0) - /* Version count overflowed; */ - /* EXTREMELY unlikely, but possible. */ - x_bits = (AO_t)x; - goto retry; + /* Entry is currently being removed. Change it a little. */ + ++x_bits; + if ((x_bits & AO_BIT_MASK) == 0) + /* Version count overflowed; */ + /* EXTREMELY unlikely, but possible. */ + x_bits = (AO_t)x; + goto retry; } } # else @@ -120,13 +120,13 @@ AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, { if (AO_load(a -> AO_stack_bl + i) == x_bits) { - /* Entry is currently being removed. Change it a little. */ - ++x_bits; - if ((x_bits & AO_BIT_MASK) == 0) - /* Version count overflowed; */ - /* EXTREMELY unlikely, but possible. */ - x_bits = (AO_t)x; - goto retry; + /* Entry is currently being removed. Change it a little. */ + ++x_bits; + if ((x_bits & AO_BIT_MASK) == 0) + /* Version count overflowed; */ + /* EXTREMELY unlikely, but possible. */ + x_bits = (AO_t)x; + goto retry; } } # endif @@ -167,30 +167,30 @@ AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a) retry: first = AO_load(list); if (0 == first) return 0; - /* Insert first into aux black list. */ - /* This may spin if more than AO_BL_SIZE removals using auxiliary */ - /* structure a are currently in progress. */ + /* Insert first into aux black list. */ + /* This may spin if more than AO_BL_SIZE removals using auxiliary */ + /* structure a are currently in progress. */ for (i = 0; ; ) { if (PRECHECK(a -> AO_stack_bl[i]) - AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first)) + AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first)) break; ++i; if ( i >= AO_BL_SIZE ) - { - i = 0; - AO_pause(++j); - } + { + i = 0; + AO_pause(++j); + } } assert(i >= 0 && i < AO_BL_SIZE); assert(a -> AO_stack_bl[i] == first); - /* First is on the auxiliary black list. It may be removed by */ - /* another thread before we get to it, but a new insertion of x */ - /* cannot be started here. */ - /* Only we can remove it from the black list. */ - /* We need to make sure that first is still the first entry on the */ - /* list. Otherwise it's possible that a reinsertion of it was */ - /* already started before we added the black list entry. */ + /* First is on the auxiliary black list. It may be removed by */ + /* another thread before we get to it, but a new insertion of x */ + /* cannot be started here. */ + /* Only we can remove it from the black list. 
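The for(;;) loop above (cheap pre-read, then compare-and-swap, then AO_pause) is the only part of pop that can spin: it claims a free slot in the fixed-size blacklist. Reduced to its shape, reusing the hypothetical ao_cas() and pause_sketch() helpers sketched earlier:

static int
claim_slot(volatile AO_t bl[], int size, AO_t val)
{
  int i = 0, j = 0;
  for (;;) {
    /* Pre-read before the expensive CAS (the PRECHECK idea above). */
    if (bl[i] == 0 && ao_cas(&bl[i], 0, val))
      return i;               /* slot i now holds val */
    if (++i >= size) {
      i = 0;
      pause_sketch(++j);      /* every slot busy: back off and rescan */
    }
  }
}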
*/ + /* We need to make sure that first is still the first entry on the */ + /* list. Otherwise it's possible that a reinsertion of it was */ + /* already started before we added the black list entry. */ if (first != AO_load(list)) { AO_store_release(a->AO_stack_bl+i, 0); goto retry; @@ -202,15 +202,15 @@ AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a) goto retry; } assert(*list != first); - /* Since we never insert an entry on the black list, this cannot have */ - /* succeeded unless first remained on the list while we were running. */ - /* Thus its next link cannot have changed out from under us, and we */ - /* removed exactly one entry and preserved the rest of the list. */ - /* Note that it is quite possible that an additional entry was */ - /* inserted and removed while we were running; this is OK since the */ - /* part of the list following first must have remained unchanged, and */ - /* first must again have been at the head of the list when the */ - /* compare_and_swap succeeded. */ + /* Since we never insert an entry on the black list, this cannot have */ + /* succeeded unless first remained on the list while we were running. */ + /* Thus its next link cannot have changed out from under us, and we */ + /* removed exactly one entry and preserved the rest of the list. */ + /* Note that it is quite possible that an additional entry was */ + /* inserted and removed while we were running; this is OK since the */ + /* part of the list following first must have remained unchanged, and */ + /* first must again have been at the head of the list when the */ + /* compare_and_swap succeeded. */ AO_store_release(a->AO_stack_bl+i, 0); return first_ptr; } @@ -231,11 +231,11 @@ void AO_stack_push_release(AO_stack_t *list, AO_t *element) next = AO_load(&(list -> ptr)); *element = next; } while (!AO_compare_and_swap_release - ( &(list -> ptr), next, (AO_t) element)); - /* This uses a narrow CAS here, an old optimization suggested */ - /* by Treiber. Pop is still safe, since we run into the ABA */ - /* problem only if there were both intervening "pop"s and "push"es. */ - /* In that case we still see a change in the version number. */ + ( &(list -> ptr), next, (AO_t) element)); + /* This uses a narrow CAS here, an old optimization suggested */ + /* by Treiber. Pop is still safe, since we run into the ABA */ + /* problem only if there were both intervening "pop"s and "push"es. */ + /* In that case we still see a change in the version number. */ } AO_t *AO_stack_pop_acquire(AO_stack_t *list) @@ -245,13 +245,13 @@ AO_t *AO_stack_pop_acquire(AO_stack_t *list) AO_t cversion; do { - /* Version must be loaded first. */ + /* Version must be loaded first. */ cversion = AO_load_acquire(&(list -> version)); cptr = (AO_t *)AO_load(&(list -> ptr)); if (cptr == 0) return 0; next = *cptr; } while (!AO_compare_double_and_swap_double_release - (list, cversion, (AO_t) cptr, cversion+1, (AO_t) next)); + (list, cversion, (AO_t) cptr, cversion+1, (AO_t) next)); return cptr; } @@ -262,22 +262,22 @@ AO_t *AO_stack_pop_acquire(AO_stack_t *list) #error Untested! Probably doesnt work. -/* We have a wide CAS, but only does an AO_t-wide comparison. */ -/* We can't use the Treiber optimization, since we only check */ -/* for an unchanged version number, not an unchanged pointer. */ +/* We have a wide CAS, but only does an AO_t-wide comparison. */ +/* We can't use the Treiber optimization, since we only check */ +/* for an unchanged version number, not an unchanged pointer. 
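The Treiber-style comments above concern the classic ABA hazard: if a thread reads head A (whose next is B), is preempted while other threads pop A, pop B, and push A back, a pointer-only CAS(head, A, B) then succeeds and installs the long-gone B. Bumping a version number on every successful pop makes that stale comparison fail, which is what AO_stack_pop_acquire relies on. A reduced sketch with a hypothetical two-word cas2():

typedef struct { AO_t version; AO_t ptr; } vstack;

static AO_t *
versioned_pop(volatile vstack *s)
{
  AO_t ver, ptr, next;
  do {
    ver = s->version;         /* version must be read first */
    ptr = s->ptr;
    if (ptr == 0) return 0;   /* empty */
    next = *(AO_t *)ptr;
  } while (!cas2(s, ver, ptr, ver + 1, next));  /* fails if any pop raced */
  return (AO_t *)ptr;
}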
*/ void AO_stack_push_release(AO_stack_t *list, AO_t *element) { AO_t version; AO_t next_ptr; do { - /* Again version must be loaded first, for different reason. */ + /* Again version must be loaded first, for different reason. */ version = AO_load_acquire(&(list -> version)); next_ptr = AO_load(&(list -> ptr)); *element = next_ptr; } while (!AO_compare_and_swap_double_release( - list, version, - version+1, (AO_t) element)); + list, version, + version+1, (AO_t) element)); } AO_t *AO_stack_pop_acquire(AO_stack_t *list) @@ -292,7 +292,7 @@ AO_t *AO_stack_pop_acquire(AO_stack_t *list) if (cptr == 0) return 0; next = *cptr; } while (!AO_compare_double_and_swap_double_release - (list, cversion, (AO_t) cptr, cversion+1, next)); + (list, cversion, (AO_t) cptr, cversion+1, next)); return cptr; } diff --git a/src/atomic_ops_stack.h b/src/atomic_ops_stack.h index 3b9d46b..6c8b5bb 100644 --- a/src/atomic_ops_stack.h +++ b/src/atomic_ops_stack.h @@ -5,27 +5,27 @@ /* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. - * + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: - * + * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SOFTWARE. */ -/* Almost lock-free LIFO linked lists (linked stacks). */ +/* Almost lock-free LIFO linked lists (linked stacks). */ #ifndef AO_STACK_H #define AO_STACK_H @@ -36,9 +36,9 @@ && defined(AO_HAVE_compare_and_swap) # define AO_USE_ALMOST_LOCK_FREE #else - /* If we have no compare-and-swap operation defined, we assume */ - /* that we will actually be using CAS emulation. If we do that, */ - /* it's cheaper to use the version-based implementation. */ + /* If we have no compare-and-swap operation defined, we assume */ + /* that we will actually be using CAS emulation. If we do that, */ + /* it's cheaper to use the version-based implementation. */ # define AO_STACK_IS_LOCK_FREE #endif @@ -46,7 +46,7 @@ * These are not guaranteed to be completely lock-free. * List insertion may spin under extremely unlikely conditions. * It cannot deadlock due to recursive reentry unless AO_list_remove - * is called while at least AO_BL_SIZE activations of + * is called while at least AO_BL_SIZE activations of * AO_list_remove are currently active in the same thread, i.e. * we must have at least AO_BL_SIZE recursive signal handler * invocations. @@ -67,10 +67,10 @@ */ #ifdef AO_USE_ALMOST_LOCK_FREE -/* The number of low order pointer bits we can use for a small */ -/* version number. */ +/* The number of low order pointer bits we can use for a small */ +/* version number. 
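The AO_N_BITS machinery below works because list nodes are at least 2^AO_N_BITS-byte aligned, leaving that many low bits of each link free for a tiny version count. A sketch of the resulting tag/untag arithmetic, mirroring AO_BIT_MASK and AO_REAL_NEXT_PTR:

#define N_BITS    2                           /* 3 on LP64 targets */
#define BIT_MASK  ((AO_t)((1 << N_BITS) - 1))

#define REAL_PTR(x)  ((AO_t *)((x) & ~BIT_MASK))         /* strip the tag */
#define TAG(x)       ((x) & BIT_MASK)                    /* low-bit count */
#define BUMP_TAG(x)  (((x) & ~BIT_MASK) | (((x) + 1) & BIT_MASK))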
*/ # if defined(__LP64__) || defined(_LP64) || defined(_WIN64) - /* WIN64 isn't really supported yet. */ + /* WIN64 isn't really supported yet. */ # define AO_N_BITS 3 # else # define AO_N_BITS 2 @@ -94,25 +94,25 @@ typedef struct AO__stack_aux { volatile AO_t AO_stack_bl[AO_BL_SIZE]; } AO_stack_aux; -/* The stack implementation knows only about the location of */ -/* link fields in nodes, and nothing about the rest of the */ -/* stack elements. Link fields hold an AO_t, which is not */ -/* necessarily a real pointer. This converts the AO_t to a */ -/* real (AO_t *) which is either o, or points at the link */ -/* field in the next node. */ +/* The stack implementation knows only about the location of */ +/* link fields in nodes, and nothing about the rest of the */ +/* stack elements. Link fields hold an AO_t, which is not */ +/* necessarily a real pointer. This converts the AO_t to a */ +/* real (AO_t *) which is either o, or points at the link */ +/* field in the next node. */ #define AO_REAL_NEXT_PTR(x) (AO_t *)((x) & ~AO_BIT_MASK) -/* The following two routines should not normally be used directly. */ -/* We make them visible here for the rare cases in which it makes sense */ -/* to share the an AO_stack_aux between stacks. */ +/* The following two routines should not normally be used directly. */ +/* We make them visible here for the rare cases in which it makes sense */ +/* to share the an AO_stack_aux between stacks. */ void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, - AO_stack_aux *); + AO_stack_aux *); AO_t * AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux *); -/* And now AO_stack_t for the real interface: */ +/* And now AO_stack_t for the real interface: */ typedef struct AO__stack { volatile AO_t AO_ptr; @@ -134,28 +134,28 @@ AO_INLINE void AO_stack_init(AO_stack_t *list) list -> AO_ptr = 0; } -/* Convert an AO_stack_t to a pointer to the link field in */ -/* the first element. */ +/* Convert an AO_stack_t to a pointer to the link field in */ +/* the first element. */ #define AO_REAL_HEAD_PTR(x) AO_REAL_NEXT_PTR((x).AO_ptr) #define AO_stack_push_release(l, e) \ - AO_stack_push_explicit_aux_release(&((l)->AO_ptr), e, &((l)->AO_aux)) + AO_stack_push_explicit_aux_release(&((l)->AO_ptr), e, &((l)->AO_aux)) #define AO_HAVE_stack_push_release #define AO_stack_pop_acquire(l) \ - AO_stack_pop_explicit_aux_acquire(&((l)->AO_ptr), &((l)->AO_aux)) + AO_stack_pop_explicit_aux_acquire(&((l)->AO_ptr), &((l)->AO_aux)) #define AO_HAVE_stack_pop_acquire -# else /* Use fully non-blocking data structure, wide CAS */ +# else /* Use fully non-blocking data structure, wide CAS */ #ifndef AO_HAVE_double_t - /* Can happen if we're using CAS emulation, since we don't want to */ - /* force that here, in case other atomic_ops clients don't want it. */ + /* Can happen if we're using CAS emulation, since we don't want to */ + /* force that here, in case other atomic_ops clients don't want it. */ # include "atomic_ops/sysdeps/standard_ao_double_t.h" #endif typedef volatile AO_double_t AO_stack_t; -/* AO_val1 is version, AO_val2 is pointer. */ +/* AO_val1 is version, AO_val2 is pointer. */ #define AO_STACK_INITIALIZER {0} -- 2.40.0
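For reference, a usage sketch of the stack interface declared in the final hunks: the element argument to push is a pointer to the node's AO_t link field, and pop returns a pointer to the link field of the removed node, so placing the link first in the node makes the conversion a cast. Hedged; the node layout is a convention assumed here, not something the patch itself shows.

#include "atomic_ops_stack.h"

struct node {
  AO_t link;                    /* link field placed first */
  int data;
};

static AO_stack_t stack = AO_STACK_INITIALIZER;

void
example(struct node *n)
{
  AO_stack_push_release(&stack, &n->link);
  {
    AO_t *top = AO_stack_pop_acquire(&stack);
    if (top != 0) {
      struct node *popped = (struct node *)top;  /* valid: link is first */
      (void)popped->data;
    }
  }
}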