--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file adds definitions appropriate for environments in which a
+ * volatile load has acquire semantics, and a volatile store has release
+ * semantics. This is true with the standard Itanium ABI.
+ */
+/* Itanium: an ordinary volatile load is compiled as ld.acq, so it   */
+/* already carries acquire semantics.                                */
+AO_INLINE AO_T
+AO_load_acquire(volatile AO_T *p)
+{
+  /* A normal volatile load generates an ld.acq */
+  return *p;
+}
+#define AO_HAVE_load_acquire
+
+/* Likewise, an ordinary volatile store is compiled as st.rel and    */
+/* carries release semantics.                                        */
+AO_INLINE void
+AO_store_release(volatile AO_T *p, AO_T val)
+{
+  /* A normal volatile store generates an st.rel */
+  *p = val;
+}
+#define AO_HAVE_store_release
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Definitions for architectures on which loads and stores of AO_T are
+ * atomic for all legal alignments.
+ */
+
+/* Atomic load: relies on naturally aligned loads of AO_T being      */
+/* atomic on this architecture; the assert enforces alignment.       */
+/* NOTE(review): uses assert(); confirm <assert.h> is included by    */
+/* the file that includes this header.                               */
+AO_INLINE AO_T
+AO_load(volatile AO_T *addr)
+{
+  assert(((unsigned long)addr & (sizeof(AO_T) - 1)) == 0);
+  /* Cast away the volatile for architectures where */
+  /* volatile adds barrier semantics. */
+  return *(AO_T *)addr;
+}
+
+#define AO_HAVE_load
+
+/* Atomic store: same alignment requirement as AO_load; no memory    */
+/* ordering is implied.                                              */
+AO_INLINE void
+AO_store(volatile AO_T *addr, AO_T new_val)
+{
+  assert(((unsigned long)addr & (sizeof(AO_T) - 1)) == 0);
+  (*(AO_T *)addr) = new_val;
+}
+
+#define AO_HAVE_store
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Definitions for architectures on which loads and stores of AO_T are
+ * atomic for all legal alignments.
+ */
+
+/* Atomic load for architectures where any legal alignment of AO_T   */
+/* yields an atomic access; no ordering implied.                     */
+AO_INLINE AO_T
+AO_load(volatile AO_T *addr)
+{
+  /* Cast away the volatile for architectures like IA64 where */
+  /* volatile adds barrier semantics. */
+  return (*(AO_T *)addr);
+}
+
+#define AO_HAVE_load
+
+/* Plain atomic store; no memory ordering implied.                   */
+AO_INLINE void
+AO_store(volatile AO_T *addr, AO_T new_val)
+{
+  (*(AO_T *)addr) = new_val;
+}
+
+#define AO_HAVE_store
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file specifies Itanium primitives for use with the Intel (ecc)
+ * compiler.  We use intrinsics instead of the inline assembly code in the
+ * gcc file.
+ */
+
+#include "../atomic_load_store.h"
+
+#include "../acquire_release_volatile.h"
+
+#include <ia64intrin.h>
+
+/* Full memory fence via the mf instruction intrinsic.               */
+AO_INLINE void
+AO_nop_full()
+{
+  __mf();
+}
+#define AO_HAVE_nop_full
+
+/* Atomically increment *p with acquire ordering; returns the        */
+/* previous value.                                                   */
+AO_INLINE AO_T
+AO_fetch_and_add1_acquire (volatile AO_T *p)
+{
+  return __fetchadd8_acq((unsigned __int64 *)p, 1);
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+/* As above, with release ordering.                                  */
+AO_INLINE AO_T
+AO_fetch_and_add1_release (volatile AO_T *p)
+{
+  return __fetchadd8_rel((unsigned __int64 *)p, 1);
+}
+
+#define AO_HAVE_fetch_and_add1_release
+
+/* Atomically decrement *p with acquire ordering; returns the        */
+/* previous value.                                                   */
+AO_INLINE AO_T
+AO_fetch_and_sub1_acquire (volatile AO_T *p)
+{
+  return __fetchadd8_acq((unsigned __int64 *)p, -1);
+}
+
+#define AO_HAVE_fetch_and_sub1_acquire
+
+/* As above, with release ordering.                                  */
+AO_INLINE AO_T
+AO_fetch_and_sub1_release (volatile AO_T *p)
+{
+  return __fetchadd8_rel((unsigned __int64 *)p, -1);
+}
+
+#define AO_HAVE_fetch_and_sub1_release
+
+/* Compare-and-swap with acquire ordering; returns nonzero iff the   */
+/* swap was performed.                                               */
+/* NOTE(review): the fetchadd wrappers above cast p to               */
+/* (unsigned __int64 *); confirm that the CAS intrinsics accept      */
+/* volatile AO_T * without a similar cast.                           */
+AO_INLINE int
+AO_compare_and_swap_acquire(volatile AO_T *addr,
+                            AO_T old, AO_T new_val)
+{
+  AO_T oldval;
+  oldval = _InterlockedCompareExchange64_acq(addr, new_val, old);
+  return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_acquire
+
+/* As above, with release ordering.                                  */
+AO_INLINE int
+AO_compare_and_swap_release(volatile AO_T *addr,
+                            AO_T old, AO_T new_val)
+{
+  AO_T oldval;
+  oldval = _InterlockedCompareExchange64_rel(addr, new_val, old);
+  return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_release
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Ensure, if at all possible, that AO_compare_and_swap_full() is
+ * available. The emulation should be brute-force signal-safe, even
+ * though it actually blocks.
+ * Including this file will generate an error if AO_compare_and_swap_full()
+ * cannot be made available.
+ * This will be included from platform-specific atomic_ops files
+ * if appropriate, and if AO_FORCE_CAS is defined. It should not be
+ * included directly, especially since it affects the implementation
+ * of other atomic update primitives.
+ * The implementation assumes that only AO_store_XXX and AO_test_and_set_XXX
+ * variants are defined, and that AO_test_and_set_XXX is not used to
+ * operate on compare_and_swap locations.
+ */
+
+#if !defined(ATOMIC_OPS_H)
+# error This file should not be included directly.
+#endif
+
+AO_T AO_compare_and_swap_emulation(volatile AO_T *addr, AO_T old,
+ AO_T new_val);
+
+void AO_store_full_emulation(volatile AO_T *addr, AO_T val);
+
+#define AO_compare_and_swap_full(addr, old, newval) \
+ AO_compare_and_swap_emulation(addr, old, newval)
+#define AO_HAVE_compare_and_swap_full
+
+#undef AO_store
+#undef AO_HAVE_store
+#undef AO_store_write
+#undef AO_HAVE_store_write
+#undef AO_store_release
+#undef AO_HAVE_store_release
+#undef AO_store_full
+#undef AO_HAVE_store_full
+#define AO_store_full(addr, val) AO_store_full_emulation(addr, val)
+#define AO_HAVE_store_full
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../atomic_load_store.h"
+
+/* Full barrier: the Alpha mb instruction orders all prior memory    */
+/* operations before all subsequent ones.                            */
+AO_INLINE void
+AO_nop_full()
+{
+  __asm__ __volatile__("mb" : : : "memory");
+}
+
+#define AO_HAVE_nop_full
+
+/* Write barrier: wmb orders stores only.                            */
+AO_INLINE void
+AO_nop_write()
+{
+  __asm__ __volatile__("wmb" : : : "memory");
+}
+
+#define AO_HAVE_nop_write
+
+/* mb should be used for AO_nop_read(). That's the default. */
+
+/* We believe that ldq_l ... stq_c does not imply any memory barrier. */
+/* We should add an explicit fetch_and_add definition. */
+/* LL/SC compare-and-swap: load-locked *addr, compare with old, and  */
+/* store-conditionally new_val, retrying if the reservation is lost. */
+/* Returns nonzero iff the swap was performed.  Per the comment      */
+/* above, no memory barrier is implied.                              */
+AO_INLINE int
+AO_compare_and_swap(volatile AO_T *addr,
+		    AO_T old, AO_T new_val)
+{
+  unsigned long was_equal;
+  unsigned long temp;
+
+  __asm__ __volatile__(
+                     "1:     ldq_l %0,%1\n"
+                     "       cmpeq %0,%4,%2\n"
+                     "       mov %3,%0\n"
+                     "       beq %2,2f\n"
+                     "       stq_c %0,%1\n"
+                     "       beq %0,1b\n"
+                     "2:\n"
+                     :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
+                     : "r" (new_val), "Ir" (old)
+                     :"memory");
+  return was_equal;
+}
+
+#define AO_HAVE_compare_and_swap
+
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME. Very incomplete. No support for 64 bits. */
+
+#include "../atomic_load_store.h"
+
+/* Atomically replace *addr with 1 via compare-and-swap, retrying    */
+/* until the swap succeeds; returns the previous value.              */
+/* Fix: the asm output operand named an undeclared variable `ret`    */
+/* (the declared variable is `oldval`), which cannot compile; the    */
+/* unused `temp` local is removed.                                   */
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T *addr) {
+  int oldval;
+
+  __asm__ __volatile__ (
+    "  l   %0,0(%2)\n"
+    "0: cs  %0,%1,0(%2)\n"
+    "   jl  0b"
+    : "=&d" (oldval)
+    : "d" (1), "a" (addr)
+    : "cc", "memory");
+  return oldval;
+}
+
+#define AO_HAVE_test_and_set_full
+
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../atomic_load_store.h"
+
+/* Some architecture set descriptions include special "ordered" memory */
+/* operations. As far as we can tell, no existing processors actually */
+/* require those. Nor does it appear likely that future processors */
+/* will. */
+#include "../ordered.h"
+
+/* It's not clear this should really be used from user mode. But we */
+/* include it here to demonstrate that it could be handled. */
+union AO_pa_clearable_loc {
+ int data;
+ double align_16[2]; /* Make the size 16 bytes */
+} __attribute__ ((aligned (16)));
+
+#undef AO_TS_T
+#undef AO_TS_INITIALIZER
+#define AO_TS_T union AO_pa_clearable_loc
+#define AO_TS_INITIALIZER { 1 }
+/* Switch meaning of set and clear, since we only have an atomic clear */
+/* instruction. */
+#undef AO_TS_VAL
+#undef AO_TS_CLEAR
+#undef AO_TS_SET
+typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
+#define AO_TS_VAL AO_PA_TS_val
+#define AO_TS_CLEAR AO_PA_TS_clear
+#define AO_TS_SET AO_PA_TS_set
+
+/* Test-and-set via the hppa ldcw instruction, which atomically      */
+/* loads the word and clears it to zero; "set" and "clear" are       */
+/* inverted relative to other architectures, per the AO_TS_VAL       */
+/* redefinition earlier in this file.                                */
+/* Fix: declared AO_INLINE like every other port, instead of plain   */
+/* `static` (which would warn/bloat when unused in a header).        */
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T * addr)
+{
+  int result;
+
+  __asm__ __volatile__("ldcw 0(%1),%0"
+                       : "=r"(result) : "r"(addr) : "memory");
+  return result;
+}
+
+#define AO_HAVE_test_and_set_full
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../atomic_load_store.h"
+
+#include "../acquire_release_volatile.h"
+
+/* Full memory fence via the IA64 mf instruction.                    */
+AO_INLINE void
+AO_nop_full()
+{
+  __asm__ __volatile__("mf" : : : "memory");
+}
+#define AO_HAVE_nop_full
+
+/* Atomically add 1 to *p with acquire ordering (fetchadd8.acq);     */
+/* returns the previous value.                                       */
+AO_INLINE AO_T
+AO_fetch_and_add1_acquire (volatile AO_T *p)
+{
+  AO_T result;
+
+  __asm__ __volatile__ ("fetchadd8.acq %0=[%1],1":
+			"=r" (result): "r"(p) :"memory");
+  return result;
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+/* As above, with release ordering (fetchadd8.rel).                  */
+AO_INLINE AO_T
+AO_fetch_and_add1_release (volatile AO_T *p)
+{
+  AO_T result;
+
+  __asm__ __volatile__ ("fetchadd8.rel %0=[%1],1":
+			"=r" (result): "r"(p) :"memory");
+  return result;
+}
+
+#define AO_HAVE_fetch_and_add1_release
+
+/* Atomically subtract 1 from *p with acquire ordering; returns the  */
+/* previous value.                                                   */
+AO_INLINE AO_T
+AO_fetch_and_sub1_acquire (volatile AO_T *p)
+{
+  AO_T result;
+
+  __asm__ __volatile__ ("fetchadd8.acq %0=[%1],-1":
+			"=r" (result): "r"(p) :"memory");
+  return result;
+}
+
+#define AO_HAVE_fetch_and_sub1_acquire
+
+/* As above, with release ordering.                                  */
+AO_INLINE AO_T
+AO_fetch_and_sub1_release (volatile AO_T *p)
+{
+  AO_T result;
+
+  __asm__ __volatile__ ("fetchadd8.rel %0=[%1],-1":
+			"=r" (result): "r"(p) :"memory");
+  return result;
+}
+
+#define AO_HAVE_fetch_and_sub1_release
+
+/* 64-bit compare-and-swap with acquire ordering: loads ar.ccv with  */
+/* the expected value, then issues cmpxchg8.acq.  Returns nonzero    */
+/* iff the swap was performed.                                       */
+AO_INLINE int
+AO_compare_and_swap_acquire(volatile AO_T *addr,
+			     AO_T old, AO_T new_val) 
+{
+  AO_T oldval;
+  __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg8.acq %0=%1,%2,ar.ccv"
+		       : "=r"(oldval), "+S"(*addr)
+		       : "r"(new_val), "r"(old) : "memory");
+  return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_acquire
+
+/* As above, with release ordering (cmpxchg8.rel).                   */
+AO_INLINE int
+AO_compare_and_swap_release(volatile AO_T *addr,
+			     AO_T old, AO_T new_val) 
+{
+  AO_T oldval;
+  __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
+		       : "=r"(oldval), "+S"(*addr)
+		       : "r"(new_val), "r"(old) : "memory");
+  return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_release
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME. Very incomplete. */
+#include "../aligned_atomic_load_store.h"
+
+/* Contributed by Tony Mantler or new. Should be changed to MIT license? */
+/* Atomically set the location via the m68k tas instruction and      */
+/* return a nonzero value iff it was already set.                    */
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T *addr) {
+  int oldval;
+
+  /* The return value is semi-phony. */
+  /* 'tas' sets bit 7 while the return */
+  /* value pretends bit 0 was set */
+  __asm__ __volatile__(
+		"tas %1@; sne %0; negb %0"
+		: "=d" (oldval)
+		: "a" (addr) : "memory");
+  return oldval;
+}
+
+#define AO_HAVE_test_and_set_full
+
+
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME. Very incomplete. No support for 64 bits. */
+/* I have also been told that the behavior of some of the barriers is model */
+/* dependent. This will probably require help from IBM to fix up. */
+
+#include "../atomic_load_store.h"
+
+/* Full barrier: sync orders all prior memory operations before all  */
+/* subsequent ones.                                                  */
+AO_INLINE void
+AO_nop_full()
+{
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+#define AO_HAVE_nop_full
+
+/* Write barrier: eieio orders stores.                               */
+AO_INLINE void
+AO_nop_write()
+{
+  __asm__ __volatile__("eieio" : : : "memory");
+}
+
+#define AO_HAVE_nop_write
+
+/* Set *addr (0 -> 1) using lwarx/stwcx.; returns the old value, so  */
+/* zero means we acquired it.                                        */
+/* Fix: the original asm used the invalid "=p" output constraint and */
+/* inconsistent operand numbering (lwarx read %3 while stwcx. wrote  */
+/* through %1); addr is now a single input operand, and cr0 (set by  */
+/* stwcx.) is listed in the clobbers.                                */
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T *addr) {
+  int oldval;
+  int temp = 1; /* locked value */
+
+  __asm__ __volatile__(
+               "1:\tlwarx %0,0,%2\n"   /* load and reserve            */
+               "\tcmpwi %0, 0\n"       /* if load is                  */
+               "\tbne 2f\n"            /*   non-zero, return already set */
+               "\tstwcx. %1,0,%2\n"    /* else store conditional      */
+               "\tbne- 1b\n"           /* retry if lost reservation   */
+               "2:\t\n"                /* oldval is zero if we set    */
+              : "=&r"(oldval)
+              : "r"(temp), "r"(addr)
+              : "memory", "cr0");
+
+  return oldval;
+}
+
+#define AO_HAVE_test_and_set_full
+
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME. Very incomplete. */
+
+/* Compare-and-swap: cs (or csg on s390x) compares *addr with old    */
+/* and stores new_val on equality; ipm/srl extract the condition     */
+/* code.  Returns nonzero iff the swap was performed.                */
+/* Fix: return type changed from AO_T to int, matching every other   */
+/* AO_compare_and_swap* implementation (the value is a boolean).     */
+AO_INLINE int AO_compare_and_swap_full(volatile AO_T *addr,
+				       AO_T old, AO_T new_val)
+{
+  int retval;
+  __asm__ __volatile__ (
+# ifndef __s390x__
+    "     cs  %1,%2,0(%3)\n"
+# else
+    "     csg %1,%2,0(%3)\n"
+# endif
+    "     ipm %0\n"
+    "     srl %0,28\n"
+    : "=&d" (retval), "+d" (old)
+    : "d" (new_val), "a" (addr)
+    : "cc", "memory");
+  return retval == 0;
+}
+
+#define AO_HAVE_compare_and_swap_full
+
+#define AO_HAVE_compare_and_swap_full
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* FIXME. Very incomplete. No support for sparc64. */
+
+#include "../atomic_load_store.h"
+
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T *addr) {
+ int oldval;
+
+ __asm__ __volatile__("ldstub %1,%0"
+ : "=r"(oldval), "=m"(*addr)
+ : "m"(*addr) : "memory");
+ return oldval;
+}
+
+#define AO_HAVE_test_and_set_full
+
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Some of the machine specific code was borrowed from our GC distribution.
+ */
+
+/* The following really assume we have a 486 or better. Unfortunately */
+/* gcc doesn't define a suitable feature test macro based on command */
+/* line options. */
+/* We should perhaps test dynamically. */
+
+#include "../aligned_atomic_load_store.h"
+
+/* Real X86 implementations, except for some old WinChips, appear */
+/* to enforce ordering between memory operations, EXCEPT that a later */
+/* read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore both the WinChips, and the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+
+#include "../ordered_except_wr.h"
+
+#if defined(USE_PENTIUM4_INSTRS)
+/* Full fence via mfence (Pentium 4 and later only).                 */
+AO_INLINE void
+AO_nop_full()
+{
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+/* Fix: macro was spelled AO_HAVE_NOP_FULL; the convention used      */
+/* everywhere else is lowercase, so the uppercase define would never */
+/* register this primitive.                                          */
+#define AO_HAVE_nop_full
+
+#else
+
+/* We could use the cpuid instruction.  But that seems to be slower  */
+/* than the default implementation based on test_and_set_full.  Thus */
+/* we omit that bit of misinformation here.                          */
+
+#endif
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+/* Really only works for 486 and later */
+/* Atomically add incr to *p and return the previous value,          */
+/* implemented with lock; xaddl.                                     */
+AO_INLINE AO_T
+AO_fetch_and_add_full (volatile AO_T *p, long incr)
+{
+  AO_T result = incr;
+
+  __asm__ __volatile__ ("lock; xaddl %0, %1" :
+			"+r" (result), "+m" (*p) : : "memory");
+  return result;
+}
+
+#define AO_HAVE_fetch_and_add_full
+
+/* Exchange 1 into *addr; returns the previous value.                */
+/* NOTE(review): the parameter is declared volatile AO_T * while     */
+/* other ports use volatile AO_TS_T *; presumably they coincide on   */
+/* x86 -- confirm.                                                   */
+AO_INLINE AO_TS_T
+AO_test_and_set_full(volatile AO_T *addr)
+{
+  int oldval;
+  /* Note: the "xchg" instruction does not need a "lock" prefix */
+  __asm__ __volatile__("xchgl %0, %1"
+		       : "=r"(oldval), "+m"(*(addr))
+		       : "0"(1) : "memory");
+  return oldval;
+}
+
+#define AO_HAVE_test_and_set_full
+
+/* Returns nonzero if the comparison succeeded. */
+/* lock; cmpxchgl compares *addr with old (in eax) and stores        */
+/* new_val on equality; setz captures the ZF result.                 */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_T *addr,
+		         AO_T old, AO_T new_val) 
+{
+  char result;
+  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
+	    	       : "+m"(*(addr)), "=q"(result)
+		       : "r" (new_val), "a"(old) : "memory");
+  return (int) result;
+}
+
+#define AO_HAVE_compare_and_swap_full
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* The following is useful primarily for debugging and documentation. */
+/* We define various atomic operations by acquiring a global pthread */
+/* lock. The resulting implementation will perform poorly, but should */
+/* be correct unless it is used from signal handlers. */
+/* We assume that all pthread operations act like full memory barriers. */
+/* (We believe that is the intent of the specification.) */
+
+#include <pthread.h>
+
+/* We define only the full barrier variants, and count on the */
+/* generalization section below to fill in the rest. */
+static pthread_mutex_t AO_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* A lock/unlock pair acts as a full barrier, given the assumption   */
+/* (stated above) that pthread operations are full barriers.         */
+AO_INLINE void
+AO_nop_full()
+{
+  pthread_mutex_lock(&AO_lock);
+  pthread_mutex_unlock(&AO_lock);
+}
+
+#define AO_HAVE_nop_full
+
+/* Load *addr while holding the global lock.                         */
+AO_INLINE AO_T
+AO_load_full(volatile AO_T *addr)
+{
+  AO_T result;
+  pthread_mutex_lock(&AO_lock);
+  result = *addr;
+  pthread_mutex_unlock(&AO_lock);
+  return result;
+}
+
+#define AO_HAVE_load_full
+
+/* Store val to *addr while holding the global lock.                 */
+AO_INLINE void
+AO_store_full(volatile AO_T *addr, AO_T val)
+{
+  pthread_mutex_lock(&AO_lock);
+  *addr = val;
+  pthread_mutex_unlock(&AO_lock);
+}
+
+#define AO_HAVE_store_full
+
+/* Set *addr to AO_TS_SET while holding the global lock; returns     */
+/* the previous value.                                               */
+/* NOTE(review): uses assert(); confirm <assert.h> is included       */
+/* where this header is used.                                        */
+AO_INLINE AO_TS_VAL
+AO_test_and_set_full(volatile AO_TS_T *addr)
+{
+  int result;
+  pthread_mutex_lock(&AO_lock);
+  result = (int)(*addr);
+  *addr = AO_TS_SET;
+  pthread_mutex_unlock(&AO_lock);
+  assert(result == AO_TS_SET || result == AO_TS_CLEAR);
+  return result;
+}
+
+#define AO_HAVE_test_and_set_full
+
+/* Atomically add incr to *p while holding the global lock; returns  */
+/* the previous value.                                               */
+/* Fix: declared AO_INLINE for consistency with AO_load_full,        */
+/* AO_store_full, etc. in this file, instead of plain `static`.      */
+AO_INLINE AO_T
+AO_fetch_and_add_full(volatile AO_T *p, long incr)
+{
+  AO_T tmp;
+
+  pthread_mutex_lock(&AO_lock);
+  tmp = *p;
+  *p = tmp + incr;
+  pthread_mutex_unlock(&AO_lock);
+  return tmp;
+}
+
+#define AO_HAVE_fetch_and_add_full
+
+#define AO_fetch_and_add1_full(addr) AO_fetch_and_add_full(addr,1)
+#define AO_fetch_and_sub1_full(addr) AO_fetch_and_add_full(addr,-1)
+
+/* If *addr equals old, replace it with new_val; all under the       */
+/* global lock.  Returns nonzero iff the swap was performed.         */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_T *addr,
+			 AO_T old, AO_T new_val)
+{
+  int swapped;
+
+  pthread_mutex_lock(&AO_lock);
+  swapped = (*addr == old);
+  if (swapped)
+    *addr = new_val;
+  pthread_mutex_unlock(&AO_lock);
+  return swapped;
+}
+
+#define AO_HAVE_compare_and_swap_full
+
+/* We can't use hardware loads and stores, since they don't */
+/* interact correctly. */
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures that provide processor
+ * ordered memory operations.
+ */
+
+#include "ordered_except_wr.h"
+
+/* On a fully processor-ordered architecture, only the compiler can  */
+/* reorder memory operations, so a compiler barrier suffices for a   */
+/* full barrier.                                                     */
+AO_INLINE void
+AO_nop_full()
+{
+  AO_compiler_barrier();
+}
+
+#define AO_HAVE_nop_full
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are common definitions for architectures that provide processor
+ * ordered memory operations except that a later read may pass an
+ * earlier write. Real x86 implementations seem to be in this category,
+ * except apparently for some IDT WinChips, which we ignore.
+ */
+
+/* Write barrier: earlier writes may not be delayed past later ones. */
+/* On this class of architectures the hardware already guarantees */
+/* this; only the compiler must be restrained. */
+AO_INLINE void
+AO_nop_write(void)
+{
+ AO_compiler_barrier();
+ /* sfence according to Intel docs. Pentium 3 and up. */
+ /* Unnecessary for cached accesses? */
+}
+
+/* Was misspelled AO_HAVE_NOP_WRITE; the generalization code tests */
+/* the lowercase form, so the feature was never detected. */
+#define AO_HAVE_nop_write
+
+/* Read barrier: later reads may not move ahead of earlier ones. */
+/* Hardware preserves read ordering here; a compiler barrier suffices. */
+AO_INLINE void
+AO_nop_read(void)
+{
+ AO_compiler_barrier();
+}
+
+/* Was misspelled AO_HAVE_NOP_READ; the generalization code tests */
+/* the lowercase form, so the feature was never detected. */
+#define AO_HAVE_nop_read
+
+#ifdef AO_HAVE_load
+
+/* A plain load followed by a compiler barrier yields read ordering: */
+/* per the file comment above, only a later read passing an earlier */
+/* write can be reordered by this class of hardware. */
+AO_INLINE AO_T
+AO_load_read(volatile AO_T *addr)
+{
+ AO_T result = AO_load(addr);
+ AO_compiler_barrier();
+ return result;
+}
+#define AO_HAVE_load_read
+
+/* Acquire semantics coincide with read semantics here, since reads */
+/* are the only operations that may be reordered ahead of a load. */
+#define AO_load_acquire(addr) AO_load_read(addr)
+#define AO_HAVE_load_acquire
+
+#endif /* AO_HAVE_load */
+
+#if defined(AO_HAVE_store)
+
+/* A compiler barrier before a plain store yields write ordering: */
+/* the hardware never delays an earlier write past a later one here. */
+AO_INLINE void
+AO_store_write(volatile AO_T *addr, AO_T val)
+{
+ AO_compiler_barrier();
+ AO_store(addr, val);
+}
+# define AO_HAVE_store_write
+
+/* Release semantics coincide with write semantics here: the only */
+/* reordering the hardware permits is a later read passing an earlier */
+/* write, which release does not forbid. */
+# define AO_store_release(addr, val) AO_store_write(addr, val)
+# define AO_HAVE_store_release
+
+#endif /* AO_HAVE_store */
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Initialized data and out-of-line functions to support atomic_ops.h
+ * go here. Currently this is needed only for pthread-based atomics
+ * emulation, or for compare-and-swap emulation.
+ */
+
+#undef AO_FORCE_CAS
+
+#include <pthread.h>
+#include <signal.h>
+#include <sys/select.h>
+#include "atomic_ops.h" /* Without cas emulation! */
+
+/*
+ * Lock for pthreads-based implementation.
+ */
+
+static pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Out of line compare-and-swap emulation based on test and set.
+ *
+ * We use a small table of locks for different compare_and_swap locations.
+ * Before we perform a compare-and-swap, we grab the corresponding
+ * lock. Different locations may hash to the same lock, but since we
+ * never acquire more than one lock at a time, this can't deadlock.
+ * We explicitly disable signals while we perform this operation.
+ *
+ * FIXME: We should probably also support emulation based on Lamport
+ * locks, since we may not have test_and_set either.
+ */
+#define AO_HASH_SIZE 16
+
+#define AO_HASH(x) (((unsigned long)(x) >> 12) & (AO_HASH_SIZE-1))
+
+AO_TS_T AO_locks[AO_HASH_SIZE] = {
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+ AO_TS_INITIALIZER, AO_TS_INITIALIZER,
+};
+
+static AO_T dummy = 1;
+
+/* Spin for 2**n units. */
+static void spin(int n)
+{
+ AO_T val = AO_load(&dummy);
+ int remaining = 2 << n;
+
+ /* Busy-work the optimizer cannot discard: the result is written */
+ /* back to the global dummy location. */
+ while (remaining-- > 0)
+ {
+ val = val * 5 - 4;
+ }
+ AO_store(&dummy, val);
+}
+
+/* Acquire l under contention, with exponential backoff: brief */
+/* spinning for the first few attempts, then progressively longer */
+/* sleeps (2**(i-12) usecs, capped at 100 msecs) via select(). */
+static void lock_ool(volatile AO_TS_T *l)
+{
+ int i = 0;
+ struct timeval tv;
+
+ while (AO_test_and_set_acquire(l) == AO_TS_SET) {
+ if (++i < 12)
+ spin(i);
+ else
+ {
+ /* Short async-signal-safe sleep. */
+ tv.tv_sec = 0;
+ tv.tv_usec = (i > 28? 100000 : (1 << (i - 12)));
+ select(0, 0, 0, 0, &tv);
+ }
+ }
+}
+
+/* Acquire l. The common uncontended case is handled inline; */
+/* contention falls through to the out-of-line backoff path. */
+AO_INLINE void lock(volatile AO_TS_T *l)
+{
+ if (AO_test_and_set_acquire(l) == AO_TS_SET)
+ lock_ool(l);
+}
+
+/* Release l. AO_CLEAR normally expands to AO_store_release, which */
+/* provides the barrier needed at the end of a critical section. */
+AO_INLINE void unlock(volatile AO_TS_T *l)
+{
+ AO_CLEAR(l);
+}
+
+static sigset_t all_sigs;
+
+static volatile AO_T initialized = 0;
+
+static volatile AO_TS_T init_lock = AO_TS_INITIALIZER;
+
+/* Emulated compare-and-swap: returns nonzero iff *addr was equal to */
+/* old and has been replaced by new_val. Uses the hashed lock for */
+/* addr, with all signals blocked while the lock is held so a signal */
+/* handler cannot deadlock against us. */
+int AO_compare_and_swap_emulation(volatile AO_T *addr, AO_T old,
+ AO_T new_val)
+{
+ AO_TS_T *my_lock = AO_locks + AO_HASH(addr);
+ sigset_t old_sigs;
+ int result;
+
+ /* One-time initialization of the all-signals mask. A second */
+ /* thread racing here at worst repeats the (idempotent) */
+ /* sigfillset before observing initialized == 1. */
+ if (!AO_load_acquire(&initialized))
+ {
+ lock(&init_lock);
+ if (!initialized) sigfillset(&all_sigs);
+ unlock(&init_lock);
+ AO_store_release(&initialized, 1);
+ }
+ sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
+ /* Neither sigprocmask nor pthread_sigmask is 100% */
+ /* guaranteed to work here. Sigprocmask is not */
+ /* guaranteed be thread safe, and pthread_sigmask */
+ /* is not async-signal-safe. Under linuxthreads, */
+ /* sigprocmask may block some pthreads-internal */
+ /* signals. So long as we do that for short periods, */
+ /* we should be OK. */
+ lock(my_lock);
+ /* The compare-and-swap itself, made atomic by the lock. */
+ if (*addr == old)
+ {
+ *addr = new_val;
+ result = 1;
+ }
+ else
+ result = 0;
+ unlock(my_lock);
+ sigprocmask(SIG_SETMASK, &old_sigs, NULL);
+ return result;
+}
+
+/* Emulated store with full barrier semantics: perform the store */
+/* while holding the same hashed lock that guards CAS on addr, so it */
+/* cannot interleave with an emulated compare-and-swap. */
+void AO_store_full_emulation(volatile AO_T *addr, AO_T val)
+{
+ AO_TS_T *which_lock = AO_locks + AO_HASH(addr);
+
+ lock(which_lock);
+ *addr = val;
+ unlock(which_lock);
+}
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ATOMIC_OPS_H
+
+#define ATOMIC_OPS_H
+
+#include <assert.h>
+
+/* We define various atomic operations on memory in a */
+/* machine-specific way. Unfortunately, this is complicated */
+/* by the fact that these may or may not be combined with */
+/* various memory barriers. Thus the actual operations we */
+/* define have the form AO_<atomic-op>_<barrier>, for all */
+/* plausible combinations of <atomic-op> and <barrier>. */
+/* This of course results in a mild combinatorial explosion. */
+/* To deal with it, we try to generate derived */
+/* definitions for as many of the combinations as we can, as */
+/* automatically as possible. */
+/* */
+/* Our assumption throughout is that the programmer will */
+/* specify the least demanding operation and memory barrier */
+/* that will guarantee correctness for the implementation. */
+/* Our job is to find the least expensive way to implement it */
+/* on the applicable hardware. In many cases that will */
+/* involve, for example, a stronger memory barrier, or a */
+/* combination of hardware primitives. */
+/* */
+/* Conventions: */
+/* "plain" atomic operations are not guaranteed to include */
+/* a barrier. The suffix in the name specifies the barrier */
+/* type. Suffixes are: */
+/* _release: Earlier operations may not be delayed past it. */
+/* _acquire: Later operations may not move ahead of it. */
+/* _read: Subsequent reads must follow this operation and */
+/* preceding reads. */
+/* _write: Earlier writes precede both this operation and */
+/* later writes. */
+/* _full: Ordered with respect to both earlier and later memops.*/
+/* _release_write: Ordered with respect to earlier writes. */
+/* _acquire_read: Ordered with respect to later reads. */
+/* */
+/* Currently we try to define the following atomic memory */
+/* operations, in combination with the above barriers: */
+/* AO_nop */
+/* AO_load */
+/* AO_store */
+/* AO_test_and_set (binary) */
+/* AO_fetch_and_add */
+/* AO_fetch_and_add1 */
+/* AO_fetch_and_sub1 */
+/* AO_compare_and_swap */
+/* */
+/* Note that atomicity guarantees are valid only if both */
+/* readers and writers use AO_ operations to access the */
+/* shared value, while ordering constraints are intended to */
+/* apply to all memory operations. If a location can potentially */
+/* be accessed simultaneously from multiple threads, and one of */
+/* those accesses may be a write access, then all such */
+/* accesses to that location should be through AO_ primitives. */
+/* However if AO_ operations enforce sufficient ordering to */
+/* ensure that a location x cannot be accessed concurrently, */
+/* or can only be read concurrently, then x can be accessed */
+/* via ordinary references and assignments. */
+/* */
+/* Compare_and_exchange takes an address and an expected old */
+/* value and a new value, and returns an int. Nonzero */
+/* indicates that it succeeded. */
+/* Test_and_set takes an address, atomically replaces it by */
+/* AO_TS_SET, and returns the prior value. */
+/* An AO_TS_T clear location can be reset with the */
+/* AO_CLEAR macro, which normally uses AO_store_release. */
+/* AO_fetch_and_add takes an address and a long increment */
+/* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
+/* are provided, since they allow faster implementations on */
+/* some hardware. */
+/* */
+/* We expect this list to grow slowly over time. */
+/* */
+/* Note that AO_nop_full is a full memory barrier. */
+/* */
+/* Note that if some data is initialized with */
+/* data.x = ...; data.y = ...; ... */
+/* AO_store_release_write(&data_is_initialized, 1) */
+/* then data is guaranteed to be initialized after the test */
+/* if (AO_load_acquire_read(&data_is_initialized)) ... */
+/* succeeds. Furthermore, this should generate near-optimal */
+/* code on all common platforms. */
+/* */
+/* All operations operate on unsigned AO_T, which */
+/* is the natural word size, and usually unsigned long. */
+/* It is possible to check whether a particular operation op */
+/* is available on a particular platform by checking whether */
+/* AO_HAVE_op is defined. We make heavy use of these macros */
+/* internally. */
+
+/* The rest of this file basically has three sections: */
+/* */
+/* Some utility and default definitions. */
+/* */
+/* The architecture dependent section: */
+/* This defines atomic operations that have direct hardware */
+/* support on a particular platform, mostly by including the */
+/* appropriate compiler- and hardware-dependent file. */
+/* */
+/* The synthesis section: */
+/* This tries to define other atomic operations in terms of */
+/* those that are explicitly available on the platform. */
+/* This section is hardware independent. */
+/* We make no attempt to synthesize operations in ways that */
+/* effectively introduce locks, except for the debugging/demo */
+/* pthread-based implementation at the beginning. A more */
+/* realistic implementation that falls back to locks could be */
+/* added as a higher layer. But that would sacrifice */
+/* usability from signal handlers. */
+/* The synthesis section is implemented almost entirely in */
+/* atomic_ops_generalize.h. */
+
+/* Some common defaults. Overridden for some architectures. */
+#define AO_T unsigned long
+ /* Could conceivably be redefined below if/when we add */
+ /* win64 support. */
+
+/* The test_and_set primitive returns an AO_TS_VAL value: */
+typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val;
+#define AO_TS_VAL AO_TS_val
+#define AO_TS_CLEAR AO_TS_clear
+#define AO_TS_SET AO_TS_set
+
+/* AO_TS_T is the type of an in-memory test-and-set location. */
+#define AO_TS_T AO_T /* Make sure this has the right size */
+#define AO_TS_INITIALIZER (AO_T)AO_TS_CLEAR
+
+/* The most common way to clear a test-and-set location */
+/* at the end of a critical section. */
+#define AO_CLEAR(addr) AO_store_release((AO_T *)addr, AO_TS_CLEAR)
+
+/* Platform-dependent stuff: */
+#ifdef __GNUC__
+/* Currently gcc is much better supported than anything else ... */
+# define AO_INLINE static inline
+# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
+#else
+# define AO_INLINE static
+ /* We conjecture that the following usually gives us the right */
+ /* semantics or an error. */
+# define AO_compiler_barrier() asm("");
+#endif
+
+#if defined(AO_USE_PTHREAD_DEFS)
+# include "ao_sysdeps/generic_pthread.h"
+#endif /* AO_USE_PTHREAD_DEFS */
+
+#if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__i386__)
+# include "ao_sysdeps/gcc/x86.h"
+# endif /* __i386__ */
+# if defined(__ia64__)
+# include "ao_sysdeps/gcc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# endif /* __ia64__ */
+# if defined(__hppa__)
+# include "ao_sysdeps/gcc/hppa.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __hppa__ */
+# if defined(__alpha__)
+# include "ao_sysdeps/gcc/alpha.h"
+# define AO_GENERALIZE_TWICE
+# endif /* __alpha__ */
+# if defined(__s390__)
+# include "ao_sysdeps/gcc/s390.h"
+# endif /* __s390__ */
+# if defined(__sparc__)
+# include "ao_sysdeps/gcc/sparc.h"
+# endif /* __sparc__ */
+# if defined(__m68k__)
+# include "ao_sysdeps/gcc/m68k.h"
+# endif /* __m68k__ */
+# if defined(__powerpc__)
+# include "ao_sysdeps/gcc/powerpc.h"
+# endif /* __powerpc__ */
+# if defined(__arm__) && !defined(AO_USE_PTHREAD_DEFS)
+# include "ao_sysdeps/gcc/arm.h"
+# endif /* __arm__ */
+#endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
+
+#if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__ia64__)
+# include "ao_sysdeps/ecc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# endif
+#endif
+
+#if defined(AO_REQUIRE_CAS) && !defined(AO_HAVE_compare_and_swap) \
+ && !defined(AO_HAVE_compare_and_swap_full) \
+ && !defined(AO_HAVE_compare_and_swap_acquire)
+# if defined(AO_CAN_EMUL_CAS)
+# include "ao_sysdeps/emul_cas.h"
+# else
+# error Cannot implement AO_compare_and_swap_full on this architecture.
+# endif
+#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
+
+/*
+ * The generalization section.
+ * Theoretically this should repeatedly include atomic_ops_generalize.h.
+ * In fact, we observe that this converges after a small fixed number
+ * of iterations, usually one.
+ */
+#include "atomic_ops_generalize.h"
+#ifdef AO_GENERALIZE_TWICE
+# include "atomic_ops_generalize.h"
+#endif
+
+#endif /* ATOMIC_OPS_H */
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Generalize atomic operations for atomic_ops.h.
+ * Should not be included directly.
+ *
+ * We make no attempt to define useless operations, such as
+ * AO_nop_acquire
+ * AO_nop_release
+ *
+ * We have also so far neglected to define some others, which
+ * do not appear likely to be useful, e.g. stores with acquire
+ * or read barriers.
+ *
+ * This file is sometimes included twice by atomic_ops.h.
+ * All definitions include explicit checks that we are not replacing
+ * an earlier definition. In general, more desirable expansions
+ * appear earlier so that we are more likely to use them.
+ */
+
+#ifndef ATOMIC_OPS_H
+# error Atomic_ops_generalize.h should not be included directly.
+#endif
+
+/* Generate test_and_set_full, if necessary and possible. */
+/* Each variant is synthesized from the compare-and-swap primitive */
+/* with the matching barrier, only when no test_and_set variant at */
+/* all was provided by the platform-specific header. */
+#if !defined(AO_HAVE_test_and_set) && \
+ !defined(AO_HAVE_test_and_set_release) && \
+ !defined(AO_HAVE_test_and_set_acquire) && \
+ !defined(AO_HAVE_test_and_set_read) && \
+ !defined(AO_HAVE_test_and_set_full)
+# if defined(AO_HAVE_compare_and_swap_full)
+ /* CAS succeeds only if the location was clear, in which case the */
+ /* prior value was AO_TS_CLEAR; otherwise it was already set. */
+ AO_INLINE AO_TS_VAL
+ AO_test_and_set_full(volatile AO_TS_T *addr)
+ {
+ if (AO_compare_and_swap_full(addr, AO_TS_CLEAR,
+ AO_TS_SET))
+ return AO_TS_CLEAR;
+ else
+ return AO_TS_SET;
+ }
+# define AO_HAVE_test_and_set_full
+# endif /* AO_HAVE_compare_and_swap_full */
+
+# if defined(AO_HAVE_compare_and_swap_acquire)
+ AO_INLINE AO_TS_VAL
+ AO_test_and_set_acquire(volatile AO_TS_T *addr)
+ {
+ if (AO_compare_and_swap_acquire(addr, AO_TS_CLEAR,
+ AO_TS_SET))
+ return AO_TS_CLEAR;
+ else
+ return AO_TS_SET;
+ }
+# define AO_HAVE_test_and_set_acquire
+# endif /* AO_HAVE_compare_and_swap_acquire */
+
+# if defined(AO_HAVE_compare_and_swap_release)
+ AO_INLINE AO_TS_VAL
+ AO_test_and_set_release(volatile AO_TS_T *addr)
+ {
+ if (AO_compare_and_swap_release(addr, AO_TS_CLEAR,
+ AO_TS_SET))
+ return AO_TS_CLEAR;
+ else
+ return AO_TS_SET;
+ }
+# define AO_HAVE_test_and_set_release
+# endif /* AO_HAVE_compare_and_swap_release */
+
+# if defined(AO_HAVE_compare_and_swap)
+ AO_INLINE AO_TS_VAL
+ AO_test_and_set(volatile AO_TS_T *addr)
+ {
+ if (AO_compare_and_swap(addr, AO_TS_CLEAR, AO_TS_SET))
+ return AO_TS_CLEAR;
+ else
+ return AO_TS_SET;
+ }
+# define AO_HAVE_test_and_set
+# endif /* AO_HAVE_compare_and_swap */
+
+ /* Strengthen an unordered test_and_set to acquire by following it */
+ /* with a full barrier, when one is available. */
+# if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
+ && !defined(AO_HAVE_test_and_set_acquire)
+ AO_INLINE AO_TS_VAL
+ AO_test_and_set_acquire(volatile AO_TS_T *addr)
+ {
+ AO_TS_VAL result = AO_test_and_set(addr);
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_test_and_set_acquire
+# endif
+
+#endif /* No prior test and set */
+
+/* Nop */
+/* Default AO_nop: does nothing, provided only so that code can */
+/* uniformly refer to AO_nop on every platform. */
+#if !defined(AO_HAVE_nop)
+ AO_INLINE void AO_nop(void) {}
+# define AO_HAVE_nop
+#endif
+
+#if defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_nop_full)
+ /* Synthesize a full barrier from a fully ordered test_and_set on */
+ /* a dummy stack location. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ AO_TS_T dummy = AO_TS_INITIALIZER;
+ AO_test_and_set_full(&dummy);
+ }
+# define AO_HAVE_nop_full
+#endif
+
+#if defined(AO_HAVE_nop_acquire)
+# error AO_nop_acquire is useless: dont define.
+#endif
+#if defined(AO_HAVE_nop_release)
+# error AO_nop_release is useless: dont define.
+#endif
+
+#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_read)
+# define AO_nop_read() AO_nop_full()
+# define AO_HAVE_nop_read
+#endif
+
+#if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_write)
+# define AO_nop_write() AO_nop_full()
+# define AO_HAVE_nop_write
+#endif
+
+/* Load */
+#if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_load)
+# define AO_load(addr) AO_load_acquire(addr)
+# define AO_HAVE_load
+#endif
+
+#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_acquire)
+# define AO_load_acquire(addr) AO_load_full(addr)
+# define AO_HAVE_load_acquire
+#endif
+
+#if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_read)
+# define AO_load_read(addr) AO_load_full(addr)
+# define AO_HAVE_load_read
+#endif
+
+#if !defined(AO_HAVE_load_acquire_read) && defined(AO_HAVE_load_acquire)
+# define AO_load_acquire_read(addr) AO_load_acquire(addr)
+# define AO_HAVE_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_load_acquire)
+ AO_INLINE AO_T
+ AO_load_acquire(volatile AO_T *addr)
+ {
+ AO_T result = AO_load(addr);
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_full();
+ return result;
+ }
+# define AO_HAVE_load_acquire
+#endif
+
+#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_read) && \
+ !defined(AO_HAVE_load_read)
+ AO_INLINE AO_T
+ AO_load_read(volatile AO_T *addr)
+ {
+ AO_T result = AO_load(addr);
+ /* Acquire barrier would be useless, since the load could be delayed */
+ /* beyond it. */
+ AO_nop_read();
+ return result;
+ }
+# define AO_HAVE_load_read
+#endif
+
+#if defined(AO_HAVE_load_acquire) && defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_load_full)
+# define AO_load_full(addr) (AO_nop_full(), AO_load_acquire(addr))
+# define AO_HAVE_load_full
+#endif
+
+#if !defined(AO_HAVE_load_acquire_read) && defined(AO_HAVE_load_read)
+# define AO_load_acquire_read(addr) AO_load_read(addr)
+# define AO_HAVE_load_acquire_read
+#endif
+
+#if defined(AO_HAVE_load_acquire_read) && !defined(AO_HAVE_load)
+# define AO_load(addr) AO_load_acquire_read(addr)
+# define AO_HAVE_load
+#endif
+
+
+/* Store */
+
+#if defined(AO_HAVE_store_release) && !defined(AO_HAVE_store)
+# define AO_store(addr, val) AO_store_release(addr,val)
+# define AO_HAVE_store
+#endif
+
+#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_release)
+# define AO_store_release(addr,val) AO_store_full(addr,val)
+# define AO_HAVE_store_release
+#endif
+
+#if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_write)
+# define AO_store_write(addr,val) AO_store_full(addr,val)
+# define AO_HAVE_store_write
+#endif
+
+#if defined(AO_HAVE_store_release) && !defined(AO_HAVE_store_release_write)
+# define AO_store_release_write(addr, val) AO_store_release(addr,val)
+# define AO_HAVE_store_release_write
+#endif
+
+#if defined(AO_HAVE_store_write) && !defined(AO_HAVE_store)
+# define AO_store(addr, val) AO_store_write(addr,val)
+# define AO_HAVE_store
+#endif
+
+#if defined(AO_HAVE_store) && defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_store_release)
+# define AO_store_release(addr,val) (AO_nop_full(), AO_store(addr,val))
+# define AO_HAVE_store_release
+#endif
+
+#if defined(AO_HAVE_nop_write) && defined(AO_HAVE_store) && \
+ !defined(AO_HAVE_store_write)
+# define AO_store_write(addr, val) (AO_nop_write(), AO_store(addr,val))
+# define AO_HAVE_store_write
+#endif
+
+#if defined(AO_HAVE_store_write) && !defined(AO_HAVE_store_release_write)
+# define AO_store_release_write(addr, val) AO_store_write(addr,val)
+# define AO_HAVE_store_release_write
+#endif
+
+#if defined(AO_HAVE_store_release) && defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_store_full)
+# define AO_store_full(addr, val) (AO_store_release(addr, val), AO_nop_full())
+# define AO_HAVE_store_full
+#endif
+
+
+/* Fetch_and_add */
+#if defined(AO_HAVE_compare_and_swap_full) && \
+ !defined(AO_HAVE_fetch_and_add_full)
+ AO_INLINE AO_T
+ AO_fetch_and_add_full(volatile AO_T *addr, long incr)
+ {
+ AO_T old;
+ do
+ {
+ old = *addr;
+ }
+ while (!AO_compare_and_swap_full(addr, old, old+incr));
+ return old;
+ }
+# define AO_HAVE_fetch_and_add_full
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_full)
+# if !defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add_release(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_release
+# endif
+# if !defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add_acquire(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add_write(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_write
+# endif
+# if !defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add_read(addr, val) \
+ AO_fetch_and_add_full(addr, val)
+# define AO_HAVE_fetch_and_add_read
+# endif
+#endif /* AO_HAVE_fetch_and_add_full */
+
+#if !defined(AO_HAVE_fetch_and_add) && \
+ defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_release(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) && \
+ defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) && \
+ defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_write(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+#if !defined(AO_HAVE_fetch_and_add) && \
+ defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add(addr, val) \
+ AO_fetch_and_add_read(addr, val)
+# define AO_HAVE_fetch_and_add
+#endif
+
+#if defined(AO_HAVE_fetch_and_add_acquire) &&\
+ defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_fetch_and_add_full)
+# define AO_fetch_and_add_full(addr, val) \
+ (AO_nop_full(), AO_fetch_and_add_acquire(addr, val))
+ /* Was missing: without this, later code testing */
+ /* AO_HAVE_fetch_and_add_full never sees the definition above */
+ /* (cf. the parallel fetch_and_add1 construct below). */
+# define AO_HAVE_fetch_and_add_full
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add_release_write) && \
+ defined(AO_HAVE_fetch_and_add_write)
+# define AO_fetch_and_add_release_write(addr, val) \
+ AO_fetch_and_add_write(addr, val)
+# define AO_HAVE_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add_release_write) && \
+ defined(AO_HAVE_fetch_and_add_release)
+# define AO_fetch_and_add_release_write(addr, val) \
+ AO_fetch_and_add_release(addr, val)
+# define AO_HAVE_fetch_and_add_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add_acquire_read) && \
+ defined(AO_HAVE_fetch_and_add_read)
+# define AO_fetch_and_add_acquire_read(addr, val) \
+ AO_fetch_and_add_read(addr, val)
+# define AO_HAVE_fetch_and_add_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_add_acquire_read) && \
+ defined(AO_HAVE_fetch_and_add_acquire)
+# define AO_fetch_and_add_acquire_read(addr, val) \
+ AO_fetch_and_add_acquire(addr, val)
+# define AO_HAVE_fetch_and_add_acquire_read
+#endif
+
+
+/* Fetch_and_add1 */
+
+#if defined(AO_HAVE_fetch_and_add_full) &&\
+ !defined(AO_HAVE_fetch_and_add1_full)
+# define AO_fetch_and_add1_full(addr) AO_fetch_and_add_full(addr,1)
+# define AO_HAVE_fetch_and_add1_full
+#endif
+#if defined(AO_HAVE_fetch_and_add_release) &&\
+ !defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release(addr) AO_fetch_and_add_release(addr,1)
+# define AO_HAVE_fetch_and_add1_release
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire) &&\
+ !defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire(addr) AO_fetch_and_add_acquire(addr,1)
+# define AO_HAVE_fetch_and_add1_acquire
+#endif
+#if defined(AO_HAVE_fetch_and_add_write) &&\
+ !defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_write(addr) AO_fetch_and_add_write(addr,1)
+# define AO_HAVE_fetch_and_add1_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_read) &&\
+ !defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_read(addr) AO_fetch_and_add_read(addr,1)
+# define AO_HAVE_fetch_and_add1_read
+#endif
+#if defined(AO_HAVE_fetch_and_add_release_write) &&\
+ !defined(AO_HAVE_fetch_and_add1_release_write)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add_release_write(addr,1)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire_read) &&\
+ !defined(AO_HAVE_fetch_and_add1_acquire_read)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add_acquire_read(addr,1)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_full)
+# if !defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_release
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_write(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_write
+# endif
+# if !defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_read(addr) \
+ AO_fetch_and_add1_full(addr)
+# define AO_HAVE_fetch_and_add1_read
+# endif
+#endif /* AO_HAVE_fetch_and_add1_full */
+
+#if !defined(AO_HAVE_fetch_and_add1) && \
+ defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1(addr) \
+ AO_fetch_and_add1_release(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) && \
+ defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1(addr) \
+ AO_fetch_and_add1_acquire(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) && \
+ defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1(addr) \
+ AO_fetch_and_add1_write(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+#if !defined(AO_HAVE_fetch_and_add1) && \
+ defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1(addr) \
+ AO_fetch_and_add1_read(addr)
+# define AO_HAVE_fetch_and_add1
+#endif
+
+#if defined(AO_HAVE_fetch_and_add1_acquire) &&\
+ defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_fetch_and_add1_full)
+# define AO_fetch_and_add1_full(addr) \
+ (AO_nop_full(), AO_fetch_and_add1_acquire(addr))
+# define AO_HAVE_fetch_and_add1_full
+#endif
+
+#if !defined(AO_HAVE_fetch_and_add1_release_write) && \
+ defined(AO_HAVE_fetch_and_add1_write)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add1_write(addr)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_release_write) && \
+ defined(AO_HAVE_fetch_and_add1_release)
+# define AO_fetch_and_add1_release_write(addr) \
+ AO_fetch_and_add1_release(addr)
+# define AO_HAVE_fetch_and_add1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_acquire_read) && \
+ defined(AO_HAVE_fetch_and_add1_read)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add1_read(addr)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_add1_acquire_read) && \
+ defined(AO_HAVE_fetch_and_add1_acquire)
+# define AO_fetch_and_add1_acquire_read(addr) \
+ AO_fetch_and_add1_acquire(addr)
+# define AO_HAVE_fetch_and_add1_acquire_read
+#endif
+
+
+/* Fetch_and_sub1 */
+
+/* Each fetch_and_sub1 variant is expressed as the corresponding        */
+/* fetch_and_add variant applied with a -1 increment, when that         */
+/* fetch_and_add variant is available.                                  */
+#if defined(AO_HAVE_fetch_and_add_full) &&\
+ !defined(AO_HAVE_fetch_and_sub1_full)
+# define AO_fetch_and_sub1_full(addr) AO_fetch_and_add_full(addr,-1)
+# define AO_HAVE_fetch_and_sub1_full
+#endif
+#if defined(AO_HAVE_fetch_and_add_release) &&\
+ !defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release(addr) AO_fetch_and_add_release(addr,-1)
+# define AO_HAVE_fetch_and_sub1_release
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire) &&\
+ !defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire(addr) AO_fetch_and_add_acquire(addr,-1)
+# define AO_HAVE_fetch_and_sub1_acquire
+#endif
+#if defined(AO_HAVE_fetch_and_add_write) &&\
+ !defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_write(addr) AO_fetch_and_add_write(addr,-1)
+# define AO_HAVE_fetch_and_sub1_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_read) &&\
+ !defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_read(addr) AO_fetch_and_add_read(addr,-1)
+# define AO_HAVE_fetch_and_sub1_read
+#endif
+#if defined(AO_HAVE_fetch_and_add_release_write) &&\
+ !defined(AO_HAVE_fetch_and_sub1_release_write)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_add_release_write(addr,-1)
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if defined(AO_HAVE_fetch_and_add_acquire_read) &&\
+ !defined(AO_HAVE_fetch_and_sub1_acquire_read)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_add_acquire_read(addr,-1)
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+
+/* A full-barrier version can stand in for any weaker ordered version   */
+/* that the platform did not define directly.                           */
+#if defined(AO_HAVE_fetch_and_sub1_full)
+# if !defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_release
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_acquire
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_write(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_write
+# endif
+# if !defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_read(addr) \
+ AO_fetch_and_sub1_full(addr)
+# define AO_HAVE_fetch_and_sub1_read
+# endif
+#endif /* AO_HAVE_fetch_and_sub1_full */
+
+/* Pick the plain (unordered) operation from the first ordered variant  */
+/* available, tried in the order: release, acquire, write, read.        */
+#if !defined(AO_HAVE_fetch_and_sub1) && \
+ defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1(addr) \
+ AO_fetch_and_sub1_release(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) && \
+ defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1(addr) \
+ AO_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) && \
+ defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1(addr) \
+ AO_fetch_and_sub1_write(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1) && \
+ defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1(addr) \
+ AO_fetch_and_sub1_read(addr)
+# define AO_HAVE_fetch_and_sub1
+#endif
+
+/* A full memory barrier before an acquire operation upgrades it to a   */
+/* full-barrier operation.                                              */
+#if defined(AO_HAVE_fetch_and_sub1_acquire) &&\
+ defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_fetch_and_sub1_full)
+# define AO_fetch_and_sub1_full(addr) \
+ (AO_nop_full(), AO_fetch_and_sub1_acquire(addr))
+# define AO_HAVE_fetch_and_sub1_full
+#endif
+
+/* release_write prefers the write variant, then falls back to release; */
+/* acquire_read prefers the read variant, then falls back to acquire.   */
+#if !defined(AO_HAVE_fetch_and_sub1_release_write) && \
+ defined(AO_HAVE_fetch_and_sub1_write)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_sub1_write(addr)
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_release_write) && \
+ defined(AO_HAVE_fetch_and_sub1_release)
+# define AO_fetch_and_sub1_release_write(addr) \
+ AO_fetch_and_sub1_release(addr)
+# define AO_HAVE_fetch_and_sub1_release_write
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_acquire_read) && \
+ defined(AO_HAVE_fetch_and_sub1_read)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_sub1_read(addr)
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+#if !defined(AO_HAVE_fetch_and_sub1_acquire_read) && \
+ defined(AO_HAVE_fetch_and_sub1_acquire)
+# define AO_fetch_and_sub1_acquire_read(addr) \
+ AO_fetch_and_sub1_acquire(addr)
+# define AO_HAVE_fetch_and_sub1_acquire_read
+#endif
+
+
+/* Test_and_set */
+
+/* A full-barrier version can stand in for any weaker ordered version   */
+/* that the platform did not define directly.                           */
+#if defined(AO_HAVE_test_and_set_full)
+# if !defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set_release(addr) \
+ AO_test_and_set_full(addr)
+# define AO_HAVE_test_and_set_release
+# endif
+# if !defined(AO_HAVE_test_and_set_acquire)
+# define AO_test_and_set_acquire(addr) \
+ AO_test_and_set_full(addr)
+# define AO_HAVE_test_and_set_acquire
+# endif
+# if !defined(AO_HAVE_test_and_set_write)
+# define AO_test_and_set_write(addr) \
+ AO_test_and_set_full(addr)
+# define AO_HAVE_test_and_set_write
+# endif
+# if !defined(AO_HAVE_test_and_set_read)
+# define AO_test_and_set_read(addr) \
+ AO_test_and_set_full(addr)
+# define AO_HAVE_test_and_set_read
+# endif
+#endif /* AO_HAVE_test_and_set_full */
+
+/* Pick the plain (unordered) operation from the first ordered variant  */
+/* available, tried in the order: release, acquire, write, read.        */
+#if !defined(AO_HAVE_test_and_set) && \
+ defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set(addr) \
+ AO_test_and_set_release(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && \
+ defined(AO_HAVE_test_and_set_acquire)
+# define AO_test_and_set(addr) \
+ AO_test_and_set_acquire(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && \
+ defined(AO_HAVE_test_and_set_write)
+# define AO_test_and_set(addr) \
+ AO_test_and_set_write(addr)
+# define AO_HAVE_test_and_set
+#endif
+#if !defined(AO_HAVE_test_and_set) && \
+ defined(AO_HAVE_test_and_set_read)
+# define AO_test_and_set(addr) \
+ AO_test_and_set_read(addr)
+# define AO_HAVE_test_and_set
+#endif
+
+/* A full memory barrier before an acquire operation upgrades it to a   */
+/* full-barrier operation.                                              */
+#if defined(AO_HAVE_test_and_set_acquire) &&\
+ defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_test_and_set_full)
+# define AO_test_and_set_full(addr) \
+ (AO_nop_full(), AO_test_and_set_acquire(addr))
+# define AO_HAVE_test_and_set_full
+#endif
+
+/* release_write prefers the write variant, then falls back to release; */
+/* acquire_read prefers the read variant, then falls back to acquire.   */
+#if !defined(AO_HAVE_test_and_set_release_write) && \
+ defined(AO_HAVE_test_and_set_write)
+# define AO_test_and_set_release_write(addr) \
+ AO_test_and_set_write(addr)
+# define AO_HAVE_test_and_set_release_write
+#endif
+#if !defined(AO_HAVE_test_and_set_release_write) && \
+ defined(AO_HAVE_test_and_set_release)
+# define AO_test_and_set_release_write(addr) \
+ AO_test_and_set_release(addr)
+# define AO_HAVE_test_and_set_release_write
+#endif
+#if !defined(AO_HAVE_test_and_set_acquire_read) && \
+ defined(AO_HAVE_test_and_set_read)
+# define AO_test_and_set_acquire_read(addr) \
+ AO_test_and_set_read(addr)
+# define AO_HAVE_test_and_set_acquire_read
+#endif
+#if !defined(AO_HAVE_test_and_set_acquire_read) && \
+ defined(AO_HAVE_test_and_set_acquire)
+# define AO_test_and_set_acquire_read(addr) \
+ AO_test_and_set_acquire(addr)
+# define AO_HAVE_test_and_set_acquire_read
+#endif
+
+/* Compare_and_swap */
+/* Build an acquire CAS from the plain one: do the swap first, then     */
+/* issue a full barrier so that later memory accesses cannot be         */
+/* reordered ahead of the swap.  This needs a real function (not a      */
+/* comma-expression macro) because the barrier must follow the CAS      */
+/* while its result is still returned to the caller.                    */
+#if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full)\
+ && !defined(AO_HAVE_compare_and_swap_acquire)
+ AO_INLINE int
+ AO_compare_and_swap_acquire(volatile AO_T *addr, AO_T old, AO_T new_val)
+ {
+ int swapped;
+
+ swapped = AO_compare_and_swap(addr, old, new_val);
+ AO_nop_full(); /* barrier after the swap -> acquire semantics */
+ return swapped;
+ }
+# define AO_HAVE_compare_and_swap_acquire
+#endif
+/* A release CAS is the plain CAS preceded by a full barrier; a simple  */
+/* comma-expression macro suffices here since the barrier comes first.  */
+#if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full)\
+ && !defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release(addr, old, new_val) \
+ (AO_nop_full(), AO_compare_and_swap(addr, old, new_val))
+# define AO_HAVE_compare_and_swap_release
+#endif
+/* A full-barrier version can stand in for any weaker ordered version   */
+/* that the platform did not define directly.                           */
+#if defined(AO_HAVE_compare_and_swap_full)
+# if !defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release
+# endif
+# if !defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap_acquire(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire
+# endif
+# if !defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap_write(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_write
+# endif
+# if !defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap_read(addr, old, new_val) \
+ AO_compare_and_swap_full(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_read
+# endif
+#endif /* AO_HAVE_compare_and_swap_full */
+
+/* Pick the plain (unordered) operation from the first ordered variant  */
+/* available, tried in the order: release, acquire, write, read.        */
+#if !defined(AO_HAVE_compare_and_swap) && \
+ defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) && \
+ defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) && \
+ defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+#if !defined(AO_HAVE_compare_and_swap) && \
+ defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap(addr, old, new_val) \
+ AO_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_compare_and_swap
+#endif
+
+/* A full memory barrier before an acquire operation upgrades it to a   */
+/* full-barrier operation.                                              */
+#if defined(AO_HAVE_compare_and_swap_acquire) &&\
+ defined(AO_HAVE_nop_full) && \
+ !defined(AO_HAVE_compare_and_swap_full)
+# define AO_compare_and_swap_full(addr, old, new_val) \
+ (AO_nop_full(), AO_compare_and_swap_acquire(addr, old, new_val))
+# define AO_HAVE_compare_and_swap_full
+#endif
+
+/* release_write prefers the write variant, then falls back to release; */
+/* acquire_read prefers the read variant, then falls back to acquire.   */
+#if !defined(AO_HAVE_compare_and_swap_release_write) && \
+ defined(AO_HAVE_compare_and_swap_write)
+# define AO_compare_and_swap_release_write(addr, old, new_val) \
+ AO_compare_and_swap_write(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_release_write) && \
+ defined(AO_HAVE_compare_and_swap_release)
+# define AO_compare_and_swap_release_write(addr, old, new_val) \
+ AO_compare_and_swap_release(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_release_write
+#endif
+#if !defined(AO_HAVE_compare_and_swap_acquire_read) && \
+ defined(AO_HAVE_compare_and_swap_read)
+# define AO_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap_read(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire_read
+#endif
+#if !defined(AO_HAVE_compare_and_swap_acquire_read) && \
+ defined(AO_HAVE_compare_and_swap_acquire)
+# define AO_compare_and_swap_acquire_read(addr, old, new_val) \
+ AO_compare_and_swap_acquire(addr, old, new_val)
+# define AO_HAVE_compare_and_swap_acquire_read
+#endif
+