# (somewhat inconvenient) use of the profiler without setting up or accessing
# a separate installation directory.
PREFIX=XXPWDXX/installed
-CC= gcc
+CC= cc
#Add -DHW_EVENT_SUPPORT if you have Itanium perfmon installed
CFLAGS= -g -O2
RANLIB=ranlib
-VERSION=0.3
-LIBDIR=$(PREFIX)/lib/ao-$(VERSION)
-DOCDIR=$(PREFIX)/doc/ao-$(VERSION)
-INCDIR=$(PREFIX)/include/ao-$(VERSION)
+VERSION=0.5
+DOCDIR=$(PREFIX)/doc/atomic_ops-$(VERSION)
+INCDIR=$(PREFIX)/include/atomic_ops-$(VERSION)
C_SOURCES= test_atomic.c atomic_ops.c
-TOP_LEVEL_HEADERS= atomic_ops.h atomic_ops_generalize.h
+TOP_LEVEL_HEADERS= atomic_ops.h
DERIVED_HEADERS= test_atomic_include.h
-DOC= doc/COPYING doc/LICENSING.txt doc/README_atomic_ops.txt
-SYSDEP_GCC_HEADERS= ao_sysdeps/gcc/x86.h ao_sysdeps/gcc/ia64.h \
- ao_sysdeps/gcc/alpha.h ao_sysdeps/gcc/arm.h \
- ao_sysdeps/gcc/powerpc.h ao_sysdeps/gcc/sparc.h \
- ao_sysdeps/gcc/hppa.h ao_sysdeps/gcc/m68k.h ao_sysdeps/gcc/s390.h
-SYSDEP_ECC_HEADERS= ao_sysdeps/ecc/ia64.h
-SYSDEP_VENDORC_HEADERS= ao_sysdeps/vendorc/none_yet
-SYSDEP_HEADERS= ao_sysdeps/generic_pthread.h ao_sysdeps/atomic_load_store.h \
- ao_sysdeps/aligned_atomic_load_store.h ao_sysdeps/ordered_except_wr.h \
- ao_sysdeps/acquire_release_volatile.h ao_sysdeps/ordered.h \
- ao_sysdeps/emul_cas.h
+AO_SD_DIR=atomic_ops/sysdeps
+DOC= doc/COPYING doc/LICENSING.txt doc/README_atomic_ops.txt $(AO_SD_DIR)/README
+SYSDEP_GCC_HEADERS= $(AO_SD_DIR)/gcc/x86.h $(AO_SD_DIR)/gcc/ia64.h \
+ $(AO_SD_DIR)/gcc/alpha.h $(AO_SD_DIR)/gcc/arm.h \
+ $(AO_SD_DIR)/gcc/powerpc.h $(AO_SD_DIR)/gcc/sparc.h \
+ $(AO_SD_DIR)/gcc/hppa.h $(AO_SD_DIR)/gcc/m68k.h $(AO_SD_DIR)/gcc/s390.h
+SYSDEP_ECC_HEADERS= $(AO_SD_DIR)/ecc/ia64.h
+SYSDEP_MSFTC_HEADERS= $(AO_SD_DIR)/msftc/x86.h
+SYSDEP_HPC_HEADERS= $(AO_SD_DIR)/hpc/ia64.h $(AO_SD_DIR)/hpc/hppa.h
+SYSDEP_HEADERS= $(AO_SD_DIR)/generic_pthread.h \
+ $(AO_SD_DIR)/atomic_load_store.h \
+ $(AO_SD_DIR)/aligned_atomic_load_store.h \
+ $(AO_SD_DIR)/ordered_except_wr.h \
+ $(AO_SD_DIR)/acquire_release_volatile.h \
+ $(AO_SD_DIR)/ordered.h \
+ $(AO_SD_DIR)/emul_cas.h
ALL_SYSDEP_HEADERS= $(SYSDEP_GCC_HEADERS) $(SYSDEP_ECC_HEADERS) \
-$(SYSDEP_VENDORC_HEADERS) $(SYSDEP_HEADERS)
-ATOMIC_OPS_HEADERS= atomic_ops.h atomic_ops_generalize.h $(ALL_SYSDEP_HEADERS)
-HEADERS= $(TOP_LEVEL_HEADERS) $(SYSDEP_GCC_HEADERS) $(SYSDEP_HEADERS)
-OTHER_FILES=Makefile README test_atomic.template list_atomic.template
+$(SYSDEP_MSFTC_HEADERS) $(SYSDEP_HEADERS)
+ATOMIC_OPS_PRIV_HEADERS=atomic_ops/generalize.h
+ATOMIC_OPS_HEADERS= atomic_ops.h $(ATOMIC_OPS_PRIV_HEADERS) \
+ $(ALL_SYSDEP_HEADERS)
+HEADERS= $(TOP_LEVEL_HEADERS) $(SYSDEP_GCC_HEADERS) $(SYSDEP_HEADERS) \
+ $(ATOMIC_OPS_PRIV_HEADERS) $(SYSDEP_ECC_HEADERS)
+OTHER_FILES=Makefile README test_atomic.template list_atomic.template \
+ Makefile.msft
ALL_DIST_FILES= $(DOC) $(C_SOURCES) $(HEADERS) $(OTHER_FILES)
-all: atomic_ops.a Makefile.expanded
+all: libatomic_ops.a Makefile.expanded
Makefile.expanded: Makefile
sed -e s:XXPWDXX:`pwd`: Makefile > Makefile.expanded
atomic_ops.o: atomic_ops.c $(ATOMIC_OPS_HEADERS)
$(CC) $(CFLAGS) -c -fPIC atomic_ops.c
-atomic_ops.a: atomic_ops.o
- $(AR) ruc atomic_ops.a atomic_ops.o
- $(RANLIB) atomic_ops.a
+libatomic_ops.a: atomic_ops.o
+ $(AR) ruc libatomic_ops.a atomic_ops.o
+ $(RANLIB) libatomic_ops.a
test_atomic: test_atomic.c atomic_ops.c test_atomic_include.h $(ATOMIC_OPS_HEADERS)
$(CC) $(CFLAGS) test_atomic.c atomic_ops.c -o test_atomic -lpthread
sed -e s/XX/_acquire_read/ list_atomic.template \
>> list_atomic.c
-list_atomic.i: list_atomic.c atomic_ops.h atomic_ops_generalize.h \
+list_atomic.i: list_atomic.c atomic_ops.h $(ATOMIC_OPS_PRIV_HEADERS) \
$(SYSDEP_HEADERS) $(SYSDEP_GCC_HEADERS)
cc -E list_atomic.c > list_atomic.i
dist: $(ALL_DIST_FILES)
- # The same thing again for the ao distribution.
- mkdir ao-$(VERSION)
- ln atomic_ops.h atomic_ops_generalize.h atomic_ops.c ao-$(VERSION)
- mkdir ao-$(VERSION)/ao_sysdeps
- ln $(SYSDEP_HEADERS) ao-$(VERSION)/ao_sysdeps
- mkdir ao-$(VERSION)/ao_sysdeps/gcc
- ln $(SYSDEP_GCC_HEADERS) ao-$(VERSION)/ao_sysdeps/gcc
- mkdir ao-$(VERSION)/ao_sysdeps/ecc
- ln $(SYSDEP_ECC_HEADERS) ao-$(VERSION)/ao_sysdeps/ecc
- mkdir ao-$(VERSION)/ao_sysdeps/vendorc
- ln $(SYSDEP_VENDORC_HEADERS) ao-$(VERSION)/ao_sysdeps/vendorc
- mkdir ao-$(VERSION)/doc
- ln doc/README_atomic_ops.txt doc/LICENSING.txt ao-$(VERSION)/doc
- tar cvfzh ao-$(VERSION).tar.gz ao-$(VERSION)
- rm -rf ao-$(VERSION)
+ # The same thing again for the atomic_ops distribution.
+ mkdir atomic_ops-$(VERSION)
+ ln Makefile atomic_ops.h $(ATOMIC_OPS_PRIV_HEADERS) atomic_ops.c \
+ Makefile.msft test_atomic_include.h atomic_ops-$(VERSION)
+ ln test_atomic.c test_atomic.template list_atomic.template \
+ atomic_ops-$(VERSION)
+ mkdir atomic_ops-$(VERSION)/atomic_ops
+ ln $(ATOMIC_OPS_PRIV_HEADERS) atomic_ops-$(VERSION)/atomic_ops
+ mkdir atomic_ops-$(VERSION)/$(AO_SD_DIR)
+ ln $(SYSDEP_HEADERS) README atomic_ops-$(VERSION)/$(AO_SD_DIR)
+ mkdir atomic_ops-$(VERSION)/$(AO_SD_DIR)/gcc
+ ln $(SYSDEP_GCC_HEADERS) atomic_ops-$(VERSION)/$(AO_SD_DIR)/gcc
+ mkdir atomic_ops-$(VERSION)/$(AO_SD_DIR)/ecc
+ ln $(SYSDEP_ECC_HEADERS) atomic_ops-$(VERSION)/$(AO_SD_DIR)/ecc
+ mkdir atomic_ops-$(VERSION)/$(AO_SD_DIR)/msftc
+ ln $(SYSDEP_MSFTC_HEADERS) atomic_ops-$(VERSION)/$(AO_SD_DIR)/msftc
+ mkdir atomic_ops-$(VERSION)/$(AO_SD_DIR)/hpc
+ ln $(SYSDEP_HPC_HEADERS) atomic_ops-$(VERSION)/$(AO_SD_DIR)/hpc
+ mkdir atomic_ops-$(VERSION)/doc
+ ln doc/COPYING doc/README_atomic_ops.txt doc/LICENSING.txt atomic_ops-$(VERSION)/doc
+ tar cvfzh atomic_ops-$(VERSION).tar.gz atomic_ops-$(VERSION)
+ rm -rf atomic_ops-$(VERSION)
install: all
make -f Makefile.expanded real_install
# of PREFIX.
real_install:
- install -d $(LIBDIR)
install -d $(INCDIR)
install -d $(DOCDIR)
- ln -s -f $(LIBDIR) $(PREFIX)/lib/ao
- ln -s -f $(DOCDIR) $(PREFIX)/doc/ao
- ln -s -f $(INCDIR) $(PREFIX)/include/ao
- /usr/bin/install -c -m 644 $(DOC) $(DOCDIR)
- /usr/bin/install -c -m 644 atomic_ops.a $(LIBDIR)
- /usr/bin/install -c -m 644 $(TOP_LEVEL_HEADERS) $(INCDIR)
- cp -r ao_sysdeps $(INCDIR)
+ install -d $(PREFIX)/include
+ install -d $(PREFIX)/lib
+ /usr/bin/install -m 644 $(DOC) $(DOCDIR)
+ /usr/bin/install -m 644 libatomic_ops.a $(PREFIX)/lib/libatomic_ops-$(VERSION).a
+ /usr/bin/install -m 644 $(TOP_LEVEL_HEADERS) $(PREFIX)/include
+ cp -r atomic_ops atomic_ops.h $(INCDIR)
+ ln -s -f $(PREFIX)/lib/libatomic_ops-$(VERSION).a $(PREFIX)/lib/libatomic_ops.a
+ ln -s -f $(INCDIR)/atomic_ops $(PREFIX)/include/atomic_ops
+ ln -s -f $(INCDIR)/atomic_ops.h $(PREFIX)/include/atomic_ops.h
--- /dev/null
+#
+# Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+
+# The really trivial win32/VC++ Makefile. Note that atomic_ops.c isn't useful.
+# And we rely on a pre-built test_atomic_include.h, since we can't rely on sed.
+# Win32 clients only need to include the header files.
+# To install, copy atomic_ops.h and the atomic_ops/... tree to your favorite
+# include directory.
+
+all: check
+
+test_atomic: test_atomic.c test_atomic_include.h $(ATOMIC_OPS_HEADERS)
+ cl -O2 -DAO_ASSUME_WINDOWS98 test_atomic.c -o test_atomic
+
+test_atomic_w95: test_atomic.c test_atomic_include.h $(ATOMIC_OPS_HEADERS)
+ cl -O2 test_atomic.c -o test_atomic_w95
+
+check: test_atomic test_atomic_w95
+ echo The following will print lots of \"Missing ...\" messages.
+ test_atomic_w95
+ echo The following will print some \"Missing ...\" messages.
+ test_atomic
+++ /dev/null
-/*
- * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "../atomic_load_store.h"
-
-/* Some architecture set descriptions include special "ordered" memory */
-/* operations. As far as we can tell, no existing processors actually */
-/* require those. Nor does it appear likely that future processors */
-/* will. */
-#include "../ordered.h"
-
-/* It's not clear this should really be used from user mode. But we */
-/* include it here to demonstrate that it could be handled. */
-union AO_pa_clearable_loc {
- int data;
- double align_16[2]; /* Make the size 16 bytes */
-} __attribute__ ((aligned (16)));
-
-#undef AO_TS_T
-#undef AO_TS_INITIALIZER
-#define AO_TS_T union AO_pa_clearable_loc
-#define AO_TS_INITIALIZER { 1 }
-/* Switch meaning of set and clear, since we only have an atomic clear */
-/* instruction. */
-#undef AO_TS_VAL
-#undef AO_TS_CLEAR
-#undef AO_TS_SET
-typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
-#define AO_TS_VAL AO_PA_TS_val
-#define AO_TS_CLEAR AO_PA_TS_clear
-#define AO_TS_SET AO_PA_TS_set
-
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T * addr)
-{
- int result;
-
- __asm__ __volatile__("ldcw 0(%1),%0"
- : "=r"(result) : "r"(addr) : "memory");
- return result;
-}
-
-#define AO_HAVE_test_and_set_full
-
* Initialized data and out-of-line functions to support atomic_ops.h
* go here. Currently this is needed only for pthread-based atomics
* emulation, or for compare-and-swap emulation.
+ * Pthreads emulation isn't useful on a native Windows platform, and
+ * cas emulation is not needed. Thus we skip this on Windows.
*/
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__BORLANDC__)
+
#undef AO_FORCE_CAS
#include <pthread.h>
#include <signal.h>
-#include <sys/select.h>
+#ifdef _HPUX_SOURCE
+# include <sys/time.h>
+#else
+# include <sys/select.h>
+#endif
#include "atomic_ops.h" /* Without cas emulation! */
/*
*addr = val;
unlock(my_lock);
}
+
+#else /* Non-posix platform */
+
+int AO_non_posix_implementation_is_entirely_in_headers;
+
+#endif
/*
- * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
/* indicates that it succeeded. */
/* Test_and_set takes an address, atomically replaces it by */
/* AO_TS_SET, and returns the prior value. */
-/* An AO_TS_T clear location can be reset with the */
+/* An AO_TS_t clear location can be reset with the */
/* AO_CLEAR macro, which normally uses AO_store_release. */
-/* AO_fetch_and_add takes an address and an AO_T increment */
+/* AO_fetch_and_add takes an address and an AO_t increment */
/* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
/* are provided, since they allow faster implementations on */
-/* some hardware. AO_or atomically ors an AO_T value into a */
+/* some hardware. AO_or atomically ors an AO_t value into a */
/* memory location, but does not provide access to the original.*/
/* */
/* We expect this list to grow slowly over time. */
/* succeeds. Furthermore, this should generate near-optimal */
/* code on all common platforms. */
/* */
-/* All operations operate on unsigned AO_T, which */
+/* All operations operate on unsigned AO_t, which */
/* is the natural word size, and usually unsigned long. */
/* It is possible to check whether a particular operation op */
/* is available on a particular platform by checking whether */
/* atomic_ops_generalize.h. */
/* Some common defaults. Overridden for some architectures. */
-#define AO_T unsigned long
+#define AO_t unsigned long
/* Could conceivably be redefined below if/when we add */
/* win64 support. */
-/* The test_and_set primitive returns an AO_TS_VAL value: */
+/* The test_and_set primitive returns an AO_TS_VAL_t value: */
typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val;
-#define AO_TS_VAL AO_TS_val
+#define AO_TS_VAL_t AO_TS_val
#define AO_TS_CLEAR AO_TS_clear
#define AO_TS_SET AO_TS_set
-/* AO_TS_T is the type of an in-memory test-and-set location. */
-#define AO_TS_T AO_T /* Make sure this has the right size */
-#define AO_TS_INITIALIZER (AO_T)AO_TS_CLEAR
+/* AO_TS_t is the type of an in-memory test-and-set location. */
+#define AO_TS_t AO_t /* Make sure this has the right size */
+#define AO_TS_INITIALIZER (AO_t)AO_TS_CLEAR
/* The most common way to clear a test-and-set location */
/* at the end of a critical section. */
-#define AO_CLEAR(addr) AO_store_release((AO_T *)addr, AO_TS_CLEAR)
+#define AO_CLEAR(addr) AO_store_release((AO_t *)addr, AO_TS_CLEAR)
/* Platform-dependent stuff: */
-#ifdef __GNUC__
-/* Currently gcc is much better supported than anything else ... */
-# define AO_INLINE static inline
-# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
+#if defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER)
+# define AO_INLINE static __inline
#else
# define AO_INLINE static
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
+#elif defined(_MSC_VER)
+# define AO_compiler_barrier() __asm { }
+#elif defined(__INTEL_COMPILER)
+# define AO_compiler_barrier() __memory_barrier() /* Too strong? IA64-only? */
+#elif defined(_HPUX_SOURCE)
+# if defined(__ia64)
+# include <machine/sys/inline.h>
+# define AO_compiler_barrier() _Asm_sched_fence()
+# else
+ /* FIXME - We don't know how to do this. This is a guess. */
+ /* And probably a bad one. */
+ static volatile int AO_barrier_dummy;
+# define AO_compiler_barrier() AO_barrier_dummy = AO_barrier_dummy
+# endif
+#else
/* We conjecture that the following usually gives us the right */
/* semantics or an error. */
-# define AO_compiler_barrier() asm("");
+# define AO_compiler_barrier() asm("")
#endif
#if defined(AO_USE_PTHREAD_DEFS)
-# include "ao_sysdeps/generic_pthread.h"
+# include "atomic_ops/sysdeps/generic_pthread.h"
#endif /* AO_USE_PTHREAD_DEFS */
#if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__i386__)
-# include "ao_sysdeps/gcc/x86.h"
+# include "atomic_ops/sysdeps/gcc/x86.h"
# endif /* __i386__ */
# if defined(__ia64__)
-# include "ao_sysdeps/gcc/ia64.h"
+# include "atomic_ops/sysdeps/gcc/ia64.h"
# define AO_GENERALIZE_TWICE
# endif /* __ia64__ */
# if defined(__hppa__)
-# include "ao_sysdeps/gcc/hppa.h"
+# include "atomic_ops/sysdeps/gcc/hppa.h"
# define AO_CAN_EMUL_CAS
# endif /* __hppa__ */
# if defined(__alpha__)
-# include "ao_sysdeps/gcc/alpha.h"
+# include "atomic_ops/sysdeps/gcc/alpha.h"
# define AO_GENERALIZE_TWICE
# endif /* __alpha__ */
# if defined(__s390__)
-# include "ao_sysdeps/gcc/s390.h"
+# include "atomic_ops/sysdeps/gcc/s390.h"
# endif /* __s390__ */
# if defined(__sparc__)
-# include "ao_sysdeps/gcc/sparc.h"
+# include "atomic_ops/sysdeps/gcc/sparc.h"
# endif /* __sparc__ */
# if defined(__m68k__)
-# include "ao_sysdeps/gcc/m68k.h"
+# include "atomic_ops/sysdeps/gcc/m68k.h"
# endif /* __m68k__ */
# if defined(__powerpc__)
-# include "ao_sysdeps/gcc/powerpc.h"
+# include "atomic_ops/sysdeps/gcc/powerpc.h"
# endif /* __powerpc__ */
# if defined(__arm__) && !defined(AO_USE_PTHREAD_DEFS)
-# include "ao_sysdeps/gcc/arm.h"
+# include "atomic_ops/sysdeps/gcc/arm.h"
# endif /* __arm__ */
#endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
#if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__ia64__)
-# include "ao_sysdeps/ecc/ia64.h"
+# include "atomic_ops/sysdeps/ecc/ia64.h"
+# define AO_GENERALIZE_TWICE
+# endif
+#endif
+
+#if defined(_HPUX_SOURCE) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
+# if defined(__ia64)
+# include "atomic_ops/sysdeps/hpc/ia64.h"
# define AO_GENERALIZE_TWICE
+# else
+# include "atomic_ops/sysdeps/hpc/hppa.h"
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# if _M_IX86 >= 400
+# include "atomic_ops/sysdeps/msftc/x86.h"
# endif
#endif
&& !defined(AO_HAVE_compare_and_swap_full) \
&& !defined(AO_HAVE_compare_and_swap_acquire)
# if defined(AO_CAN_EMUL_CAS)
-# include "ao_sysdeps/emul_cas.h"
+# include "atomic_ops/sysdeps/emul_cas.h"
# else
# error Cannot implement AO_compare_and_swap_full on this architecture.
# endif
* In fact, we observe that this converges after a small fixed number
* of iterations, usually one.
*/
-#include "atomic_ops_generalize.h"
+#include "atomic_ops/generalize.h"
#ifdef AO_GENERALIZE_TWICE
-# include "atomic_ops_generalize.h"
+# include "atomic_ops/generalize.h"
#endif
+/* For compatibility with version 0.4 and earlier */
+#define AO_TS_T AO_TS_t
+#define AO_T AO_t
+#define AO_TS_VAL AO_TS_VAL_t
+
#endif /* ATOMIC_OPS_H */
!defined(AO_HAVE_test_and_set_read) && \
!defined(AO_HAVE_test_and_set_full)
# if defined(AO_HAVE_compare_and_swap_full)
- AO_INLINE AO_TS_VAL
- AO_test_and_set_full(volatile AO_TS_T *addr)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
{
if (AO_compare_and_swap_full(addr, AO_TS_CLEAR,
AO_TS_SET))
# endif /* AO_HAVE_compare_and_swap_full */
# if defined(AO_HAVE_compare_and_swap_acquire)
- AO_INLINE AO_TS_VAL
- AO_test_and_set_acquire(volatile AO_TS_T *addr)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
if (AO_compare_and_swap_acquire(addr, AO_TS_CLEAR,
AO_TS_SET))
# endif /* AO_HAVE_compare_and_swap_acquire */
# if defined(AO_HAVE_compare_and_swap_release)
- AO_INLINE AO_TS_VAL
- AO_test_and_set_release(volatile AO_TS_T *addr)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_release(volatile AO_TS_t *addr)
{
if (AO_compare_and_swap_release(addr, AO_TS_CLEAR,
AO_TS_SET))
# endif /* AO_HAVE_compare_and_swap_release */
# if defined(AO_HAVE_compare_and_swap)
- AO_INLINE AO_TS_VAL
- AO_test_and_set(volatile AO_TS_T *addr)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set(volatile AO_TS_t *addr)
{
if (AO_compare_and_swap(addr, AO_TS_CLEAR, AO_TS_SET))
return AO_TS_CLEAR;
# if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \
&& !defined(AO_HAVE_test_and_set_acquire)
- AO_INLINE AO_TS_VAL
- AO_test_and_set_acquire(volatile AO_TS_T *addr)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_acquire(volatile AO_TS_t *addr)
{
- AO_TS_VAL result = AO_test_and_set(addr);
+ AO_TS_VAL_t result = AO_test_and_set(addr);
AO_nop_full();
return result;
}
/* Nop */
#if !defined(AO_HAVE_nop)
- AO_INLINE void AO_nop(void) {};
+ AO_INLINE void AO_nop(void) {}
# define AO_HAVE_nop
#endif
AO_INLINE void
AO_nop_full()
{
- AO_TS_T dummy = AO_TS_INITIALIZER;
+ AO_TS_t dummy = AO_TS_INITIALIZER;
AO_test_and_set_full(&dummy);
}
# define AO_HAVE_nop_full
#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_full) && \
!defined(AO_HAVE_load_acquire)
- AO_INLINE AO_T
- AO_load_acquire(volatile AO_T *addr)
+ AO_INLINE AO_t
+ AO_load_acquire(volatile AO_t *addr)
{
- AO_T result = AO_load(addr);
+ AO_t result = AO_load(addr);
/* Acquire barrier would be useless, since the load could be delayed */
/* beyond it. */
AO_nop_full();
#if defined(AO_HAVE_load) && defined(AO_HAVE_nop_read) && \
!defined(AO_HAVE_load_read)
- AO_INLINE AO_T
- AO_load_read(volatile AO_T *addr)
+ AO_INLINE AO_t
+ AO_load_read(volatile AO_t *addr)
{
- AO_T result = AO_load(addr);
+ AO_t result = AO_load(addr);
/* Acquire barrier would be useless, since the load could be delayed */
/* beyond it. */
AO_nop_read();
/* Fetch_and_add */
#if defined(AO_HAVE_compare_and_swap_full) && \
!defined(AO_HAVE_fetch_and_add_full)
- AO_INLINE AO_T
- AO_fetch_and_add_full(volatile AO_T *addr, AO_T incr)
+ AO_INLINE AO_t
+ AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
{
- AO_T old;
+ AO_t old;
do
{
old = *addr;
#if defined(AO_HAVE_fetch_and_add_full) &&\
!defined(AO_HAVE_fetch_and_sub1_full)
-# define AO_fetch_and_sub1_full(addr) AO_fetch_and_add_full(addr,(AO_T)(-1))
+# define AO_fetch_and_sub1_full(addr) AO_fetch_and_add_full(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_full
#endif
#if defined(AO_HAVE_fetch_and_add_release) &&\
!defined(AO_HAVE_fetch_and_sub1_release)
# define AO_fetch_and_sub1_release(addr) \
- AO_fetch_and_add_release(addr,(AO_T)(-1))
+ AO_fetch_and_add_release(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_release
#endif
#if defined(AO_HAVE_fetch_and_add_acquire) &&\
!defined(AO_HAVE_fetch_and_sub1_acquire)
# define AO_fetch_and_sub1_acquire(addr) \
- AO_fetch_and_add_acquire(addr,(AO_T)(-1))
+ AO_fetch_and_add_acquire(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_acquire
#endif
#if defined(AO_HAVE_fetch_and_add_write) &&\
!defined(AO_HAVE_fetch_and_sub1_write)
# define AO_fetch_and_sub1_write(addr) \
- AO_fetch_and_add_write(addr,(AO_T)(-1))
+ AO_fetch_and_add_write(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_write
#endif
#if defined(AO_HAVE_fetch_and_add_read) &&\
!defined(AO_HAVE_fetch_and_sub1_read)
# define AO_fetch_and_sub1_read(addr) \
- AO_fetch_and_add_read(addr,(AO_T)(-1))
+ AO_fetch_and_add_read(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_read
#endif
#if defined(AO_HAVE_fetch_and_add_release_write) &&\
!defined(AO_HAVE_fetch_and_sub1_release_write)
# define AO_fetch_and_sub1_release_write(addr) \
- AO_fetch_and_add_release_write(addr,(AO_T)(-1))
+ AO_fetch_and_add_release_write(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_release_write
#endif
#if defined(AO_HAVE_fetch_and_add_acquire_read) &&\
!defined(AO_HAVE_fetch_and_sub1_acquire_read)
# define AO_fetch_and_sub1_acquire_read(addr) \
- AO_fetch_and_add_acquire_read(addr,(AO_T)(-1))
+ AO_fetch_and_add_acquire_read(addr,(AO_t)(-1))
# define AO_HAVE_fetch_and_sub1_acquire_read
#endif
#if defined(AO_HAVE_compare_and_swap_full) && \
!defined(AO_HAVE_or_full)
AO_INLINE void
- AO_or_full(volatile AO_T *addr, AO_T incr)
+ AO_or_full(volatile AO_t *addr, AO_t incr)
{
- AO_T old;
+ AO_t old;
do
{
old = *addr;
#if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full)\
&& !defined(AO_HAVE_compare_and_swap_acquire)
AO_INLINE int
- AO_compare_and_swap_acquire(volatile AO_T *addr, AO_T old, AO_T new_val)
+ AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
int result = AO_compare_and_swap(addr, old, new_val);
AO_nop_full();
--- /dev/null
+Copyright (c) 2002 by Hewlett-Packard Company.
+
+This is qprof, a set of simple profiling utilities.
+
+See doc/README.txt for licensing and instructions.
* volatile load has acquire semantics, and a volatile store has release
* semantics. This is true with the standard Itanium ABI.
*/
-AO_INLINE AO_T
-AO_load_acquire(volatile AO_T *p)
+AO_INLINE AO_t
+AO_load_acquire(volatile AO_t *p)
{
/* A normal volatile load generates an ld.acq */
return *p;
#define AO_HAVE_load_acquire
AO_INLINE void
-AO_store_release(volatile AO_T *p, AO_T val)
+AO_store_release(volatile AO_t *p, AO_t val)
{
- AO_compiler_barrier(); /* Empirically necessary. Gcc bug? */
+# if defined(__GNUC_MINOR__) && \
+ (__GNUC__ < 3 || __GNUC__ == 3 && __GNUC_MINOR__ < 4)
+ AO_compiler_barrier(); /* Empirically necessary for older gcc versions */
+# endif
/* A normal volatile store generates an st.rel */
*p = val;
}
*/
/*
- * Definitions for architecturs on which loads and stores of AO_T are
+ * Definitions for architectures on which loads and stores of AO_t are
* atomic fo all legal alignments.
*/
-AO_INLINE AO_T
-AO_load(volatile AO_T *addr)
+AO_INLINE AO_t
+AO_load(volatile AO_t *addr)
{
- assert(((unsigned long)addr & (sizeof(AO_T) - 1)) == 0);
+ assert(((unsigned long)addr & (sizeof(AO_t) - 1)) == 0);
/* Cast away the volatile for architectures where */
/* volatile adds barrier semantics. */
- return *(AO_T *)addr;
+ return *(AO_t *)addr;
}
#define AO_HAVE_load
AO_INLINE void
-AO_store(volatile AO_T *addr, AO_T new_val)
+AO_store(volatile AO_t *addr, AO_t new_val)
{
- assert(((unsigned long)addr & (sizeof(AO_T) - 1)) == 0);
- (*(AO_T *)addr) = new_val;
+ assert(((unsigned long)addr & (sizeof(AO_t) - 1)) == 0);
+ (*(AO_t *)addr) = new_val;
}
#define AO_HAVE_store
*/
/*
- * Definitions for architecturs on which loads and stores of AO_T are
+ * Definitions for architectures on which loads and stores of AO_t are
* atomic fo all legal alignments.
*/
-AO_INLINE AO_T
-AO_load(volatile AO_T *addr)
+AO_INLINE AO_t
+AO_load(volatile AO_t *addr)
{
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier semantics. */
- return (*(AO_T *)addr);
+ return (*(AO_t *)addr);
}
#define AO_HAVE_load
AO_INLINE void
-AO_store(volatile AO_T *addr, AO_T new_val)
+AO_store(volatile AO_t *addr, AO_t new_val)
{
- (*(AO_T *)addr) = new_val;
+ (*(AO_t *)addr) = new_val;
}
#define AO_HAVE_store
}
#define AO_HAVE_nop_full
-AO_INLINE AO_T
-AO_fetch_and_add1_acquire (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire (volatile AO_t *p)
{
return __fetchadd8_acq((unsigned __int64 *)p, 1);
}
#define AO_HAVE_fetch_and_add1_acquire
-AO_INLINE AO_T
-AO_fetch_and_add1_release (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_add1_release (volatile AO_t *p)
{
return __fetchadd8_rel((unsigned __int64 *)p, 1);
}
#define AO_HAVE_fetch_and_add1_release
-AO_INLINE AO_T
-AO_fetch_and_sub1_acquire (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire (volatile AO_t *p)
{
return __fetchadd8_acq((unsigned __int64 *)p, -1);
}
#define AO_HAVE_fetch_and_sub1_acquire
-AO_INLINE AO_T
-AO_fetch_and_sub1_release (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_sub1_release (volatile AO_t *p)
{
return __fetchadd8_rel((unsigned __int64 *)p, -1);
}
#define AO_HAVE_fetch_and_sub1_release
AO_INLINE int
-AO_compare_and_swap_acquire(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_acquire(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
- AO_T oldval;
+ AO_t oldval;
oldval = _InterlockedCompareExchange64_acq(addr, new_val, old);
return (oldval == old);
}
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
-AO_compare_and_swap_release(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_release(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
- AO_T oldval;
+ AO_t oldval;
oldval = _InterlockedCompareExchange64_rel(addr, new_val, old);
return (oldval == old);
}
# error This file should not be included directly.
#endif
-AO_T AO_compare_and_swap_emulation(volatile AO_T *addr, AO_T old,
- AO_T new_val);
+AO_t AO_compare_and_swap_emulation(volatile AO_t *addr, AO_t old,
+ AO_t new_val);
-void AO_store_full_emulation(volatile AO_T *addr, AO_T val);
+void AO_store_full_emulation(volatile AO_t *addr, AO_t val);
#define AO_compare_and_swap_full(addr, old, newval) \
AO_compare_and_swap_emulation(addr, old, newval)
/* We believe that ldq_l ... stq_c does not imply any memory barrier. */
/* We should add an explicit fetch_and_add definition. */
AO_INLINE int
-AO_compare_and_swap(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
unsigned long was_equal;
unsigned long temp;
#include "../atomic_load_store.h"
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T *addr) {
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
int oldval;
int temp = 1; /* locked value */
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Modified by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003
+ * - Added self-aligning lock.
+ *
+ */
+
+#include "../atomic_load_store.h"
+
+/* Some architecture set descriptions include special "ordered" memory */
+/* operations. As far as we can tell, no existing processors actually */
+/* require those. Nor does it appear likely that future processors */
+/* will. */
+#include "../ordered.h"
+
+/* GCC will not guarantee the alignment we need, use four lock words */
+/* and select the correctly aligned datum. See the glibc 2.3.2 */
+/* linuxthread port for the original implementation. */
+struct AO_pa_clearable_loc {
+ int data[4];
+};
+
+#undef AO_TS_t
+#undef AO_TS_INITIALIZER
+#define AO_TS_t struct AO_pa_clearable_loc
+#define AO_TS_INITIALIZER {1,1,1,1}
+/* Switch meaning of set and clear, since we only have an atomic clear */
+/* instruction. */
+#undef AO_TS_VAL_t
+#undef AO_TS_CLEAR
+#undef AO_TS_SET
+typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
+#define AO_TS_VAL_t AO_PA_TS_val
+#define AO_TS_CLEAR AO_PA_TS_clear
+#define AO_TS_SET AO_PA_TS_set
+
+/* The hppa only has one atomic read and modify memory operation, */
+/* load and clear, so hppa spinlocks must use zero to signify that */
+/* someone is holding the lock. The address used for the ldcw */
+/* semaphore must be 16-byte aligned. */
+
+#define __ldcw(a) ({ \
+ volatile unsigned int __ret; \
+ __asm__ __volatile__("ldcw 0(%2),%0" \
+ : "=r" (__ret), "=m" (*(a)) : "r" (a)); \
+ __ret; \
+})
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
+/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
+/* be assured of 16-byte alignment for atomic lock data even if we */
+/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
+/* we use a struct containing an array of four ints for the atomic lock */
+/* type and dynamically select the 16-byte aligned int from the array */
+/* for the semaphore. */
+#define __PA_LDCW_ALIGNMENT 16
+#define __ldcw_align(a) ({ \
+ unsigned long __ret = (unsigned long) a; \
+ __ret += __PA_LDCW_ALIGNMENT - 1; \
+ __ret &= ~(__PA_LDCW_ALIGNMENT - 1); \
+ (volatile unsigned int *) __ret; \
+})
+
+/* Works on PA 1.1 and PA 2.0 systems */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t * addr)
+{
+ volatile unsigned int *a = __ldcw_align (addr);
+ return __ldcw (a);
+}
+
+AO_INLINE void
+AO_pa_clear(volatile AO_TS_t * addr)
+{
+ volatile unsigned int *a = __ldcw_align (addr);
+ AO_compiler_barrier();
+ *a = 1;
+}
+#undef AO_CLEAR
+#define AO_CLEAR(addr) AO_pa_clear(addr)
+
+#define AO_HAVE_test_and_set_full
+
}
#define AO_HAVE_nop_full
-AO_INLINE AO_T
-AO_fetch_and_add1_acquire (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire (volatile AO_t *p)
{
- AO_T result;
+ AO_t result;
__asm__ __volatile__ ("fetchadd8.acq %0=[%1],1":
"=r" (result): "r"(p) :"memory");
}
#define AO_HAVE_fetch_and_add1_acquire
-AO_INLINE AO_T
-AO_fetch_and_add1_release (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_add1_release (volatile AO_t *p)
{
- AO_T result;
+ AO_t result;
__asm__ __volatile__ ("fetchadd8.rel %0=[%1],1":
"=r" (result): "r"(p) :"memory");
#define AO_HAVE_fetch_and_add1_release
-AO_INLINE AO_T
-AO_fetch_and_sub1_acquire (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire (volatile AO_t *p)
{
- AO_T result;
+ AO_t result;
__asm__ __volatile__ ("fetchadd8.acq %0=[%1],-1":
"=r" (result): "r"(p) :"memory");
#define AO_HAVE_fetch_and_sub1_acquire
-AO_INLINE AO_T
-AO_fetch_and_sub1_release (volatile AO_T *p)
+AO_INLINE AO_t
+AO_fetch_and_sub1_release (volatile AO_t *p)
{
- AO_T result;
+ AO_t result;
__asm__ __volatile__ ("fetchadd8.rel %0=[%1],-1":
"=r" (result): "r"(p) :"memory");
#define AO_HAVE_fetch_and_sub1_release
AO_INLINE int
-AO_compare_and_swap_acquire(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_acquire(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
- AO_T oldval;
+ AO_t oldval;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg8.acq %0=%1,%2,ar.ccv"
: "=r"(oldval), "+S"(*addr)
: "r"(new_val), "r"(old) : "memory");
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
-AO_compare_and_swap_release(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_release(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
- AO_T oldval;
+ AO_t oldval;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
: "=r"(oldval), "+S"(*addr)
: "r"(new_val), "r"(old) : "memory");
/* FIXME. Very incomplete. */
#include "../aligned_atomic_load_store.h"
+/* Are there any m68k multiprocessors still around? */
+/* AFAIK, Alliants were sequentially consistent. */
+#include "../ordered.h"
+
/* Contributed by Tony Mantler or new. Should be changed to MIT license? */
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T *addr) {
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
int oldval;
/* The return value is semi-phony. */
#define AO_HAVE_NOP_FULL
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T *addr) {
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
int oldval;
int temp = 1; /* locked value */
/* FIXME. Very incomplete. */
-AO_INLINE AO_T AO_compare_and_swap_full(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
int retval;
__asm__ __volatile__ (
*/
/* FIXME. Very incomplete. No support for sparc64. */
+/* Non-ancient SPARCs provide compare-and-swap (casa). */
+/* We should make that available. */
#include "../atomic_load_store.h"
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T *addr) {
+/* Real SPARC code uses TSO: */
+#include "../ordered_except_wr.h"
+
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr) {
int oldval;
__asm__ __volatile__("ldstub %1,%0"
#include "../ordered_except_wr.h"
-#if defined(USE_PENTIUM4_INSTRS)
+#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full()
{
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
-AO_INLINE AO_T
-AO_fetch_and_add_full (volatile AO_T *p, long incr)
+AO_INLINE AO_t
+AO_fetch_and_add_full (volatile AO_t *p, long incr)
{
- AO_T result = incr;
+ AO_t result = incr;
__asm__ __volatile__ ("lock; xaddl %0, %1" :
"+r" (result), "+m" (*p) : : "memory");
/* Really only works for 486 and later */
AO_INLINE void
-AO_or_full (volatile AO_T *p, AO_T incr)
+AO_or_full (volatile AO_t *p, AO_t incr)
{
__asm__ __volatile__ ("lock; orl %1, %0" :
"+m" (*p) : "r" (incr) : "memory");
#define AO_HAVE_or_full
-AO_INLINE AO_TS_T
-AO_test_and_set_full(volatile AO_T *addr)
+AO_INLINE AO_TS_t
+AO_test_and_set_full(volatile AO_t *addr)
{
int oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
-AO_compare_and_swap_full(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
char result;
__asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
#define AO_HAVE_nop_full
-AO_INLINE AO_T
-AO_load_full(volatile AO_T *addr)
+AO_INLINE AO_t
+AO_load_full(volatile AO_t *addr)
{
- AO_T result;
+ AO_t result;
pthread_mutex_lock(&AO_pt_lock);
result = *addr;
pthread_mutex_unlock(&AO_pt_lock);
#define AO_HAVE_load_full
AO_INLINE void
-AO_store_full(volatile AO_T *addr, AO_T val)
+AO_store_full(volatile AO_t *addr, AO_t val)
{
pthread_mutex_lock(&AO_pt_lock);
*addr = val;
#define AO_HAVE_store_full
-AO_INLINE AO_TS_VAL
-AO_test_and_set_full(volatile AO_TS_T *addr)
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t *addr)
{
- int result;
+ AO_TS_VAL_t result;
pthread_mutex_lock(&AO_pt_lock);
- result = (int)(*addr);
+ result = (AO_TS_VAL_t)(*addr);
*addr = AO_TS_SET;
pthread_mutex_unlock(&AO_pt_lock);
assert(result == AO_TS_SET || result == AO_TS_CLEAR);
#define AO_HAVE_test_and_set_full
-AO_INLINE AO_T
-AO_fetch_and_add_full(volatile AO_T *p, AO_T incr)
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
- AO_T tmp;
+ AO_t tmp;
pthread_mutex_lock(&AO_pt_lock);
tmp = *p;
#define AO_HAVE_fetch_and_add_full
AO_INLINE void
-AO_or_full(volatile AO_T *p, AO_T incr)
+AO_or_full(volatile AO_t *p, AO_t incr)
{
- AO_T tmp;
+ AO_t tmp;
pthread_mutex_lock(&AO_pt_lock);
tmp = *p;
#define AO_HAVE_or_full
AO_INLINE int
-AO_compare_and_swap_full(volatile AO_T *addr,
- AO_T old, AO_T new_val)
+AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
{
pthread_mutex_lock(&AO_pt_lock);
if (*addr == old)
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Derived from the corresponding header file for gcc.
+ *
+ */
+
+#include "../atomic_load_store.h"
+
+/* Some architecture set descriptions include special "ordered" memory */
+/* operations. As far as we can tell, no existing processors actually */
+/* require those. Nor does it appear likely that future processors */
+/* will. */
+#include "../ordered.h"
+
+#include <machine/inline.h>
+
+/* GCC will not guarantee the alignment we need, use four lock words */
+/* and select the correctly aligned datum. See the glibc 2.3.2 */
+/* linuxthread port for the original implementation. */
+struct AO_pa_clearable_loc {
+ int data[4];
+};
+
+#undef AO_TS_t
+#undef AO_TS_INITIALIZER
+#define AO_TS_t struct AO_pa_clearable_loc
+#define AO_TS_INITIALIZER {1,1,1,1}
+/* Switch meaning of set and clear, since we only have an atomic clear */
+/* instruction. */
+#undef AO_TS_VAL_t
+#undef AO_TS_CLEAR
+#undef AO_TS_SET
+typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val;
+#define AO_TS_VAL_t AO_PA_TS_val
+#define AO_TS_CLEAR AO_PA_TS_clear
+#define AO_TS_SET AO_PA_TS_set
+
+/* The hppa only has one atomic read and modify memory operation, */
+/* load and clear, so hppa spinlocks must use zero to signify that */
+/* someone is holding the lock. The address used for the ldcw */
+/* semaphore must be 16-byte aligned. */
+
+#define __ldcw(a, ret) \
+ _LDCWX(0 /* index */, 0 /* s */, a /* base */, ret);
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data, */
+/* and GCC only guarantees 8-byte alignment for stack locals, we can't */
+/* be assured of 16-byte alignment for atomic lock data even if we */
+/* specify "__attribute ((aligned(16)))" in the type declaration. So, */
+/* we use a struct containing an array of four ints for the atomic lock */
+/* type and dynamically select the 16-byte aligned int from the array */
+/* for the semaphore. */
+#define __PA_LDCW_ALIGNMENT 16
+
+#define __ldcw_align(a, ret) { \
+ ret = (unsigned long) a; \
+ ret += __PA_LDCW_ALIGNMENT - 1; \
+ ret &= ~(__PA_LDCW_ALIGNMENT - 1); \
+}
+
+/* Works on PA 1.1 and PA 2.0 systems */
+AO_INLINE AO_TS_VAL_t
+AO_test_and_set_full(volatile AO_TS_t * addr)
+{
+ register unsigned int ret;
+ register unsigned long a;
+ __ldcw_align (addr, a);
+ __ldcw (a, ret);
+ return ret;
+}
+
+AO_INLINE void
+AO_pa_clear(volatile AO_TS_t * addr)
+{
+ unsigned long a;
+ __ldcw_align (addr,a);
+ AO_compiler_barrier();
+ *(volatile unsigned int *)a = 1;
+}
+#undef AO_CLEAR
+#define AO_CLEAR(addr) AO_pa_clear(addr)
+
+#define AO_HAVE_test_and_set_full
+
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file specifies Itanium primitives for use with the HP compiler
+ * under HP-UX. We use intrinsics instead of the inline assembly code in the
+ * gcc file.
+ */
+
+#include "../atomic_load_store.h"
+
+#include "../acquire_release_volatile.h"
+
+#include <machine/sys/inline.h>
+
+#ifdef __LP64__
+# define AO_T_FASIZE _FASZ_D
+# define AO_T_SIZE _SZ_D
+#else
+# define AO_T_FASIZE _FASZ_W
+# define AO_T_SIZE _SZ_W
+#endif
+
+AO_INLINE void
+AO_nop_full()
+{
+ _Asm_mf();
+}
+#define AO_HAVE_nop_full
+
+AO_INLINE AO_t
+AO_fetch_and_add1_acquire (volatile AO_t *p)
+{
+ return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, 1,
+ _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+#define AO_HAVE_fetch_and_add1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_add1_release (volatile AO_t *p)
+{
+ return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1,
+ _LDHINT_NONE, _UP_MEM_FENCE);
+}
+
+#define AO_HAVE_fetch_and_add1_release
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_acquire (volatile AO_t *p)
+{
+ return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1,
+ _LDHINT_NONE, _DOWN_MEM_FENCE);
+}
+
+#define AO_HAVE_fetch_and_sub1_acquire
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_release (volatile AO_t *p)
+{
+ return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1,
+ _LDHINT_NONE, _UP_MEM_FENCE);
+}
+
+#define AO_HAVE_fetch_and_sub1_release
+
+AO_INLINE int
+AO_compare_and_swap_acquire(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+ AO_t oldval;
+
+ _Asm_mov_to_ar(_AREG_CCV, old, _DOWN_MEM_FENCE);
+ oldval = _Asm_cmpxchg(AO_T_SIZE, _SEM_ACQ, addr,
+ new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
+ return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_acquire
+
+AO_INLINE int
+AO_compare_and_swap_release(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+ AO_t oldval;
+ _Asm_mov_to_ar(_AREG_CCV, old, _UP_MEM_FENCE);
+ oldval = _Asm_cmpxchg(AO_T_SIZE, _SEM_REL, addr,
+ new_val, _LDHINT_NONE, _UP_MEM_FENCE);
+ /* Hopefully the compiler knows not to reorder the above two? */
+ return (oldval == old);
+}
+
+#define AO_HAVE_compare_and_swap_release
+
--- /dev/null
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* The following really assume we have a 486 or better. */
+/* If ASSUME_WINDOWS98 is defined, we assume Windows 98 or newer. */
+
+#include "../aligned_atomic_load_store.h"
+
+/* Real X86 implementations, except for some old WinChips, appear */
+/* to enforce ordering between memory operations, EXCEPT that a later */
+/* read can pass earlier writes, presumably due to the visible */
+/* presence of store buffers. */
+/* We ignore both the WinChips, and the fact that the official specs */
+/* seem to be much weaker (and arguably too weak to be usable). */
+
+#include "../ordered_except_wr.h"
+
+/* As far as we can tell, the lfence and sfence instructions are not */
+/* currently needed or useful for cached memory accesses. */
+
+AO_INLINE AO_t
+AO_fetch_and_add1_full (volatile AO_t *p)
+{
+ return InterlockedIncrement((LONG volatile *)p) - 1;
+}
+
+#define AO_HAVE_fetch_and_add1_full
+
+AO_INLINE AO_t
+AO_fetch_and_sub1_full (volatile AO_t *p)
+{
+ return InterlockedDecrement((LONG volatile *)p) + 1;
+}
+
+#define AO_HAVE_fetch_and_sub1_full
+
+AO_INLINE AO_TS_t
+AO_test_and_set_full(volatile AO_t *addr)
+{
+ return (AO_TS_t) InterlockedExchange((LONG volatile *)addr, (LONG)AO_TS_SET);
+}
+
+#define AO_HAVE_test_and_set_full
+
+#ifdef AO_ASSUME_WINDOWS98
+/* Returns nonzero if the comparison succeeded. */
+AO_INLINE int
+AO_compare_and_swap_full(volatile AO_t *addr,
+ AO_t old, AO_t new_val)
+{
+# if 0
+ /* Use the pointer variant, since that should always have the right size. */
+ /* This seems to fail with VC++ 6 on Win2000 because the routine isn't */
+ /* actually there. */
+ return InterlockedCompareExchangePointer((PVOID volatile *)addr,
+ (PVOID)new_val, (PVOID) old)
+ == (PVOID)old;
+# endif
+ /* FIXME - This is nearly useless on win64. */
+ return InterlockedCompareExchange((DWORD volatile *)addr,
+ (DWORD)new_val, (DWORD) old)
+ == (DWORD)old;
+}
+
+#define AO_HAVE_compare_and_swap_full
+#endif /* ASSUME_WINDOWS98 */
#ifdef AO_HAVE_load
-AO_INLINE AO_T
-AO_load_read(volatile AO_T *addr)
+AO_INLINE AO_t
+AO_load_read(volatile AO_t *addr)
{
- AO_T result = AO_load(addr);
+ AO_t result = AO_load(addr);
AO_compiler_barrier();
return result;
}
#if defined(AO_HAVE_store)
AO_INLINE void
-AO_store_write(volatile AO_T *addr, AO_T val)
+AO_store_write(volatile AO_t *addr, AO_t val)
{
AO_compiler_barrier();
AO_store(addr, val);
Alpha.
The defined operations are all of the form AO_<op><barrier>(<args>).
-Most operations operate on values of type AO_T, which are unsigned integers
+Most operations operate on values of type AO_t, which are unsigned integers
whose size matches that of pointers on the given architecture. We may
provide more flexibility in operand types in the future, but this seems
to cover 90+% of common usage.
void nop()
No atomic operation. The barrier may still be useful.
-AO_T load(volatile AO_T * addr)
+AO_t load(volatile AO_t * addr)
Atomic load of *addr.
-void store(volatile AO_T * addr, AO_T new_val)
+void store(volatile AO_t * addr, AO_t new_val)
Atomically store new_val to *addr.
-AO_T fetch_and_add(volatile AO_T *addr, AO_T incr)
+AO_t fetch_and_add(volatile AO_t *addr, AO_t incr)
Atomically add incr to *addr, and return the original value of *addr.
-AO_T fetch_and_add1(volatile AO_T *addr)
+AO_t fetch_and_add1(volatile AO_t *addr)
Equivalent to AO_fetch_and_add(addr, 1).
-AO_T fetch_and_sub1(volatile AO_T *addr)
- Equivalent to AO_fetch_and_add(addr, (AO_T)(-1)).
-void or(volatile AO_T *addr, AO_T incr)
+AO_t fetch_and_sub1(volatile AO_t *addr)
+ Equivalent to AO_fetch_and_add(addr, (AO_t)(-1)).
+void or(volatile AO_t *addr, AO_t incr)
Atomically or incr into *addr.
-int compare_and_swap(volatile AO_T * addr, AO_T old_val, AO_T new_val)
+int compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val)
Atomically compare *addr to old_val, and replace *addr by new_val
if the first comparison succeeds. Returns nonzero if the comparison
succeeded and *addr was updated.
-AO_TS_VAL test_and_set(volatile AO_TS_T * addr)
- Atomically read the binary value at *addr, and set it. AO_TS_VAL
+AO_TS_VAL_t test_and_set(volatile AO_TS_T * addr)
+ Atomically read the binary value at *addr, and set it. AO_TS_VAL_t
is an enumeration type which includes the two values AO_TS_SET and
and AO_TS_CLEAR. An AO_TS_T location is capable of holding an
- AO_TS_VAL, but may be much larger, as dictated by hardware
+ AO_TS_VAL_t, but may be much larger, as dictated by hardware
constraints. Test_and_set logically sets the value to AO_TS_SET.
It may be reset to AO_TS_CLEAR with the AO_CLEAR(AO_TS_T *) macro.
- AO_TS_T locations should be initialized to AO_TS_INITIALZER.
+ AO_TS_T locations should be initialized to AO_TS_INITIALIZER.
The values of AO_TS_SET and AO_TS_CLEAR are hardware dependent.
(On PA-RISC, AO_TS_SET is zero!)
We expect the list of memory barrier types to remain more or less fixed.
However, it is likely that the list of underlying atomic operations will
-grow. AO_fetch_and_or is a very likely candidate. It would also be
-useful to support double-wide operations when available.
+grow. It would also be useful to support double-wide and narrower operations
+when available.
Example:
p are guaranteed to see an initialized object, it suffices to use
AO_release_write(p, ...) to write the pointer to the object, and to
retrieve it in other threads with AO_acquire_read(p).
+
+Platform notes:
+
+All X86: We quietly assume 486 or better.
+
+Windows:
+Currently AO_REQUIRE_CAS is not supported.
+
+Microsoft compilers:
+Define AO_ASSUME_WINDOWS98 to get access to hardware compare-and-swap
+functionality. This relies on the InterlockedCompareExchange() function
+which was apparently not supported in Windows95. (There may be a better
+way to get access to this.) Currently only X86(32 bit) is supported for
+Windows.
+
+Gcc on x86:
+Define AO_USE_PENTIUM4_INSTRS to use the Pentium 4 mfence instruction.
+Currently this appears to be of marginal benefit.
* see doc/COPYING for details.
*/
+#if defined(_MSC_VER) || \
+ defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) || \
+ defined(_WIN32_WINCE)
+# define USE_WINTHREADS
+#else
+# define USE_PTHREADS
+#endif
+
#include <stdlib.h>
-#include <pthread.h>
#include <stdio.h>
+#ifdef USE_PTHREADS
+# include <pthread.h>
+#endif
+
+#ifdef USE_WINTHREADS
+# include <windows.h>
+#endif
+
#include "atomic_ops.h"
#include "test_atomic_include.h"
typedef int (* test_func)(void); /* Returns != 0 on success */
+#ifdef USE_PTHREADS
void * run_parallel(int nthreads, thr_func f1, test_func t, char *name)
{
pthread_attr_t attr;
}
return 0;
}
+#endif /* USE_PTHREADS */
+
+#ifdef USE_WINTHREADS
+
+struct tramp_args {
+ thr_func fn;
+ long arg;
+};
+
+DWORD WINAPI tramp(LPVOID param)
+{
+ struct tramp_args *args = (struct tramp_args *)param;
+
+ return (DWORD)(args -> fn)((LPVOID)(args -> arg));
+}
+
+void * run_parallel(int nthreads, thr_func f1, test_func t, char *name)
+{
+ HANDLE thr[100];
+ struct tramp_args args[100];
+ int i;
+ DWORD code;
+
+ fprintf(stderr, "Testing %s\n", name);
+ if (nthreads > 100)
+ {
+ fprintf(stderr, "run_parallel: requested too many threads\n");
+ abort();
+ }
+
+ for (i = 0; i < nthreads; ++i)
+ {
+ args[i].fn = f1;
+ args[i].arg = i;
+ if ((thr[i] = CreateThread(NULL, 0, tramp, (LPVOID)(args+i), 0, NULL))
+ == NULL)
+ {
+ perror("Thread creation failed");
+ fprintf(stderr, "CreateThread failed with %d, thread %d\n",
+ GetLastError(), i);
+ abort();
+ }
+ }
+ for (i = 0; i < nthreads; ++i)
+ {
+ if ((code = WaitForSingleObject(thr[i], INFINITE)) != WAIT_OBJECT_0)
+ {
+ perror("Thread join failed");
+ fprintf(stderr, "WaitForSingleObject returned %d, thread %d\n",
+ code, i);
+ abort();
+ }
+ }
+ if (t())
+ {
+ fprintf(stderr, "Succeeded\n");
+ }
+ else
+ {
+ fprintf(stderr, "Failed\n");
+ abort();
+ }
+ return 0;
+}
+#endif /* USE_WINTHREADS */
#ifdef AO_USE_PTHREAD_DEFS
# define NITERS 100000
--- /dev/null
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: )\n", \
+ __FILE__, __LINE__), exit(1); }
+
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "")
+
+void test_atomic(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop)
+ AO_nop();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store)
+ AO_store(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load)
+ TA_assert(AO_load(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set)
+ assert(AO_test_and_set(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set(&z) == AO_TS_SET);
+ assert(AO_test_and_set(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+# if defined(AO_HAVE_fetch_and_add)
+ TA_assert(AO_fetch_and_add(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1)
+ TA_assert(AO_fetch_and_add1(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1)
+ TA_assert(AO_fetch_and_sub1(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap)
+ TA_assert(!AO_compare_and_swap(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or)
+ AO_or(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _release)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_release")
+
+void test_atomic_release(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_release)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_release)
+ AO_nop_release();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_release)
+ AO_store_release(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_release)
+ TA_assert(AO_load_release(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_release)
+ assert(AO_test_and_set_release(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_release(&z) == AO_TS_SET);
+ assert(AO_test_and_set_release(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+# if defined(AO_HAVE_fetch_and_add_release)
+ TA_assert(AO_fetch_and_add_release(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_release(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_release)
+ TA_assert(AO_fetch_and_add1_release(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_release)
+ TA_assert(AO_fetch_and_sub1_release(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_release)
+ TA_assert(!AO_compare_and_swap_release(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_release(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_release)
+ AO_or_release(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _acquire)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_acquire")
+
+void test_atomic_acquire(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_acquire)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_acquire)
+ AO_nop_acquire();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_acquire)
+ AO_store_acquire(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_acquire)
+ TA_assert(AO_load_acquire(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_acquire)
+ assert(AO_test_and_set_acquire(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_acquire(&z) == AO_TS_SET);
+ assert(AO_test_and_set_acquire(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+# if defined(AO_HAVE_fetch_and_add_acquire)
+ TA_assert(AO_fetch_and_add_acquire(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_acquire(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_acquire)
+ TA_assert(AO_fetch_and_add1_acquire(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_acquire)
+ TA_assert(AO_fetch_and_sub1_acquire(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_acquire)
+ TA_assert(!AO_compare_and_swap_acquire(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_acquire(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_acquire)
+ AO_or_acquire(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _read)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_read")
+
+void test_atomic_read(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_read)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_read)
+ AO_nop_read();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_read)
+ AO_store_read(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_read)
+ TA_assert(AO_load_read(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_read)
+ assert(AO_test_and_set_read(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_read(&z) == AO_TS_SET);
+ assert(AO_test_and_set_read(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+# if defined(AO_HAVE_fetch_and_add_read)
+ TA_assert(AO_fetch_and_add_read(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_read(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_read)
+ TA_assert(AO_fetch_and_add1_read(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_read)
+ TA_assert(AO_fetch_and_sub1_read(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_read)
+ TA_assert(!AO_compare_and_swap_read(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_read(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_read)
+ AO_or_read(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+/* TA_assert reports the failing check's file/line plus the barrier */
+/* variant under test (_write), then exits with a failure status. */
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _write)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+/* MISSING reports that a primitive has no _write variant on this */
+/* platform; the caller then emulates it in plain C where needed. */
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_write")
+
+/* Exercise each AO primitive's _write variant: when the platform */
+/* defines AO_HAVE_<op>_write, call it and check its result; otherwise */
+/* print a MISSING diagnostic and, where later checks depend on x, */
+/* update x with ordinary C instead. */
+void test_atomic_write(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_write)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_write)
+ AO_nop_write();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_write)
+ AO_store_write(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_write)
+ TA_assert(AO_load_write(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_write)
+ /* NOTE(review): plain assert() here is compiled out under NDEBUG, */
+ /* unlike the TA_assert() used for every other check in this file; */
+ /* confirm this is intended. */
+ assert(AO_test_and_set_write(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_write(&z) == AO_TS_SET);
+ assert(AO_test_and_set_write(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+ /* x follows 13 -> 55 -> 13 -> 14 -> 13 -> 42 through the checks below. */
+# if defined(AO_HAVE_fetch_and_add_write)
+ TA_assert(AO_fetch_and_add_write(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_write(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_write)
+ TA_assert(AO_fetch_and_add1_write(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_write)
+ TA_assert(AO_fetch_and_sub1_write(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_write)
+ /* First CAS must fail (x is 13, not 14); second must succeed. */
+ TA_assert(!AO_compare_and_swap_write(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_write(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_write)
+ AO_or_write(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+/* TA_assert reports the failing check's file/line plus the barrier */
+/* variant under test (_full), then exits with a failure status. */
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _full)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+/* MISSING reports that a primitive has no _full variant on this */
+/* platform; the caller then emulates it in plain C where needed. */
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_full")
+
+/* Exercise each AO primitive's _full variant: when the platform */
+/* defines AO_HAVE_<op>_full, call it and check its result; otherwise */
+/* print a MISSING diagnostic and, where later checks depend on x, */
+/* update x with ordinary C instead. */
+void test_atomic_full(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_full)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_full)
+ AO_nop_full();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_full)
+ AO_store_full(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_full)
+ TA_assert(AO_load_full(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_full)
+ /* NOTE(review): plain assert() here is compiled out under NDEBUG, */
+ /* unlike the TA_assert() used for every other check in this file; */
+ /* confirm this is intended. */
+ assert(AO_test_and_set_full(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_full(&z) == AO_TS_SET);
+ assert(AO_test_and_set_full(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+ /* x follows 13 -> 55 -> 13 -> 14 -> 13 -> 42 through the checks below. */
+# if defined(AO_HAVE_fetch_and_add_full)
+ TA_assert(AO_fetch_and_add_full(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_full(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_full)
+ TA_assert(AO_fetch_and_add1_full(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_full)
+ TA_assert(AO_fetch_and_sub1_full(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_full)
+ /* First CAS must fail (x is 13, not 14); second must succeed. */
+ TA_assert(!AO_compare_and_swap_full(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_full(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_full)
+ AO_or_full(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+/* TA_assert reports the failing check's file/line plus the barrier */
+/* variant under test (_release_write), then exits with failure status. */
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _release_write)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+/* MISSING reports that a primitive has no _release_write variant on */
+/* this platform; the caller then emulates it in plain C where needed. */
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_release_write")
+
+/* Exercise each AO primitive's _release_write variant: when the */
+/* platform defines AO_HAVE_<op>_release_write, call it and check its */
+/* result; otherwise print a MISSING diagnostic and, where later */
+/* checks depend on x, update x with ordinary C instead. */
+void test_atomic_release_write(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_release_write)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_release_write)
+ AO_nop_release_write();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_release_write)
+ AO_store_release_write(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_release_write)
+ TA_assert(AO_load_release_write(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_release_write)
+ /* NOTE(review): plain assert() here is compiled out under NDEBUG, */
+ /* unlike the TA_assert() used for every other check in this file; */
+ /* confirm this is intended. */
+ assert(AO_test_and_set_release_write(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_release_write(&z) == AO_TS_SET);
+ assert(AO_test_and_set_release_write(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+ /* x follows 13 -> 55 -> 13 -> 14 -> 13 -> 42 through the checks below. */
+# if defined(AO_HAVE_fetch_and_add_release_write)
+ TA_assert(AO_fetch_and_add_release_write(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_release_write(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_release_write)
+ TA_assert(AO_fetch_and_add1_release_write(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_release_write)
+ TA_assert(AO_fetch_and_sub1_release_write(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_release_write)
+ /* First CAS must fail (x is 13, not 14); second must succeed. */
+ TA_assert(!AO_compare_and_swap_release_write(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_release_write(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_release_write)
+ AO_or_release_write(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+
+/*
+ * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ * This file is covered by the GNU general public license, version 2.
+ * see doc/COPYING for details.
+ */
+
+/* Some basic sanity tests. These do not test the barrier semantics. */
+
+/* TA_assert reports the failing check's file/line plus the barrier */
+/* variant under test (_acquire_read), then exits with failure status. */
+#undef TA_assert
+#define TA_assert(e) \
+ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: _acquire_read)\n", \
+ __FILE__, __LINE__), exit(1); }
+
+/* MISSING reports that a primitive has no _acquire_read variant on */
+/* this platform; the caller then emulates it in plain C where needed. */
+#undef MISSING
+#define MISSING(name) \
+ fprintf(stderr, "Missing: %s\n", #name "_acquire_read")
+
+/* Exercise each AO primitive's _acquire_read variant: when the */
+/* platform defines AO_HAVE_<op>_acquire_read, call it and check its */
+/* result; otherwise print a MISSING diagnostic and, where later */
+/* checks depend on x, update x with ordinary C instead. */
+void test_atomic_acquire_read(void)
+{
+ AO_T x;
+# if defined(AO_HAVE_test_and_set_acquire_read)
+ AO_TS_T z = AO_TS_INITIALIZER;
+# endif
+
+# if defined(AO_HAVE_nop_acquire_read)
+ AO_nop_acquire_read();
+# else
+ MISSING(AO_nop);
+# endif
+# if defined(AO_HAVE_store_acquire_read)
+ AO_store_acquire_read(&x, 13);
+ TA_assert (x == 13);
+# else
+ MISSING(AO_store);
+ x = 13;
+# endif
+# if defined(AO_HAVE_load_acquire_read)
+ TA_assert(AO_load_acquire_read(&x) == 13);
+# else
+ MISSING(AO_load);
+# endif
+# if defined(AO_HAVE_test_and_set_acquire_read)
+ /* NOTE(review): plain assert() here is compiled out under NDEBUG, */
+ /* unlike the TA_assert() used for every other check in this file; */
+ /* confirm this is intended. */
+ assert(AO_test_and_set_acquire_read(&z) == AO_TS_CLEAR);
+ assert(AO_test_and_set_acquire_read(&z) == AO_TS_SET);
+ assert(AO_test_and_set_acquire_read(&z) == AO_TS_SET);
+ AO_CLEAR(&z);
+# else
+ MISSING(AO_test_and_set);
+# endif
+ /* x follows 13 -> 55 -> 13 -> 14 -> 13 -> 42 through the checks below. */
+# if defined(AO_HAVE_fetch_and_add_acquire_read)
+ TA_assert(AO_fetch_and_add_acquire_read(&x, 42) == 13);
+ TA_assert(AO_fetch_and_add_acquire_read(&x, -42) == 55);
+# else
+ MISSING(AO_fetch_and_add);
+# endif
+# if defined(AO_HAVE_fetch_and_add1_acquire_read)
+ TA_assert(AO_fetch_and_add1_acquire_read(&x) == 13);
+# else
+ MISSING(AO_fetch_and_add1);
+ ++x;
+# endif
+# if defined(AO_HAVE_fetch_and_sub1_acquire_read)
+ TA_assert(AO_fetch_and_sub1_acquire_read(&x) == 14);
+# else
+ MISSING(AO_fetch_and_sub1);
+ --x;
+# endif
+# if defined(AO_HAVE_compare_and_swap_acquire_read)
+ /* First CAS must fail (x is 13, not 14); second must succeed. */
+ TA_assert(!AO_compare_and_swap_acquire_read(&x, 14, 42));
+ TA_assert(x == 13);
+ TA_assert(AO_compare_and_swap_acquire_read(&x, 13, 42));
+ TA_assert(x == 42);
+# else
+ MISSING(AO_compare_and_swap);
+# endif
+# if defined(AO_HAVE_or_acquire_read)
+ AO_or_acquire_read(&x, 66);
+ TA_assert(x == 106);
+# else
+ MISSING(AO_or);
+ x |= 34;
+# endif
+}
+
+
+