*
*/
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) \
+ || __clang_major__ > 3 \
+ || (__clang_major__ == 3 && __clang_minor__ >= 5) \
+ || (defined(AO_PREFER_BUILTIN_ATOMICS) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ >= 2)) \
+ && !defined(AO_DISABLE_GCC_ATOMICS)
+ /* Probably, it could be enabled even for earlier gcc/clang versions. */
+# define AO_GCC_ATOMIC_TEST_AND_SET
+#endif
+
#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
#ifdef __native_client__
/* Also, SWP is obsoleted for ARMv8+. */
#endif /* !__thumb2__ */
+#if !defined(AO_UNIPROCESSOR) && defined(AO_ARM_HAVE_DMB) \
+ && !defined(AO_PREFER_BUILTIN_ATOMICS)
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ /* AO_THUMB_GO_ARM is empty. */
+ /* This will target the system domain and thus be overly */
+ /* conservative as the CPUs will occupy the inner shareable domain. */
+ /* The plain variant (dmb st) is theoretically slower, and should */
+ /* not be needed. That said, with limited experimentation, a CPU */
+ /* implementation for which it actually matters has not been found */
+ /* yet, though they should already exist. */
+ /* Anyway, note that the "st" and "ishst" barriers are actually */
+ /* quite weak and, as the libatomic_ops documentation states, */
+ /* usually not what you really want. */
+ __asm__ __volatile__("dmb ishst" : : : "memory");
+ }
+# define AO_HAVE_nop_write
+#endif /* !AO_UNIPROCESSOR && AO_ARM_HAVE_DMB && !AO_PREFER_BUILTIN_ATOMICS */
+
+#ifndef AO_GCC_ATOMIC_TEST_AND_SET
+
#ifdef AO_UNIPROCESSOR
/* If only a single processor (core) is used, AO_UNIPROCESSOR could */
/* be defined by the client to avoid unnecessary memory barrier. */
}
# define AO_HAVE_nop_full
- AO_INLINE void
- AO_nop_write(void)
- {
- /* AO_THUMB_GO_ARM is empty. */
- /* This will target the system domain and thus be overly */
- /* conservative as the CPUs will occupy the inner shareable domain. */
- /* The plain variant (dmb st) is theoretically slower, and should */
- /* not be needed. That said, with limited experimentation, a CPU */
- /* implementation for which it actually matters has not been found */
- /* yet, though they should already exist. */
- /* Anyway, note that the "st" and "ishst" barriers are actually */
- /* quite weak and, as the libatomic_ops documentation states, */
- /* usually not what you really want. */
- __asm__ __volatile__("dmb ishst" : : : "memory");
- }
-# define AO_HAVE_nop_write
-
#elif defined(AO_ARM_HAVE_LDREX)
/* ARMv6 is the first architecture providing support for a simple */
/* LL/SC. A data memory barrier must be raised via CP15 command. */
/* AO_nop_full() is emulated using AO_test_and_set_full(). */
#endif /* !AO_UNIPROCESSOR && !AO_ARM_HAVE_LDREX */
-#ifdef AO_ARM_HAVE_LDREX
+#endif /* !AO_GCC_ATOMIC_TEST_AND_SET */
- /* AO_t/char/short/int load is simple reading. */
- /* Unaligned accesses are not guaranteed to be atomic. */
-# define AO_ACCESS_CHECK_ALIGNED
-# define AO_ACCESS_short_CHECK_ALIGNED
-# define AO_ACCESS_int_CHECK_ALIGNED
-# include "../all_atomic_only_load.h"
+#ifdef AO_ARM_HAVE_LDREX
/* "ARM Architecture Reference Manual" (chapter A3.5.3) says that the */
/* single-copy atomic processor accesses are all byte accesses, all */
/* arch/arm/kernel/entry-header.S of Linux. Nonetheless, there is */
/* a doubt this was properly implemented in some ancient OS releases. */
# ifdef AO_BROKEN_TASKSWITCH_CLREX
+
+# define AO_SKIPATOMIC_store
+# define AO_SKIPATOMIC_store_release
+# define AO_SKIPATOMIC_char_store
+# define AO_SKIPATOMIC_char_store_release
+# define AO_SKIPATOMIC_short_store
+# define AO_SKIPATOMIC_short_store_release
+# define AO_SKIPATOMIC_int_store
+# define AO_SKIPATOMIC_int_store_release
+
+# ifndef AO_PREFER_BUILTIN_ATOMICS
+
AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
{
int flag;
# define AO_HAVE_short_store
# endif /* AO_ARM_HAVE_LDREXBH */
-# else
+# endif /* !AO_PREFER_BUILTIN_ATOMICS */
+
+# elif !defined(AO_GCC_ATOMIC_TEST_AND_SET)
# include "../loadstore/atomic_store.h"
/* AO_int_store is defined in ao_t_is_int.h. */
# endif /* !AO_BROKEN_TASKSWITCH_CLREX */
+#endif /* AO_ARM_HAVE_LDREX */
+
+#ifndef AO_GCC_ATOMIC_TEST_AND_SET
+
+#ifdef AO_ARM_HAVE_LDREX
+
+ /* AO_t/char/short/int load is simple reading. */
+ /* Unaligned accesses are not guaranteed to be atomic. */
+# define AO_ACCESS_CHECK_ALIGNED
+# define AO_ACCESS_short_CHECK_ALIGNED
+# define AO_ACCESS_int_CHECK_ALIGNED
+# include "../all_atomic_only_load.h"
+
# ifndef AO_HAVE_char_store
# include "../loadstore/char_atomic_store.h"
# include "../loadstore/short_atomic_store.h"
# define AO_HAVE_test_and_set_full
#endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */
+#else /* AO_GCC_ATOMIC_TEST_AND_SET */
+
+# ifdef AO_ARM_HAVE_LDREXD
+# include "../standard_ao_double_t.h"
+# endif
+# include "generic.h"
+
+#endif /* AO_GCC_ATOMIC_TEST_AND_SET */
+
#define AO_T_IS_INT
/* char_load_read is defined using load and nop_read. */
/* char_store_full definition is omitted similar to load_full reason. */
-AO_INLINE void
-AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELAXED);
-}
-#define AO_HAVE_char_store
+#ifndef AO_SKIPATOMIC_char_store
+ AO_INLINE void
+ AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_char_store
+#endif
-AO_INLINE void
-AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELEASE);
-}
-#define AO_HAVE_char_store_release
+#ifndef AO_SKIPATOMIC_char_store_release
+ AO_INLINE void
+ AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_char_store_release
+#endif
#ifdef AO_GCC_HAVE_char_SYNC_CAS
/* short_load_read is defined using load and nop_read. */
/* short_store_full definition is omitted similar to load_full reason. */
-AO_INLINE void
-AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELAXED);
-}
-#define AO_HAVE_short_store
+#ifndef AO_SKIPATOMIC_short_store
+ AO_INLINE void
+ AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_short_store
+#endif
-AO_INLINE void
-AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELEASE);
-}
-#define AO_HAVE_short_store_release
+#ifndef AO_SKIPATOMIC_short_store_release
+ AO_INLINE void
+ AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_short_store_release
+#endif
#ifdef AO_GCC_HAVE_short_SYNC_CAS
/* int_load_read is defined using load and nop_read. */
/* int_store_full definition is omitted similar to load_full reason. */
-AO_INLINE void
-AO_int_store(volatile unsigned *addr, unsigned value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELAXED);
-}
-#define AO_HAVE_int_store
+#ifndef AO_SKIPATOMIC_int_store
+ AO_INLINE void
+ AO_int_store(volatile unsigned *addr, unsigned value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_int_store
+#endif
-AO_INLINE void
-AO_int_store_release(volatile unsigned *addr, unsigned value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELEASE);
-}
-#define AO_HAVE_int_store_release
+#ifndef AO_SKIPATOMIC_int_store_release
+ AO_INLINE void
+ AO_int_store_release(volatile unsigned *addr, unsigned value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_int_store_release
+#endif
#ifdef AO_GCC_HAVE_int_SYNC_CAS
/* load_read is defined using load and nop_read. */
/* store_full definition is omitted similar to load_full reason. */
-AO_INLINE void
-AO_store(volatile AO_t *addr, AO_t value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELAXED);
-}
-#define AO_HAVE_store
+#ifndef AO_SKIPATOMIC_store
+ AO_INLINE void
+ AO_store(volatile AO_t *addr, AO_t value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_store
+#endif
-AO_INLINE void
-AO_store_release(volatile AO_t *addr, AO_t value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELEASE);
-}
-#define AO_HAVE_store_release
+#ifndef AO_SKIPATOMIC_store_release
+ AO_INLINE void
+ AO_store_release(volatile AO_t *addr, AO_t value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_store_release
+#endif
#ifdef AO_GCC_HAVE_SYNC_CAS
/* XSIZE_load_read is defined using load and nop_read. */
/* XSIZE_store_full definition is omitted similar to load_full reason. */
-AO_INLINE void
-AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELAXED);
-}
-#define AO_HAVE_XSIZE_store
+#ifndef AO_SKIPATOMIC_XSIZE_store
+ AO_INLINE void
+ AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_XSIZE_store
+#endif
-AO_INLINE void
-AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
-{
- __atomic_store_n(addr, value, __ATOMIC_RELEASE);
-}
-#define AO_HAVE_XSIZE_store_release
+#ifndef AO_SKIPATOMIC_XSIZE_store_release
+ AO_INLINE void
+ AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
+ {
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_XSIZE_store_release
+#endif
#ifdef AO_GCC_HAVE_XSIZE_SYNC_CAS