AS_IF([test x"$THREADS" != xnone],
[ AC_DEFINE([GC_BUILTIN_ATOMIC], [1],
[Use C11 (GCC) atomic intrinsics instead of
- libatomic_ops primitives.]) ]) ]) ],
+ libatomic_ops primitives.]) ]) ])
+ AO_TRYLINK_CFLAGS="" ],
[ AC_MSG_RESULT([internal])
+ AO_TRYLINK_CFLAGS="-I${srcdir}/libatomic_ops/src"
ATOMIC_OPS_CFLAGS='-I$(top_builddir)/libatomic_ops/src -I$(top_srcdir)/libatomic_ops/src'
ATOMIC_OPS_LIBS=""
AC_SUBST([ATOMIC_OPS_CFLAGS])
AM_CONDITIONAL([NEED_ATOMIC_OPS_ASM],
[test x$with_libatomic_ops = xno -a x$need_atomic_ops_asm = xtrue])
+# Check whether particular AO primitives are emulated with locks.
+# The check below relies on the fact that linking against the libatomic_ops
+# binary is not needed unless the emulation is in use (except on
+# Solaris SPARC).
+AS_IF([test x$with_libatomic_ops != xnone -a x$need_atomic_ops_asm != xtrue],
+ [ old_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $AO_TRYLINK_CFLAGS $CFLAGS_EXTRA"
+ AC_MSG_CHECKING([for lock-free AO load/store, test-and-set primitives])
+ AC_TRY_LINK([#include "atomic_ops.h"],
+ [AO_t x=0;unsigned char c=0;AO_TS_t z=AO_TS_INITIALIZER;
+ (void)AO_test_and_set_acquire(&z);AO_CLEAR(&z);AO_compiler_barrier();
+ AO_store(&x,AO_load(&x)+1);AO_char_store(&c,AO_char_load(&c)+1);
+ AO_store_release(&x,AO_load_acquire(&x)+1)],
+ [ AC_MSG_RESULT(yes) ],
+ [ AC_MSG_RESULT(no)
+ AC_DEFINE([BASE_ATOMIC_OPS_EMULATED], [1],
+ [AO load, store and/or test-and-set primitives are
+ implemented in libatomic_ops using locks.]) ])
+ CFLAGS="$old_CFLAGS" ])
+
dnl Produce the Files
dnl -----------------
errno = old_errno;
}
+#ifdef BASE_ATOMIC_OPS_EMULATED
+  /* The AO primitives emulated with locks cannot be used inside signal */
+  /* handlers as this could cause a deadlock or a double lock. */
+  /* The following "async" macro definitions are correct only for */
+  /* a uniprocessor case and are provided for testing purposes. */
+# define ao_load_acquire_async(p) (*(p))
+# define ao_load_async(p) ao_load_acquire_async(p)
+# define ao_store_release_async(p, v) (void)(*(p) = (v))
+# define ao_store_async(p, v) ao_store_release_async(p, v)
+#else
+# define ao_load_acquire_async(p) AO_load_acquire(p)
+# define ao_load_async(p) AO_load(p)
+# define ao_store_release_async(p, v) AO_store_release(p, v)
+# define ao_store_async(p, v) AO_store(p, v)
+#endif /* !BASE_ATOMIC_OPS_EMULATED */
+
/* The lookup here is safe, since this is done on behalf */
/* of a thread which holds the allocation lock in order */
/* to stop the world. Thus concurrent modification of the */
/* and fetched (by GC_push_all_stacks) using the atomic primitives to */
/* avoid the related TSan warning. */
# ifdef SPARC
- AO_store((volatile AO_t *)&me->stop_info.stack_ptr,
+ ao_store_async((volatile AO_t *)&me->stop_info.stack_ptr,
(AO_t)GC_save_regs_in_stack());
# else
# ifdef IA64
me -> backing_store_ptr = GC_save_regs_in_stack();
# endif
- AO_store((volatile AO_t *)&me->stop_info.stack_ptr, (AO_t)GC_approx_sp());
+ ao_store_async((volatile AO_t *)&me->stop_info.stack_ptr,
+ (AO_t)GC_approx_sp());
# endif
}
pthread_t self = pthread_self();
GC_thread me;
IF_CANCEL(int cancel_state;)
- AO_t my_stop_count = AO_load_acquire(&GC_stop_count);
+ AO_t my_stop_count = ao_load_acquire_async(&GC_stop_count);
/* After the barrier, this thread should see */
/* the actual content of GC_threads. */
me = GC_lookup_thread_async(self);
# ifdef GC_ENABLE_SUSPEND_THREAD
- if (AO_load(&me->suspended_ext)) {
+ if (ao_load_async(&me->suspended_ext)) {
GC_store_stack_ptr(me);
sem_post(&GC_suspend_ack_sem);
suspend_self_inner(me);
/* thread has been stopped. Note that sem_post() is */
/* the only async-signal-safe primitive in LinuxThreads. */
sem_post(&GC_suspend_ack_sem);
- AO_store_release(&me->stop_info.last_stop_count, my_stop_count);
+ ao_store_release_async(&me->stop_info.last_stop_count, my_stop_count);
/* Wait until that thread tells us to restart by sending */
/* this thread a GC_sig_thr_restart signal (should be masked */
/* this code should not be executed. */
do {
sigsuspend (&suspend_handler_mask);
- } while (AO_load_acquire(&GC_world_is_stopped)
- && AO_load(&GC_stop_count) == my_stop_count);
+ } while (ao_load_acquire_async(&GC_world_is_stopped)
+ && ao_load_async(&GC_stop_count) == my_stop_count);
# ifdef DEBUG_THREADS
GC_log_printf("Continuing %p\n", (void *)self);
# endif
{
/* Set the flag that the thread has been restarted. */
- AO_store_release(&me->stop_info.last_stop_count,
- (AO_t)((word)my_stop_count | THREAD_RESTARTED));
+ ao_store_release_async(&me->stop_info.last_stop_count,
+ (AO_t)((word)my_stop_count | THREAD_RESTARTED));
}
}
RESTORE_CANCEL(cancel_state);
static void *GC_CALLBACK suspend_self_inner(void *client_data) {
GC_thread me = (GC_thread)client_data;
- while (AO_load_acquire(&me->suspended_ext)) {
+ while (ao_load_acquire_async(&me->suspended_ext)) {
/* TODO: Use sigsuspend() instead. */
GC_brief_async_signal_safe_sleep();
}
}
# endif /* GC_ENABLE_SUSPEND_THREAD */
+# undef ao_load_acquire_async
+# undef ao_load_async
+# undef ao_store_async
+# undef ao_store_release_async
#endif /* !GC_OPENBSD_UTHREADS && !NACL */
#ifdef IA64