* Makefile.am: Include NT_STATIC_THREADS_MAKEFILE in dist.
* Makefile.in: Regenerate.
* include/private/gc_locks.h: GC_compare_and_exchange, GC_atomic_add:
remove. NUMERIC_THREAD_ID, THREAD_EQUAL: New. GC_lock_holder: now
unsigned long. I_DONT_HOLD_LOCK, I_HOLD_LOCK: Update.
* pthread_stop_world.c, pthread_support.c, win32_threads.c: Use
NUMERIC_THREAD_ID, THREAD_EQUAL.
* include/private/gcconfig.h: GENERIC_COMPARE_AND_SWAP: Remove.
* include/private/thread_local_alloc.h: Don't USE_COMPILER_TLS on
ARM.
* libatomic_ops-1.2/doc/README.txt: Update to reflect C++
standardization effort.
EXTRA_DIST += BCC_MAKEFILE NT_MAKEFILE NT_THREADS_MAKEFILE \
OS2_MAKEFILE PCR-Makefile digimars.mak EMX_MAKEFILE \
Makefile.direct Makefile.dj Makefile.DLLs SMakefile.amiga \
- WCC_MAKEFILE configure_atomic_ops.sh
+ WCC_MAKEFILE configure_atomic_ops.sh \
+ NT_STATIC_THREADS_MAKEFILE
# files used by makefiles other than Makefile.am
#
BCC_MAKEFILE NT_MAKEFILE NT_THREADS_MAKEFILE OS2_MAKEFILE \
PCR-Makefile digimars.mak EMX_MAKEFILE Makefile.direct \
Makefile.dj Makefile.DLLs SMakefile.amiga WCC_MAKEFILE \
- configure_atomic_ops.sh add_gc_prefix.c gcname.c if_mach.c \
- if_not_there.c hpux_test_and_clear.s gc.mak MacOS.c \
- MacProjects.sit.hqx mach_dep.c setjmp_t.c threadlibs.c \
- AmigaOS.c Mac_files/datastart.c Mac_files/dataend.c \
+ configure_atomic_ops.sh NT_STATIC_THREADS_MAKEFILE \
+ add_gc_prefix.c gcname.c if_mach.c if_not_there.c \
+ hpux_test_and_clear.s gc.mak MacOS.c MacProjects.sit.hqx \
+ mach_dep.c setjmp_t.c threadlibs.c AmigaOS.c \
+ Mac_files/datastart.c Mac_files/dataend.c \
Mac_files/MacOS_config.h Mac_files/MacOS_Test_config.h \
include/private/msvc_dbg.h msvc_dbg.c libatomic_ops-1.2 \
libtool.m4
* Needed if there is more than one allocator thread.
* DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK.
*
- * In the PARALLEL_MARK case, we also need to define a number of
- * other inline finctions here:
- * GC_bool GC_compare_and_exchange( volatile GC_word *addr,
- * GC_word old, GC_word new )
- * GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
- * void GC_memory_barrier( )
- *
* Note that I_HOLD_LOCK and I_DONT_HOLD_LOCK are used only positively
* in assertions, and may return TRUE in the "dont know" case.
*/
# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
|| GC_lock_holder != GetCurrentThreadId())
# elif defined(GC_PTHREADS)
-# define NO_THREAD (pthread_t)(-1)
# include <pthread.h>
+
+ /* Posix allows pthread_t to be a struct, though it rarely is. */
+ /* Unfortunately, we need to use a pthread_t to index a data */
+ /* structure. It also helps if comparisons don't involve a */
+ /* function call. Hence we introduce platform-dependent macros */
+ /* to compare pthread_t ids and to map them to integers. */
+	/* The mapping to integers does not need to result in different	*/
+ /* integers for each thread, though that should be true as much */
+ /* as possible. */
+# if 1 /* Refine to exclude platforms on which pthread_t is struct */
+# define NUMERIC_THREAD_ID(id) ((unsigned long)(id))
+# define THREAD_EQUAL(id1, id2) ((id1) == (id2))
+# define NUMERIC_THREAD_ID_UNIQUE
+# else
+ /* Generic definitions that always work, but will result in */
+ /* poor performance and weak assertion checking. */
+# define NUMERIC_THREAD_ID(id) 1l
+# define THREAD_EQUAL(id1, id2) pthread_equal(id1, id2)
+# undef NUMERIC_THREAD_ID_UNIQUE
+# endif
+# define NO_THREAD (-1l) /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
/* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
# define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif /* !GC_ASSERTIONS */
# endif /* USE_PTHREAD_LOCKS */
-# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define SET_LOCK_HOLDER() GC_lock_holder = NUMERIC_THREAD_ID(pthread_self())
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (!GC_need_to_lock \
- || pthread_equal(GC_lock_holder, pthread_self()))
-# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
- || !pthread_equal(GC_lock_holder, pthread_self()))
+ || GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
+# ifndef NUMERIC_THREAD_ID_UNIQUE
+# define I_DONT_HOLD_LOCK() 1 /* Conservatively say yes */
+# else
+# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
+ || GC_lock_holder != NUMERIC_THREAD_ID(pthread_self()))
+# endif
extern volatile GC_bool GC_collecting;
# define ENTER_GC() GC_collecting = 1;
# define EXIT_GC() GC_collecting = 0;
extern void GC_lock(void);
- extern pthread_t GC_lock_holder;
+ extern unsigned long GC_lock_holder;
# ifdef GC_ASSERTIONS
- extern pthread_t GC_mark_lock_holder;
+ extern unsigned long GC_mark_lock_holder;
# endif
# endif /* GC_PTHREADS with linux_threads.c implementation */
# define MPROTECT_VDB
# endif
# else
-# define GENERIC_COMPARE_AND_SWAP
- /* No compare-and-swap instruction. Use pthread mutexes */
- /* when we absolutely have to. */
# ifdef PARALLEL_MARK
# define USE_MARK_BYTES
/* Minimize compare-and-swap usage. */
# else
# define USE_WIN32_COMPILER_TLS
# endif /* !GNU */
-# elif defined(LINUX) && \
+# elif defined(LINUX) && !defined(ARM32) && \
(__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >=3))
# define USE_COMPILER_TLS
# elif (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
for (p = GC_threads[i]; p != 0; p = p -> next) {
if (p -> flags & FINISHED) continue;
++nthreads;
- if (pthread_equal(p -> id, me)) {
+ if (THREAD_EQUAL(p -> id, me)) {
# ifdef SPARC
lo = (ptr_t)GC_save_regs_in_stack();
# else
GC_printf("Reg stack for thread 0x%x = [%lx,%lx)\n",
(unsigned)p -> id, bs_lo, bs_hi);
# endif
- if (pthread_equal(p -> id, me)) {
+ if (THREAD_EQUAL(p -> id, me)) {
/* FIXME: This may add an unbounded number of entries, */
/* and hence overflow the mark stack, which is bad. */
GC_push_all_eager(bs_lo, bs_hi);
GC_stopping_pid = getpid(); /* debugging only. */
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> id != my_thread) {
+ if (!THREAD_EQUAL(p -> id, my_thread)) {
if (p -> flags & FINISHED) continue;
if (p -> stop_info.last_stop_count == GC_stop_count) continue;
if (p -> thread_blocked) /* Will wait */ continue;
AO_store(&GC_world_is_stopped, FALSE);
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> id != my_thread) {
+ if (!THREAD_EQUAL(p -> id, my_thread)) {
if (p -> flags & FINISHED) continue;
if (p -> thread_blocked) continue;
n_live_threads++;
#endif /* GC_NETBSD_THREADS */
/* Allocator lock definitions. */
-#if defined(USE_SPIN_LOCK)
- pthread_t GC_lock_holder = NO_THREAD;
-#else
+#if !defined(USE_SPIN_LOCK)
pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
- pthread_t GC_lock_holder = NO_THREAD;
+#endif
+unsigned long GC_lock_holder = NO_THREAD;
/* Used only for assertions, and to prevent */
/* recursive reentry in the system call wrapper. */
-#endif
#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
/* Caller holds allocation lock. */
GC_thread GC_new_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
GC_thread result;
static GC_bool first_thread_used = FALSE;
/* (The code intentionally traps if it wasn't.) */
void GC_delete_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
GC_ASSERT(I_HOLD_LOCK());
- while (!pthread_equal(p -> id, id)) {
+ while (!THREAD_EQUAL(p -> id, id)) {
prev = p;
p = p -> next;
}
void GC_delete_gc_thread(GC_thread gc_id)
{
pthread_t id = gc_id -> id;
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
register GC_thread prev = 0;
/* return the most recent one. */
GC_thread GC_lookup_thread(pthread_t id)
{
- int hv = ((word)id) % THREAD_TABLE_SZ;
+ int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
- while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
+ while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next;
return(p);
}
me = 0;
for (p = GC_threads[hv]; 0 != p; p = next) {
next = p -> next;
- if (p -> id == self) {
+ if (THREAD_EQUAL(p -> id, self)) {
me = p;
p -> next = 0;
} else {
return(result);
}
-#ifdef GENERIC_COMPARE_AND_SWAP
- pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;
-
- GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val)
- {
- GC_bool result;
- pthread_mutex_lock(&GC_compare_and_swap_lock);
- if (*addr == old) {
- *addr = new_val;
- result = TRUE;
- } else {
- result = FALSE;
- }
- pthread_mutex_unlock(&GC_compare_and_swap_lock);
- return result;
- }
-
- GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
- {
- GC_word old;
- pthread_mutex_lock(&GC_compare_and_swap_lock);
- old = *addr;
- *addr = old + how_much;
- pthread_mutex_unlock(&GC_compare_and_swap_lock);
- return old;
- }
-
-#endif /* GENERIC_COMPARE_AND_SWAP */
/* Spend a few cycles in a way that can't introduce contention with */
/* othre threads. */
void GC_pause(void)
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
#ifdef GC_ASSERTIONS
- pthread_t GC_mark_lock_holder = NO_THREAD;
+ unsigned long GC_mark_lock_holder = NO_THREAD;
#endif
#if 0
*/
GC_generic_lock(&mark_mutex);
# ifdef GC_ASSERTIONS
- GC_mark_lock_holder = pthread_self();
+ GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
# endif
}
void GC_release_mark_lock(void)
{
- GC_ASSERT(GC_mark_lock_holder == pthread_self());
+ GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
/* free-list link may be ignored. */
void GC_wait_builder(void)
{
- GC_ASSERT(GC_mark_lock_holder == pthread_self());
+ GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
}
GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
# ifdef GC_ASSERTIONS
- GC_mark_lock_holder = pthread_self();
+ GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
# endif
}
void GC_notify_all_builder(void)
{
- GC_ASSERT(GC_mark_lock_holder == pthread_self());
+ GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
if (pthread_cond_broadcast(&builder_cv) != 0) {
ABORT("pthread_cond_broadcast failed");
}
void GC_wait_marker(void)
{
- GC_ASSERT(GC_mark_lock_holder == pthread_self());
+ GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
}
GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
# ifdef GC_ASSERTIONS
- GC_mark_lock_holder = pthread_self();
+ GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
# endif
}
/* Thread id for current holder of allocation lock */
#else
pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
- pthread_t GC_lock_holder = NO_THREAD;
+ unsigned long GC_lock_holder = NO_THREAD;
#endif
#ifdef CYGWIN32
/* and win32 thread id. */
#define PTHREAD_MAP_SIZE 512
DWORD GC_pthread_map_cache[PTHREAD_MAP_SIZE];
-#define HASH(pthread_id) ((((word)(pthread_id) >> 5)) % PTHREAD_MAP_SIZE)
+#define HASH(pthread_id) ((NUMERIC_THREAD_ID(pthread_id) >> 5) % PTHREAD_MAP_SIZE)
/* It appears pthread_t is really a pointer type ... */
#define SET_PTHREAD_MAP_CACHE(pthread_id, win32_id) \
GC_pthread_map_cache[HASH(pthread_id)] = (win32_id);
LOCK();
for (p = GC_threads[hv_guess]; 0 != p; p = p -> next) {
- if (pthread_equal(p -> pthread_id, id))
+ if (THREAD_EQUAL(p -> pthread_id, id))
goto foundit;
}
for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
for (p = GC_threads[hv]; 0 != p; p = p -> next) {
- if (pthread_equal(p -> pthread_id, id))
+ if (THREAD_EQUAL(p -> pthread_id, id))
goto foundit;
}
}