+2009-09-25 Hans Boehm <Hans.Boehm@hp.com>
+
+	* include/private/gcconfig.h (CANCEL_SAFE, IF_CANCEL): New macros.
+ * include/private/gc_priv.h (DISABLE_CANCEL, RESTORE_CANCEL,
+ ASSERT_CANCEL_DISABLED): New macros.
+ * alloc.c (GC_maybe_gc): Assert cancellation disabled.
+	(GC_collect_a_little_inner, GC_try_to_collect, GC_collect_or_expand):
+ Disable cancellation.
+ (GC_add_to_our_memory): Check for overflow.
+	* misc.c (GC_cancel_disable_count): Declare.
+ (GC_init, GC_write): Disable cancellation.
+ (GC_init): Remove redundant GC_is_initialized test.
+ * os_dep.c (GC_repeat_read): Assert cancellation disabled.
+ (GC_get_stack_base): Disable cancellation.
+ * pthread_stop_world.c (GC_suspend_handler_inner): Disable
+ cancellation.
+ * pthread_support.c (GC_mark_thread): Permanently disable
+ cancellation.
+ (GC_wait_for_gc_completion, GC_wait_builder, GC_wait_marker):
+ Assert cancellation disabled.
+ (fork handling): Disable cancellation, fix comment.
+ (GC_pthread_create): Disable cancellation.
+ (GC_unregister_my_thread): Disable cancellation.
+ * Makefile.direct: Document NO_CANCEL_SAFE.
+
+
2009-09-25 Ivan Maidanski <ivmai@mail.ru>
* Makefile: Remove outdated file (Makefile.direct should be used
# collection while leaving generational collection enabled).
# -DGC_FULL_FREQ=<value> Set alternate default number of partial collections
# between full collections (matters only if incremental collection is on).
+# -DNO_CANCEL_SAFE (Posix platforms with threads only) Don't bother trying
+#   to make the collector safe for thread cancellation; assume cancellation
+#   is not used.  (Note that if cancellation is used anyway, threads may end
+#   up getting cancelled in unexpected places.)  Even without this option,
+#   PTHREAD_CANCEL_ASYNCHRONOUS is never safe with the collector.  (We could
+#   argue about its safety without the collector.)
#
CXXFLAGS= $(CFLAGS)
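[Editorial note: as a usage illustration only (the exact flags are up to the builder, but gc.a is Makefile.direct's usual library target), the option would be enabled with something like:

    make -f Makefile.direct CFLAGS="-O -DNO_CANCEL_SAFE" gc.a
]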
static int n_partial_gcs = 0;
GC_ASSERT(I_HOLD_LOCK());
+ ASSERT_CANCEL_DISABLED();
if (GC_should_collect()) {
if (!GC_incremental) {
/* FIXME: If possible, GC_default_stop_func should be used here */
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE current_time;
# endif
+ ASSERT_CANCEL_DISABLED();
if (GC_dont_gc) return FALSE;
if (GC_incremental && GC_collection_in_progress()) {
if (GC_print_stats) {
void GC_collect_a_little_inner(int n)
{
int i;
-
+ IF_CANCEL(int cancel_state;)
+
if (GC_dont_gc) return;
+ DISABLE_CANCEL(cancel_state);
if (GC_incremental && GC_collection_in_progress()) {
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some((ptr_t)0)) {
} else {
GC_maybe_gc();
}
+ RESTORE_CANCEL(cancel_state);
}
GC_API int GC_CALL GC_collect_a_little(void)
# ifdef USE_MUNMAP
int old_unmap_threshold;
# endif
+ IF_CANCEL(int cancel_state;)
DCL_LOCK_STATE;
if (!GC_is_initialized) GC_init();
if (GC_debugging_started) GC_print_all_smashed();
GC_INVOKE_FINALIZERS();
LOCK();
+ DISABLE_CANCEL(cancel_state);
# ifdef USE_MUNMAP
old_unmap_threshold = GC_unmap_threshold;
if (GC_force_unmap_on_gcollect && old_unmap_threshold > 0)
# ifdef USE_MUNMAP
GC_unmap_threshold = old_unmap_threshold; /* restore */
# endif
+ RESTORE_CANCEL(cancel_state);
UNLOCK();
if(result) {
if (GC_debugging_started) GC_print_all_smashed();
void GC_add_to_our_memory(ptr_t p, size_t bytes)
{
if (0 == p) return;
+ if (GC_n_memory >= MAX_HEAP_SECTS)
+ ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
GC_our_memory[GC_n_memory].hs_start = p;
GC_our_memory[GC_n_memory].hs_bytes = bytes;
GC_n_memory++;
GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
{
GC_bool gc_not_stopped = TRUE;
+ IF_CANCEL(int cancel_state;)
+
+ DISABLE_CANCEL(cancel_state);
if (GC_incremental || GC_dont_gc ||
((!GC_dont_expand || GC_bytes_allocd == 0) && !GC_should_collect()) ||
(gc_not_stopped = GC_try_to_collect_inner(GC_bytes_allocd > 0 ?
" Returning NIL!\n",
(GC_heapsize - GC_unmapped_bytes) >> 20);
# endif
+ RESTORE_CANCEL(cancel_state);
return(FALSE);
}
} else {
}
}
}
+ RESTORE_CANCEL(cancel_state);
return(TRUE);
}
# endif /* Need to handle address faults. */
+/* Some convenience macros for cancellation support. */
+# if defined(CANCEL_SAFE)
+# if defined(GC_ASSERTIONS) && (defined(USE_COMPILER_TLS) \
+ || (defined(LINUX) && !defined(ARM32) \
+ && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) \
+ || defined(HPUX) /* and probably others ... */))
+ extern __thread unsigned char GC_cancel_disable_count;
+# define NEED_CANCEL_DISABLE_COUNT
+# define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count
+# define DECR_CANCEL_DISABLE() --GC_cancel_disable_count
+# define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0)
+# else
+# define INCR_CANCEL_DISABLE()
+# define DECR_CANCEL_DISABLE()
+# define ASSERT_CANCEL_DISABLED()
+# endif /* GC_ASSERTIONS & ... */
+# define DISABLE_CANCEL(state) \
+ { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
+ INCR_CANCEL_DISABLE(); }
+# define RESTORE_CANCEL(state) \
+ { ASSERT_CANCEL_DISABLED(); \
+ pthread_setcancelstate(state, NULL); \
+ DECR_CANCEL_DISABLE(); }
+# else /* !CANCEL_SAFE */
+# define DISABLE_CANCEL(state)
+# define RESTORE_CANCEL(state)
+# define ASSERT_CANCEL_DISABLED()
+# endif /* !CANCEL_SAFE */
+
# endif /* GC_PRIVATE_H */
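[Editorial note: to make the intended discipline concrete, here is a minimal sketch (illustration only, not part of the patch) of how GC-internal code is expected to use the macros defined above; do_blocking_work is a hypothetical placeholder:

    extern void do_blocking_work(void);   /* hypothetical */

    /* Sketch: bracketing a region that may contain cancellation points. */
    void example_internal_op(void)
    {
        IF_CANCEL(int cancel_state;)      /* declared only when CANCEL_SAFE */

        DISABLE_CANCEL(cancel_state);     /* save old state, disable cancellation */
        do_blocking_work();               /* may hit a deferred cancellation point */
        RESTORE_CANCEL(cancel_state);     /* assert still disabled, restore state */
    }

With CANCEL_SAFE undefined, all three macros compile away and the function reduces to the plain call.]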
# define THREADS
# endif
+# if defined(UNIX_LIKE) && defined(THREADS) && !defined(NO_CANCEL_SAFE)
+ /* Make the code cancellation-safe. This basically means that we */
+ /* ensure that cancellation requests are ignored while we are in */
+ /* the collector. This applies only to Posix deferred cancellation;*/
+ /* we don't handle Posix asynchronous cancellation. */
+ /* Note that this only works if pthread_setcancelstate is */
+ /* async-signal-safe, at least in the absence of asynchronous */
+ /* cancellation. This appears to be true for the glibc version, */
+ /* though it is not documented. Without that assumption, there */
+ /* seems to be no way to safely wait in a signal handler, which */
+ /* we need to do for thread suspension. */
+ /* Also note that little other code appears to be cancellation-safe.*/
+ /* Hence it may make sense to turn this off for performance. */
+# define CANCEL_SAFE
+# define IF_CANCEL(x) x
+# else
+# define IF_CANCEL(x)
+# endif
+
# if !defined(USE_MARK_BITS) && !defined(USE_MARK_BYTES)
# if defined(THREADS) && defined(PARALLEL_MARK)
# define USE_MARK_BYTES
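[Editorial note: for reference, a sketch of the underlying Posix calls, with nothing GC-specific assumed. When CANCEL_SAFE is defined, DISABLE_CANCEL and RESTORE_CANCEL reduce to the standard save/restore idiom around pthread_setcancelstate:

    #include <pthread.h>

    void posix_level_equivalent(void)
    {
        int old_state;

        /* Roughly what DISABLE_CANCEL(state) expands to: */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_state);
        /* ... code that must not be cancelled, even at deferred   */
        /* cancellation points such as read() or sem_wait() ...    */
        /* Roughly what RESTORE_CANCEL(state) expands to: */
        pthread_setcancelstate(old_state, NULL);
    }
]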
# define GC_REGISTER_MAIN_STATIC_DATA() TRUE
#endif
+#ifdef NEED_CANCEL_DISABLE_COUNT
+ __thread unsigned char GC_cancel_disable_count = 0;
+#endif
+
GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
# else
word initial_heap_sz = (word)MINHINCR;
# endif
+ IF_CANCEL(int cancel_state;)
if (GC_is_initialized) return;
+ DISABLE_CANCEL(cancel_state);
/* Note that although we are nominally called with the */
/* allocation lock held, the allocation lock is now */
/* only really acquired once a second thread is forked.*/
GC_ASSERT(!GC_need_to_lock);
# endif
# if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
- if (!GC_is_initialized) {
+ {
# ifndef MSWINCE
BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
HMODULE hK32 = GetModuleHandle(TEXT("kernel32.dll"));
else
# endif /* !MSWINCE */
/* else */ InitializeCriticalSection (&GC_allocate_ml);
- }
+ }
# endif /* GC_WIN32_THREADS */
# if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
InitializeCriticalSection(&GC_write_cs);
GC_init_dyld();
}
# endif
+ RESTORE_CANCEL(cancel_state);
}
GC_API void GC_CALL GC_enable_incremental(void)
{
int bytes_written = 0;
int result;
+ IF_CANCEL(int cancel_state;)
+ DISABLE_CANCEL(cancel_state);
while (bytes_written < len) {
# ifdef GC_SOLARIS_THREADS
result = syscall(SYS_write, fd, buf + bytes_written,
# else
result = write(fd, buf + bytes_written, len - bytes_written);
# endif
- if (-1 == result) return(result);
+ if (-1 == result) {
+ RESTORE_CANCEL(cancel_state);
+ return(result);
+ }
bytes_written += result;
}
+ RESTORE_CANCEL(cancel_state);
return(bytes_written);
}
#endif /* UN*X */
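[Editorial note: the shape of the GC_write change can be seen in isolation in this sketch (illustration only, a standalone retry loop rather than the patch's code), which keeps the thread non-cancellable so that write(), a cancellation point, cannot abandon the buffer half-written:

    #include <pthread.h>
    #include <unistd.h>

    /* Sketch: cancellation-safe retrying write. */
    static ssize_t write_all(int fd, const char *buf, size_t len)
    {
        size_t done = 0;
        int cancel_state;

        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
        while (done < len) {
            ssize_t result = write(fd, buf + done, len - done);
            if (result == -1) {
                pthread_setcancelstate(cancel_state, NULL);
                return -1;
            }
            done += (size_t)result;
        }
        pthread_setcancelstate(cancel_state, NULL);
        return (ssize_t)done;
    }
]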
ssize_t num_read = 0;
ssize_t result;
+ ASSERT_CANCEL_DISABLED();
while (num_read < count) {
result = READ(fd, buf + num_read, count - num_read);
if (result < 0) return result;
/* least the main thread. */
LOCK();
{
- ptr_t bsp = GC_save_regs_in_stack();
- ptr_t next_stack = GC_greatest_stack_base_below(bsp);
+ IF_CANCEL(int cancel_state;)
+ ptr_t bsp;
+ ptr_t next_stack;
+
+ DISABLE_CANCEL(cancel_state);
+ bsp = GC_save_regs_in_stack();
+ next_stack = GC_greatest_stack_base_below(bsp);
if (0 == next_stack) {
b -> reg_base = GC_find_limit(bsp, FALSE);
} else {
/* growing it. */
b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
}
+ RESTORE_CANCEL(cancel_state);
}
UNLOCK();
# endif
{
# ifdef NEED_FIND_LIMIT
int dummy;
+ IF_CANCEL(int cancel_state;)
+ DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
# ifdef STACK_GROWS_DOWN
b -> mem_base = GC_find_limit((ptr_t)(&dummy), TRUE);
# ifdef IA64
# else
b -> mem_base = GC_find_limit(&dummy, FALSE);
# endif
+ RESTORE_CANCEL(cancel_state);
return GC_SUCCESS;
# else
return GC_UNIMPLEMENTED;
int dummy;
pthread_t my_thread = pthread_self();
GC_thread me;
+ IF_CANCEL(int cancel_state;)
AO_t my_stop_count = AO_load(&GC_stop_count);
if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
+ DISABLE_CANCEL(cancel_state);
+ /* pthread_setcancelstate is not defined to be async-signal-safe. */
+ /* But the glibc version appears to be in the absence of */
+  /* asynchronous cancellation.  And since this signal handler needs */
+ /* to block on sigsuspend, which is both async-signal-safe */
+ /* and a cancellation point, there seems to be no obvious way */
+ /* out of it. In fact, it looks to me like an async-signal-safe */
+ /* cancellation point is inherently a problem, unless there is */
+ /* some way to disable cancellation in the handler. */
# if DEBUG_THREADS
GC_printf("Suspending 0x%x\n", (unsigned)my_thread);
# endif
if (!GC_retry_signals) {
WARN("Duplicate suspend signal in thread %p\n", pthread_self());
}
+ RESTORE_CANCEL(cancel_state);
return;
}
# ifdef SPARC
# if DEBUG_THREADS
GC_printf("Continuing 0x%x\n", (unsigned)my_thread);
# endif
+ RESTORE_CANCEL(cancel_state);
}
STATIC void GC_restart_handler(int sig)
STATIC void * GC_mark_thread(void * id)
{
word my_mark_no = 0;
+ IF_CANCEL(int cancel_state;)
+ DISABLE_CANCEL(cancel_state);
+ /* Mark threads are not cancellable; they */
+                      /* should be invisible to the client.  */
marker_sp[(word)id] = GC_approx_sp();
# ifdef IA64
marker_bsp[(word)id] = GC_save_regs_in_stack();
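[Editorial note: reduced to its essentials, this "disable once, never restore" pattern for an internal service thread looks like the following sketch (illustration, not patch code):

    #include <pthread.h>

    /* Sketch: a helper thread that opts out of cancellation for its    */
    /* whole lifetime; the saved state is intentionally never restored. */
    static void *helper_thread(void *arg)
    {
        int ignored;

        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &ignored);
        for (;;) {
            /* ... wait for work and perform marking ... */
        }
        return arg;  /* not reached */
    }
]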
STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
GC_ASSERT(I_HOLD_LOCK());
+ ASSERT_CANCEL_DISABLED();
if (GC_incremental && GC_collection_in_progress()) {
int old_gc_no = GC_gc_no;
/* should only call async-signal-safe functions, and we probably can't */
/* quite guarantee that. But we give it our best shot. (That same */
/* spec also implies that it's not safe to call the system malloc */
-/* between fork() and exec(). Thus we're doing no worse than it. */
+/* between fork() and exec(). Thus we're doing no worse than it.) */
+
+IF_CANCEL(static int fork_cancel_state;)
+                      /* Protected by the allocation lock.  */
/* Called before a fork() */
STATIC void GC_fork_prepare_proc(void)
/* Wait for an ongoing GC to finish, since we can't finish it in */
/* the (one remaining thread in) the child. */
LOCK();
+ DISABLE_CANCEL(fork_cancel_state);
+ /* Following waits may include cancellation points. */
# if defined(PARALLEL_MARK)
if (GC_parallel)
GC_wait_for_reclaim();
if (GC_parallel)
GC_release_mark_lock();
# endif
+ RESTORE_CANCEL(fork_cancel_state);
UNLOCK();
}
GC_markers = 1;
GC_parallel = FALSE;
# endif /* PARALLEL_MARK */
+ RESTORE_CANCEL(fork_cancel_state);
UNLOCK();
}
#endif /* HANDLE_FORK */
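[Editorial note: for context, fork_cancel_state is file-scope rather than a local because the prepare and parent/child callbacks registered with pthread_atfork are separate functions, so the saved state must survive between them. A sketch (illustration only, assuming standard pthread_atfork semantics):

    #include <pthread.h>

    static int fork_cancel_state;       /* shared between the handlers */

    static void prepare(void)           /* parent, immediately before fork() */
    {
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &fork_cancel_state);
        /* acquire locks, wait for any GC in progress, ... */
    }

    static void parent_or_child(void)   /* after fork() returns */
    {
        /* release locks, ... */
        pthread_setcancelstate(fork_cancel_state, NULL);
    }

    /* Registered once, e.g. at initialization:                   */
    /* pthread_atfork(prepare, parent_or_child, parent_or_child); */
]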
GC_API int GC_CALL GC_unregister_my_thread(void)
{
GC_thread me;
+ IF_CANCEL(int cancel_state;)
LOCK();
+ DISABLE_CANCEL(cancel_state);
/* Wait for any GC that may be marking from our stack to */
/* complete before we remove this thread. */
GC_wait_for_gc_completion(FALSE);
# if defined(THREAD_LOCAL_ALLOC)
GC_remove_specific(GC_thread_key);
# endif
+ RESTORE_CANCEL(cancel_state);
UNLOCK();
return GC_SUCCESS;
}
/* with it. Thus it doesn't matter whether it is otherwise */
/* visible to the collector. */
if (0 == result) {
+ IF_CANCEL(int cancel_state;)
+ DISABLE_CANCEL(cancel_state);
+ /* pthread_create is not a cancellation point. */
while (0 != sem_wait(&(si -> registered))) {
if (EINTR != errno) ABORT("sem_wait failed");
}
+ RESTORE_CANCEL(cancel_state);
}
sem_destroy(&(si -> registered));
LOCK();
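[Editorial note: since sem_wait() is a cancellation point while pthread_create() is specified not to be (as the new comment notes), the problem and its fix reduce to a sketch like this (illustration only; ABORT is replaced by abort()):

    #include <errno.h>
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdlib.h>

    /* Sketch: wait on a semaphore without letting the wait become */
    /* a cancellation point for the caller.                        */
    static void wait_for_registration(sem_t *registered)
    {
        int cancel_state;

        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
        while (sem_wait(registered) != 0) {
            if (errno != EINTR) abort();
        }
        pthread_setcancelstate(cancel_state, NULL);
    }
]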
STATIC void GC_wait_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
+ ASSERT_CANCEL_DISABLED();
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
void GC_wait_marker(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
+ ASSERT_CANCEL_DISABLED();
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif