From: hboehm Date: Sat, 26 Sep 2009 00:06:51 +0000 (+0000) Subject: 2009-09-25 Hans Boehm X-Git-Tag: gc7_2alpha4~94 X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=39cc5f4743cdf84f473507d8cb9ada62e69c81fb;p=gc 2009-09-25 Hans Boehm * include/private/gcconfig.h (CANCEL_SAFE, IF_CANCEL): new macros. * include/private/gc_priv.h (DISABLE_CANCEL, RESTORE_CANCEL, ASSERT_CANCEL_DISABLED): New macros. * alloc.c (GC_maybe_gc): Assert cancellation disabled. (GC_collect_a_little_inner,GC_try_to_collect, GC_collect_or_expand): Disable cancellation. (GC_add_to_our_memory): Check for overflow. * misc.c (GC_cancel_disable_count): declare. (GC_init, GC_write): Disable cancellation. (GC_init): Remove redundant GC_is_initialized test. * os_dep.c (GC_repeat_read): Assert cancellation disabled. (GC_get_stack_base): Disable cancellation. * pthread_stop_world.c (GC_suspend_handler_inner): Disable cancellation. * pthread_support.c (GC_mark_thread): Permanently disable cancellation. (GC_wait_for_gc_completion, GC_wait_builder, GC_wait_marker): Assert cancellation disabled. (fork handling): Disable cancellation, fix comment. (GC_pthread_create): Disable cancellation. (GC_unregister_my_thread): Disable cancellation. * Makefile.direct: Document NO_CANCEL_SAFE. --- diff --git a/ChangeLog b/ChangeLog index 6a8d74c8..cfb5e05b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,29 @@ +2009-09-25 Hans Boehm + + * include/private/gcconfig.h (CANCEL_SAFE, IF_CANCEL): new macros. + * include/private/gc_priv.h (DISABLE_CANCEL, RESTORE_CANCEL, + ASSERT_CANCEL_DISABLED): New macros. + * alloc.c (GC_maybe_gc): Assert cancellation disabled. + (GC_collect_a_little_inner,GC_try_to_collect, GC_collect_or_expand): + Disable cancellation. + (GC_add_to_our_memory): Check for overflow. + * misc.c (GC_cancel_disable_count): declare. + (GC_init, GC_write): Disable cancellation. + (GC_init): Remove redundant GC_is_initialized test. + * os_dep.c (GC_repeat_read): Assert cancellation disabled. 
+ (GC_get_stack_base): Disable cancellation. + * pthread_stop_world.c (GC_suspend_handler_inner): Disable + cancellation. + * pthread_support.c (GC_mark_thread): Permanently disable + cancellation. + (GC_wait_for_gc_completion, GC_wait_builder, GC_wait_marker): + Assert cancellation disabled. + (fork handling): Disable cancellation, fix comment. + (GC_pthread_create): Disable cancellation. + (GC_unregister_my_thread): Disable cancellation. + * Makefile.direct: Document NO_CANCEL_SAFE. + + 2009-09-25 Ivan Maidanski * Makefile: Remove outdated file (Makefile.direct should be used diff --git a/Makefile.direct b/Makefile.direct index ba1ca66f..e3c79afb 100644 --- a/Makefile.direct +++ b/Makefile.direct @@ -374,6 +374,12 @@ HOSTCFLAGS=$(CFLAGS) # collection while leaving generational collection enabled). # -DGC_FULL_FREQ= Set alternate default number of partial collections # between full collections (matters only if incremental collection is on). +# -DNO_CANCEL_SAFE (posix platforms with threads only) Don't bother trying +# to make the collector safe for thread cancellation; cancellation is +# not used. (Note that if cancellation is used anyway, threads may end +# up getting cancelled in unexpected places.) Even without this option, +# PTHREAD_CANCEL_ASYNCHRONOUS is never safe with the collector. (We could +# argue about its safety without the collector.) # CXXFLAGS= $(CFLAGS) diff --git a/alloc.c b/alloc.c index 3b40f74f..12fe428f 100644 --- a/alloc.c +++ b/alloc.c @@ -310,6 +310,7 @@ STATIC void GC_maybe_gc(void) static int n_partial_gcs = 0; GC_ASSERT(I_HOLD_LOCK()); + ASSERT_CANCEL_DISABLED(); if (GC_should_collect()) { if (!GC_incremental) { /* FIXME: If possible, GC_default_stop_func should be used here */ @@ -373,6 +374,7 @@ GC_bool GC_try_to_collect_inner(GC_stop_func stop_func) CLOCK_TYPE start_time = 0; /* initialized to prevent warning. 
*/ CLOCK_TYPE current_time; # endif + ASSERT_CANCEL_DISABLED(); if (GC_dont_gc) return FALSE; if (GC_incremental && GC_collection_in_progress()) { if (GC_print_stats) { @@ -461,8 +463,10 @@ STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some */ void GC_collect_a_little_inner(int n) { int i; - + IF_CANCEL(int cancel_state;) + if (GC_dont_gc) return; + DISABLE_CANCEL(cancel_state); if (GC_incremental && GC_collection_in_progress()) { for (i = GC_deficit; i < GC_RATE*n; i++) { if (GC_mark_some((ptr_t)0)) { @@ -497,6 +501,7 @@ void GC_collect_a_little_inner(int n) } else { GC_maybe_gc(); } + RESTORE_CANCEL(cancel_state); } GC_API int GC_CALL GC_collect_a_little(void) @@ -904,6 +909,7 @@ GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func) # ifdef USE_MUNMAP int old_unmap_threshold; # endif + IF_CANCEL(int cancel_state;) DCL_LOCK_STATE; if (!GC_is_initialized) GC_init(); @@ -911,6 +917,7 @@ GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func) if (GC_debugging_started) GC_print_all_smashed(); GC_INVOKE_FINALIZERS(); LOCK(); + DISABLE_CANCEL(cancel_state); # ifdef USE_MUNMAP old_unmap_threshold = GC_unmap_threshold; if (GC_force_unmap_on_gcollect && old_unmap_threshold > 0) @@ -924,6 +931,7 @@ GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func) # ifdef USE_MUNMAP GC_unmap_threshold = old_unmap_threshold; /* restore */ # endif + RESTORE_CANCEL(cancel_state); UNLOCK(); if(result) { if (GC_debugging_started) GC_print_all_smashed(); @@ -951,6 +959,8 @@ word GC_n_heap_sects = 0; /* Number of sections currently in heap. 
*/ void GC_add_to_our_memory(ptr_t p, size_t bytes) { if (0 == p) return; + if (GC_n_memory >= MAX_HEAP_SECTS) + ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS"); GC_our_memory[GC_n_memory].hs_start = p; GC_our_memory[GC_n_memory].hs_bytes = bytes; GC_n_memory++; @@ -1149,6 +1159,9 @@ unsigned GC_fail_count = 0; GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page) { GC_bool gc_not_stopped = TRUE; + IF_CANCEL(int cancel_state;) + + DISABLE_CANCEL(cancel_state); if (GC_incremental || GC_dont_gc || ((!GC_dont_expand || GC_bytes_allocd == 0) && !GC_should_collect()) || (gc_not_stopped = GC_try_to_collect_inner(GC_bytes_allocd > 0 ? @@ -1191,6 +1204,7 @@ GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page) " Returning NIL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20); # endif + RESTORE_CANCEL(cancel_state); return(FALSE); } } else { @@ -1199,6 +1213,7 @@ GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page) } } } + RESTORE_CANCEL(cancel_state); return(TRUE); } diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h index 69c26b59..1f4dbb2d 100644 --- a/include/private/gc_priv.h +++ b/include/private/gc_priv.h @@ -2149,4 +2149,33 @@ extern void GC_reset_fault_handler(void); # endif /* Need to handle address faults. */ +/* Some convenience macros for cancellation support. */ +# if defined(CANCEL_SAFE) +# if defined(GC_ASSERTIONS) && (defined(USE_COMPILER_TLS) \ + || (defined(LINUX) && !defined(ARM32) \ + && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) \ + || defined(HPUX) /* and probably others ... 
*/)) + extern __thread unsigned char GC_cancel_disable_count; +# define NEED_CANCEL_DISABLE_COUNT +# define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count +# define DECR_CANCEL_DISABLE() --GC_cancel_disable_count +# define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0) +# else +# define INCR_CANCEL_DISABLE() +# define DECR_CANCEL_DISABLE() +# define ASSERT_CANCEL_DISABLED() +# endif /* GC_ASSERTIONS & ... */ +# define DISABLE_CANCEL(state) \ + { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \ + INCR_CANCEL_DISABLE(); } +# define RESTORE_CANCEL(state) \ + { ASSERT_CANCEL_DISABLED(); \ + pthread_setcancelstate(state, NULL); \ + DECR_CANCEL_DISABLE(); } +# else /* !CANCEL_SAFE */ +# define DISABLE_CANCEL(state) +# define RESTORE_CANCEL(state) +# define ASSERT_CANCEL_DISABLED() +# endif /* !CANCEL_SAFE */ + # endif /* GC_PRIVATE_H */ diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h index 896e2fc1..69bec443 100644 --- a/include/private/gcconfig.h +++ b/include/private/gcconfig.h @@ -2230,6 +2230,25 @@ # define THREADS # endif +# if defined(UNIX_LIKE) && defined(THREADS) && !defined(NO_CANCEL_SAFE) + /* Make the code cancellation-safe. This basically means that we */ + /* ensure that cancellation requests are ignored while we are in */ + /* the collector. This applies only to Posix deferred cancellation;*/ + /* we don't handle Posix asynchronous cancellation. */ + /* Note that this only works if pthread_setcancelstate is */ + /* async-signal-safe, at least in the absence of asynchronous */ + /* cancellation. This appears to be true for the glibc version, */ + /* though it is not documented. Without that assumption, there */ + /* seems to be no way to safely wait in a signal handler, which */ + /* we need to do for thread suspension. */ + /* Also note that little other code appears to be cancellation-safe.*/ + /* Hence it may make sense to turn this off for performance. 
*/ +# define CANCEL_SAFE +# define IF_CANCEL(x) x +# else +# define IF_CANCEL(x) +# endif + # if !defined(USE_MARK_BITS) && !defined(USE_MARK_BYTES) # if defined(THREADS) && defined(PARALLEL_MARK) # define USE_MARK_BYTES diff --git a/misc.c b/misc.c index 54352fbd..6db7dbd2 100644 --- a/misc.c +++ b/misc.c @@ -62,6 +62,10 @@ # define GC_REGISTER_MAIN_STATIC_DATA() TRUE #endif +#ifdef NEED_CANCEL_DISABLE_COUNT + __thread unsigned char GC_cancel_disable_count = 0; +#endif + GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */; @@ -523,9 +527,11 @@ GC_API void GC_CALL GC_init(void) # else word initial_heap_sz = (word)MINHINCR; # endif + IF_CANCEL(int cancel_state;) if (GC_is_initialized) return; + DISABLE_CANCEL(cancel_state); /* Note that although we are nominally called with the */ /* allocation lock held, the allocation lock is now */ /* only really acquired once a second thread is forked.*/ @@ -536,7 +542,7 @@ GC_API void GC_CALL GC_init(void) GC_ASSERT(!GC_need_to_lock); # endif # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) - if (!GC_is_initialized) { + { # ifndef MSWINCE BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL; HMODULE hK32 = GetModuleHandle(TEXT("kernel32.dll")); @@ -549,7 +555,7 @@ GC_API void GC_CALL GC_init(void) else # endif /* !MSWINCE */ /* else */ InitializeCriticalSection (&GC_allocate_ml); - } + } # endif /* GC_WIN32_THREADS */ # if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS) InitializeCriticalSection(&GC_write_cs); @@ -864,6 +870,7 @@ GC_API void GC_CALL GC_init(void) GC_init_dyld(); } # endif + RESTORE_CANCEL(cancel_state); } GC_API void GC_CALL GC_enable_incremental(void) @@ -1045,7 +1052,9 @@ STATIC int GC_write(int fd, const char *buf, size_t len) { int bytes_written = 0; int result; + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); while (bytes_written < len) { # ifdef GC_SOLARIS_THREADS result = syscall(SYS_write, fd, buf + bytes_written, @@ -1053,9 +1062,13 @@ STATIC int GC_write(int fd, const 
char *buf, size_t len) # else result = write(fd, buf + bytes_written, len - bytes_written); # endif - if (-1 == result) return(result); + if (-1 == result) { + RESTORE_CANCEL(cancel_state); + return(result); + } bytes_written += result; } + RESTORE_CANCEL(cancel_state); return(bytes_written); } #endif /* UN*X */ diff --git a/os_dep.c b/os_dep.c index efcb1664..daa0703c 100644 --- a/os_dep.c +++ b/os_dep.c @@ -156,6 +156,7 @@ ssize_t GC_repeat_read(int fd, char *buf, size_t count) ssize_t num_read = 0; ssize_t result; + ASSERT_CANCEL_DISABLED(); while (num_read < count) { result = READ(fd, buf + num_read, count - num_read); if (result < 0) return result; @@ -1110,8 +1111,13 @@ GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b) /* least the main thread. */ LOCK(); { - ptr_t bsp = GC_save_regs_in_stack(); - ptr_t next_stack = GC_greatest_stack_base_below(bsp); + IF_CANCEL(int cancel_state;) + ptr_t bsp; + ptr_t next_stack; + + DISABLE_CANCEL(cancel_state); + bsp = GC_save_regs_in_stack(); + next_stack = GC_greatest_stack_base_below(bsp); if (0 == next_stack) { b -> reg_base = GC_find_limit(bsp, FALSE); } else { @@ -1119,6 +1125,7 @@ GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b) /* growing it. */ b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack); } + RESTORE_CANCEL(cancel_state); } UNLOCK(); # endif @@ -1141,6 +1148,8 @@ GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b) { # ifdef NEED_FIND_LIMIT int dummy; + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); /* May be unnecessary? 
*/ # ifdef STACK_GROWS_DOWN b -> mem_base = GC_find_limit((ptr_t)(&dummy), TRUE); # ifdef IA64 @@ -1149,6 +1158,7 @@ GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b) # else b -> mem_base = GC_find_limit(&dummy, FALSE); # endif + RESTORE_CANCEL(cancel_state); return GC_SUCCESS; # else return GC_UNIMPLEMENTED; diff --git a/pthread_stop_world.c b/pthread_stop_world.c index 694a381e..08363ffb 100644 --- a/pthread_stop_world.c +++ b/pthread_stop_world.c @@ -169,11 +169,21 @@ STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context) int dummy; pthread_t my_thread = pthread_self(); GC_thread me; + IF_CANCEL(int cancel_state;) AO_t my_stop_count = AO_load(&GC_stop_count); if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler"); + DISABLE_CANCEL(cancel_state); + /* pthread_setcancelstate is not defined to be async-signal-safe. */ + /* But the glibc version appears to be in the absence of */ + /* asynchronous cancellation. And since this signal handler */ + /* needs to block on sigsuspend, which is both async-signal-safe */ + /* and a cancellation point, there seems to be no obvious way */ + /* out of it. In fact, it looks to me like an async-signal-safe */ + /* cancellation point is inherently a problem, unless there is */ + /* some way to disable cancellation in the handler. 
*/ # if DEBUG_THREADS GC_printf("Suspending 0x%x\n", (unsigned)my_thread); # endif @@ -188,6 +198,7 @@ STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context) if (!GC_retry_signals) { WARN("Duplicate suspend signal in thread %p\n", pthread_self()); } + RESTORE_CANCEL(cancel_state); return; } # ifdef SPARC @@ -230,6 +241,7 @@ STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context) # if DEBUG_THREADS GC_printf("Continuing 0x%x\n", (unsigned)my_thread); # endif + RESTORE_CANCEL(cancel_state); } STATIC void GC_restart_handler(int sig) diff --git a/pthread_support.c b/pthread_support.c index 5bf0c7ed..d216f770 100644 --- a/pthread_support.c +++ b/pthread_support.c @@ -281,7 +281,11 @@ static ptr_t marker_sp[MAX_MARKERS - 1] = {0}; STATIC void * GC_mark_thread(void * id) { word my_mark_no = 0; + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); + /* Mark threads are not cancellable; they */ + /* should be invisible to client. */ marker_sp[(word)id] = GC_approx_sp(); # ifdef IA64 marker_bsp[(word)id] = GC_save_regs_in_stack(); @@ -632,6 +636,7 @@ extern GC_bool GC_collection_in_progress(void); STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all) { GC_ASSERT(I_HOLD_LOCK()); + ASSERT_CANCEL_DISABLED(); if (GC_incremental && GC_collection_in_progress()) { int old_gc_no = GC_gc_no; @@ -658,7 +663,10 @@ STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all) /* should only call async-signal-safe functions, and we probably can't */ /* quite guarantee that. But we give it our best shot. (That same */ /* spec also implies that it's not safe to call the system malloc */ -/* between fork() and exec(). Thus we're doing no worse than it. */ +/* between fork() and exec(). Thus we're doing no worse than it.) */ + +IF_CANCEL(static int fork_cancel_state;) + /* protected by allocation lock. 
*/ /* Called before a fork() */ STATIC void GC_fork_prepare_proc(void) @@ -671,6 +679,8 @@ STATIC void GC_fork_prepare_proc(void) /* Wait for an ongoing GC to finish, since we can't finish it in */ /* the (one remaining thread in) the child. */ LOCK(); + DISABLE_CANCEL(fork_cancel_state); + /* Following waits may include cancellation points. */ # if defined(PARALLEL_MARK) if (GC_parallel) GC_wait_for_reclaim(); @@ -689,6 +699,7 @@ STATIC void GC_fork_parent_proc(void) if (GC_parallel) GC_release_mark_lock(); # endif + RESTORE_CANCEL(fork_cancel_state); UNLOCK(); } @@ -707,6 +718,7 @@ STATIC void GC_fork_child_proc(void) GC_markers = 1; GC_parallel = FALSE; # endif /* PARALLEL_MARK */ + RESTORE_CANCEL(fork_cancel_state); UNLOCK(); } #endif /* HANDLE_FORK */ @@ -1013,8 +1025,10 @@ struct start_info { GC_API int GC_CALL GC_unregister_my_thread(void) { GC_thread me; + IF_CANCEL(int cancel_state;) LOCK(); + DISABLE_CANCEL(cancel_state); /* Wait for any GC that may be marking from our stack to */ /* complete before we remove this thread. */ GC_wait_for_gc_completion(FALSE); @@ -1030,6 +1044,7 @@ GC_API int GC_CALL GC_unregister_my_thread(void) # if defined(THREAD_LOCAL_ALLOC) GC_remove_specific(GC_thread_key); # endif + RESTORE_CANCEL(cancel_state); UNLOCK(); return GC_SUCCESS; } @@ -1309,9 +1324,13 @@ GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread, /* with it. Thus it doesn't matter whether it is otherwise */ /* visible to the collector. */ if (0 == result) { + IF_CANCEL(int cancel_state;) + DISABLE_CANCEL(cancel_state); + /* pthread_create is not a cancellation point. 
*/ while (0 != sem_wait(&(si -> registered))) { if (EINTR != errno) ABORT("sem_wait failed"); } + RESTORE_CANCEL(cancel_state); } sem_destroy(&(si -> registered)); LOCK(); @@ -1553,6 +1572,7 @@ void GC_release_mark_lock(void) STATIC void GC_wait_builder(void) { GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self())); + ASSERT_CANCEL_DISABLED(); # ifdef GC_ASSERTIONS GC_mark_lock_holder = NO_THREAD; # endif @@ -1587,6 +1607,7 @@ static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER; void GC_wait_marker(void) { GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self())); + ASSERT_CANCEL_DISABLED(); # ifdef GC_ASSERTIONS GC_mark_lock_holder = NO_THREAD; # endif