2 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
18 #include "private/pthread_support.h"
20 #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && \
21 !defined(GC_DARWIN_THREADS) && !defined(SN_TARGET_ORBIS) \
22 && !defined(SN_TARGET_PSP2)
27 # include <sys/time.h>
29 STATIC int GC_nacl_num_gc_threads = 0;
30 STATIC __thread int GC_nacl_thread_idx = -1;
31 STATIC volatile int GC_nacl_park_threads_now = 0;
32 STATIC volatile pthread_t GC_nacl_thread_parker = -1;
34 GC_INNER __thread GC_thread GC_nacl_gc_thread_self = NULL;
36 volatile int GC_nacl_thread_parked[MAX_NACL_GC_THREADS];
37 int GC_nacl_thread_used[MAX_NACL_GC_THREADS];
39 #elif defined(GC_OPENBSD_UTHREADS)
41 # include <pthread_np.h>
43 #else /* !GC_OPENBSD_UTHREADS && !NACL */
46 #include <semaphore.h>
48 #include <time.h> /* for nanosleep() */
51 #if (!defined(AO_HAVE_load_acquire) || !defined(AO_HAVE_store_release)) \
53 # error AO_load_acquire and/or AO_store_release are missing;
54 # error please define AO_REQUIRE_CAS manually
57 /* It's safe to call original pthread_sigmask() here. */
58 #undef pthread_sigmask
60 #ifdef GC_ENABLE_SUSPEND_THREAD
61 static void *GC_CALLBACK suspend_self_inner(void *client_data);
67 # define NSIG (MAXSIG+1)
70 # elif defined(__SIGRTMAX)
71 # define NSIG (__SIGRTMAX+1)
#ifdef DEBUG_THREADS
  /* Log every signal currently blocked in the calling thread.  */
  /* Debug-only helper (DEBUG_THREADS); assumes it is safe to   */
  /* call the original pthread_sigmask() here.                  */
  void GC_print_sig_mask(void)
  {
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
      ABORT("pthread_sigmask failed");
    for (i = 1; i < NSIG; i++) {
      if (sigismember(&blocked, i))
        GC_printf("Signal blocked: %d\n", i);
    }
  }
#endif /* DEBUG_THREADS */
91 /* Remove the signals that we want to allow in thread stopping */
92 /* handler from a set. */
93 STATIC void GC_remove_allowed_signals(sigset_t *set)
95 if (sigdelset(set, SIGINT) != 0
96 || sigdelset(set, SIGQUIT) != 0
97 || sigdelset(set, SIGABRT) != 0
98 || sigdelset(set, SIGTERM) != 0) {
99 ABORT("sigdelset failed");
103 /* Handlers write to the thread structure, which is in the heap, */
104 /* and hence can trigger a protection fault. */
105 if (sigdelset(set, SIGSEGV) != 0
107 || sigdelset(set, SIGBUS) != 0
110 ABORT("sigdelset failed");
115 static sigset_t suspend_handler_mask;
117 #define THREAD_RESTARTED 0x1
119 STATIC volatile AO_t GC_stop_count = 0;
120 /* Incremented by two (not to alter */
121 /* THREAD_RESTARTED bit) at the beginning of */
124 STATIC volatile AO_t GC_world_is_stopped = FALSE;
125 /* FALSE ==> it is safe for threads to restart, */
126 /* i.e. they will see another suspend signal */
127 /* before they are expected to stop (unless */
128 /* they have stopped voluntarily). */
130 #if defined(GC_OSF1_THREADS) || defined(THREAD_SANITIZER) \
131 || defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)
132 STATIC GC_bool GC_retry_signals = TRUE;
134 STATIC GC_bool GC_retry_signals = FALSE;
138 * We use signals to stop threads during GC.
140 * Suspended threads wait in signal handler for SIG_THR_RESTART.
141 * That's more portable than semaphores or condition variables.
142 * (We do use sem_post from a signal handler, but that should be portable.)
144 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
145 * Note that we can't just stop a thread; we need it to save its stack
146 * pointer(s) and acknowledge.
148 #ifndef SIG_THR_RESTART
149 # if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) \
150 || defined(GC_NETBSD_THREADS) || defined(GC_USESIGRT_SIGNALS)
151 # if defined(_SIGRTMIN) && !defined(CPPCHECK)
152 # define SIG_THR_RESTART _SIGRTMIN + 5
154 # define SIG_THR_RESTART SIGRTMIN + 5
157 # define SIG_THR_RESTART SIGXCPU
161 #define SIGNAL_UNSET (-1)
162 /* Since SIG_SUSPEND and/or SIG_THR_RESTART could represent */
163 /* a non-constant expression (e.g., in case of SIGRTMIN), */
164 /* actual signal numbers are determined by GC_stop_init() */
165 /* unless manually set (before GC initialization). */
166 STATIC int GC_sig_suspend = SIGNAL_UNSET;
167 STATIC int GC_sig_thr_restart = SIGNAL_UNSET;
169 GC_API void GC_CALL GC_set_suspend_signal(int sig)
171 if (GC_is_initialized) return;
173 GC_sig_suspend = sig;
176 GC_API void GC_CALL GC_set_thr_restart_signal(int sig)
178 if (GC_is_initialized) return;
180 GC_sig_thr_restart = sig;
183 GC_API int GC_CALL GC_get_suspend_signal(void)
185 return GC_sig_suspend != SIGNAL_UNSET ? GC_sig_suspend : SIG_SUSPEND;
188 GC_API int GC_CALL GC_get_thr_restart_signal(void)
190 return GC_sig_thr_restart != SIGNAL_UNSET
191 ? GC_sig_thr_restart : SIG_THR_RESTART;
194 #if defined(GC_EXPLICIT_SIGNALS_UNBLOCK) \
195 || !defined(NO_SIGNALS_UNBLOCK_IN_MAIN)
196 /* Some targets (e.g., Solaris) might require this to be called when */
197 /* doing thread registering from the thread destructor. */
198 GC_INNER void GC_unblock_gc_signals(void)
202 GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
203 GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
204 sigaddset(&set, GC_sig_suspend);
205 sigaddset(&set, GC_sig_thr_restart);
206 if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
207 ABORT("pthread_sigmask failed");
209 #endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
211 STATIC sem_t GC_suspend_ack_sem; /* also used to acknowledge restart */
213 STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context);
215 #ifndef NO_SA_SIGACTION
216 STATIC void GC_suspend_handler(int sig, siginfo_t * info GC_ATTR_UNUSED,
217 void * context GC_ATTR_UNUSED)
219 STATIC void GC_suspend_handler(int sig)
222 int old_errno = errno;
224 if (sig != GC_sig_suspend) {
225 # if defined(GC_FREEBSD_THREADS)
226 /* Workaround "deferred signal handling" bug in FreeBSD 9.2. */
227 if (0 == sig) return;
229 ABORT("Bad signal in suspend_handler");
232 # if defined(IA64) || defined(HP_PA) || defined(M68K)
233 GC_with_callee_saves_pushed(GC_suspend_handler_inner, NULL);
235 /* We believe that in all other cases the full context is already */
236 /* in the signal handler frame. */
238 # ifdef NO_SA_SIGACTION
241 GC_suspend_handler_inner(NULL, context);
247 #ifdef BASE_ATOMIC_OPS_EMULATED
248 /* The AO primitives emulated with locks cannot be used inside signal */
249 /* handlers as this could cause a deadlock or a double lock. */
250 /* The following "async" macro definitions are correct only for */
251 /* an uniprocessor case and are provided for a test purpose. */
252 # define ao_load_acquire_async(p) (*(p))
253 # define ao_load_async(p) ao_load_acquire_async(p)
254 # define ao_store_release_async(p, v) (void)(*(p) = (v))
255 # define ao_store_async(p, v) ao_store_release_async(p, v)
257 # define ao_load_acquire_async(p) AO_load_acquire(p)
258 # define ao_load_async(p) AO_load(p)
259 # define ao_store_release_async(p, v) AO_store_release(p, v)
260 # define ao_store_async(p, v) AO_store(p, v)
261 #endif /* !BASE_ATOMIC_OPS_EMULATED */
/* The lookup here is safe, since this is done on behalf      */
/* of a thread which holds the allocation lock in order       */
/* to stop the world.  Thus concurrent modification of the    */
/* data structure is impossible.  Unfortunately, we have to   */
/* instruct TSan that the lookup is safe.                     */
#ifdef THREAD_SANITIZER
  /* The implementation of the function is the same as that of  */
  /* GC_lookup_thread except for the attribute added here.      */
  GC_ATTR_NO_SANITIZE_THREAD
  static GC_thread GC_lookup_thread_async(pthread_t id)
  {
    /* Walk the hash chain for this thread id.  */
    GC_thread p = GC_threads[THREAD_TABLE_INDEX(id)];

    while (p != NULL && !THREAD_EQUAL(p->id, id))
      p = p->next;
    return p;
  }
#else
# define GC_lookup_thread_async GC_lookup_thread
#endif
284 GC_INLINE void GC_store_stack_ptr(GC_thread me)
286 /* There is no data race between the suspend handler (storing */
287 /* stack_ptr) and GC_push_all_stacks (fetching stack_ptr) because */
288 /* GC_push_all_stacks is executed after GC_stop_world exits and the */
289 /* latter runs sem_wait repeatedly waiting for all the suspended */
290 /* threads to call sem_post. Nonetheless, stack_ptr is stored (here) */
291 /* and fetched (by GC_push_all_stacks) using the atomic primitives to */
292 /* avoid the related TSan warning. */
294 ao_store_async((volatile AO_t *)&me->stop_info.stack_ptr,
295 (AO_t)GC_save_regs_in_stack());
298 me -> backing_store_ptr = GC_save_regs_in_stack();
300 ao_store_async((volatile AO_t *)&me->stop_info.stack_ptr,
301 (AO_t)GC_approx_sp());
305 STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
306 void * context GC_ATTR_UNUSED)
308 pthread_t self = pthread_self();
310 IF_CANCEL(int cancel_state;)
311 AO_t my_stop_count = ao_load_acquire_async(&GC_stop_count);
312 /* After the barrier, this thread should see */
313 /* the actual content of GC_threads. */
315 DISABLE_CANCEL(cancel_state);
316 /* pthread_setcancelstate is not defined to be async-signal-safe. */
317 /* But the glibc version appears to be in the absence of */
318 /* asynchronous cancellation. And since this signal handler */
319 /* to block on sigsuspend, which is both async-signal-safe */
320 /* and a cancellation point, there seems to be no obvious way */
321 /* out of it. In fact, it looks to me like an async-signal-safe */
322 /* cancellation point is inherently a problem, unless there is */
323 /* some way to disable cancellation in the handler. */
324 # ifdef DEBUG_THREADS
325 GC_log_printf("Suspending %p\n", (void *)self);
327 GC_ASSERT(((word)my_stop_count & THREAD_RESTARTED) == 0);
329 me = GC_lookup_thread_async(self);
331 # ifdef GC_ENABLE_SUSPEND_THREAD
332 if (ao_load_async(&me->suspended_ext)) {
333 GC_store_stack_ptr(me);
334 sem_post(&GC_suspend_ack_sem);
335 suspend_self_inner(me);
336 # ifdef DEBUG_THREADS
337 GC_log_printf("Continuing %p on GC_resume_thread\n", (void *)self);
339 RESTORE_CANCEL(cancel_state);
344 if (((word)me->stop_info.last_stop_count & ~(word)THREAD_RESTARTED)
345 == (word)my_stop_count) {
346 /* Duplicate signal. OK if we are retrying. */
347 if (!GC_retry_signals) {
348 WARN("Duplicate suspend signal in thread %p\n", self);
350 RESTORE_CANCEL(cancel_state);
353 GC_store_stack_ptr(me);
355 # ifdef THREAD_SANITIZER
356 /* TSan disables signals around signal handlers. Without */
357 /* a pthread_sigmask call, sigsuspend may block forever. */
361 GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
362 GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
363 sigaddset(&set, GC_sig_suspend);
364 sigaddset(&set, GC_sig_thr_restart);
365 if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
366 ABORT("pthread_sigmask failed in suspend handler");
369 /* Tell the thread that wants to stop the world that this */
370 /* thread has been stopped. Note that sem_post() is */
371 /* the only async-signal-safe primitive in LinuxThreads. */
372 sem_post(&GC_suspend_ack_sem);
373 ao_store_release_async(&me->stop_info.last_stop_count, my_stop_count);
375 /* Wait until that thread tells us to restart by sending */
376 /* this thread a GC_sig_thr_restart signal (should be masked */
377 /* at this point thus there is no race). */
378 /* We do not continue until we receive that signal, */
379 /* but we do not take that as authoritative. (We may be */
380 /* accidentally restarted by one of the user signals we */
381 /* don't block.) After we receive the signal, we use a */
382 /* primitive and expensive mechanism to wait until it's */
383 /* really safe to proceed. Under normal circumstances, */
384 /* this code should not be executed. */
386 sigsuspend (&suspend_handler_mask);
387 } while (ao_load_acquire_async(&GC_world_is_stopped)
388 && ao_load_async(&GC_stop_count) == my_stop_count);
390 # ifdef DEBUG_THREADS
391 GC_log_printf("Continuing %p\n", (void *)self);
393 # ifndef GC_NETBSD_THREADS_WORKAROUND
394 if (GC_retry_signals)
397 /* If the RESTART signal loss is possible (though it should be */
398 /* less likely than losing the SUSPEND signal as we do not do */
399 /* much between the first sem_post and sigsuspend calls), more */
400 /* handshaking is provided to work around it. */
401 sem_post(&GC_suspend_ack_sem);
402 # ifdef GC_NETBSD_THREADS_WORKAROUND
403 if (GC_retry_signals)
406 /* Set the flag that the thread has been restarted. */
407 ao_store_release_async(&me->stop_info.last_stop_count,
408 (AO_t)((word)my_stop_count | THREAD_RESTARTED));
411 RESTORE_CANCEL(cancel_state);
414 static void suspend_restart_barrier(int n_live_threads)
418 for (i = 0; i < n_live_threads; i++) {
419 while (0 != sem_wait(&GC_suspend_ack_sem)) {
420 /* On Linux, sem_wait is documented to always return zero. */
421 /* But the documentation appears to be incorrect. */
422 /* EINTR seems to happen with some versions of gdb. */
424 ABORT("sem_wait failed");
427 # ifdef GC_ASSERTIONS
428 sem_getvalue(&GC_suspend_ack_sem, &i);
433 static int resend_lost_signals(int n_live_threads,
434 int (*suspend_restart_all)(void))
436 # define WAIT_UNIT 3000
437 # define RETRY_INTERVAL 100000
439 if (n_live_threads > 0) {
440 unsigned long wait_usecs = 0; /* Total wait since retry. */
444 sem_getvalue(&GC_suspend_ack_sem, &ack_count);
445 if (ack_count == n_live_threads)
447 if (wait_usecs > RETRY_INTERVAL) {
448 int newly_sent = suspend_restart_all();
450 GC_COND_LOG_PRINTF("Resent %d signals after timeout\n", newly_sent);
451 sem_getvalue(&GC_suspend_ack_sem, &ack_count);
452 if (newly_sent < n_live_threads - ack_count) {
453 WARN("Lost some threads while stopping or starting world?!\n", 0);
454 n_live_threads = ack_count + newly_sent;
460 /* Workaround "waiting while holding a lock" warning. */
464 # elif defined(CPPCHECK) /* || _POSIX_C_SOURCE >= 199309L */
469 ts.tv_nsec = WAIT_UNIT * 1000;
470 (void)nanosleep(&ts, NULL);
475 wait_usecs += WAIT_UNIT;
478 return n_live_threads;
481 STATIC void GC_restart_handler(int sig)
483 # if defined(DEBUG_THREADS)
484 int old_errno = errno; /* Preserve errno value. */
487 if (sig != GC_sig_thr_restart)
488 ABORT("Bad signal in restart handler");
491 ** Note: even if we don't do anything useful here,
492 ** it would still be necessary to have a signal handler,
493 ** rather than ignoring the signals, otherwise
494 ** the signals will not be delivered at all, and
495 ** will thus not interrupt the sigsuspend() above.
497 # ifdef DEBUG_THREADS
498 GC_log_printf("In GC_restart_handler for %p\n", (void *)pthread_self());
# ifdef USE_TKILL_ON_ANDROID
    extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */

    /* Signal a thread by kernel id via tkill().  Returns 0 on       */
    /* success or the error code on failure; leaves errno unchanged. */
    static int android_thread_kill(pid_t tid, int sig)
    {
      int ret;
      int old_errno = errno;

      ret = tkill(tid, sig);
      if (ret < 0) {
        ret = errno;
        errno = old_errno;
      }
      return ret;
    }

#   define THREAD_SYSTEM_ID(t) (t)->kernel_id
#   define RAISE_SIGNAL(t, sig) android_thread_kill(THREAD_SYSTEM_ID(t), sig)
# else
#   define THREAD_SYSTEM_ID(t) (t)->id
#   define RAISE_SIGNAL(t, sig) pthread_kill(THREAD_SYSTEM_ID(t), sig)
# endif /* !USE_TKILL_ON_ANDROID */
528 # ifdef GC_ENABLE_SUSPEND_THREAD
529 # include <sys/time.h>
530 # include "javaxfc.h" /* to get the prototypes as extern "C" */
532 STATIC void GC_brief_async_signal_safe_sleep(void)
536 # if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
537 tv.tv_usec = 1000 * GC_TIME_LIMIT / 2;
539 tv.tv_usec = 1000 * 50 / 2;
541 (void)select(0, 0, 0, 0, &tv);
544 static void *GC_CALLBACK suspend_self_inner(void *client_data) {
545 GC_thread me = (GC_thread)client_data;
547 while (ao_load_acquire_async(&me->suspended_ext)) {
548 /* TODO: Use sigsuspend() instead. */
549 GC_brief_async_signal_safe_sleep();
554 GC_API void GC_CALL GC_suspend_thread(GC_SUSPEND_THREAD_ID thread) {
556 IF_CANCEL(int cancel_state;)
560 t = GC_lookup_thread((pthread_t)thread);
561 if (t == NULL || t -> suspended_ext) {
566 /* Set the flag making the change visible to the signal handler. */
567 AO_store_release(&t->suspended_ext, TRUE);
569 if (THREAD_EQUAL((pthread_t)thread, pthread_self())) {
571 /* It is safe as "t" cannot become invalid here (no race with */
572 /* GC_unregister_my_thread). */
573 (void)GC_do_blocking(suspend_self_inner, t);
576 if ((t -> flags & FINISHED) != 0) {
577 /* Terminated but not joined yet. */
582 DISABLE_CANCEL(cancel_state);
583 /* GC_suspend_thread is not a cancellation point. */
584 # ifdef PARALLEL_MARK
585 /* Ensure we do not suspend a thread while it is rebuilding */
586 /* a free list, otherwise such a dead-lock is possible: */
587 /* thread 1 is blocked in GC_wait_for_reclaim holding */
588 /* the allocation lock, thread 2 is suspended in */
589 /* GC_reclaim_generic invoked from GC_generic_malloc_many */
590 /* (with GC_fl_builder_count > 0), and thread 3 is blocked */
591 /* acquiring the allocation lock in GC_resume_thread. */
593 GC_wait_for_reclaim();
597 /* See the relevant comment in GC_stop_world. */
598 GC_acquire_dirty_lock();
600 /* Else do not acquire the lock as the write fault handler might */
601 /* be trying to acquire this lock too, and the suspend handler */
602 /* execution is deferred until the write fault handler completes. */
604 /* TODO: Support GC_retry_signals (not needed for TSan) */
605 switch (RAISE_SIGNAL(t, GC_sig_suspend)) {
606 /* ESRCH cannot happen as terminated threads are handled above. */
610 ABORT("pthread_kill failed");
613 /* Wait for the thread to complete threads table lookup and */
614 /* stack_ptr assignment. */
615 GC_ASSERT(GC_thr_initialized);
616 while (sem_wait(&GC_suspend_ack_sem) != 0) {
618 ABORT("sem_wait for handler failed (suspend_self)");
621 GC_release_dirty_lock();
622 RESTORE_CANCEL(cancel_state);
626 GC_API void GC_CALL GC_resume_thread(GC_SUSPEND_THREAD_ID thread) {
631 t = GC_lookup_thread((pthread_t)thread);
633 AO_store(&t->suspended_ext, FALSE);
637 GC_API int GC_CALL GC_is_thread_suspended(GC_SUSPEND_THREAD_ID thread) {
639 int is_suspended = 0;
643 t = GC_lookup_thread((pthread_t)thread);
644 if (t != NULL && t -> suspended_ext)
645 is_suspended = (int)TRUE;
649 # endif /* GC_ENABLE_SUSPEND_THREAD */
651 # undef ao_load_acquire_async
652 # undef ao_load_async
653 # undef ao_store_async
654 # undef ao_store_release_async
655 #endif /* !GC_OPENBSD_UTHREADS && !NACL */
658 # define IF_IA64(x) x
662 /* We hold allocation lock. Should do exactly the right thing if the */
663 /* world is stopped. Should not fail if it isn't. */
664 GC_INNER void GC_push_all_stacks(void)
666 GC_bool found_me = FALSE;
671 /* On IA64, we also need to scan the register backing store. */
672 IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
673 struct GC_traced_stack_sect_s *traced_stack_sect;
674 pthread_t self = pthread_self();
677 if (!EXPECT(GC_thr_initialized, TRUE))
679 # ifdef DEBUG_THREADS
680 GC_log_printf("Pushing stacks from thread %p\n", (void *)self);
682 for (i = 0; i < THREAD_TABLE_SZ; i++) {
683 for (p = GC_threads[i]; p != 0; p = p -> next) {
684 if (p -> flags & FINISHED) continue;
686 traced_stack_sect = p -> traced_stack_sect;
687 if (THREAD_EQUAL(p -> id, self)) {
688 GC_ASSERT(!p->thread_blocked);
690 lo = (ptr_t)GC_save_regs_in_stack();
695 IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
697 lo = (ptr_t)AO_load((volatile AO_t *)&p->stop_info.stack_ptr);
698 IF_IA64(bs_hi = p -> backing_store_ptr;)
699 if (traced_stack_sect != NULL
700 && traced_stack_sect->saved_stack_ptr == lo) {
701 /* If the thread has never been stopped since the recent */
702 /* GC_call_with_gc_active invocation then skip the top */
703 /* "stack section" as stack_ptr already points to. */
704 traced_stack_sect = traced_stack_sect->prev;
707 if ((p -> flags & MAIN_THREAD) == 0) {
709 IF_IA64(bs_lo = p -> backing_store_end);
711 /* The original stack. */
713 IF_IA64(bs_lo = BACKING_STORE_BASE;)
715 # ifdef DEBUG_THREADS
716 GC_log_printf("Stack for thread %p = [%p,%p)\n",
717 (void *)p->id, (void *)lo, (void *)hi);
719 if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
720 if (p->altstack != NULL && (word)p->altstack <= (word)lo
721 && (word)lo <= (word)p->altstack + p->altstack_size) {
722 hi = p->altstack + p->altstack_size;
723 /* FIXME: Need to scan the normal stack too, but how ? */
724 /* FIXME: Assume stack grows down */
726 GC_push_all_stack_sections(lo, hi, traced_stack_sect);
727 # ifdef STACK_GROWS_UP
728 total_size += lo - hi;
730 total_size += hi - lo; /* lo <= hi */
733 /* Push reg_storage as roots, this will cover the reg context. */
734 GC_push_all_stack((ptr_t)p -> stop_info.reg_storage,
735 (ptr_t)(p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE));
736 total_size += NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t);
739 # ifdef DEBUG_THREADS
740 GC_log_printf("Reg stack for thread %p = [%p,%p)\n",
741 (void *)p->id, (void *)bs_lo, (void *)bs_hi);
743 /* FIXME: This (if p->id==self) may add an unbounded number of */
744 /* entries, and hence overflow the mark stack, which is bad. */
745 GC_push_all_register_sections(bs_lo, bs_hi,
746 THREAD_EQUAL(p -> id, self),
748 total_size += bs_hi - bs_lo; /* bs_lo <= bs_hi */
752 GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", (int)nthreads);
753 if (!found_me && !GC_in_thread_creation)
754 ABORT("Collecting from unknown thread");
755 GC_total_stacksize = total_size;
759 /* There seems to be a very rare thread stopping problem. To help us */
760 /* debug that, we save the ids of the stopping thread. */
761 pthread_t GC_stopping_thread;
762 int GC_stopping_pid = 0;
765 /* We hold the allocation lock. Suspend all threads that might */
766 /* still be running. Return the number of suspend signals that */
768 STATIC int GC_suspend_all(void)
770 int n_live_threads = 0;
774 # ifndef GC_OPENBSD_UTHREADS
777 pthread_t self = pthread_self();
779 for (i = 0; i < THREAD_TABLE_SZ; i++) {
780 for (p = GC_threads[i]; p != 0; p = p -> next) {
781 if (!THREAD_EQUAL(p -> id, self)) {
782 if ((p -> flags & FINISHED) != 0) continue;
783 if (p -> thread_blocked) /* Will wait */ continue;
784 # ifndef GC_OPENBSD_UTHREADS
785 # ifdef GC_ENABLE_SUSPEND_THREAD
786 if (p -> suspended_ext) continue;
788 if (AO_load(&p->stop_info.last_stop_count) == GC_stop_count)
789 continue; /* matters only if GC_retry_signals */
792 # ifdef DEBUG_THREADS
793 GC_log_printf("Sending suspend signal to %p\n", (void *)p->id);
796 # ifdef GC_OPENBSD_UTHREADS
800 GC_acquire_dirty_lock();
801 if (pthread_suspend_np(p -> id) != 0)
802 ABORT("pthread_suspend_np failed");
803 GC_release_dirty_lock();
804 if (pthread_stackseg_np(p->id, &stack))
805 ABORT("pthread_stackseg_np failed");
806 p -> stop_info.stack_ptr = (ptr_t)stack.ss_sp - stack.ss_size;
807 if (GC_on_thread_event)
808 GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
812 /* The synchronization between GC_dirty (based on */
813 /* test-and-set) and the signal-based thread suspension */
814 /* is performed in GC_stop_world because */
815 /* GC_release_dirty_lock cannot be called before */
816 /* acknowledging the thread is really suspended. */
817 result = RAISE_SIGNAL(p, GC_sig_suspend);
820 /* Not really there anymore. Possible? */
824 if (GC_on_thread_event)
825 GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
826 (void *)(word)THREAD_SYSTEM_ID(p));
827 /* Note: thread id might be truncated. */
830 ABORT_ARG1("pthread_kill failed at suspend",
831 ": errcode= %d", result);
839 # ifndef NACL_PARK_WAIT_NANOSECONDS
840 # define NACL_PARK_WAIT_NANOSECONDS (100 * 1000)
842 # define NANOS_PER_SECOND (1000UL * 1000 * 1000)
843 unsigned long num_sleeps = 0;
845 # ifdef DEBUG_THREADS
846 GC_log_printf("pthread_stop_world: num_threads=%d\n",
847 GC_nacl_num_gc_threads - 1);
849 GC_nacl_thread_parker = pthread_self();
850 GC_nacl_park_threads_now = 1;
853 GC_acquire_dirty_lock();
855 int num_threads_parked = 0;
859 /* Check the 'parked' flag for each thread the GC knows about. */
860 for (i = 0; i < MAX_NACL_GC_THREADS
861 && num_used < GC_nacl_num_gc_threads; i++) {
862 if (GC_nacl_thread_used[i] == 1) {
864 if (GC_nacl_thread_parked[i] == 1) {
865 num_threads_parked++;
866 if (GC_on_thread_event)
867 GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, (void *)(word)i);
871 /* -1 for the current thread. */
872 if (num_threads_parked >= GC_nacl_num_gc_threads - 1)
875 ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
876 # ifdef DEBUG_THREADS
877 GC_log_printf("Sleep waiting for %d threads to park...\n",
878 GC_nacl_num_gc_threads - num_threads_parked - 1);
880 /* This requires _POSIX_TIMERS feature. */
882 if (++num_sleeps > NANOS_PER_SECOND / NACL_PARK_WAIT_NANOSECONDS) {
883 WARN("GC appears stalled waiting for %" WARN_PRIdPTR
884 " threads to park...\n",
885 GC_nacl_num_gc_threads - num_threads_parked - 1);
890 GC_release_dirty_lock();
892 return n_live_threads;
895 GC_INNER void GC_stop_world(void)
897 # if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
900 GC_ASSERT(I_HOLD_LOCK());
901 # ifdef DEBUG_THREADS
902 GC_stopping_thread = pthread_self();
903 GC_stopping_pid = getpid();
904 GC_log_printf("Stopping the world from %p\n", (void *)GC_stopping_thread);
907 /* Make sure all free list construction has stopped before we start. */
908 /* No new construction can start, since free list construction is */
909 /* required to acquire and release the GC lock before it starts, */
910 /* and we have the lock. */
911 # ifdef PARALLEL_MARK
913 GC_acquire_mark_lock();
914 GC_ASSERT(GC_fl_builder_count == 0);
915 /* We should have previously waited for it to become zero. */
917 # endif /* PARALLEL_MARK */
919 # if defined(GC_OPENBSD_UTHREADS) || defined(NACL)
920 (void)GC_suspend_all();
922 AO_store(&GC_stop_count,
923 (AO_t)((word)GC_stop_count + (THREAD_RESTARTED+1)));
924 /* Only concurrent reads are possible. */
926 GC_acquire_dirty_lock();
927 /* The write fault handler cannot be called if GC_manual_vdb */
928 /* (thus double-locking should not occur in */
929 /* async_set_pht_entry_from_index based on test-and-set). */
931 AO_store_release(&GC_world_is_stopped, TRUE);
932 n_live_threads = GC_suspend_all();
933 if (GC_retry_signals)
934 n_live_threads = resend_lost_signals(n_live_threads, GC_suspend_all);
935 suspend_restart_barrier(n_live_threads);
937 GC_release_dirty_lock(); /* cannot be done in GC_suspend_all */
940 # ifdef PARALLEL_MARK
942 GC_release_mark_lock();
944 # ifdef DEBUG_THREADS
945 GC_log_printf("World stopped from %p\n", (void *)pthread_self());
946 GC_stopping_thread = 0;
951 # if defined(__x86_64__)
952 # define NACL_STORE_REGS() \
954 __asm__ __volatile__ ("push %rbx"); \
955 __asm__ __volatile__ ("push %rbp"); \
956 __asm__ __volatile__ ("push %r12"); \
957 __asm__ __volatile__ ("push %r13"); \
958 __asm__ __volatile__ ("push %r14"); \
959 __asm__ __volatile__ ("push %r15"); \
960 __asm__ __volatile__ ("mov %%esp, %0" \
961 : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
962 BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
963 GC_nacl_gc_thread_self->stop_info.reg_storage, \
964 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
965 __asm__ __volatile__ ("naclasp $48, %r15"); \
967 # elif defined(__i386__)
968 # define NACL_STORE_REGS() \
970 __asm__ __volatile__ ("push %ebx"); \
971 __asm__ __volatile__ ("push %ebp"); \
972 __asm__ __volatile__ ("push %esi"); \
973 __asm__ __volatile__ ("push %edi"); \
974 __asm__ __volatile__ ("mov %%esp, %0" \
975 : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
976 BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
977 GC_nacl_gc_thread_self->stop_info.reg_storage, \
978 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
979 __asm__ __volatile__ ("add $16, %esp"); \
981 # elif defined(__arm__)
982 # define NACL_STORE_REGS() \
984 __asm__ __volatile__ ("push {r4-r8,r10-r12,lr}"); \
985 __asm__ __volatile__ ("mov r0, %0" \
986 : : "r" (&GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
987 __asm__ __volatile__ ("bic r0, r0, #0xc0000000"); \
988 __asm__ __volatile__ ("str sp, [r0]"); \
989 BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
990 GC_nacl_gc_thread_self->stop_info.reg_storage, \
991 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
992 __asm__ __volatile__ ("add sp, sp, #40"); \
993 __asm__ __volatile__ ("bic sp, sp, #0xc0000000"); \
996 # error TODO Please port NACL_STORE_REGS
999 GC_API_OSCALL void nacl_pre_syscall_hook(void)
1001 if (GC_nacl_thread_idx != -1) {
1003 GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
1004 GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
1008 GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
1010 if (GC_nacl_park_threads_now) {
1011 pthread_t self = pthread_self();
1013 /* Don't try to park the thread parker. */
1014 if (GC_nacl_thread_parker == self)
1017 /* This can happen when a thread is created outside of the GC */
1018 /* system (wthread mostly). */
1019 if (GC_nacl_thread_idx < 0)
1022 /* If it was already 'parked', we're returning from a syscall, */
1023 /* so don't bother storing registers again, the GC has a set. */
1024 if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
1026 GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
1028 GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
1029 while (GC_nacl_park_threads_now) {
1032 GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
1034 /* Clear out the reg storage for next suspend. */
1035 BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage,
1036 NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
1040 GC_API_OSCALL void nacl_post_syscall_hook(void)
1042 /* Calling __nacl_suspend_thread_if_needed right away should */
1043 /* guarantee we don't mutate the GC set. */
1044 __nacl_suspend_thread_if_needed();
1045 if (GC_nacl_thread_idx != -1) {
1046 GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
1050 STATIC GC_bool GC_nacl_thread_parking_inited = FALSE;
1051 STATIC pthread_mutex_t GC_nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
1053 struct nacl_irt_blockhook {
1054 int (*register_block_hooks)(void (*pre)(void), void (*post)(void));
1058 extern size_t nacl_interface_query(const char *interface_ident,
1059 void *table, size_t tablesize);
1062 GC_INNER void GC_nacl_initialize_gc_thread(void)
1065 static struct nacl_irt_blockhook gc_hook;
1067 pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
1068 if (!EXPECT(GC_nacl_thread_parking_inited, TRUE)) {
1069 BZERO(GC_nacl_thread_parked, sizeof(GC_nacl_thread_parked));
1070 BZERO(GC_nacl_thread_used, sizeof(GC_nacl_thread_used));
1071 /* TODO: replace with public 'register hook' function when */
1072 /* available from glibc. */
1073 nacl_interface_query("nacl-irt-blockhook-0.1",
1074 &gc_hook, sizeof(gc_hook));
1075 gc_hook.register_block_hooks(nacl_pre_syscall_hook,
1076 nacl_post_syscall_hook);
1077 GC_nacl_thread_parking_inited = TRUE;
1079 GC_ASSERT(GC_nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
1080 for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
1081 if (GC_nacl_thread_used[i] == 0) {
1082 GC_nacl_thread_used[i] = 1;
1083 GC_nacl_thread_idx = i;
1084 GC_nacl_num_gc_threads++;
1088 pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
1091 GC_INNER void GC_nacl_shutdown_gc_thread(void)
1093 pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
1094 GC_ASSERT(GC_nacl_thread_idx >= 0);
1095 GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS);
1096 GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx] != 0);
1097 GC_nacl_thread_used[GC_nacl_thread_idx] = 0;
1098 GC_nacl_thread_idx = -1;
1099 GC_nacl_num_gc_threads--;
1100 pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
1105 /* Restart all threads that were suspended by the collector. */
1106 /* Return the number of restart signals that were sent. */
/* Walks every bucket of the GC_threads hash table and resumes each    */
/* registered thread other than the caller, skipping threads that are  */
/* finished, blocked, externally suspended, or (when GC_retry_signals  */
/* is on) already observed as restarted.  On OpenBSD uthreads the      */
/* resume is pthread_resume_np(); elsewhere it is a GC_sig_thr_restart */
/* signal raised via RAISE_SIGNAL.                                     */
/* NOTE(review): several original lines (local declarations, a couple  */
/* of #else/#endif arms and closing braces) are elided from this       */
/* excerpt.                                                            */
1107 STATIC int GC_restart_all(void)
1109 int n_live_threads = 0;
1111 pthread_t self = pthread_self();
1113 # ifndef GC_OPENBSD_UTHREADS
1117 for (i = 0; i < THREAD_TABLE_SZ; i++) {
1118 for (p = GC_threads[i]; p != NULL; p = p -> next) {
/* Never signal ourselves; we are the thread running the collector.   */
1119 if (!THREAD_EQUAL(p -> id, self)) {
1120 if ((p -> flags & FINISHED) != 0) continue;
1121 if (p -> thread_blocked) continue;
1122 # ifndef GC_OPENBSD_UTHREADS
1123 # ifdef GC_ENABLE_SUSPEND_THREAD
/* Threads suspended via the public suspend API stay suspended.       */
1124 if (p -> suspended_ext) continue;
/* With retries enabled, last_stop_count carrying THREAD_RESTARTED    */
/* for the current stop means this thread already acknowledged its    */
/* restart; do not signal it again.                                   */
1126 if (GC_retry_signals
1127 && AO_load(&p->stop_info.last_stop_count)
1128 == (AO_t)((word)GC_stop_count | THREAD_RESTARTED))
1129 continue; /* The thread has been restarted. */
1132 # ifdef DEBUG_THREADS
1133 GC_log_printf("Sending restart signal to %p\n", (void *)p->id);
1135 # ifdef GC_OPENBSD_UTHREADS
1136 if (pthread_resume_np(p -> id) != 0)
1137 ABORT("pthread_resume_np failed");
1138 if (GC_on_thread_event)
1139 GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, (void *)p->id);
1141 result = RAISE_SIGNAL(p, GC_sig_thr_restart);
1144 /* Not really there anymore. Possible? */
1148 if (GC_on_thread_event)
1149 GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,
1150 (void *)(word)THREAD_SYSTEM_ID(p));
1153 ABORT_ARG1("pthread_kill failed at resume",
1154 ": errcode= %d", result);
1160 return n_live_threads;
1164 /* Caller holds allocation lock, and has held it continuously since */
1165 /* the world stopped. */
/* Restart the world after a collection: clear the stopped flag, send  */
/* restart signals via GC_restart_all(), and wait (via the suspend-    */
/* restart barrier) for the signalled threads to acknowledge, with an  */
/* optional resend pass when GC_retry_signals is set.  On NaCl the     */
/* threads self-park, so restarting is just clearing                   */
/* GC_nacl_park_threads_now.                                           */
/* NOTE(review): the #else (NaCl) arm boundaries and some #endif lines */
/* are elided from this excerpt.                                       */
1166 GC_INNER void GC_start_world(void)
1171 GC_ASSERT(I_HOLD_LOCK());
1172 # ifdef DEBUG_THREADS
1173 GC_log_printf("World starting\n");
1175 # ifndef GC_OPENBSD_UTHREADS
1176 AO_store_release(&GC_world_is_stopped, FALSE);
1177 /* The updated value should now be visible to the */
1178 /* signal handler (note that pthread_kill is not on */
1179 /* the list of functions which synchronize memory). */
1181 n_live_threads = GC_restart_all();
1182 # ifdef GC_OPENBSD_UTHREADS
/* pthread_resume_np() path: no signals sent, nothing to wait for.    */
1183 (void)n_live_threads;
1184 # elif defined(GC_NETBSD_THREADS_WORKAROUND)
/* NetBSD workaround: always wait on the barrier, retrying first if   */
/* enabled.                                                           */
1185 if (GC_retry_signals)
1186 n_live_threads = resend_lost_signals(n_live_threads, GC_restart_all);
1187 suspend_restart_barrier(n_live_threads);
1189 if (GC_retry_signals) {
1190 n_live_threads = resend_lost_signals(n_live_threads, GC_restart_all);
1191 suspend_restart_barrier(n_live_threads);
1194 # ifdef DEBUG_THREADS
1195 GC_log_printf("World started\n");
1198 # ifdef DEBUG_THREADS
1199 GC_log_printf("World starting...\n");
/* NaCl: threads poll this flag; clearing it lets them unpark.        */
1201 GC_nacl_park_threads_now = 0;
1202 if (GC_on_thread_event)
1203 GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, NULL);
1204 /* TODO: Send event for every unsuspended thread. */
/* One-time initialization of the stop-world machinery (signal-based   */
/* platforms only; a no-op body for OpenBSD uthreads and NaCl).        */
/* Resolves the suspend/restart signal numbers, creates the suspend    */
/* acknowledgement semaphore, installs GC_suspend_handler and          */
/* GC_restart_handler via sigaction(), builds suspend_handler_mask,    */
/* honors the GC_RETRY_SIGNALS environment variable, and optionally    */
/* unblocks the GC signals in the calling (main) thread.               */
/* NOTE(review): a few original lines (e.g. the declaration of str and */
/* several closing #endif/braces) are elided from this excerpt.        */
1208 GC_INNER void GC_stop_init(void)
1210 # if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
1211 struct sigaction act;
/* Allow the client to override the signal numbers before init.       */
1214 if (SIGNAL_UNSET == GC_sig_suspend)
1215 GC_sig_suspend = SIG_SUSPEND;
1216 if (SIGNAL_UNSET == GC_sig_thr_restart)
1217 GC_sig_thr_restart = SIG_THR_RESTART;
/* The two handlers are distinct; sharing one signal cannot work.     */
1218 if (GC_sig_suspend == GC_sig_thr_restart)
1219 ABORT("Cannot use same signal for thread suspend and resume");
1221 if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
1222 ABORT("sem_init failed");
1225 act.sa_flags = SA_RESTART
1229 # ifndef NO_SA_SIGACTION
/* Block everything while the handlers run...                         */
1233 if (sigfillset(&act.sa_mask) != 0) {
1234 ABORT("sigfillset failed");
1236 # ifdef GC_RTEMS_PTHREADS
1237 if(sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL) != 0) {
1238 ABORT("sigprocmask failed");
/* ...except the signals the client asked us to leave alone.          */
1241 GC_remove_allowed_signals(&act.sa_mask);
1242 /* GC_sig_thr_restart is set in the resulting mask. */
1243 /* It is unmasked by the handler when necessary. */
1244 # ifndef NO_SA_SIGACTION
1245 act.sa_sigaction = GC_suspend_handler;
1247 act.sa_handler = GC_suspend_handler;
1249 /* act.sa_restorer is deprecated and should not be initialized. */
1250 if (sigaction(GC_sig_suspend, &act, NULL) != 0) {
1251 ABORT("Cannot set SIG_SUSPEND handler");
1254 # ifndef NO_SA_SIGACTION
/* Restart handler takes no siginfo; drop SA_SIGINFO for it.          */
1255 act.sa_flags &= ~SA_SIGINFO;
1257 act.sa_handler = GC_restart_handler;
1258 if (sigaction(GC_sig_thr_restart, &act, NULL) != 0) {
1259 ABORT("Cannot set SIG_THR_RESTART handler");
1262 /* Initialize suspend_handler_mask (excluding GC_sig_thr_restart). */
1263 if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset failed");
1264 GC_remove_allowed_signals(&suspend_handler_mask);
1265 if (sigdelset(&suspend_handler_mask, GC_sig_thr_restart) != 0)
1266 ABORT("sigdelset failed");
1268 /* Override the default value of GC_retry_signals. */
1269 str = GETENV("GC_RETRY_SIGNALS");
1271 if (*str == '0' && *(str + 1) == '\0') {
1272 /* Do not retry if the environment variable is set to "0". */
1273 GC_retry_signals = FALSE;
1275 GC_retry_signals = TRUE;
1278 if (GC_retry_signals) {
1280 "Will retry suspend and restart signals if necessary\n");
1282 # ifndef NO_SIGNALS_UNBLOCK_IN_MAIN
1283 /* Explicitly unblock the signals once before new threads creation. */
1284 GC_unblock_gc_signals();
1286 # endif /* !GC_OPENBSD_UTHREADS && !NACL */
1289 #endif /* GC_PTHREADS && !GC_DARWIN_THREADS && !GC_WIN32_THREADS */