struct GC_stack_base *sb, void *arg);
GC_INNER_PTHRSTART void GC_thread_exit_proc(void *);
-GC_INNER void GC_setup_mark_lock(void);
-
#endif /* GC_PTHREADS && !GC_WIN32_THREADS */
#endif /* GC_PTHREAD_SUPPORT_H */
/* else */ InitializeCriticalSection (&GC_allocate_ml);
}
# endif /* GC_WIN32_THREADS */
-# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
- GC_setup_mark_lock();
-# endif /* GC_PTHREADS */
# if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
InitializeCriticalSection(&GC_write_cs);
# endif
ptr_t *startp, ptr_t *endp);
#endif
+#ifdef PARALLEL_MARK
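+ /* Defined below; sets up the mark lock before the marker threads are started. */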
+ static void setup_mark_lock(void);
+#endif
+
/* We hold the allocation lock. */
GC_INNER void GC_thr_init(void)
{
} else {
/* Disable true incremental collection, but generational is OK. */
GC_time_limit = GC_TIME_UNLIMITED;
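+ /* Set up the mark lock before starting the mark helper threads. */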
+ setup_mark_lock();
/* If we are using a parallel marker, actually start helper threads. */
start_mark_threads();
}
}
#endif /* GLIBC_2_19_TSX_BUG */
-GC_INNER void GC_setup_mark_lock(void)
+static void setup_mark_lock(void)
{
# ifdef GLIBC_2_19_TSX_BUG
pthread_mutexattr_t mattr;