(a joint commit of commits 'd6acbd' and '57b94a3' in master branch)
* alloc.c (min_bytes_allocd, GC_stopped_mark): Use GC_approx_sp()
instead of "&dummy"; remove 'dummy' local variable.
* dyn_load.c (GC_cond_add_roots): Likewise.
* misc.c (GC_init): Likewise.
* os_dep.c (GC_get_stack_base, GC_get_main_stack_base): Likewise.
* pthread_stop_world.c (GC_suspend_handler_inner,
nacl_pre_syscall_hook, __nacl_suspend_thread_if_needed): Likewise.
* pthread_support.c (GC_thr_init): Likewise.
* ptr_chck.c (GC_on_stack): Likewise.
* win32_threads.c (GC_push_stack_for): Likewise.
* extra/setjmp_t.c (main): Define a volatile 'sp' local variable, store
its own address in it and use it instead of "&dummy"; remove 'dummy'
local variable.
* mach_dep.c (GC_with_callee_saves_pushed): Use volatile for 'dummy'
local variable.
* misc.c (GC_clear_stack_inner): Store the address of the volatile
'dummy' local array (i.e. the 'sp' value) in its first element (and use
that element in the address comparison) to prevent harmful
optimizations, since C officially disallows comparisons of pointers to
different objects (e.g., some Mac OS X clang releases might turn a
conditional expression that uses the 'dummy' address into a constant);
update comment.
* misc.c (GC_call_with_stack_base): Use "&base" instead of "&dummy"
(it is safe to use the address of 'base' here); remove 'dummy' local
variable.
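
For context, the entries above replace direct uses of a local variable's
address ("&dummy") with GC_approx_sp(), the collector's existing helper for
obtaining an approximate stack-pointer value.  Below is only a minimal
sketch of such a helper, assuming a pointer-sized 'word' and a byte-pointer
'ptr_t' typedef (as in the GC-internal headers); it illustrates the
volatile-store idiom also applied in extra/setjmp_t.c and
GC_clear_stack_inner in the diff that follows, and is not the collector's
actual implementation.

    #include <stdint.h>

    typedef uintptr_t word;   /* assumption: pointer-sized integer         */
    typedef char *ptr_t;      /* assumption: byte pointer, as in gc_priv.h */

    /* Return an approximate value of the current stack pointer by taking */
    /* the address of a volatile local; storing that address through the  */
    /* volatile object keeps the compiler from folding or hoisting the    */
    /* address computation.                                               */
    static ptr_t approx_sp_sketch(void)
    {
      volatile word sp;

      sp = (word)(&sp);
      return (ptr_t)sp;
    }
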
/* collections to amortize the collection cost. */
static word min_bytes_allocd(void)
{
- int dummy; /* GC_stackbottom is used only for a single-threaded case. */
# ifdef STACK_GROWS_UP
- word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
+ word stack_size = GC_approx_sp() - GC_stackbottom;
+ /* GC_stackbottom is used only for a single-threaded case. */
# else
- word stack_size = GC_stackbottom - (ptr_t)(&dummy);
+ word stack_size = GC_stackbottom - GC_approx_sp();
# endif
word total_root_size; /* includes double stack size, */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
unsigned i;
- int dummy;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE current_time;
START_WORLD();
return(FALSE);
}
- if (GC_mark_some((ptr_t)(&dummy))) break;
+ if (GC_mark_some(GC_approx_sp())) break;
}
GC_gc_no++;
}
if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
# else
- char dummy;
char * stack_top
- = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
+ = (char *)((word)GC_approx_sp() &
+ ~(GC_sysinfo.dwAllocationGranularity - 1));
if (base == limit) return;
if (limit > stack_top && base < GC_stackbottom) {
/* Part of the stack; ignore it. */
int main(void)
{
- int dummy;
+ volatile word sp;
long ps = GETPAGESIZE();
jmp_buf b;
register int x = (int)strlen("a"); /* 1, slightly disguised */
static int y = 0;
+ sp = (word)(&sp);
printf("This appears to be a %s running %s\n", MACH_TYPE, OS_TYPE);
- if (nested_sp() < &dummy) {
+ if (nested_sp() < (int *)sp) {
printf("Stack appears to grow down, which is the default.\n");
printf("A good guess for STACKBOTTOM on this machine is 0x%lx.\n",
- ((unsigned long)(&dummy) + ps) & ~(ps-1));
+ ((unsigned long)sp + ps) & ~(ps-1));
} else {
printf("Stack appears to grow up.\n");
printf("Define STACK_GROWS_UP in gc_private.h\n");
printf("A good guess for STACKBOTTOM on this machine is 0x%lx.\n",
- ((unsigned long)(&dummy) + ps) & ~(ps-1));
+ ((unsigned long)sp + ps) & ~(ps-1));
}
printf("Note that this may vary between machines of ostensibly\n");
printf("the same architecture (e.g. Sun 3/50s and 3/80s).\n");
GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
ptr_t arg)
{
- word dummy;
+ volatile int dummy;
void * context = 0;
# if defined(HAVE_PUSH_REGS)
/*ARGSUSED*/
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
- word dummy[CLEAR_SIZE];
+ volatile word dummy[CLEAR_SIZE];
- BZERO(dummy, CLEAR_SIZE*sizeof(word));
- if ((ptr_t)(dummy) COOLER_THAN limit) {
+ dummy[0] = (word)(&dummy);
+ /* A store to a volatile variable prevents the following */
+ /* condition from being 'optimized' into a TRUE constant. */
+ BZERO((/* no volatile */ void *)&dummy[1], (CLEAR_SIZE-1) * sizeof(word));
+ if (dummy[0] COOLER_THAN (word)limit) {
(void) GC_clear_stack_inner(arg, limit);
}
/* Make sure the recursive call is not a tail call, and the bzero */
GC_API void GC_CALL GC_init(void)
{
/* LOCK(); -- no longer does anything this early. */
-# if !defined(THREADS) && defined(GC_ASSERTIONS)
- word dummy;
-# endif
word initial_heap_sz;
IF_CANCEL(int cancel_state;)
GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
# ifndef THREADS
- GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)(&dummy)));
+ GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp()));
# endif
# if !defined(_AUX_SOURCE) || defined(__GNUC__)
GC_STATIC_ASSERT((word)(-1) > (word)0);
GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
{
- int dummy;
struct GC_stack_base base;
void *result;
- base.mem_base = (void *)&dummy;
+ base.mem_base = (void *)&base;
# ifdef IA64
base.reg_base = (void *)GC_save_regs_in_stack();
/* Unnecessarily flushes register stack, */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{
- int dummy;
- ptr_t sp = (ptr_t)(&dummy);
- ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
+ ptr_t trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
/* FIXME: This won't work if called from a deeply recursive */
/* client code (and the committed stack space has grown). */
word size = GC_get_writable_length(trunc_sp, 0);
ptr_t GC_get_main_stack_base(void)
{
- ptr_t result; /* also used as "dummy" to get the approx. sp value */
+ ptr_t result;
# if defined(LINUX) && !defined(NACL) \
&& (defined(USE_GET_STACKBASE_FOR_MAIN) \
|| (defined(THREADS) && !defined(REDIRECT_MALLOC)))
pthread_attr_t attr;
void *stackaddr;
size_t size;
+
if (pthread_getattr_np(pthread_self(), &attr) == 0) {
if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
&& stackaddr != NULL) {
# define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
# ifdef HEURISTIC1
# ifdef STACK_GROWS_DOWN
- result = (ptr_t)((((word)(&result)) + STACKBOTTOM_ALIGNMENT_M1)
+ result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
& ~STACKBOTTOM_ALIGNMENT_M1);
# else
- result = (ptr_t)(((word)(&result)) & ~STACKBOTTOM_ALIGNMENT_M1);
+ result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
# endif
# endif /* HEURISTIC1 */
# ifdef LINUX_STACKBOTTOM
result = GC_freebsd_main_stack_base();
# endif
# ifdef HEURISTIC2
-# ifdef STACK_GROWS_DOWN
- result = GC_find_limit((ptr_t)(&result), TRUE);
-# ifdef HEURISTIC2_LIMIT
- if (result > HEURISTIC2_LIMIT
- && (ptr_t)(&result) < HEURISTIC2_LIMIT) {
- result = HEURISTIC2_LIMIT;
- }
-# endif
-# else
- result = GC_find_limit((ptr_t)(&result), FALSE);
-# ifdef HEURISTIC2_LIMIT
- if (result < HEURISTIC2_LIMIT
- && (ptr_t)(&result) > HEURISTIC2_LIMIT) {
- result = HEURISTIC2_LIMIT;
- }
+ {
+ ptr_t sp = GC_approx_sp();
+# ifdef STACK_GROWS_DOWN
+ result = GC_find_limit(sp, TRUE);
+# ifdef HEURISTIC2_LIMIT
+ if (result > HEURISTIC2_LIMIT
+ && sp < HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
+# else
+ result = GC_find_limit(sp, FALSE);
+# ifdef HEURISTIC2_LIMIT
+ if (result < HEURISTIC2_LIMIT
+ && sp > HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
# endif
-# endif
+ }
# endif /* HEURISTIC2 */
# ifdef STACK_GROWS_DOWN
if (result == 0)
result = (ptr_t)(signed_word)(-sizeof(ptr_t));
# endif
# endif
- GC_ASSERT((ptr_t)(&result) HOTTER_THAN result);
+ GC_ASSERT(GC_approx_sp() HOTTER_THAN result);
return(result);
}
# define GET_MAIN_STACKBASE_SPECIAL
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
-# ifdef GC_ASSERTIONS
- int dummy;
-# endif
/* pthread_get_stackaddr_np() should return stack bottom (highest */
/* stack address plus 1). */
b->mem_base = pthread_get_stackaddr_np(pthread_self());
- GC_ASSERT((void *)&dummy HOTTER_THAN b->mem_base);
+ GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN b->mem_base);
return GC_SUCCESS;
}
# define HAVE_GET_STACK_BASE
{
stack_t s;
pthread_t self = pthread_self();
+
if (self == stackbase_main_self)
{
/* If the client calls GC_get_stack_base() from the main thread */
ABORT("thr_stksegment failed");
}
/* s.ss_sp holds the pointer to the stack bottom. */
- GC_ASSERT((void *)&s HOTTER_THAN s.ss_sp);
+ GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN s.ss_sp);
if (!stackbase_main_self && thr_main() != 0)
{
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
# ifdef NEED_FIND_LIMIT
- int dummy;
IF_CANCEL(int cancel_state;)
DCL_LOCK_STATE;
LOCK();
DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
# ifdef STACK_GROWS_DOWN
- b -> mem_base = GC_find_limit((ptr_t)(&dummy), TRUE);
+ b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
# ifdef IA64
b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
# endif
# else
- b -> mem_base = GC_find_limit(&dummy, FALSE);
+ b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
# endif
RESTORE_CANCEL(cancel_state);
UNLOCK();
ptr_t GC_get_main_stack_base(void)
{
struct GC_stack_base sb;
+
if (GC_get_stack_base(&sb) != GC_SUCCESS)
ABORT("GC_get_stack_base failed");
- GC_ASSERT((void *)&sb HOTTER_THAN sb.mem_base);
+ GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN sb.mem_base);
return (ptr_t)sb.mem_base;
}
#endif /* !GET_MAIN_STACKBASE_SPECIAL */
# ifdef SPARC
me -> stop_info.stack_ptr = GC_save_regs_in_stack();
# else
- me -> stop_info.stack_ptr = (ptr_t)(&me);
+ me -> stop_info.stack_ptr = GC_approx_sp();
# endif
# ifdef IA64
me -> backing_store_ptr = GC_save_regs_in_stack();
GC_API_OSCALL void nacl_pre_syscall_hook(void)
{
- int local_dummy = 0;
if (GC_nacl_thread_idx != -1) {
NACL_STORE_REGS();
- GC_nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
+ GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
}
}
{
if (GC_nacl_park_threads_now) {
pthread_t self = pthread_self();
- int local_dummy = 0;
/* Don't try to park the thread parker. */
if (GC_nacl_thread_parker == self)
/* so don't bother storing registers again, the GC has a set. */
if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
NACL_STORE_REGS();
- GC_nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&local_dummy);
+ GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
}
GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
while (GC_nacl_park_threads_now) {
/* We hold the allocation lock. */
GC_INNER void GC_thr_init(void)
{
-# ifndef GC_DARWIN_THREADS
- int dummy;
-# endif
if (GC_thr_initialized) return;
GC_thr_initialized = TRUE;
# ifdef GC_DARWIN_THREADS
t -> stop_info.mach_thread = mach_thread_self();
# else
- t -> stop_info.stack_ptr = (ptr_t)(&dummy);
+ t -> stop_info.stack_ptr = GC_approx_sp();
# endif
t -> flags = DETACHED | MAIN_THREAD;
}
/* Could p be a stack address? */
STATIC GC_bool GC_on_stack(ptr_t p)
{
- int dummy;
# ifdef STACK_GROWS_DOWN
- if ((ptr_t)p >= (ptr_t)(&dummy) && (ptr_t)p < GC_stackbottom ) {
+ if ((ptr_t)p >= GC_approx_sp() && (ptr_t)p < GC_stackbottom) {
return(TRUE);
}
# else
- if ((ptr_t)p <= (ptr_t)(&dummy) && (ptr_t)p > GC_stackbottom ) {
+ if ((ptr_t)p <= GC_approx_sp() && (ptr_t)p > GC_stackbottom) {
return(TRUE);
}
# endif
STATIC word GC_push_stack_for(GC_thread thread, DWORD me)
{
- int dummy;
ptr_t sp, stack_min;
struct GC_traced_stack_sect_s *traced_stack_sect =
thread -> traced_stack_sect;
if (thread -> id == me) {
GC_ASSERT(thread -> thread_blocked_sp == NULL);
- sp = (ptr_t) &dummy;
+ sp = GC_approx_sp();
} else if ((sp = thread -> thread_blocked_sp) == NULL) {
/* Use saved sp value for blocked threads. */
/* For unblocked threads call GetThreadContext(). */