* doc/README.macros (GC_DISABLE_INCREMENTAL): Document.
* include/private/gcconfig.h (GC_DISABLE_INCREMENTAL): Recognize
new macro; implicitly define it if SMALL_CONFIG.
* alloc.c (GC_incremental, GC_timeout_stop_func): Check for
GC_DISABLE_INCREMENTAL instead of SMALL_CONFIG.
* include/private/gc_priv.h (GC_incremental, TRUE_INCREMENTAL,
GC_push_conditional): Ditto.
* mark.c (GC_push_next_marked_dirty, GC_push_selected,
GC_push_conditional, GC_block_was_dirty): Ditto.
* misc.c (GC_enable_incremental): Ditto.
* misc.c (GC_init): Ditto (for "GC_PAUSE_TIME_TARGET" and
"GC_ENABLE_INCREMENTAL" environment variables).
* misc.c (GC_enable_incremental): Reformat the code.
+2009-11-06 Ivan Maidanski <ivmai@mail.ru>
+
+ * doc/README.macros (GC_DISABLE_INCREMENTAL): Document.
+ * include/private/gcconfig.h (GC_DISABLE_INCREMENTAL): Recognize
+ new macro; implicitly define it if SMALL_CONFIG.
+ * alloc.c (GC_incremental, GC_timeout_stop_func): Check for
+ GC_DISABLE_INCREMENTAL instead of SMALL_CONFIG.
+ * include/private/gc_priv.h (GC_incremental, TRUE_INCREMENTAL,
+ GC_push_conditional): Ditto.
+ * mark.c (GC_push_next_marked_dirty, GC_push_selected,
+ GC_push_conditional, GC_block_was_dirty): Ditto.
+ * misc.c (GC_enable_incremental): Ditto.
+ * misc.c (GC_init): Ditto (for "GC_PAUSE_TIME_TARGET" and
+ "GC_ENABLE_INCREMENTAL" environment variables).
+ * misc.c (GC_enable_incremental): Reformat the code.
+
2009-11-06 Ivan Maidanski <ivmai@mail.ru>
* dyn_load.c (WIN32_LEAN_AND_MEAN): Guard with ifndef.
word GC_gc_no = 0;
-#ifndef SMALL_CONFIG
+#ifndef GC_DISABLE_INCREMENTAL
GC_INNER int GC_incremental = 0; /* By default, stop the world. */
#endif
return stop_func;
}
-#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
+#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_default_stop_func
#else
STATIC int GC_CALLBACK GC_timeout_stop_func (void)
}
return(0);
}
-#endif /* !SMALL_CONFIG */
+#endif /* !GC_DISABLE_INCREMENTAL */
#ifdef THREADS
GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
usually causing it to use less space in such situations.
Incremental collection no longer works in this case.
+GC_DISABLE_INCREMENTAL Turns off the incremental collection support.
+
+NO_INCREMENTAL Causes the gctest program to not invoke the incremental
+ collector. This has no impact on the generated library, only on the test
+ program. (This is often useful for debugging failures unrelated to
+ incremental GC.)
+
LARGE_CONFIG Tunes the collector for unusually large heaps.
Necessary for heaps larger than about 4 GiB on most (64-bit) machines.
Recommended for heaps larger than about 500 MiB. Not recommended for
getting traced by the collector. This has no impact on the generated
library; it only affects the test.
-NO_INCREMENTAL Causes the gctest program to not invoke the incremental
- collector. This has no impact on the generated library, only on the test
- program. (This is often useful for debugging failures unrelated to
- incremental GC.)
-
POINTER_MASK=<0x...> Causes candidate pointers to be AND'ed with the given
mask before being considered. If either this or the following macro is
defined, it will be assumed that all pointers stored in the heap need to be
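
For illustration, a gctest-style fragment showing how the NO_INCREMENTAL macro
documented above is meant to be used; the real test program is assumed and only
the guard around the incremental-collector call is shown:

  #include "gc.h"

  int main(void)
  {
    GC_INIT();
  #ifndef NO_INCREMENTAL
    GC_enable_incremental();   /* exercise the incremental collector */
  #endif
    /* ... run the allocation tests ... */
    return 0;
  }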
/* "stack-blacklisted", i.e. that are */
/* problematic in the interior of an object. */
-#ifndef SMALL_CONFIG
+#ifdef GC_DISABLE_INCREMENTAL
+# define GC_incremental FALSE
+ /* Hopefully allow optimizer to remove some code. */
+# define TRUE_INCREMENTAL FALSE
+#else
GC_EXTERN GC_bool GC_incremental;
/* Using incremental/generational collection. */
# define TRUE_INCREMENTAL \
(GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
/* True incremental, not just generational, mode */
-#else
-# define GC_incremental FALSE
- /* Hopefully allow optimizer to remove some code. */
-# define TRUE_INCREMENTAL FALSE
-#endif
+#endif /* !GC_DISABLE_INCREMENTAL */
GC_EXTERN GC_bool GC_dirty_maintained;
/* Dirty bits are being maintained, */
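
The constant-FALSE definition above lets the compiler discard code that is only
reachable when incremental collection is active. A standalone sketch of the same
pattern (everything outside the GC_incremental/GC_DISABLE_INCREMENTAL names is
illustrative, not taken from the library):

  #include <stdio.h>

  /* When incremental support is compiled out, GC_incremental becomes a
     compile-time constant, so every "if (GC_incremental)" branch is dead
     code the optimizer can drop; otherwise it remains a real variable.  */
  #ifdef GC_DISABLE_INCREMENTAL
  # define GC_incremental 0
  #else
  static int GC_incremental = 0;
  #endif

  int main(void)
  {
    if (GC_incremental) {
      puts("incremental marking path");  /* removed entirely when disabled */
    } else {
      puts("stop-the-world path");
    }
    return 0;
  }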
GC_INNER void GC_push_all(ptr_t bottom, ptr_t top);
/* Push everything in a range */
/* onto mark stack. */
-#ifndef SMALL_CONFIG
+#ifndef GC_DISABLE_INCREMENTAL
GC_INNER void GC_push_conditional(ptr_t b, ptr_t t, GC_bool all);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
# error --> undefined STACKBOTTOM
# endif
-# ifdef SMALL_CONFIG
+# if defined(SMALL_CONFIG) && !defined(GC_DISABLE_INCREMENTAL)
/* Presumably not worth the space it takes. */
-# undef PROC_VDB
+# define GC_DISABLE_INCREMENTAL
+# endif
+
+# ifdef GC_DISABLE_INCREMENTAL
+# undef GWW_VDB
# undef MPROTECT_VDB
+# undef PCR_VDB
+# undef PROC_VDB
# endif
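
Illustrative sketch of the implication chain introduced above (simplified: only
MPROTECT_VDB is shown, and the macros are force-defined just for the demo):

  #include <stdio.h>

  #define SMALL_CONFIG          /* pretend a small build was requested     */
  #define MPROTECT_VDB          /* pretend the platform had selected a VDB */

  /* SMALL_CONFIG now implies GC_DISABLE_INCREMENTAL, which in turn drops
     whatever virtual-dirty-bit mechanism had been chosen.                 */
  #if defined(SMALL_CONFIG) && !defined(GC_DISABLE_INCREMENTAL)
  # define GC_DISABLE_INCREMENTAL
  #endif
  #ifdef GC_DISABLE_INCREMENTAL
  # undef MPROTECT_VDB
  #endif

  int main(void)
  {
  #ifdef MPROTECT_VDB
    puts("dirty-bit tracking compiled in");
  #else
    puts("incremental support compiled out");
  #endif
    return 0;
  }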
# ifdef USE_GLOBAL_ALLOC
STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
#endif /* PARALLEL_MARK */
-#ifdef SMALL_CONFIG
+#ifdef GC_DISABLE_INCREMENTAL
# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
#else
STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h);
/* Invoke GC_push_marked on next dirty block above h. */
/* Return a pointer just past the end of this block. */
-#endif /* !SMALL_CONFIG */
+#endif /* !GC_DISABLE_INCREMENTAL */
STATIC struct hblk * GC_push_next_marked(struct hblk *h);
/* Ditto, but also mark from clean pages. */
STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
GC_mark_stack_top -> mse_descr = length;
}
-#ifndef SMALL_CONFIG
+#ifndef GC_DISABLE_INCREMENTAL
/*
* Analogous to the above, but push only those pages h with
GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
}
}
-#endif /* !SMALL_CONFIG */
+#endif /* !GC_DISABLE_INCREMENTAL */
#if defined(MSWIN32) || defined(MSWINCE)
void __cdecl GC_push_one(word p)
}
}
-#ifndef SMALL_CONFIG
-/* Test whether any page in the given block is dirty */
-STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
-{
+#ifndef GC_DISABLE_INCREMENTAL
+ /* Test whether any page in the given block is dirty. */
+ STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
+ {
size_t sz = hhdr -> hb_sz;
if (sz <= MAXOBJBYTES) {
}
return(FALSE);
}
-}
-#endif /* SMALL_CONFIG */
+ }
+#endif /* !GC_DISABLE_INCREMENTAL */
/* Similar to GC_push_marked, but skip over unallocated blocks */
/* and return address of next plausible block. */
return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
-#ifndef SMALL_CONFIG
+#ifndef GC_DISABLE_INCREMENTAL
/* Identical to above, but mark only from dirty pages */
STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h)
{
GC_push_marked(h, hhdr);
return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
-#endif
+#endif /* !GC_DISABLE_INCREMENTAL */
/* Similar to above, but for uncollectable pages. Needed since we */
/* do not clear marks for such pages, even for full collections. */
# if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
InitializeCriticalSection(&GC_write_cs);
# endif
-# if (!defined(SMALL_CONFIG))
+# ifndef SMALL_CONFIG
# ifdef GC_PRINT_VERBOSE_STATS
/* This is useful for debugging and profiling on platforms with */
/* missing getenv() (like WinCE). */
}
}
# endif
-# endif
+# endif /* !SMALL_CONFIG */
# ifndef NO_DEBUGGING
if (0 != GETENV("GC_DUMP_REGULARLY")) {
GC_dump_regularly = 1;
# endif
}
}
-# ifndef SMALL_CONFIG
+# ifndef GC_DISABLE_INCREMENTAL
{
char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
if (0 != time_limit_string) {
}
}
}
+# endif
+# ifndef SMALL_CONFIG
{
char * full_freq_string = GETENV("GC_FULL_FREQUENCY");
if (full_freq_string != NULL) {
/* Ptr_t comparisons should behave as unsigned comparisons. */
# endif
GC_STATIC_ASSERT((signed_word)(-1) < (signed_word)0);
-# if !defined(SMALL_CONFIG)
+# ifndef GC_DISABLE_INCREMENTAL
if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
/* For GWW_MPROTECT on Win32, this needs to happen before any */
/* heap memory is allocated. */
GC_ASSERT(GC_bytes_allocd == 0);
GC_incremental = TRUE;
}
-# endif /* !SMALL_CONFIG */
+# endif
/* Add initial guess of root sets. Do this first, since sbrk(0) */
/* might be used. */
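
As noted in the ChangeLog, GC_init() now reads the "GC_ENABLE_INCREMENTAL" and
"GC_PAUSE_TIME_TARGET" environment variables only when incremental support is
compiled in. A rough client-side sketch (normally the variables would be set by
the shell; setenv() is used here purely for illustration):

  #include <stdlib.h>
  #include "gc.h"

  int main(void)
  {
    setenv("GC_ENABLE_INCREMENTAL", "1", 1);  /* presence alone is enough */
    setenv("GC_PAUSE_TIME_TARGET", "20", 1);  /* desired pause, in msecs  */
    GC_INIT();                 /* GC_init() reads both variables here     */
    (void)GC_MALLOC(64);       /* ordinary collectable allocation         */
    return 0;
  }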
GC_API void GC_CALL GC_enable_incremental(void)
{
-# if !defined(SMALL_CONFIG) && !defined(KEEP_BACK_PTRS)
- /* If we are keeping back pointers, the GC itself dirties all */
- /* pages on which objects have been marked, making */
- /* incremental GC pointless. */
- if (!GC_find_leak && 0 == GETENV("GC_DISABLE_INCREMENTAL")) {
+# if !defined(GC_DISABLE_INCREMENTAL) && !defined(KEEP_BACK_PTRS)
DCL_LOCK_STATE;
-
- LOCK();
- if (GC_incremental) goto out;
- GC_setpagesize();
- /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
- maybe_install_looping_handler(); /* Before write fault handler! */
- GC_incremental = TRUE;
- if (!GC_is_initialized) {
- GC_init();
- } else {
- GC_dirty_init();
- }
- if (!GC_dirty_maintained) goto out;
- if (GC_dont_gc) {
- /* Can't easily do it. */
- UNLOCK();
- return;
+ /* If we are keeping back pointers, the GC itself dirties all */
+ /* pages on which objects have been marked, making */
+ /* incremental GC pointless. */
+ if (!GC_find_leak && 0 == GETENV("GC_DISABLE_INCREMENTAL")) {
+ LOCK();
+ if (!GC_incremental) {
+ GC_setpagesize();
+ /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
+ maybe_install_looping_handler(); /* Before write fault handler! */
+ GC_incremental = TRUE;
+ if (!GC_is_initialized) {
+ GC_init();
+ } else {
+ GC_dirty_init();
+ }
+ if (GC_dirty_maintained && !GC_dont_gc) {
+ /* Can't easily do it if GC_dont_gc. */
+ if (GC_bytes_allocd > 0) {
+ /* There may be unmarked reachable objects. */
+ GC_gcollect_inner();
+ }
+ /* else we're OK in assuming everything's */
+ /* clean since nothing can point to an */
+ /* unmarked object. */
+ GC_read_dirty();
+ }
+ }
+ UNLOCK();
+ return;
}
- if (GC_bytes_allocd > 0) {
- /* There may be unmarked reachable objects */
- GC_gcollect_inner();
- } /* else we're OK in assuming everything's */
- /* clean since nothing can point to an */
- /* unmarked object. */
- GC_read_dirty();
-out:
- UNLOCK();
- } else {
- GC_init();
- }
-# else
- GC_init();
# endif
+ GC_init();
}
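
A minimal sketch of the direct API path, assuming the library was built without
GC_DISABLE_INCREMENTAL and without KEEP_BACK_PTRS (otherwise the call below
reduces to a plain GC_init()):

  #include "gc.h"

  int main(void)
  {
    GC_enable_incremental();            /* also initializes the collector */
    char *p = (char *)GC_MALLOC(128);   /* normal collectable allocation  */
    p[0] = '\0';
    return 0;
  }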
(void) abort();
# endif
}
-#endif
+#endif /* !SMALL_CONFIG */
GC_API void GC_CALL GC_enable(void)
{