+2009-06-11 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ (diff92_cvs: resembling diff20, diff27, diff34, diff38, diff43, diff45,
+ diff46, diff56, diff60, diff62, diff74, diff75, diff81 partly)
+
+ * allchblk.c (GC_print_hblkfreelist, GC_dump_regions): Convert
+ a group of printf() calls into a single one (for output atomicity).
+ * include/gc.h (GC_set_all_interior_pointers, GC_set_full_freq,
+ GC_set_time_limit): New prototypes.
+ * misc.c (GC_set_all_interior_pointers, GC_set_full_freq,
+ GC_set_time_limit): New public setter/getter functions.
+ * include/gc.h: Fix (and remove outdated) comments for thread-local
+ allocation.
+ * include/gc.h: Fix typos in comments.
+ * misc.c (GC_init_inner, GC_printf): Ditto.
+ * include/gc.h (GC_unregister_disappearing_link): Refine comment.
+ * include/gc.h (GC_stack_base): Recognize _M_IA64 macro.
+ * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
+ GC_bytes_allocd_at_reset, DEGRADE_RATE): Define only if THREADS.
+ * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
+ GC_bytes_allocd_at_reset): Define as STATIC.
+ * misc.c (GC_get_heap_size, GC_get_free_bytes,
+ GC_get_bytes_since_gc, GC_get_total_bytes): Acquire the GC lock to
+ avoid data races.
+ * misc.c (GC_write_cs): Define only if THREADS (Win32/WinCE only).
+ * misc.c (GC_init_inner): Initialize GC_write_cs only if THREADS.
+ * misc.c (GC_init_inner): Use GC_INITIAL_HEAP_SIZE (if available) to
+ set the default initial value of initial_heap_sz.
+ * misc.c (GC_deinit): Destroy GC_write_cs only if THREADS.
+ * misc.c (GC_init_inner): Fix WARN() format specifier (should be
+	word-compliant, "%p" is used w/o "0x").
+ * misc.c (GC_init_inner): Don't recognize "GC_PAUSE_TIME_TARGET"
+ environment variable if SMALL_CONFIG.
+ * misc.c (GC_init_inner): Recognize "GC_FULL_FREQUENCY" environment
+ variable to set initial GC_full_freq value (if not SMALL_CONFIG).
+ * doc/README.environment (GC_FULL_FREQUENCY): Add information.
+ * doc/README.environment (GC_MARKERS): Refine information.
+ * misc.c (GC_init_inner): Change GC_ASSERT to GC_STATIC_ASSERT where
+ possible.
+ * misc.c (IF_NEED_TO_LOCK): New macro (instead of GC_need_to_lock).
+ * misc.c (GC_write): Use IF_NEED_TO_LOCK for handling GC_write_cs.
+ * misc.c (GC_abort): Don't define if SMALL_CONFIG.
+ * misc.c (GC_abort): Directly use WRITE() instead of GC_err_printf()
+ (to prevent possible infinite recursion).
+
2009-06-09 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
diff90_cvs (resembling diff28, diff30, diff32, diff34, diff47,
diff49, diff60, diff62, diff66, diff67, diff68, diff72 partly)
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
# ifdef USE_MUNMAP
- if (0 != h) GC_printf("Free list %ld:\n",
- (unsigned long)i);
+ if (0 != h) GC_printf("Free list %u:\n", i);
# else
- if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
- (long)i, (unsigned long)GC_free_bytes[i]);
+ if (0 != h) GC_printf("Free list %u (Total size %lu):\n",
+ i, (unsigned long)GC_free_bytes[i]);
# endif
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf("\t%p size %lu ", h, (unsigned long)sz);
total_free += sz;
- if (GC_is_black_listed(h, HBLKSIZE) != 0) {
- GC_printf("start black listed\n");
- } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
- GC_printf("partially black listed\n");
- } else {
- GC_printf("not black listed\n");
- }
+ GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
+ GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
+ GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
+ "not");
h = hhdr -> hb_next;
}
}
GC_printf("***Section from %p to %p\n", start, end);
for (p = start; p < end;) {
hhdr = HDR(p);
- GC_printf("\t%p ", p);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- GC_printf("Missing header!!(%p)\n", hhdr);
+ GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
p += HBLKSIZE;
continue;
}
divHBLKSZ(hhdr -> hb_sz));
int actual_index;
- GC_printf("\tfree block of size 0x%lx bytes",
- (unsigned long)(hhdr -> hb_sz));
- if (IS_MAPPED(hhdr)) {
- GC_printf("\n");
- } else {
- GC_printf("(unmapped)\n");
- }
+ GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
+ (unsigned long)(hhdr -> hb_sz),
+ IS_MAPPED(hhdr) ? "" : " (unmapped)");
actual_index = free_list_index_of(hhdr);
if (-1 == actual_index) {
GC_printf("\t\tBlock not on free list %d!!\n",
}
p += hhdr -> hb_sz;
} else {
- GC_printf("\tused for blocks of size 0x%lx bytes\n",
+ GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
(unsigned long)(hhdr -> hb_sz));
p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
}
since the lock implementation will immediately yield without
first spinning.
-GC_MARKERS=<n> - Linux w/threads and parallel marker only. Set the number
+GC_MARKERS=<n> - Only if compiled with PARALLEL_MARK. Set the number
of marker threads. This is normally set to the number of
processors. It is safer to adjust GC_MARKERS than GC_NPROCS,
since GC_MARKERS has no impact on the lock implementation.
run on a multiprocessor, incremental collection should
only be used with unlimited pause time.
+GC_FULL_FREQUENCY - Set the desired number of partial collections between full
+ collections. Matters only if GC_incremental is set.
+
GC_FREE_SPACE_DIVISOR - Set GC_free_space_divisor to the indicated value.
Setting it to larger values decreases space consumption
and increases GC frequency.
/* at least a byte to allow "off the end" */
/* pointer recognition. */
/* MUST BE 0 or 1. */
+GC_API int GC_CALL GC_set_all_interior_pointers(int);
GC_API int GC_finalize_on_demand;
/* If nonzero, finalizers will only be run in */
/* blocks. Values in the tens are now */
/* perfectly reasonable, unlike for */
/* earlier GC versions. */
+GC_API int GC_CALL GC_set_full_freq(int value);
GC_API GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
/* Setting GC_time_limit to this value */
/* will disable the "pause time exceeded"*/
/* tests. */
+GC_API unsigned long GC_CALL GC_set_time_limit(unsigned long value);
/* Public procedures */
/* Only the generational piece of this is */
/* functional if GC_parallel is TRUE */
/* or if GC_time_limit is GC_TIME_UNLIMITED. */
-/* Causes GC_local_gcj_malloc() to revert to */
-/* locked allocation. Must be called */
-/* before any GC_local_gcj_malloc() calls. */
+/* Causes thread-local variant of GC_gcj_malloc() to revert to */
+/* locked allocation. Must be called before any such */
+/* GC_gcj_malloc() calls. */
/* For best performance, should be called as early as possible. */
/* On some platforms, calling it later may have adverse effects.*/
/* Safe to call before GC_INIT(). Includes a GC_init() call. */
/* This can be used to implement certain types of */
/* weak pointers. Note however that this generally */
/* requires that the allocation lock is held (see */
- /* GC_call_with_allock_lock() below) when the disguised */
+ /* GC_call_with_alloc_lock() below) when the disguised */
/* pointer is accessed. Otherwise a strong pointer */
/* could be recreated between the time the collector */
/* decides to reclaim the object and the link is */
/* cleared. */
GC_API int GC_CALL GC_unregister_disappearing_link (void * * link);
- /* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
- /* routines. */
+ /* routines. Returns 0 if link was not actually */
+ /* registered (otherwise returns 1). */
-/* Returns !=0 if GC_invoke_finalizers has something to do. */
+/* Returns !=0 if GC_invoke_finalizers has something to do. */
GC_API int GC_CALL GC_should_invoke_finalizers(void);
GC_API int GC_CALL GC_invoke_finalizers(void);
/* platforms this contains just a single address. */
struct GC_stack_base {
void * mem_base; /* Base of memory stack. */
-# if defined(__ia64) || defined(__ia64__)
+# if defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
void * reg_base; /* Base of separate register stack. */
# endif
};
/* Explicitly dump the GC state. This is most often called from the */
/* debugger, or by setting the GC_DUMP_REGULARLY environment variable, */
/* but it may be useful to call it from client code during debugging. */
+/* Defined only if the library has been compiled without NO_DEBUGGING. */
GC_API void GC_CALL GC_dump(void);
/* Safer, but slow, pointer addition. Probably useful mainly with */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
-/* It is used internally by gc_local_alloc.h, which provides a simpler */
-/* programming interface on Linux. */
GC_API void * GC_CALL GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */
/* in returned list. */
* that are not written. We partially address this by clearing
* sections of the stack whenever we get control.
*/
-word GC_stack_last_cleared = 0; /* GC_no when we last did this */
# ifdef THREADS
# define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */
# define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */
-# endif
-# define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
-# define DEGRADE_RATE 50
-
-ptr_t GC_min_sp; /* Coolest stack pointer value from which we've */
- /* already cleared the stack. */
-
-ptr_t GC_high_water;
+# else
+ STATIC word GC_stack_last_cleared = 0; /* GC_no when we last did this */
+ STATIC ptr_t GC_min_sp; /* Coolest stack pointer value from which */
+ /* we've already cleared the stack. */
+ STATIC ptr_t GC_high_water;
/* "hottest" stack pointer value we have seen */
/* recently. Degrades over time. */
+ STATIC word GC_bytes_allocd_at_reset;
+# define DEGRADE_RATE 50
+# endif
-word GC_bytes_allocd_at_reset;
+# define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
#if defined(ASM_CLEAR_CODE)
extern void *GC_clear_stack_inner(void *, ptr_t);
GC_API size_t GC_CALL GC_get_heap_size(void)
{
- return GC_heapsize;
+ size_t value;
+ DCL_LOCK_STATE;
+ LOCK();
+ value = GC_heapsize;
+ UNLOCK();
+ return value;
}
GC_API size_t GC_CALL GC_get_free_bytes(void)
{
- return GC_large_free_bytes;
+ size_t value;
+ DCL_LOCK_STATE;
+ LOCK();
+ value = GC_large_free_bytes;
+ UNLOCK();
+ return value;
}
GC_API size_t GC_CALL GC_get_bytes_since_gc(void)
{
- return GC_bytes_allocd;
+ size_t value;
+ DCL_LOCK_STATE;
+ LOCK();
+ value = GC_bytes_allocd;
+ UNLOCK();
+ return value;
}
GC_API size_t GC_CALL GC_get_total_bytes(void)
{
- return GC_bytes_allocd+GC_bytes_allocd_before_gc;
+ size_t value;
+ DCL_LOCK_STATE;
+ LOCK();
+ value = GC_bytes_allocd+GC_bytes_allocd_before_gc;
+ UNLOCK();
+ return value;
}
GC_bool GC_is_initialized = FALSE;
/* UNLOCK(); */
}
-#if defined(MSWIN32) || defined(MSWINCE)
+#if (defined(MSWIN32) || defined(MSWINCE)) && defined(GC_THREADS)
CRITICAL_SECTION GC_write_cs;
#endif
# if !defined(THREADS) && defined(GC_ASSERTIONS)
word dummy;
# endif
- word initial_heap_sz = (word)MINHINCR;
+
+# ifdef GC_INITIAL_HEAP_SIZE
+ word initial_heap_sz = divHBLKSZ(GC_INITIAL_HEAP_SIZE);
+# else
+ word initial_heap_sz = (word)MINHINCR;
+# endif
if (GC_is_initialized) return;
InitializeCriticalSection (&GC_allocate_ml);
}
#endif /* MSWIN32 */
-# if defined(MSWIN32) || defined(MSWINCE)
+# if (defined(MSWIN32) || defined(MSWINCE)) && defined(GC_THREADS)
InitializeCriticalSection(&GC_write_cs);
# endif
# if (!defined(SMALL_CONFIG))
word addr = (word)strtoul(addr_string, NULL, 16);
# endif
if (addr < 0x1000)
- WARN("Unlikely trace address: 0x%lx\n", (long)addr);
+ WARN("Unlikely trace address: %p\n", addr);
GC_trace_addr = (ptr_t)addr;
# endif
}
}
- {
- char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
- if (0 != time_limit_string) {
- long time_limit = atol(time_limit_string);
- if (time_limit < 5) {
- WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
- "or bad syntax: Ignoring\n", 0);
- } else {
- GC_time_limit = time_limit;
- }
+# ifndef SMALL_CONFIG
+ {
+ char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
+ if (0 != time_limit_string) {
+ long time_limit = atol(time_limit_string);
+ if (time_limit < 5) {
+ WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
+ "or bad syntax: Ignoring\n", 0);
+ } else {
+ GC_time_limit = time_limit;
+ }
+ }
}
- }
+ {
+ char * full_freq_string = GETENV("GC_FULL_FREQUENCY");
+ if (full_freq_string != NULL) {
+ int full_freq = atoi(full_freq_string);
+ if (full_freq > 0)
+ GC_full_freq = full_freq;
+ }
+ }
+# endif
{
char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
if (0 != interval_string) {
# endif
# endif
# if !defined(_AUX_SOURCE) || defined(__GNUC__)
- GC_ASSERT((word)(-1) > (word)0);
+ GC_STATIC_ASSERT((word)(-1) > (word)0);
/* word should be unsigned */
# endif
- GC_ASSERT((ptr_t)(word)(-1) > (ptr_t)0);
+# if !defined(__BORLANDC__) /* Workaround for Borland C */
+ GC_STATIC_ASSERT((ptr_t)(word)(-1) > (ptr_t)0);
/* Ptr_t comparisons should behave as unsigned comparisons. */
- GC_ASSERT((signed_word)(-1) < (signed_word)0);
+# endif
+ GC_STATIC_ASSERT((signed_word)(-1) < (signed_word)0);
# if !defined(SMALL_CONFIG)
if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
/* This used to test for !GC_no_win32_dlls. Why? */
/* The rest of this again assumes we don't really hold */
/* the allocation lock. */
# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
- /* Make sure marker threads and started and thread local */
+ /* Make sure marker threads are started and thread local */
/* allocation is initialized, in case we didn't get */
/* called from GC_init_parallel(); */
{
void GC_deinit(void)
{
+# ifdef GC_THREADS
if (GC_is_initialized) {
DeleteCriticalSection(&GC_write_cs);
}
+# endif
}
-# ifndef THREADS
-# define GC_need_to_lock 0 /* Not defined without threads */
+#ifdef GC_THREADS
+# ifdef PARALLEL_MARK
+# define IF_NEED_TO_LOCK(x) if (GC_parallel || GC_need_to_lock) x
+# else
+# define IF_NEED_TO_LOCK(x) if (GC_need_to_lock) x
# endif
+#else
+# define IF_NEED_TO_LOCK(x)
+#endif
+
int GC_write(const char *buf, size_t len)
{
BOOL tmp;
DWORD written;
if (len == 0)
return 0;
- if (GC_need_to_lock) EnterCriticalSection(&GC_write_cs);
+ IF_NEED_TO_LOCK(EnterCriticalSection(&GC_write_cs));
if (GC_stdout == INVALID_HANDLE_VALUE) {
- if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs);
+ IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
return -1;
} else if (GC_stdout == 0) {
char * file_name = GETENV("GC_LOG_FILE");
# if defined(_MSC_VER) && defined(_DEBUG)
_CrtDbgReport(_CRT_WARN, NULL, 0, NULL, "%.*s", len, buf);
# endif
- if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs);
+ IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
return tmp ? (int)written : -1;
}
-# undef GC_need_to_lock
#endif
#endif
/* A version of printf that is unlikely to call malloc, and is thus safer */
/* to call from the collector in case malloc has been bound to GC_malloc. */
-/* Floating point arguments ans formats should be avoided, since fp */
+/* Floating point arguments and formats should be avoided, since fp */
/* conversion is more likely to allocate. */
/* Assumes that no more than BUFSZ-1 characters are written at once. */
void GC_printf(const char *format, ...)
return old;
}
-#ifndef PCR
+#if !defined(PCR) && !defined(SMALL_CONFIG)
void GC_abort(const char *msg)
{
# if defined(MSWIN32) && !defined(DONT_USE_USER32_DLL)
(void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
# else
- GC_err_printf("%s\n", msg);
+ /* Avoid calling GC_err_printf() here, as GC_abort() could be */
+ /* called from it. Note 1: this is not an atomic output. */
+ /* Note 2: possible write errors are ignored. */
+ if (WRITE(GC_stderr, (void *)msg, strlen(msg)) >= 0)
+ (void)WRITE(GC_stderr, (void *)("\n"), 1);
# endif
if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
/* In many cases it's easier to debug a running process. */
return ofn;
}
+GC_API int GC_CALL GC_set_all_interior_pointers(int value)
+{
+ int ovalue = GC_all_interior_pointers;
+ if (value != -1) {
+ GC_ASSERT(!GC_is_initialized || value == ovalue);
+ GC_ASSERT(value == 0 || value == 1);
+ GC_all_interior_pointers = value;
+ }
+ return ovalue;
+}
+
GC_API int GC_CALL GC_set_finalize_on_demand(int value)
{
int ovalue = GC_finalize_on_demand;
GC_dont_precollect = value;
return ovalue;
}
+
+GC_API int GC_CALL GC_set_full_freq(int value)
+{
+ int ovalue = GC_full_freq;
+ if (value != -1)
+ GC_full_freq = value;
+ return ovalue;
+}
+
+GC_API unsigned long GC_CALL GC_set_time_limit(unsigned long value)
+{
+ unsigned long ovalue = GC_time_limit;
+ if (value != (unsigned long)-1L)
+ GC_time_limit = value;
+ return ovalue;
+}