* doc/README.environment (GC_COLLECT_AT_MALLOC): Document.
* doc/README.macros (GC_COLLECT_AT_MALLOC): Likewise.
* fnlz_mlc.c (GC_core_finalized_malloc): Insert
GC_DBG_COLLECT_AT_MALLOC invocation (before LOCK).
* gcj_mlc.c (GC_core_gcj_malloc, GC_gcj_malloc_ignore_off_page):
Likewise.
* malloc.c (GC_generic_malloc, GC_malloc_atomic, GC_malloc,
GC_malloc_uncollectable): Likewise.
* mallocx.c (GC_generic_malloc_ignore_off_page,
GC_generic_malloc_many, GC_malloc_atomic_uncollectable): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page): Likewise.
* include/private/gc_priv.h (GC_COLLECT_AT_MALLOC): Recognize new
macro.
(GC_dbg_collect_at_malloc_min_lb): New global variable declaration
(only if GC_COLLECT_AT_MALLOC defined).
(GC_DBG_COLLECT_AT_MALLOC): Define new macro (invoking GC_gcollect).
* malloc.c (GC_dbg_collect_at_malloc_min_lb): New global variable
(only if GC_COLLECT_AT_MALLOC defined).
* misc.c (GC_init): Test "GC_COLLECT_AT_MALLOC" environment variable
and alter default GC_dbg_collect_at_malloc_min_lb value (only if
GC_COLLECT_AT_MALLOC macro defined).
if you have a bug to report, but please include only the
last complete dump.
+GC_COLLECT_AT_MALLOC=<n> - Override the default value specified by
+ the GC_COLLECT_AT_MALLOC macro. Has no effect unless
+ GC is built with GC_COLLECT_AT_MALLOC defined.
+
GC_BACKTRACES=<n> - Generate n random back-traces (for heap profiling) after
each GC. Collector must have been built with
KEEP_BACK_PTRS. This won't generate useful output unless
DEBUG_THREADS Turn on printing additional thread-support debugging
information.
+GC_COLLECT_AT_MALLOC=<n> Force garbage collection at every
+ GC_malloc_* call with a size greater than or equal to the specified
+ value. (Might be useful for application debugging or in find-leak mode.)
+
JAVA_FINALIZATION Makes it somewhat safer to finalize objects out of
order by specifying a nonstandard finalization mark procedure (see
finalize.c). Objects reachable from finalizable objects will be marked
lb += sizeof(void *);
GC_ASSERT(done_init);
if (EXPECT(SMALL_OBJ(lb), TRUE)) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
opp = &GC_finalized_objfreelist[lg];
LOCK();
word lg;
DCL_LOCK_STATE;
+ GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_gcjobjfreelist[lg]);
word lg;
DCL_LOCK_STATE;
+ GC_DBG_COLLECT_AT_MALLOC(lb);
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_gcjobjfreelist[lg]);
#define GENERAL_MALLOC_IOP(lb,k) \
GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
+#ifdef GC_COLLECT_AT_MALLOC
+ extern size_t GC_dbg_collect_at_malloc_min_lb;
+ /* variable visible outside for debugging */
+# define GC_DBG_COLLECT_AT_MALLOC(lb) \
+ (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? \
+ (GC_gcollect(), 0) : 0)
+#else
+# define GC_DBG_COLLECT_AT_MALLOC(lb) (void)0
+#endif /* !GC_COLLECT_AT_MALLOC */
+
/* Allocation routines that bypass the thread local cache. */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc(size_t);
return op;
}
+#ifdef GC_COLLECT_AT_MALLOC
+ /* Parameter to force GC at every malloc of size greater or equal to */
+ /* the given value. This might be handy during debugging. */
+ size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
+#endif
+
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
+ GC_DBG_COLLECT_AT_MALLOC(lb);
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
size_t lb_rounded;
word n_blocks;
GC_bool init;
+
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
+ GC_DBG_COLLECT_AT_MALLOC(lb);
LOCK();
result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
if (0 != result) {
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
+ GC_DBG_COLLECT_AT_MALLOC(lb);
LOCK();
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
# endif
}
}
+# ifdef GC_COLLECT_AT_MALLOC
+ {
+ char * string = GETENV("GC_COLLECT_AT_MALLOC");
+ if (0 != string) {
+ size_t min_lb = (size_t)STRTOULL(string, NULL, 10);
+ if (min_lb > 0)
+ GC_dbg_collect_at_malloc_min_lb = min_lb;
+ }
+ }
+# endif
# ifndef GC_DISABLE_INCREMENTAL
{
char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
lb += TYPD_EXTRA_BYTES;
if(SMALL_OBJ(lb)) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
opp = &(GC_eobjfreelist[lg]);
LOCK();
lb += TYPD_EXTRA_BYTES;
if( SMALL_OBJ(lb) ) {
+ GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
opp = &(GC_eobjfreelist[lg]);
LOCK();