+2011-03-15 Ivan Maidanski <ivmai@mail.ru>
+
+ * alloc.c (GC_finish_collection): Replace "#else #ifdef" with
+ "#elif".
+ * include/private/gc_priv.h (CPP_LOG_HBLKSIZE, LOG_PHT_ENTRIES,
+ MAX_ROOT_SETS, MAX_HEAP_SECTS): Ditto.
+ * alloc.c (GC_expand_hp_inner): Check for GC_collect_at_heapsize
+ overflow even if not LARGE_CONFIG.
+ * dbg_mlc.c (GC_check_heap_proc): Check "oh" size even if
+ SMALL_CONFIG.
+ * finalize.c (GC_print_finalization_stats): Fix "#endif" comment.
+ * new_hblk.c (GC_build_fl_clear2, GC_build_fl_clear4, GC_build_fl2,
+ GC_build_fl4): Reformat the comment; adjust code indentation.
+ * new_hblk.c (GC_build_fl): Fix indentation.
+ * include/private/gc_hdrs.h: Ditto.
+ * doc/README.environment (GC_LOG_FILE, GC_PRINT_VERBOSE_STATS,
+ GC_FULL_FREQUENCY): Refine the documentation.
+
2011-03-13 Ivan Maidanski <ivmai@mail.ru>
* extra/msvc_dbg.c: Test _MSC_VER macro; include "gc.h" (for
if (GC_print_back_height) {
# ifdef MAKE_BACK_GRAPH
GC_traverse_back_graph();
-# else
-# ifndef SMALL_CONFIG
- GC_err_printf("Back height not available: "
- "Rebuild collector with -DMAKE_BACK_GRAPH\n");
-# endif
+# elif !defined(SMALL_CONFIG)
+ GC_err_printf("Back height not available: "
+ "Rebuild collector with -DMAKE_BACK_GRAPH\n");
# endif
}
/* Force GC before we are likely to allocate past expansion_slop */
GC_collect_at_heapsize =
GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
-# if defined(LARGE_CONFIG)
- if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
+ if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
GC_collect_at_heapsize = (word)(-1);
-# endif
return(TRUE);
}
/* I hold the allocation lock. Normally called by collector. */
STATIC void GC_check_heap_proc(void)
{
-# ifndef SMALL_CONFIG
- GC_STATIC_ASSERT((sizeof(oh) & (GRANULE_BYTES - 1)) == 0);
- /* FIXME: Should we check for twice that alignment? */
-# endif
- GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
+ GC_STATIC_ASSERT((sizeof(oh) & (GRANULE_BYTES - 1)) == 0);
+ /* FIXME: Should we check for twice that alignment? */
+ GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}
#endif /* !SHORT_DBG_HDRS */
GC_PRINT_STATS - Turn on GC logging. Not functional with SMALL_CONFIG.
-GC_LOG_FILE - The name of the log file. Stderr by default.
+GC_LOG_FILE - The name of the log file. Stderr by default. Not functional
+ with SMALL_CONFIG.
-GC_PRINT_VERBOSE_STATS - Turn on even more logging.
+GC_PRINT_VERBOSE_STATS - Turn on even more logging. Not functional with
+ SMALL_CONFIG.
GC_DUMP_REGULARLY - Generate a GC debugging dump GC_dump() on startup
and during every collection. Very verbose. Useful
 only with unlimited pause time.
GC_FULL_FREQUENCY - Set the desired number of partial collections between full
- collections. Matters only if GC_incremental is set.
+ collections. Matters only if GC_incremental is set.
+ Not functional with SMALL_CONFIG.
GC_FREE_SPACE_DIVISOR - Set GC_free_space_divisor to the indicated value.
Setting it to larger values decreases space consumption
"%ld links cleared\n",
ready, (long)GC_old_dl_entries - (long)GC_dl_entries);
}
-#endif /* SMALL_CONFIG */
+#endif /* !SMALL_CONFIG */
* modified is included with the above copyright notice.
*/
/* Boehm, July 11, 1995 11:54 am PDT */
-# ifndef GC_HEADERS_H
-# define GC_HEADERS_H
+#ifndef GC_HEADERS_H
+#define GC_HEADERS_H
+
typedef struct hblkhdr hdr;
-# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
+#if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
--> Get a real machine.
-# endif
+#endif
/*
* The 2 level tree data structure that is used to find block headers.
* memory references from each pointer validation.
*/
-# if CPP_WORDSZ > 32
-# define HASH_TL
-# endif
+#if CPP_WORDSZ > 32
+# define HASH_TL
+#endif
/* Define appropriate out-degrees for each of the two tree levels */
-# ifdef SMALL_CONFIG
-# define LOG_BOTTOM_SZ 11
- /* Keep top index size reasonable with smaller blocks. */
-# else
-# define LOG_BOTTOM_SZ 10
-# endif
-# ifndef HASH_TL
-# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
-# else
-# define LOG_TOP_SZ 11
-# endif
-# define TOP_SZ (1 << LOG_TOP_SZ)
-# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+#ifdef SMALL_CONFIG
+# define LOG_BOTTOM_SZ 11
+ /* Keep top index size reasonable with smaller blocks. */
+#else
+# define LOG_BOTTOM_SZ 10
+#endif
+#define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+
+#ifndef HASH_TL
+# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
+#else
+# define LOG_TOP_SZ 11
+#endif
+#define TOP_SZ (1 << LOG_TOP_SZ)
/* #define COUNT_HDR_CACHE_HITS */
-# ifdef COUNT_HDR_CACHE_HITS
- extern word GC_hdr_cache_hits; /* used for debugging/profiling */
- extern word GC_hdr_cache_misses;
-# define HC_HIT() ++GC_hdr_cache_hits
-# define HC_MISS() ++GC_hdr_cache_misses
-# else
-# define HC_HIT()
-# define HC_MISS()
-# endif
+#ifdef COUNT_HDR_CACHE_HITS
+ extern word GC_hdr_cache_hits; /* used for debugging/profiling */
+ extern word GC_hdr_cache_misses;
+# define HC_HIT() ++GC_hdr_cache_hits
+# define HC_MISS() ++GC_hdr_cache_misses
+#else
+# define HC_HIT()
+# define HC_MISS()
+#endif
- typedef struct hce {
- word block_addr; /* right shifted by LOG_HBLKSIZE */
- hdr * hce_hdr;
- } hdr_cache_entry;
+typedef struct hce {
+ word block_addr; /* right shifted by LOG_HBLKSIZE */
+ hdr * hce_hdr;
+} hdr_cache_entry;
-# define HDR_CACHE_SIZE 8 /* power of 2 */
+#define HDR_CACHE_SIZE 8 /* power of 2 */
-# define DECLARE_HDR_CACHE \
+#define DECLARE_HDR_CACHE \
hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
-# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache))
+#define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache))
-# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
+#define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
-# define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
+#define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
((word)(h) >> LOG_HBLKSIZE))
-# define HCE_HDR(h) ((hce) -> hce_hdr)
+#define HCE_HDR(h) ((hce) -> hce_hdr)
#ifdef PRINT_BLACK_LIST
GC_INNER hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce,
/* is set. */
/* Returns zero if p points to somewhere other than the first page */
/* of an object, and it is not a valid pointer to the object. */
-# define HC_GET_HDR(p, hhdr, source, exit_label) \
+#define HC_GET_HDR(p, hhdr, source, exit_label) \
{ \
hdr_cache_entry * hce = HCE(p); \
if (EXPECT(HCE_VALID_FOR(hce, p), TRUE)) { \
/* GC_all_nils. */
-# define MAX_JUMP (HBLKSIZE - 1)
+#define MAX_JUMP (HBLKSIZE - 1)
-# define HDR_FROM_BI(bi, p) \
+#define HDR_FROM_BI(bi, p) \
((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
-# ifndef HASH_TL
-# define BI(p) (GC_top_index \
- [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
-# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
-# ifdef SMALL_CONFIG
-# define HDR(p) GC_find_header((ptr_t)(p))
-# else
-# define HDR(p) HDR_INNER(p)
-# endif
-# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
-# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
-# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
-# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
-# else /* hash */
-/* Hash function for tree top level */
-# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
-/* Set bottom_indx to point to the bottom index for address p */
-# define GET_BI(p, bottom_indx) \
- { \
- register word hi = \
- (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
- register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
- \
- while (_bi -> key != hi && _bi != GC_all_nils) \
- _bi = _bi -> hash_link; \
- (bottom_indx) = _bi; \
- }
-# define GET_HDR_ADDR(p, ha) \
- { \
- register bottom_index * bi; \
- \
- GET_BI(p, bi); \
- (ha) = &(HDR_FROM_BI(bi, p)); \
- }
-# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
- (hhdr) = *_ha; }
-# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
- *_ha = (hhdr); }
-# define HDR(p) GC_find_header((ptr_t)(p))
+#ifndef HASH_TL
+# define BI(p) (GC_top_index \
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
+# ifdef SMALL_CONFIG
+# define HDR(p) GC_find_header((ptr_t)(p))
+# else
+# define HDR(p) HDR_INNER(p)
# endif
+# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
+# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
+# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
+# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
+#else /* hash */
+ /* Hash function for tree top level */
+# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
+ /* Set bottom_indx to point to the bottom index for address p */
+# define GET_BI(p, bottom_indx) \
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
+# define GET_HDR_ADDR(p, ha) \
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(HDR_FROM_BI(bi, p)); \
+ }
+# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ (hhdr) = *_ha; }
+# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ *_ha = (hhdr); }
+# define HDR(p) GC_find_header((ptr_t)(p))
+#endif
/* Is the result a forwarding address to someplace closer to the */
/* beginning of the block or NULL? */
-# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((size_t) (hhdr) <= MAX_JUMP)
+#define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((size_t) (hhdr) <= MAX_JUMP)
/* Get an HBLKSIZE aligned address closer to the beginning of the block */
/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
-# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
-# endif /* GC_HEADERS_H */
+#define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
+
+#endif /* GC_HEADERS_H */
/* */
/*********************/
-/* Heap block size, bytes. Should be power of 2. */
+/* Heap block size, bytes. Should be power of 2. */
/* Incremental GC with MPROTECT_VDB currently requires the */
/* page size to be a multiple of HBLKSIZE. Since most modern */
/* architectures support variable page sizes down to 4K, and */
/* X86 is generally 4K, we now default to 4K, except for */
/* Alpha: Seems to be used with 8K pages. */
/* SMALL_CONFIG: Want less block-level fragmentation. */
-
#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
# define CPP_LOG_HBLKSIZE 10
+# elif defined(ALPHA)
+# define CPP_LOG_HBLKSIZE 13
# else
-# if defined(ALPHA)
-# define CPP_LOG_HBLKSIZE 13
-# else
-# define CPP_LOG_HBLKSIZE 12
-# endif
+# define CPP_LOG_HBLKSIZE 12
# endif
#else
# if HBLKSIZE == 512
# define CPP_LOG_HBLKSIZE 9
-# endif
-# if HBLKSIZE == 1024
+# elif HBLKSIZE == 1024
# define CPP_LOG_HBLKSIZE 10
-# endif
-# if HBLKSIZE == 2048
+# elif HBLKSIZE == 2048
# define CPP_LOG_HBLKSIZE 11
-# endif
-# if HBLKSIZE == 4096
+# elif HBLKSIZE == 4096
# define CPP_LOG_HBLKSIZE 12
-# endif
-# if HBLKSIZE == 8192
+# elif HBLKSIZE == 8192
# define CPP_LOG_HBLKSIZE 13
-# endif
-# if HBLKSIZE == 16384
+# elif HBLKSIZE == 16384
# define CPP_LOG_HBLKSIZE 14
-# endif
-# ifndef CPP_LOG_HBLKSIZE
+# else
--> fix HBLKSIZE
# endif
# undef HBLKSIZE
# ifdef LARGE_CONFIG
# if CPP_WORDSZ == 32
-# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
+# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
/* which is >= 4GB. Each table takes */
/* 128KB, some of which may never be */
/* touched. */
# else
-# define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
+# define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
/* which is >= 8GB. Each table takes */
/* 256KB, some of which may never be */
/* touched. */
# endif
-# else
-# ifdef SMALL_CONFIG
-# define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
- /* to more than 32K hblks = 128MB. */
- /* Each hash table occupies 4K bytes. */
-# else /* default "medium" configuration */
-# define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
+# elif !defined(SMALL_CONFIG)
+# define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
/* to more than 256K hblks >= 1GB. */
/* Each hash table occupies 32K bytes. */
/* Even for somewhat smaller heaps, */
/* say half that, collisions may be an */
/* issue because we blacklist */
/* addresses outside the heap. */
-# endif
+# else
+# define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
+ /* to more than 32K hblks = 128MB. */
+ /* Each hash table occupies 4K bytes. */
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
/* registered as static roots. */
# ifdef LARGE_CONFIG
# define MAX_ROOT_SETS 8192
+# elif !defined(SMALL_CONFIG)
+# define MAX_ROOT_SETS 2048
# else
-# ifdef SMALL_CONFIG
-# define MAX_ROOT_SETS 512
-# else
-# define MAX_ROOT_SETS 2048
-# endif
+# define MAX_ROOT_SETS 512
# endif
# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
# else
# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
# endif
+# elif defined(SMALL_CONFIG)
+# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
+# elif CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 1024 /* Roughly 8GB */
# else
-# ifdef SMALL_CONFIG
-# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
-# else
-# if CPP_WORDSZ > 32
-# define MAX_HEAP_SECTS 1024 /* Roughly 8GB */
-# else
-# define MAX_HEAP_SECTS 512 /* Roughly 4GB */
-# endif
-# endif
+# define MAX_HEAP_SECTS 512 /* Roughly 4GB */
# endif
#endif /* !MAX_HEAP_SECTS */
#include <stdio.h>
#ifndef SMALL_CONFIG
-/*
- * Build a free list for size 2 (words) cleared objects inside hblk h.
- * Set the last link to
- * be ofl. Return a pointer tpo the first free list entry.
- */
-STATIC ptr_t GC_build_fl_clear2(struct hblk *h, ptr_t ofl)
-{
+ /* Build a free list for size 2 (words) cleared objects inside */
+ /* hblk h. Set the last link to be ofl. Return a pointer to the */
+ /* first free list entry. */
+ STATIC ptr_t GC_build_fl_clear2(struct hblk *h, ptr_t ofl)
+ {
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
p[3] = 0;
};
return((ptr_t)(p-2));
-}
+ }
-/* The same for size 4 cleared objects */
-STATIC ptr_t GC_build_fl_clear4(struct hblk *h, ptr_t ofl)
-{
+ /* The same for size 4 cleared objects. */
+ STATIC ptr_t GC_build_fl_clear4(struct hblk *h, ptr_t ofl)
+ {
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
CLEAR_DOUBLE(p+2);
};
return((ptr_t)(p-4));
-}
+ }
-/* The same for size 2 uncleared objects */
-STATIC ptr_t GC_build_fl2(struct hblk *h, ptr_t ofl)
-{
+ /* The same for size 2 uncleared objects. */
+ STATIC ptr_t GC_build_fl2(struct hblk *h, ptr_t ofl)
+ {
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
p[2] = (word)p;
};
return((ptr_t)(p-2));
-}
+ }
-/* The same for size 4 uncleared objects */
-STATIC ptr_t GC_build_fl4(struct hblk *h, ptr_t ofl)
-{
+ /* The same for size 4 uncleared objects. */
+ STATIC ptr_t GC_build_fl4(struct hblk *h, ptr_t ofl)
+ {
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
p[4] = (word)p;
};
return((ptr_t)(p-4));
-}
-
+ }
#endif /* !SMALL_CONFIG */
-
/* Build a free list for objects of size sz inside heap block h. */
/* Clear objects inside h if clear is set. Add list to the end of */
/* the free list we build. Return the new free list. */
PREFETCH_FOR_WRITE((ptr_t)h + 128);
PREFETCH_FOR_WRITE((ptr_t)h + 256);
PREFETCH_FOR_WRITE((ptr_t)h + 378);
- /* Handle small objects sizes more efficiently. For larger objects */
- /* the difference is less significant. */
-# ifndef SMALL_CONFIG
- switch (sz) {
+# ifndef SMALL_CONFIG
+ /* Handle small object sizes more efficiently. For larger objects */
+ /* the difference is less significant. */
+ switch (sz) {
case 2: if (clear) {
return GC_build_fl_clear2(h, list);
} else {
}
default:
break;
- }
-# endif /* !SMALL_CONFIG */
+ }
+# endif /* !SMALL_CONFIG */
/* Clear the page if necessary. */
if (clear) BZERO(h, HBLKSIZE);
return ((ptr_t)p);
}
-
/*
* Allocate a new heapblock for small objects of size gran granules.
* Add all of the heapblock's objects to the free list for objects