+2011-03-13 Ivan Maidanski <ivmai@mail.ru>
+
+ * finalize.c (GC_register_finalizer_inner): Fix a typo in a
+ comment.
+ * include/private/gcconfig.h (STACKBOTTOM): Ditto.
+ * gcj_mlc.c (GC_core_gcj_malloc): Replace 0/1 with TRUE/FALSE in
+ EXPECT (the 2nd argument); see the sketch after this entry.
+ * malloc.c (GC_core_malloc_atomic, GC_core_malloc, GC_free):
+ Ditto.
+ * mark.c (GC_mark_and_push, GC_mark_and_push_stack): Ditto.
+ * thread_local_alloc.c (GC_malloc, GC_malloc_atomic): Ditto.
+ * include/private/gc_hdrs.h (HC_GET_HDR): Ditto.
+ * include/private/gc_priv.h (SMALL_OBJ): Ditto.
+ * include/private/specific.h (getspecific): Ditto.
+ * pthread_support.c (LOCK_STATS): Add a comment.
+
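The EXPECT change above is mechanical but easy to misread, so here is a rough sketch of what it buys. It assumes a GCC-style __builtin_expect and the usual TRUE/FALSE spellings; the collector's actual definitions live in include/private/gc_priv.h and may differ in detail, and example_alloc below is a hypothetical helper, not GC code.

/* Rough sketch, for illustration only: EXPECT forwards the programmer's  */
/* prediction for a boolean test to the compiler as a branch hint.        */
/* These definitions are assumptions; see include/private/gc_priv.h for   */
/* the collector's real ones.                                             */
#ifndef TRUE
# define TRUE 1
# define FALSE 0
#endif

#if defined(__GNUC__)
# define EXPECT(expr, outcome) __builtin_expect(expr, outcome)
#else
# define EXPECT(expr, outcome) (expr)   /* no hint available */
#endif

/* Hypothetical fast-path allocator in the style of GC_malloc: the free   */
/* list is expected to be non-empty, so the refill case is marked as the  */
/* unlikely branch.  Writing FALSE rather than 0 makes it obvious that    */
/* the 2nd argument is the expected truth value of the test, not a size   */
/* or a return code.                                                      */
static void *example_alloc(void **freelist)
{
  void *op = *freelist;
  if (EXPECT(op == 0, FALSE)) {
    return 0;                   /* stand-in for the slow path (refill/GC) */
  }
  *freelist = *(void **)op;     /* pop the first object off the list      */
  return op;
}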
2011-03-08 Ivan Maidanski <ivmai@mail.ru>

	* include/gc_pthread_redirects.h (GC_NO_DLOPEN,
curr_fo = fo_next(curr_fo);
}
if (EXPECT(new_fo != 0, FALSE)) {
- /* new_fo is returned GC_oom_fn(), so fn != 0 and hhdr != 0. */
+ /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
break;
}
if (fn == 0) {
opp = &(GC_gcjobjfreelist[lg]);
LOCK();
op = *opp;
- if(EXPECT(op == 0, 0)) {
+ if(EXPECT(op == 0, FALSE)) {
maybe_finalize();
op = (ptr_t)GENERAL_MALLOC_INNER((word)lb, GC_gcj_kind);
if (0 == op) {
# define HC_GET_HDR(p, hhdr, source, exit_label) \
{ \
hdr_cache_entry * hce = HCE(p); \
- if (EXPECT(HCE_VALID_FOR(hce, p), 1)) { \
+ if (EXPECT(HCE_VALID_FOR(hce, p), TRUE)) { \
HC_HIT(); \
hhdr = hce -> hce_hdr; \
} else { \
# define ROUNDED_UP_GRANULES(n) \
BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
-# define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
+# define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), TRUE)
# else
# define SMALL_OBJ(bytes) \
- (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
- (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
+ (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), TRUE) \
+ || (bytes) <= MAXOBJBYTES - EXTRA_BYTES)
/* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES. */
/* But we try to avoid looking up EXTRA_BYTES. */
# endif
* If STACKBOTTOM is defined, then it's value will be used directly as the
* stack base. If LINUX_STACKBOTTOM is defined, then it will be determined
* with a method appropriate for most Linux systems. Currently we look
- * first for __libc_stack_end (currently only id USE_LIBC_PRIVATES is
+ * first for __libc_stack_end (currently only if USE_LIBC_PRIVATES is
* defined), and if that fails read it from /proc. (If USE_LIBC_PRIVATES
* is not defined and NO_PROC_STAT is defined, we revert to HEURISTIC2.)
* If either of the last two macros are defined, then STACKBOTTOM is computed
unsigned hash_val = CACHE_HASH(qtid);
tse * volatile * entry_ptr = key -> cache + hash_val;
tse * entry = *entry_ptr; /* Must be loaded only once. */
- if (EXPECT(entry -> qtid == qtid, 1)) {
+ if (EXPECT(entry -> qtid == qtid, TRUE)) {
GC_ASSERT(entry -> thread == pthread_self());
return entry -> value;
}
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
- /* before we release the lock. */
+ /* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
- if( EXPECT((op = *opp) == 0, 0) ) {
+ if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
- if( EXPECT((op = *opp) == 0, 0) ) {
+ if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
- return(GENERAL_MALLOC((word)lb, NORMAL));
+ return (GENERAL_MALLOC((word)lb, NORMAL));
}
GC_ASSERT(0 == obj_link(op)
|| ((word)obj_link(op)
ngranules = BYTES_TO_GRANULES(sz);
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
- if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
+ if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
PREFETCH(obj);
GET_HDR(obj, hhdr);
- if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
if (GC_all_interior_pointers) {
hhdr = GC_find_header(GC_base(obj));
if (hhdr == 0) {
return mark_stack_ptr;
}
}
- if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
+ if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
return mark_stack_ptr;
}
PREFETCH(p);
GET_HDR(p, hhdr);
- if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
if (hhdr != 0) {
r = GC_base(p);
hhdr = HDR(r);
return;
}
}
- if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
+ if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
return;
}
/* explicitly sleep. */
/* #define LOCK_STATS */
+/* Note that LOCK_STATS requires AO_HAVE_test_and_set. */
#ifdef LOCK_STATS
AO_t GC_spin_count = 0;
AO_t GC_block_count = 0;
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
- if (EXPECT(0 == k, 0)) {
+ if (EXPECT(0 == k, FALSE)) {
/* We haven't yet run GC_init_parallel. That means */
/* we also aren't locking, so this is fairly cheap. */
return GC_core_malloc(bytes);
tsd = GC_getspecific(GC_thread_key);
# endif
# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
- if (EXPECT(0 == tsd, 0)) {
+ if (EXPECT(0 == tsd, FALSE)) {
return GC_core_malloc(bytes);
}
# endif
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
- if (EXPECT(0 == k, 0)) {
+ if (EXPECT(0 == k, FALSE)) {
/* We haven't yet run GC_init_parallel. That means */
/* we also aren't locking, so this is fairly cheap. */
return GC_core_malloc(bytes);
tsd = GC_getspecific(GC_thread_key);
# endif
# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
- if (EXPECT(0 == tsd, 0)) {
+ if (EXPECT(0 == tsd, FALSE)) {
return GC_core_malloc(bytes);
}
# endif