+2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (Mostly Ivan Maidansky)
+ * allchblk.c, backgraph.c, dbg_mlc.c, dyn_load.c,
+ finalize.c, include/private/gc_pmark.h, malloc.c, mark.c,
+ os_dep.c, pthread_stop_world.c, pthread_support.c, reclaim.c,
+ thread_local_alloc.c: Fix printf format specifiers and argument
+ casts to silence compiler warnings.
+ * misc.c: Refine comment.
+
+2009-02-28 Hans Boehm <Hans.Boehm@hp.com>
+ * os_dep.c: Define GC_GWW_BUF_LEN more intelligently. Add FIXME
+ comment.
+
2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (With input from Ivan Maidansky)
* win32_threads.c (GC_push_stack_for): Yet another attempt
at the stack_min finding logic. Try to clean up the existing code
(unsigned long)i);
# else
if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
- i, (unsigned long)GC_free_bytes[i]);
+ (unsigned long)i, (unsigned long)GC_free_bytes[i]);
# endif
while (h != 0) {
hhdr = HDR(h);
hhdr = HDR(p);
GC_printf("\t%p ", p);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- GC_printf("Missing header!!(%d)\n", hhdr);
+ GC_printf("Missing header!!(%p)\n", hhdr);
p += HBLKSIZE;
continue;
}
word this_height;
if (GC_is_marked(q) && !(FLAG_MANY & (word)GET_OH_BG_PTR(p))) {
if (GC_print_stats)
- GC_log_printf("Found bogus pointer from 0x%lx to 0x%lx\n", q, p);
+ GC_log_printf("Found bogus pointer from %p to %p\n", q, p);
/* Reachable object "points to" unreachable one. */
/* Could be caused by our lax treatment of GC descriptors. */
this_height = 1;
GC_print_heap_obj(GC_deepest_obj);
}
if (GC_print_stats) {
- GC_log_printf("Needed max total of %ld back-edge structs\n",
+ GC_log_printf("Needed max total of %d back-edge structs\n",
GC_n_back_edge_structs);
}
GC_apply_to_each_object(reset_back_edge);
GC_err_printf("No debug info in object: Can't find reference\n");
goto out;
}
- GC_err_printf("Reachable via %d levels of pointers from ",
- (unsigned long)i);
+ GC_err_printf("Reachable via %d levels of pointers from ", i);
switch(source) {
case GC_REFD_FROM_ROOT:
GC_err_printf("root at %p\n\n", base);
GC_err_printf("%p in or near object at %p(", clobbered_addr, p);
if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
|| ohdr -> oh_string == 0) {
- GC_err_printf("<smashed>, appr. sz = %ld)\n",
- (GC_size((ptr_t)ohdr) - DEBUG_BYTES));
+ GC_err_printf("<smashed>, appr. sz = %lu)\n",
+ (unsigned long)(GC_size((ptr_t)ohdr) - DEBUG_BYTES));
} else {
if ((word)(ohdr -> oh_string) < HBLKSIZE) {
GC_err_puts("(smashed string)");
# endif /* SOLARISDL */
if (fd < 0) {
- sprintf(buf, "/proc/%d", getpid());
+ sprintf(buf, "/proc/%ld", (long)getpid());
/* The above generates a lint complaint, since pid_t varies. */
/* It's unclear how to improve this. */
fd = open(buf, O_RDONLY);
(current_sz * sizeof(prmap_t)));
}
if (ioctl(fd, PIOCMAP, addr_map) < 0) {
- GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
+ GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = %p\n",
fd, errno, needed_sz, addr_map);
ABORT("/proc PIOCMAP ioctl failed");
};
&log_dl_table_size);
if (GC_print_stats) {
GC_log_printf("Grew dl table to %u entries\n",
- (1 << log_dl_table_size));
+ (1 << (unsigned)log_dl_table_size));
}
}
index = HASH2(link, log_dl_table_size);
&log_fo_table_size);
if (GC_print_stats) {
GC_log_printf("Grew fo table to %u entries\n",
- (1 << log_fo_table_size));
+ (1 << (unsigned)log_fo_table_size));
}
}
/* in the THREADS case signals are disabled and we hold allocation */
void GC_print_finalization_stats(void)
{
struct finalizable_object *fo = GC_finalize_now;
- size_t ready = 0;
+ unsigned ready = 0;
GC_printf("%u finalization table entries; %u disappearing links\n",
- GC_fo_entries, GC_dl_entries);
+ (unsigned)GC_fo_entries, (unsigned)GC_dl_entries);
for (; 0 != fo; fo = fo_next(fo)) ++ready;
GC_printf("%u objects are eligible for immediate finalization\n", ready);
}
} \
GC_ASSERT(hhdr == GC_find_header(base)); \
GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
- TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
+ (unsigned)GC_gc_no)); \
SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ, exit_label); \
- TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
+ (unsigned)GC_gc_no)); \
TRACE_TARGET(base, \
- GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ GC_log_printf("GC:%u: marking %p from %p instead\n", (unsigned)GC_gc_no, \
base, source)); \
INCR_MARKS(hhdr); \
GC_STORE_BACK_PTR((ptr_t)source, base); \
/* May get here for pointer to start of block not at */ \
/* beginning of object. If so, it's valid, and we're fine. */ \
GC_ASSERT(high_prod >= 0 && high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
- TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
+ (unsigned)GC_gc_no)); \
SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod, exit_label); \
- TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
+ (unsigned)GC_gc_no)); \
TRACE_TARGET(base, \
- GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ GC_log_printf("GC:%u: marking %p from %p instead\n", \
+ (unsigned)GC_gc_no, \
base, source)); \
INCR_MARKS(hhdr); \
GC_STORE_BACK_PTR((ptr_t)source, base); \
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
- GC_err_printf("GC_free(%p): %d\n", p, GC_gc_no);
+ GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
scan_ptr = GC_push_next_marked_dirty(scan_ptr);
if (scan_ptr == 0) {
if (GC_print_stats) {
- GC_log_printf("Marked from %u dirty pages\n",
- GC_n_rescuing_pages);
+ GC_log_printf("Marked from %lu dirty pages\n",
+ (unsigned long)GC_n_rescuing_pages);
}
GC_push_roots(FALSE, cold_gc_frame);
GC_objects_are_marked = TRUE;
GC_mark_stack_too_small = TRUE;
if (GC_print_stats) {
GC_log_printf("Mark stack overflow; current size = %lu entries\n",
- GC_mark_stack_size);
+ (unsigned long)GC_mark_stack_size);
}
return(msp - GC_MARK_STACK_DISCARDS);
}
# ifdef ENABLE_TRACE
if (GC_trace_addr >= current_p
&& GC_trace_addr < current_p + descr) {
- GC_log_printf("GC:%d Large section; start %p len %lu\n",
- GC_gc_no, current_p, (unsigned long) descr);
+ GC_log_printf("GC:%u Large section; start %p len %lu\n",
+ (unsigned)GC_gc_no, current_p,
+ (unsigned long) descr);
}
# endif /* ENABLE_TRACE */
# ifdef PARALLEL_MARK
# ifdef ENABLE_TRACE
if (GC_trace_addr >= current_p
&& GC_trace_addr < current_p + descr) {
- GC_log_printf("GC:%d splitting (parallel) %p at %p\n",
- GC_gc_no, current_p, current_p + new_size);
+ GC_log_printf("GC:%u splitting (parallel) %p at %p\n",
+ (unsigned)GC_gc_no, current_p,
+ current_p + new_size);
}
# endif /* ENABLE_TRACE */
current_p += new_size;
# ifdef ENABLE_TRACE
if (GC_trace_addr >= current_p
&& GC_trace_addr < current_p + descr) {
- GC_log_printf("GC:%d splitting %p at %p\n",
- GC_gc_no, current_p, limit);
+ GC_log_printf("GC:%u splitting %p at %p\n",
+ (unsigned)GC_gc_no, current_p, limit);
}
# endif /* ENABLE_TRACE */
/* Make sure that pointers overlapping the two ranges are */
# ifdef ENABLE_TRACE
if (GC_trace_addr >= current_p
&& GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
- GC_log_printf("GC:%d Tracing from %p bitmap descr %lu\n",
- GC_gc_no, current_p, (unsigned long) descr);
+ GC_log_printf("GC:%u Tracing from %p bitmap descr %lu\n",
+ (unsigned)GC_gc_no, current_p,
+ (unsigned long) descr);
}
# endif /* ENABLE_TRACE */
descr &= ~GC_DS_TAGS;
PREFETCH((ptr_t)current);
# ifdef ENABLE_TRACE
if (GC_trace_addr == current_p) {
- GC_log_printf("GC:%d Considering(3) %p -> %p\n",
- GC_gc_no, current_p, (ptr_t) current);
+ GC_log_printf("GC:%u Considering(3) %p -> %p\n",
+ (unsigned)GC_gc_no, current_p,
+ (ptr_t) current);
}
# endif /* ENABLE_TRACE */
PUSH_CONTENTS((ptr_t)current, mark_stack_top,
if (GC_trace_addr >= current_p
&& GC_base(current_p) != 0
&& GC_base(current_p) == GC_base(GC_trace_addr)) {
- GC_log_printf("GC:%d Tracing from %p proc descr %lu\n",
- GC_gc_no, current_p, (unsigned long) descr);
+ GC_log_printf("GC:%u Tracing from %p proc descr %lu\n",
+ (unsigned)GC_gc_no, current_p,
+ (unsigned long) descr);
}
# endif /* ENABLE_TRACE */
credit -= GC_PROC_BYTES;
# ifdef ENABLE_TRACE
if (GC_trace_addr >= current_p
&& GC_trace_addr < limit) {
- GC_log_printf("GC:%d Tracing from %p len %lu\n",
- GC_gc_no, current_p, (unsigned long) descr);
+ GC_log_printf("GC:%u Tracing from %p len %lu\n",
+ (unsigned)GC_gc_no, current_p, (unsigned long) descr);
}
# endif /* ENABLE_TRACE */
/* The simple case in which we're scanning a range. */
PREFETCH((ptr_t)current);
# ifdef ENABLE_TRACE
if (GC_trace_addr == current_p) {
- GC_log_printf("GC:%d Considering(1) %p -> %p\n",
- GC_gc_no, current_p, (ptr_t) current);
+ GC_log_printf("GC:%u Considering(1) %p -> %p\n",
+ (unsigned)GC_gc_no, current_p, (ptr_t) current);
}
# endif /* ENABLE_TRACE */
PUSH_CONTENTS((ptr_t)current, mark_stack_top,
/* validity test. */
# ifdef ENABLE_TRACE
if (GC_trace_addr == current_p) {
- GC_log_printf("GC:%d Considering(2) %p -> %p\n",
- GC_gc_no, current_p, (ptr_t) deferred);
+ GC_log_printf("GC:%u Considering(2) %p -> %p\n",
+ (unsigned)GC_gc_no, current_p, (ptr_t) deferred);
}
# endif /* ENABLE_TRACE */
PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
if (i < 0) i = TRACE_ENTRIES-1;
p = GC_trace_buf + i;
if (p -> gc_no < gc_no || p -> kind == 0) return;
- printf("Trace:%s (gc:%d,bytes:%d) 0x%X, 0x%X\n",
- p -> kind, p -> gc_no, p -> bytes_allocd,
+ printf("Trace:%s (gc:%u,bytes:%lu) 0x%X, 0x%X\n",
+ p -> kind, (unsigned)p -> gc_no,
+ (unsigned long)p -> bytes_allocd,
(p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
}
printf("Trace incomplete\n");
close(f);
# ifdef THREADS
if (maps_size > old_maps_size) {
- GC_err_printf("Old maps size = %d, new maps size = %d\n",
- old_maps_size, maps_size);
+ GC_err_printf("Old maps size = %lu, new maps size = %lu\n",
+ (unsigned long)old_maps_size,
+ (unsigned long)maps_size);
ABORT("Unexpected asynchronous /proc/self/maps growth: "
"Unregistered thread?");
}
#ifdef GWW_VDB
-# define GC_GWW_BUF_LEN 1024
+# define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
+ /* Still susceptible to overflow, if there are very large allocations, */
+ /* and everything is dirty. */
static PVOID gww_buf[GC_GWW_BUF_LEN];
# ifdef MPROTECT_VDB
}
}
} while (count == GC_GWW_BUF_LEN);
+ /* FIXME: It's unclear from Microsoft's documentation if this loop */
+ /* is useful. We suspect the call just fails if the buffer fills */
+ /* up. But that should still be handled correctly. */
}
GC_or_pages(GC_written_pages, GC_grungy_pages);
# define PROTECT(addr, len) \
if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
&protect_junk)) { \
- DWORD last_error = GetLastError(); \
- GC_printf("Last error code: %lx\n", last_error); \
+ GC_printf("Last error code: %lx\n", (unsigned long)GetLastError()); \
ABORT("VirtualProtect failed"); \
}
# define UNPROTECT(addr, len) \
(unsigned long)
(GC_bytes_allocd + GC_bytes_allocd_before_gc));
}
- sprintf(buf, "/proc/%d", getpid());
+ sprintf(buf, "/proc/%ld", (long)getpid());
fd = open(buf, O_RDONLY);
if (fd < 0) {
ABORT("/proc open failed");
# endif
# ifdef IA64
# if DEBUG_THREADS
- GC_printf("Reg stack for thread 0x%x = [%lx,%lx)\n",
+ GC_printf("Reg stack for thread 0x%x = [%p,%p)\n",
(unsigned)p -> id, bs_lo, bs_hi);
# endif
if (THREAD_EQUAL(p -> id, me)) {
}
}
if (GC_print_stats == VERBOSE) {
- GC_log_printf("Pushed %d thread stacks\n", nthreads);
+ GC_log_printf("Pushed %d thread stacks\n", (int)nthreads);
}
if (!found_me && !GC_in_thread_creation)
ABORT("Collecting from unknown thread.");
my_mark_no = GC_mark_no;
}
# ifdef DEBUG_THREADS
- GC_printf("Starting mark helper for mark number %lu\n", my_mark_no);
+ GC_printf("Starting mark helper for mark number %lu\n",
+ (unsigned long)my_mark_no);
# endif
GC_help_marker(my_mark_no);
}
# ifdef DEBUG_THREADS
GC_printf("Starting thread 0x%x\n", (unsigned)my_pthread);
GC_printf("pid = %ld\n", (long) getpid());
- GC_printf("sp = 0x%lx\n", (long) &arg);
+ GC_printf("sp = %p\n", &arg);
# endif
LOCK();
me = GC_register_my_thread_inner(sb, my_pthread);
UNLOCK();
start = si -> start_routine;
# ifdef DEBUG_THREADS
- GC_printf("start_routine = %p\n", (void *)start);
+ GC_printf("start_routine = %p\n", (void *)(signed_word)start);
# endif
start_arg = si -> arg;
sem_post(&(si -> registered)); /* Last action on si. */
unsigned n_marks = GC_n_set_marks(hhdr);
if (hhdr -> hb_n_marks != n_marks) {
- GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind,
- bytes,
- hhdr -> hb_n_marks, n_marks);
+ GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind, (unsigned)bytes,
+ (unsigned)hhdr -> hb_n_marks, n_marks);
} else {
GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind,
- bytes, n_marks);
+ (unsigned)bytes, n_marks);
}
bytes += HBLKSIZE-1;
bytes &= ~(HBLKSIZE-1);
while (flh){
struct hblk *block = HBLKPTR(flh);
if (block != lastBlock){
- GC_printf("\nIn heap block at 0x%x:\n\t", block);
+ GC_printf("\nIn heap block at %p:\n\t", block);
lastBlock = block;
}
- GC_printf("%d: 0x%x;", ++n, flh);
+ GC_printf("%d: %p;", ++n, flh);
flh = obj_link(flh);
}
}
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
# ifdef LOG_ALLOCS
- GC_err_printf("GC_malloc(%d) = %p : %d\n", bytes, result, GC_gc_no);
+ GC_err_printf("GC_malloc(%u) = %p : %u\n",
+ (unsigned)bytes, result, (unsigned)GC_gc_no);
# endif
return result;
}