/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

#ifdef ENABLE_DISCLAIM
# include "gc_disclaim.h"
#endif
GC_INNER signed_word GC_bytes_found = 0;
                        /* Number of bytes of memory reclaimed     */
                        /* minus the number of bytes originally    */
                        /* on free lists which we had to drop.     */
#if defined(PARALLEL_MARK)
  GC_INNER signed_word GC_fl_builder_count = 0;
        /* Number of threads currently building free lists without     */
        /* holding GC lock.  It is not safe to collect if this is      */
        /* nonzero.  Also, together with the mark lock, it is used as  */
        /* a semaphore during marker threads startup.                  */
#endif /* PARALLEL_MARK */
/* We defer printing of leaked objects until we're done with the GC    */
/* cycle, since the routine for printing objects needs to run outside  */
/* the collector, e.g. without the allocation lock.                    */
#ifndef MAX_LEAKED
# define MAX_LEAKED 40
#endif
STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
STATIC unsigned GC_n_leaked = 0;

GC_INNER GC_bool GC_have_errors = FALSE;
#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
  STATIC void GC_reclaim_unconditionally_marked(void);
#endif
GC_INLINE void GC_add_leaked(ptr_t leaked)
{
# ifndef SHORT_DBG_HDRS
    if (GC_findleak_delay_free && !GC_check_leaked(leaked))
      return;
# endif

    GC_have_errors = TRUE;
    if (GC_n_leaked < MAX_LEAKED) {
      GC_leaked[GC_n_leaked++] = leaked;
      /* Make sure it's not reclaimed this cycle */
      GC_set_mark_bit(leaked);
    }
}
/* Print all objects on the list after printing any smashed objects.   */
/* Clear both lists.  Called without the allocation lock held.         */
GC_INNER void GC_print_all_errors(void)
{
    static GC_bool printing_errors = FALSE;
    GC_bool have_errors;
    unsigned i, n_leaked;
    ptr_t leaked[MAX_LEAKED];
    DCL_LOCK_STATE;

    LOCK();
    if (printing_errors) {
        UNLOCK();
        return;
    }
    have_errors = GC_have_errors;
    printing_errors = TRUE;
    n_leaked = GC_n_leaked;
    if (n_leaked > 0) {
      GC_ASSERT(n_leaked <= MAX_LEAKED);
      BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
      GC_n_leaked = 0;
      BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
    }
    UNLOCK();

    if (GC_debugging_started) {
      GC_print_all_smashed();
    } else {
      have_errors = FALSE;
    }

    if (n_leaked > 0) {
        GC_err_printf("Found %u leaked objects:\n", n_leaked);
        have_errors = TRUE;
    }
    for (i = 0; i < n_leaked; i++) {
        ptr_t p = leaked[i];
#       ifndef SKIP_LEAKED_OBJECTS_PRINTING
          GC_print_heap_obj(p);
#       endif
        GC_free(p);
    }

    if (have_errors
#       ifndef GC_ABORT_ON_LEAK
          && GETENV("GC_ABORT_ON_LEAK") != NULL
#       endif
       ) {
      ABORT("Leaked or smashed objects encountered");
    }

    LOCK();
    printing_errors = FALSE;
    UNLOCK();
}
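
/* A client-side sketch (not part of this file) of how the leak        */
/* reporting above is typically driven.  GC_set_find_leak, GC_INIT,    */
/* GC_MALLOC and GC_gcollect are public API; the allocation size is    */
/* arbitrary:                                                          */
/*
 *  #include "gc.h"
 *
 *  int main(void)
 *  {
 *      GC_set_find_leak(1); // must be called before GC_INIT
 *      GC_INIT();
 *      GC_MALLOC(47);       // pointer dropped immediately
 *      GC_gcollect();       // collection reports the unreachable object
 *      return 0;
 *  }
 */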
/* Test whether a block is completely empty, i.e. contains no marked   */
/* objects.  This does not require the block to be in physical memory. */
GC_INNER GC_bool GC_block_empty(hdr *hhdr)
{
    return (hhdr -> hb_n_marks == 0);
}
STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
{
    return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz) / 8);
}
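
/* For instance, assuming the common 4 KB HBLKSIZE: a block holding    */
/* 16-byte objects has HBLK_OBJS(16) == 256, so it is considered       */
/* nearly full once more than 7 * 256 / 8 == 224 of them are marked.   */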
/* TODO: This should perhaps again be specialized for USE_MARK_BYTES   */
/* and USE_MARK_BITS cases.                                            */
/*
 * Restore unmarked small objects in hbp of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, word sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
    while ((word)p <= (word)plim) {
        if (mark_bit_from_hdr(hhdr, bit_no)) {
            p = (word *)((ptr_t)p + sz);
        } else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = (ptr_t)p;
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              GC_ASSERT(!(sz & 1)
                        && !((word)p & (2 * sizeof(word) - 1)));
              p[1] = 0;
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
}
/* The same thing, but don't clear objects: */
STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, word sz,
                               ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp->hb_body);
    plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);

    /* go through all words in block */
    while ((word)p <= (word)plim) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = (ptr_t)p;
        }
        p = (word *)((ptr_t)p + sz);
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
}
#ifdef ENABLE_DISCLAIM
  /* Call reclaim notifier for block's kind on each unmarked object in */
  /* block, all within a pair of corresponding enter/leave callbacks.  */
  STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, word sz,
                                       ptr_t list, signed_word *count)
  {
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;
    struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
    int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp -> hb_body);
    plim = (word *)((ptr_t)p + HBLKSIZE - sz);

    while ((word)p <= (word)plim) {
        int marked = mark_bit_from_hdr(hhdr, bit_no);

        if (!marked && (*disclaim)(p)) {
            set_mark_bit_from_hdr(hhdr, bit_no);
            hhdr -> hb_n_marks++;
            marked = TRUE;
        }
        if (marked)
            p = (word *)((ptr_t)p + sz);
        else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = (ptr_t)p;
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              GC_ASSERT((sz & 1) == 0);
              GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
              p[1] = 0;
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
  }
#endif /* ENABLE_DISCLAIM */
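
/* A client-side sketch (not part of this file) showing how a disclaim */
/* notifier like the one invoked above is installed.  The object kind  */
/* and callback below are illustrative; GC_register_disclaim_proc and  */
/* GC_disclaim_proc are the public API from gc_disclaim.h, and the     */
/* final argument requests unconditional marking for the kind.  A      */
/* nonzero return vetoes reclamation, i.e. the "resurrection" path     */
/* taken above:                                                        */
/*
 *  #include "gc_disclaim.h"
 *
 *  static int GC_CALLBACK my_disclaim(void *obj)
 *  {
 *      release_external_resource(obj); // hypothetical cleanup helper
 *      return 0;                       // 0 => allow normal reclamation
 *  }
 *
 *  ...
 *  GC_register_disclaim_proc(my_kind, my_disclaim, 1);
 */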
/* Don't really reclaim objects, just check for unmarked ones: */
STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
    word bit_no;
    ptr_t p, plim;

    GC_ASSERT(sz == hhdr -> hb_sz);
    /* go through all words in block */
    p = hbp->hb_body;
    plim = p + HBLKSIZE - sz;
    for (bit_no = 0; (word)p <= (word)plim;
         p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            GC_add_leaked(p);
        }
    }
}
/* Is a pointer-free block?  Same as IS_PTRFREE macro (in os_dep.c) but */
/* uses unordered atomic access to avoid racing with GC_realloc.        */
#ifdef AO_HAVE_load
# define IS_PTRFREE_SAFE(hhdr) \
                (AO_load((volatile AO_t *)&(hhdr)->hb_descr) == 0)
#else
  /* No race as GC_realloc holds the lock while updating hb_descr.      */
# define IS_PTRFREE_SAFE(hhdr) ((hhdr)->hb_descr == 0)
#endif
/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * Sz is now in bytes.
 */
GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                                  GC_bool init, ptr_t list,
                                  signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
#   ifndef GC_DISABLE_INCREMENTAL
      GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
#   endif
#   ifdef ENABLE_DISCLAIM
      if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
        result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
      } else
#   endif
    /* else */ if (init || GC_debugging_started) {
      result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
      GC_ASSERT(IS_PTRFREE_SAFE(hhdr));
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}
/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then
 * caller should perform that check.
 */
STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
                                            GC_bool report_if_found)
{
    hdr *hhdr = HDR(hbp);
    word sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);

    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  (ptr_t)(*flh), &GC_bytes_found);
    }
}
#ifdef ENABLE_DISCLAIM
  STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
  {
    hdr *hhdr = HDR(hbp);
    word sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
    void *flh_next;

    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
    flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  (ptr_t)(*flh), &GC_bytes_found);
    if (hhdr -> hb_n_marks) {
        *flh = flh_next;
    } else {
        GC_bytes_found += HBLKSIZE;
        GC_freehblk(hbp);
    }
  }
#endif /* ENABLE_DISCLAIM */
/*
 * Restore an unmarked large object or an entirely empty block of small
 * objects to the heap block free list.
 * Otherwise enqueue the block for later processing
 * by GC_reclaim_small_nonempty_block.
 * If report_if_found is TRUE, then process any block immediately, and
 * simply report free objects; do not actually reclaim them.
 */
STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    word sz = hhdr -> hb_sz; /* size of objects in current block */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];

    if (sz > MAXOBJBYTES) { /* 1 big object */
        if (!mark_bit_from_hdr(hhdr, 0)) {
            if (report_if_found) {
              GC_add_leaked((ptr_t)hbp);
            } else {
              word blocks;

#             ifdef ENABLE_DISCLAIM
                if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
                  if ((*ok->ok_disclaim_proc)(hbp)) {
                    /* Not disclaimed => resurrect the object. */
                    set_mark_bit_from_hdr(hhdr, 0);
                    goto in_use;
                  }
                }
#             endif
              blocks = OBJ_SZ_TO_BLOCKS(sz);
              if (blocks > 1) {
                GC_large_allocd_bytes -= blocks * HBLKSIZE;
              }
              GC_bytes_found += sz;
              GC_freehblk(hbp);
            }
        } else {
#           ifdef ENABLE_DISCLAIM
              in_use:
#           endif
            if (IS_PTRFREE_SAFE(hhdr)) {
              GC_atomic_in_use += sz;
            } else {
              GC_composite_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);

#       ifdef PARALLEL_MARK
          /* Count can be low or one too high because we sometimes      */
          /* have to ignore decrements.  Objects can also potentially   */
          /* be repeatedly marked by each marker.                       */
          /* Here we assume two markers, but this is extremely          */
          /* unlikely to fail spuriously with more.  And if it does, it */
          /* should be looked at.                                       */
          GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
          GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (report_if_found) {
          GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
        } else if (empty) {
#         ifdef ENABLE_DISCLAIM
            if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
              GC_disclaim_and_reclaim_or_free_small_block(hbp);
            } else
#         endif
          /* else */ {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
          }
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
          /* group of smaller objects, enqueue the real work */
          struct hblk **rlh = ok -> ok_reclaim_list;

          if (rlh != NULL) {
            rlh += BYTES_TO_GRANULES(sz);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
          }
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
        if (IS_PTRFREE_SAFE(hhdr)) {
          GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        } else {
          GC_composite_in_use += sz * hhdr -> hb_n_marks;
        }
    }
}
#if !defined(NO_DEBUGGING)
/* Routines to gather and print heap block info         */
/* intended for debugging.  Otherwise should be called  */
/* with allocation lock held.                           */

struct Print_stats
{
    size_t number_of_blocks;
    size_t total_bytes;
};
#ifdef USE_MARK_BYTES

  /* Return the number of set mark bits in the given header.    */
  /* Remains externally visible as used by GNU GCJ currently.   */
  unsigned GC_n_set_marks(hdr *hhdr)
  {
    unsigned result = 0;
    word i;
    word sz = hhdr -> hb_sz;
    word offset = MARK_BIT_OFFSET(sz);
    word limit = FINAL_MARK_BIT(sz);

    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return result;
  }
#else /* !USE_MARK_BYTES */

  /* Number of set bits in a word.  Not performance critical.   */
  static unsigned set_bits(word n)
  {
    word m = n;
    unsigned result = 0;

    while (m > 0) {
        if (m & 1) result++;
        m >>= 1;
    }
    return result;
  }

  unsigned GC_n_set_marks(hdr *hhdr)
  {
    unsigned result = 0;
    word i;
    word n_mark_words;
#   ifdef MARK_BIT_PER_OBJ
      word n_objs = HBLK_OBJS(hhdr -> hb_sz);

      if (0 == n_objs) n_objs = 1;
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
    return result; /* the number of set bits excluding the one past the end */
  }

#endif /* !USE_MARK_BYTES */
STATIC void GC_print_block_descr(struct hblk *h,
                                 word /* struct PrintStats */ raw_ps)
{
    hdr *hhdr = HDR(h);
    size_t bytes = hhdr -> hb_sz;
    struct Print_stats *ps;
    unsigned n_marks = GC_n_set_marks(hhdr);
    unsigned n_objs = (unsigned)HBLK_OBJS(bytes);

    if (0 == n_objs) n_objs = 1;
    if (hhdr -> hb_n_marks != n_marks) {
      GC_printf("%u,%u,%u!=%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                (unsigned)hhdr->hb_n_marks, n_marks, n_objs);
    } else {
      GC_printf("%u,%u,%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                n_marks, n_objs);
    }

    ps = (struct Print_stats *)raw_ps;
    ps->total_bytes += (bytes + (HBLKSIZE-1)) & ~(HBLKSIZE-1); /* round up */
    ps->number_of_blocks++;
}
void GC_print_block_list(void)
{
    struct Print_stats pstats;

    GC_printf("kind(0=ptrfree,1=normal,2=unc.),"
              "size_in_bytes,#_marks_set,#objs\n");
    pstats.number_of_blocks = 0;
    pstats.total_bytes = 0;
    GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
    GC_printf("blocks= %lu, bytes= %lu\n",
              (unsigned long)pstats.number_of_blocks,
              (unsigned long)pstats.total_bytes);
}
#include "gc_inline.h" /* for GC_print_free_list prototype */

/* Currently for debugger use only: */
GC_API void GC_CALL GC_print_free_list(int kind, size_t sz_in_granules)
{
    void *flh_next;
    int n;

    GC_ASSERT(kind < MAXOBJKINDS);
    GC_ASSERT(sz_in_granules <= MAXOBJGRANULES);
    flh_next = GC_obj_kinds[kind].ok_freelist[sz_in_granules];
    for (n = 0; flh_next; n++) {
        GC_printf("Free object in heap block %p [%d]: %p\n",
                  (void *)HBLKPTR(flh_next), n, flh_next);
        flh_next = obj_link(flh_next);
    }
}

#endif /* !NO_DEBUGGING */
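
/* For example, from a debugger session (values illustrative: kind 1   */
/* is the "normal" kind per the legend in GC_print_block_list, and,    */
/* with the usual 16-byte granules, 2 granules selects the free list   */
/* of 32-byte objects):                                                */
/*      (gdb) call GC_print_free_list(1, 2)                            */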
/*
 * Clear all obj_link pointers in the list of free objects *flp.
 * Clear *flp.
 * This must be done before dropping a list of free gcj-style objects,
 * since we may otherwise end up with dangling "descriptor" pointers.
 * It may help for other pointer-containing objects.
 */
STATIC void GC_clear_fl_links(void **flp)
{
    void *next = *flp;

    while (0 != next) {
       *flp = 0;
       flp = &(obj_link(next));
       next = *flp;
    }
}
/*
 * Perform GC_reclaim_block on the entire heap, after first clearing
 * small object free lists (if we are not just looking for leaks).
 */
GC_INNER void GC_start_reclaim(GC_bool report_if_found)
{
    unsigned kind;

#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
    /* Reset in use counters.  GC_reclaim_block recomputes them. */
      GC_composite_in_use = 0;
      GC_atomic_in_use = 0;
    /* Clear reclaim- and free-lists */
      for (kind = 0; kind < GC_n_kinds; kind++) {
        struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
        GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);

        if (rlist == 0) continue;       /* This kind not used.  */
        if (!report_if_found) {
            void **fop;
            void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);

            for (fop = GC_obj_kinds[kind].ok_freelist;
                 (word)fop < (word)lim; (*(word **)&fop)++) {
              if (*fop != 0) {
                if (should_clobber) {
                  GC_clear_fl_links(fop);
                } else {
                  *fop = 0;
                }
              }
            }
        } /* otherwise free list objects are marked,    */
          /* and it's safe to leave them.                */
        BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
      }

    /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
    /* or enqueue the block for later processing.                            */
      GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);

#   ifdef EAGER_SWEEP
      /* This is a very stupid thing to do.  We make it possible anyway,  */
      /* so that you can convince yourself that it really is very stupid. */
      GC_reclaim_all((GC_stop_func)0, FALSE);
#   elif defined(ENABLE_DISCLAIM)
      /* However, make sure to clear reclaimable objects of kinds with    */
      /* unconditional marking enabled before we do any significant       */
      /* marking work.                                                    */
      GC_reclaim_unconditionally_marked();
#   endif
#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
}
/*
 * Sweep blocks of the indicated object size and kind until either the
 * appropriate free list is nonempty, or there are no more blocks to
 * sweep.
 */
GC_INNER void GC_continue_reclaim(word sz /* granules */, int kind)
{
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok = &(GC_obj_kinds[kind]);
    struct hblk ** rlh = ok -> ok_reclaim_list;
    void **flh = &(ok -> ok_freelist[sz]);

    if (rlh == 0) return;       /* No blocks of this kind.      */
    rlh += sz;
    while ((hbp = *rlh) != 0) {
        hhdr = HDR(hbp);
        *rlh = hhdr -> hb_next;
        GC_reclaim_small_nonempty_block(hbp, FALSE);
        if (*flh != 0) break;
    }
}
/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * Stop_func may be 0.
 */
GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
#   ifndef NO_CLOCK
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;

      if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
#   endif

    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return FALSE;
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old
                    || (word)hhdr->hb_last_reclaimed == GC_gc_no - 1) {
                    /* It's likely we'll need it this time, too */
                    /* It's been touched recently, so this      */
                    /* shouldn't trigger paging.                */
                    GC_reclaim_small_nonempty_block(hbp, FALSE);
                }
            }
        }
    }
#   ifndef NO_CLOCK
      if (GC_print_stats == VERBOSE) {
        CLOCK_TYPE done_time;

        GET_TIME(done_time);
        GC_verbose_log_printf(
                        "Disposing of reclaim lists took %lu ms %lu ns\n",
                        MS_TIME_DIFF(done_time, start_time),
                        NS_FRAC_TIME_DIFF(done_time, start_time));
      }
#   endif
    return TRUE;
}
#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
/* We do an eager sweep on heap blocks where unconditional marking has */
/* been enabled, so that any reclaimable objects have been reclaimed   */
/* before we start marking.  This is a simplified GC_reclaim_all       */
/* restricted to kinds where ok_mark_unconditionally is true.          */
  STATIC void GC_reclaim_unconditionally_marked(void)
  {
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;

    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        if (!ok->ok_mark_unconditionally)
          continue;
        rlp = ok->ok_reclaim_list;
        if (rlp == 0)
          continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                hhdr = HDR(hbp);
                *rlh = hhdr->hb_next;
                GC_reclaim_small_nonempty_block(hbp, FALSE);
            }
        }
    }
  }
#endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
struct enumerate_reachable_s {
  GC_reachable_object_proc proc;
  void *client_data;
};

STATIC void GC_do_enumerate_reachable_objects(struct hblk *hbp, word ped)
{
  struct hblkhdr *hhdr = HDR(hbp);
  size_t sz = (size_t)hhdr->hb_sz;
  size_t bit_no;
  char *p, *plim;

  if (GC_block_empty(hhdr)) {
    return;
  }

  p = hbp->hb_body;
  if (sz > MAXOBJBYTES) { /* one big object */
    plim = p;
  } else {
    plim = hbp->hb_body + HBLKSIZE - sz;
  }
  /* Go through all words in block. */
  for (bit_no = 0; p <= plim; bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      ((struct enumerate_reachable_s *)ped)->proc(p, sz,
                        ((struct enumerate_reachable_s *)ped)->client_data);
    }
  }
}

GC_API void GC_CALL GC_enumerate_reachable_objects_inner(
                                                GC_reachable_object_proc proc,
                                                void *client_data)
{
  struct enumerate_reachable_s ed;

  GC_ASSERT(I_HOLD_LOCK());
  ed.proc = proc;
  ed.client_data = client_data;
  GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, (word)&ed);
}
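
/* A usage sketch (client code, not part of this file).  The "inner"   */
/* suffix means the caller must hold the allocation lock, so clients   */
/* typically go through the public GC_call_with_alloc_lock; count_obj  */
/* and count_reachable are illustrative names:                         */
/*
 *  static void GC_CALLBACK count_obj(void *obj, size_t bytes, void *cd)
 *  {
 *      *(size_t *)cd += bytes; // accumulate bytes of reachable objects
 *  }
 *
 *  static void *count_reachable(void *cd)
 *  {
 *      GC_enumerate_reachable_objects_inner(count_obj, cd);
 *      return NULL;
 *  }
 *
 *  ...
 *  size_t total = 0;
 *  GC_call_with_alloc_lock(count_reachable, &total);
 */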