2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_priv.h"
19 #ifdef ENABLE_DISCLAIM
20 # include "gc_disclaim.h"
/* File-scope state for the reclaim (sweep) phase.                       */
/* NOTE(review): this extract carries fused original line numbers and    */
/* has many lines elided (e.g. the #endif for the trailing #if); it will */
/* not compile as shown.                                                 */
25 GC_INNER signed_word GC_bytes_found = 0;
26 /* Number of bytes of memory reclaimed */
27 /* minus the number of bytes originally */
28 /* on free lists which we had to drop. */
30 #if defined(PARALLEL_MARK)
31 GC_INNER signed_word GC_fl_builder_count = 0;
32 /* Number of threads currently building free lists without */
33 /* holding GC lock. It is not safe to collect if this is */
34 /* nonzero. Also, together with the mark lock, it is used as */
35 /* a semaphore during marker threads startup. */
36 #endif /* PARALLEL_MARK */
38 /* We defer printing of leaked objects until we're done with the GC */
39 /* cycle, since the routine for printing objects needs to run outside */
40 /* the collector, e.g. without the allocation lock. */
42 # define MAX_LEAKED 40
/* Fixed-capacity buffer of leaked-object pointers found this cycle; */
/* entries beyond MAX_LEAKED are presumably dropped (overflow path   */
/* not visible here).                                                */
44 STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
45 STATIC unsigned GC_n_leaked = 0;
/* Set when any leaked/smashed object has been recorded. */
47 GC_INNER GC_bool GC_have_errors = FALSE;
49 #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
50 STATIC void GC_reclaim_unconditionally_marked(void);
/* Record object "leaked" in GC_leaked[] (capped at MAX_LEAKED), set     */
/* GC_have_errors, and re-mark the object so the sweep does not reclaim  */
/* it before it is reported.                                             */
/* NOTE(review): braces, the #endif, and the early-return path after the */
/* GC_check_leaked() test are elided in this extract.                    */
53 GC_INLINE void GC_add_leaked(ptr_t leaked)
55 # ifndef SHORT_DBG_HDRS
56 if (GC_findleak_delay_free && !GC_check_leaked(leaked))
60 GC_have_errors = TRUE;
61 if (GC_n_leaked < MAX_LEAKED) {
62 GC_leaked[GC_n_leaked++] = leaked;
63 /* Make sure it's not reclaimed this cycle */
64 GC_set_mark_bit(leaked);
/* Copies the GC_leaked[] snapshot into a local array under the guard    */
/* flag printing_errors (so a recursive/concurrent call bails out),      */
/* clears the global list, prints any smashed objects first when         */
/* debugging is on, then prints each leaked object, and may ABORT when   */
/* GC_ABORT_ON_LEAK is configured (compile-time or environment).         */
/* NOTE(review): loop bodies, brace structure, and several statements    */
/* (e.g. where have_errors/i/p are declared and set) are elided here.    */
68 /* Print all objects on the list after printing any smashed objects. */
69 /* Clear both lists. Called without the allocation lock held. */
70 GC_INNER void GC_print_all_errors(void)
72 static GC_bool printing_errors = FALSE;
75 ptr_t leaked[MAX_LEAKED];
79 if (printing_errors) {
83 have_errors = GC_have_errors;
84 printing_errors = TRUE;
85 n_leaked = GC_n_leaked;
87 GC_ASSERT(n_leaked <= MAX_LEAKED);
88 BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
90 BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
94 if (GC_debugging_started) {
95 GC_print_all_smashed();
101 GC_err_printf("Found %u leaked objects:\n", n_leaked);
104 for (i = 0; i < n_leaked; i++) {
106 # ifndef SKIP_LEAKED_OBJECTS_PRINTING
107 GC_print_heap_obj(p);
113 # ifndef GC_ABORT_ON_LEAK
114 && GETENV("GC_ABORT_ON_LEAK") != NULL
117 ABORT("Leaked or smashed objects encountered");
121 printing_errors = FALSE;
131 /* Test whether a block is completely empty, i.e. contains no marked */
132 /* objects. This does not require the block to be in physical memory. */
133 GC_INNER GC_bool GC_block_empty(hdr *hhdr)
135 return (hhdr -> hb_n_marks == 0);
138 STATIC GC_bool GC_block_nearly_full(hdr *hhdr, word sz)
140 return hhdr -> hb_n_marks > HBLK_OBJS(sz) * 7 / 8;
/* Walks every object slot in the block, skipping marked objects;       */
/* unmarked objects are zero-cleared word by word and threaded onto     */
/* the given free list.  Returns the new list head via the visible      */
/* structure; *count is incremented by the bytes recovered.             */
/* NOTE(review): the clearing loop bodies, the list-linking statements, */
/* local declarations, and the return are elided in this extract.       */
143 /* TODO: This should perhaps again be specialized for USE_MARK_BYTES */
144 /* and USE_MARK_BITS cases. */
147 * Restore unmarked small objects in h of size sz to the object
148 * free list. Returns the new list.
149 * Clears unmarked objects. Sz is in bytes.
151 STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, word sz,
152 ptr_t list, signed_word *count)
156 signed_word n_bytes_found = 0;
158 GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
160 GC_ASSERT(sz == hhdr -> hb_sz);
162 /* Skip the assertion because of a potential race with GC_realloc. */
164 GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
165 p = (word *)(hbp->hb_body);
166 plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
168 /* go through all words in block */
169 while ((word)p <= (word)plim) {
170 if (mark_bit_from_hdr(hhdr, bit_no)) {
171 p = (word *)((ptr_t)p + sz);
174 /* object is available - put on list */
177 /* Clear object, advance p to next object in the process */
178 q = (word *)((ptr_t)p + sz);
179 # ifdef USE_MARK_BYTES
181 && !((word)p & (2 * sizeof(word) - 1)));
184 while ((word)p < (word)q) {
189 p++; /* Skip link field */
190 while ((word)p < (word)q) {
195 bit_no += MARK_BIT_OFFSET(sz);
197 *count += n_bytes_found;
201 /* The same thing, but don't clear objects: */
202 STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, word sz,
203 ptr_t list, signed_word *count)
207 signed_word n_bytes_found = 0;
210 GC_ASSERT(sz == hhdr -> hb_sz);
212 p = (word *)(hbp->hb_body);
213 plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
215 /* go through all words in block */
216 while ((word)p <= (word)plim) {
217 if (!mark_bit_from_hdr(hhdr, bit_no)) {
219 /* object is available - put on list */
223 p = (word *)((ptr_t)p + sz);
224 bit_no += MARK_BIT_OFFSET(sz);
226 *count += n_bytes_found;
230 #ifdef ENABLE_DISCLAIM
231 /* Call reclaim notifier for block's kind on each unmarked object in */
232 /* block, all within a pair of corresponding enter/leave callbacks. */
/* Like GC_reclaim_clear, but first gives each unmarked object's        */
/* disclaim callback a chance to veto reclamation: when the callback    */
/* returns nonzero the object is resurrected (re-marked and counted),   */
/* otherwise it is cleared and put on the free list.                    */
/* NOTE(review): as with GC_reclaim_clear, the clearing-loop bodies,    */
/* list-linking statements, declarations, and the return are elided.    */
233 STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, word sz,
234 ptr_t list, signed_word *count)
238 signed_word n_bytes_found = 0;
239 struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
240 int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;
243 GC_ASSERT(sz == hhdr -> hb_sz);
245 p = (word *)(hbp -> hb_body);
246 plim = (word *)((ptr_t)p + HBLKSIZE - sz);
248 while ((word)p <= (word)plim) {
249 int marked = mark_bit_from_hdr(hhdr, bit_no);
250 if (!marked && (*disclaim)(p)) {
/* Disclaim refused: keep the object alive for this cycle. */
251 set_mark_bit_from_hdr(hhdr, bit_no);
252 hhdr -> hb_n_marks++;
256 p = (word *)((ptr_t)p + sz);
259 /* object is available - put on list */
262 /* Clear object, advance p to next object in the process */
263 q = (word *)((ptr_t)p + sz);
264 # ifdef USE_MARK_BYTES
265 GC_ASSERT((sz & 1) == 0);
266 GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
269 while ((word)p < (word)q) {
274 p++; /* Skip link field */
275 while ((word)p < (word)q) {
280 bit_no += MARK_BIT_OFFSET(sz);
282 *count += n_bytes_found;
285 #endif /* ENABLE_DISCLAIM */
287 /* Don't really reclaim objects, just check for unmarked ones: */
288 STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
294 GC_ASSERT(sz == hhdr -> hb_sz);
296 /* go through all words in block */
298 plim = p + HBLKSIZE - sz;
299 for (bit_no = 0; (word)p <= (word)plim;
300 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
301 if (!mark_bit_from_hdr(hhdr, bit_no)) {
/* NOTE(review): the #if/#else/#endif that selects between these two    */
/* IS_PTRFREE_SAFE definitions (atomic vs. plain load of hb_descr) is   */
/* elided in this extract; only one of them is active in the real file. */
307 /* Is a pointer-free block? Same as IS_PTRFREE macro (in os_dep.c) but */
308 /* uses unordered atomic access to avoid racing with GC_realloc. */
310 # define IS_PTRFREE_SAFE(hhdr) \
311 (AO_load((volatile AO_t *)&(hhdr)->hb_descr) == 0)
313 /* No race as GC_realloc holds the lock while updating hb_descr. */
314 # define IS_PTRFREE_SAFE(hhdr) ((hhdr)->hb_descr == 0)
/* Dispatches to the appropriate per-block sweep routine:               */
/* disclaim-aware, clearing, or uninitialized reclamation, based on     */
/* the block's flags and kind settings; then re-marks uncollectable     */
/* blocks so their objects survive.  Returns the updated free list.     */
/* NOTE(review): braces, #endif/#else lines, the declaration of         */
/* "result", and the final return are elided in this extract.           */
318 * Generic procedure to rebuild a free list in hbp.
319 * Also called directly from GC_malloc_many.
320 * Sz is now in bytes.
322 GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
323 GC_bool init, ptr_t list,
328 GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
329 # ifndef GC_DISABLE_INCREMENTAL
330 GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
332 # ifdef ENABLE_DISCLAIM
333 if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
334 result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
337 /* else */ if (init || GC_debugging_started) {
338 result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
340 GC_ASSERT(IS_PTRFREE_SAFE(hhdr));
341 result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
343 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
348 * Restore unmarked small objects in the block pointed to by hbp
349 * to the appropriate object free list.
350 * If entirely empty blocks are to be completely deallocated, then
351 * caller should perform that check.
353 STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp, word sz,
354 GC_bool report_if_found)
356 hdr *hhdr = HDR(hbp);
357 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
358 void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
360 hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
362 if (report_if_found) {
363 GC_reclaim_check(hbp, hhdr, sz);
365 *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
366 (ptr_t)(*flh), &GC_bytes_found);
370 #ifdef ENABLE_DISCLAIM
/* Sweep a small-object block whose kind has a disclaim callback:       */
/* reclaim via GC_reclaim_generic, then (per the visible branch on      */
/* hb_n_marks) either keep the block or free it entirely, crediting     */
/* GC_bytes_found with the whole HBLKSIZE.                              */
/* NOTE(review): braces, the declaration of flh_next, and the           */
/* keep-vs-free statements between the branch and the credit line are   */
/* elided in this extract.                                              */
371 STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
373 hdr *hhdr = HDR(hbp);
374 word sz = hhdr -> hb_sz;
375 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
376 void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
379 hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
380 flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
381 (ptr_t)(*flh), &GC_bytes_found);
382 if (hhdr -> hb_n_marks)
385 GC_bytes_found += HBLKSIZE;
389 #endif /* ENABLE_DISCLAIM */
/* Per-block callback driven by GC_apply_to_all_blocks from             */
/* GC_start_reclaim.  Large (single-object) blocks are freed, leaked,   */
/* disclaimed, or kept according to their mark bit; small-object        */
/* blocks are freed when empty, swept immediately in leak mode, or      */
/* enqueued on the kind's reclaim list for lazy sweeping unless nearly  */
/* full.  Also recomputes GC_atomic_in_use / GC_composite_in_use.       */
/* NOTE(review): braces, several #else/#endif lines, the GC_freehblk /  */
/* enqueue statements, and local declarations (e.g. "blocks") are       */
/* elided in this extract.                                              */
392 * Restore an unmarked large object or an entirely empty blocks of small objects
393 * to the heap block free list.
394 * Otherwise enqueue the block for later processing
395 * by GC_reclaim_small_nonempty_block.
396 * If report_if_found is TRUE, then process any block immediately, and
397 * simply report free objects; do not actually reclaim them.
399 STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
401 hdr * hhdr = HDR(hbp);
402 word sz; /* size of objects in current block */
403 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
406 /* Atomic access is used to avoid racing with GC_realloc. */
407 sz = (word)AO_load((volatile AO_t *)&hhdr->hb_sz);
409 /* No race as GC_realloc holds the lock while updating hb_sz. */
412 if( sz > MAXOBJBYTES ) { /* 1 big object */
413 if( !mark_bit_from_hdr(hhdr, 0) ) {
414 if (report_if_found) {
415 GC_add_leaked((ptr_t)hbp);
419 # ifdef ENABLE_DISCLAIM
420 if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
421 if ((*ok->ok_disclaim_proc)(hbp)) {
422 /* Not disclaimed => resurrect the object. */
423 set_mark_bit_from_hdr(hhdr, 0);
428 blocks = OBJ_SZ_TO_BLOCKS(sz);
430 GC_large_allocd_bytes -= blocks * HBLKSIZE;
432 GC_bytes_found += sz;
436 # ifdef ENABLE_DISCLAIM
439 if (IS_PTRFREE_SAFE(hhdr)) {
440 GC_atomic_in_use += sz;
442 GC_composite_in_use += sz;
446 GC_bool empty = GC_block_empty(hhdr);
447 # ifdef PARALLEL_MARK
448 /* Count can be low or one too high because we sometimes */
449 /* have to ignore decrements. Objects can also potentially */
450 /* be repeatedly marked by each marker. */
451 /* Here we assume two markers, but this is extremely */
452 /* unlikely to fail spuriously with more. And if it does, it */
453 /* should be looked at. */
454 GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
456 GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
458 if (report_if_found) {
459 GC_reclaim_small_nonempty_block(hbp, sz,
460 TRUE /* report_if_found */);
462 # ifdef ENABLE_DISCLAIM
463 if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
464 GC_disclaim_and_reclaim_or_free_small_block(hbp);
468 GC_bytes_found += HBLKSIZE;
471 } else if (GC_find_leak || !GC_block_nearly_full(hhdr, sz)) {
472 /* group of smaller objects, enqueue the real work */
473 struct hblk **rlh = ok -> ok_reclaim_list;
476 rlh += BYTES_TO_GRANULES(sz);
477 hhdr -> hb_next = *rlh;
480 } /* else not worth salvaging. */
481 /* We used to do the nearly_full check later, but we */
482 /* already have the right cache context here. Also */
483 /* doing it here avoids some silly lock contention in */
484 /* GC_malloc_many. */
485 if (IS_PTRFREE_SAFE(hhdr)) {
486 GC_atomic_in_use += sz * hhdr -> hb_n_marks;
488 GC_composite_in_use += sz * hhdr -> hb_n_marks;
493 #if !defined(NO_DEBUGGING)
494 /* Routines to gather and print heap block info */
495 /* intended for debugging. Otherwise should be called */
/* NOTE(review): the struct Print_stats declaration is partially        */
/* elided here; only the number_of_blocks field is visible, but a       */
/* total_bytes member is also used by GC_print_block_descr below.       */
500 size_t number_of_blocks;
504 #ifdef USE_MARK_BYTES
506 /* Return the number of set mark bits in the given header. */
507 /* Remains externally visible as used by GNU GCJ currently. */
/* Mark-byte variant: each object's mark is a whole byte in hb_marks,   */
/* so the count is a simple sum over every MARK_BIT_OFFSET(sz)-th slot. */
/* NOTE(review): braces, the declarations of i/result, and the return   */
/* are elided in this extract.                                          */
508 unsigned GC_n_set_marks(hdr *hhdr)
512 word sz = hhdr -> hb_sz;
513 word offset = MARK_BIT_OFFSET(sz);
514 word limit = FINAL_MARK_BIT(sz);
516 for (i = 0; i < limit; i += offset) {
517 result += hhdr -> hb_marks[i];
/* The mark bit one past the end is expected to always be set. */
519 GC_ASSERT(hhdr -> hb_marks[limit]);
525 /* Number of set bits in a word. Not performance critical. */
/* NOTE(review): the body of set_bits is entirely elided in this        */
/* extract (only the signature survives).                               */
526 static unsigned set_bits(word n)
/* Mark-bit variant of GC_n_set_marks: popcount over the hb_marks       */
/* words, compensating in the MARK_BIT_PER_OBJ case for stray bits      */
/* beyond the n_objs valid positions in the last mark word.             */
/* NOTE(review): braces, declarations of i/result/n_mark_words, and     */
/* several #endif lines are elided here.                                */
538 unsigned GC_n_set_marks(hdr *hhdr)
543 # ifdef MARK_BIT_PER_OBJ
544 word n_objs = HBLK_OBJS(hhdr -> hb_sz);
/* A single huge object still occupies one mark position. */
546 if (0 == n_objs) n_objs = 1;
547 n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
548 # else /* MARK_BIT_PER_GRANULE */
549 n_mark_words = MARK_BITS_SZ;
551 for (i = 0; i < n_mark_words - 1; i++) {
552 result += set_bits(hhdr -> hb_marks[i]);
554 # ifdef MARK_BIT_PER_OBJ
/* Shift out the unused high positions of the final mark word. */
555 result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
556 << (n_mark_words * WORDSZ - n_objs));
558 result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
560 return result; /* the number of set bits excluding the one past the end */
563 #endif /* !USE_MARK_BYTES */
/* Per-block callback for GC_print_block_list: prints one CSV-ish line  */
/* per block (kind, size, marks, object count), flags blocks whose      */
/* cached hb_n_marks disagrees with a fresh GC_n_set_marks count, and   */
/* accumulates totals into the Print_stats passed via raw_ps.           */
/* NOTE(review): braces, the hhdr initialization, and the else branch's */
/* trailing arguments are elided in this extract.                       */
565 STATIC void GC_print_block_descr(struct hblk *h,
566 word /* struct PrintStats */ raw_ps)
569 size_t bytes = hhdr -> hb_sz;
570 struct Print_stats *ps;
571 unsigned n_marks = GC_n_set_marks(hhdr);
572 unsigned n_objs = (unsigned)HBLK_OBJS(bytes);
574 if (0 == n_objs) n_objs = 1;
575 if (hhdr -> hb_n_marks != n_marks) {
/* "!=" in the output flags a mark-count mismatch for this block. */
576 GC_printf("%u,%u,%u!=%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
577 (unsigned)hhdr->hb_n_marks, n_marks, n_objs);
579 GC_printf("%u,%u,%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
583 ps = (struct Print_stats *)raw_ps;
584 ps->total_bytes += (bytes + (HBLKSIZE-1)) & ~(HBLKSIZE-1); /* round up */
585 ps->number_of_blocks++;
588 void GC_print_block_list(void)
590 struct Print_stats pstats;
592 GC_printf("kind(0=ptrfree,1=normal,2=unc.),"
593 "size_in_bytes,#_marks_set,#objs\n");
594 pstats.number_of_blocks = 0;
595 pstats.total_bytes = 0;
596 GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
597 GC_printf("blocks= %lu, bytes= %lu\n",
598 (unsigned long)pstats.number_of_blocks,
599 (unsigned long)pstats.total_bytes);
602 #include "gc_inline.h" /* for GC_print_free_list prototype */
604 /* Currently for debugger use only: */
605 GC_API void GC_CALL GC_print_free_list(int kind, size_t sz_in_granules)
610 GC_ASSERT(kind < MAXOBJKINDS);
611 GC_ASSERT(sz_in_granules <= MAXOBJGRANULES);
612 flh_next = GC_obj_kinds[kind].ok_freelist[sz_in_granules];
613 for (n = 0; flh_next; n++) {
614 GC_printf("Free object in heap block %p [%d]: %p\n",
615 (void *)HBLKPTR(flh_next), n, flh_next);
616 flh_next = obj_link(flh_next);
620 #endif /* !NO_DEBUGGING */
/* NOTE(review): the loop structure and surrounding statements of this  */
/* function are elided in this extract; only the comment, one loop      */
/* advance line, and the signature remain.                              */
623 * Clear all obj_link pointers in the list of free objects *flp.
625 * This must be done before dropping a list of free gcj-style objects,
626 * since may otherwise end up with dangling "descriptor" pointers.
627 * It may help for other pointer-containing objects.
629 STATIC void GC_clear_fl_links(void **flp)
635 flp = &(obj_link(next));
/* Entry point of the sweep phase: resets the in-use byte counters,     */
/* clears (and, for descriptor-bearing kinds, link-clobbers) all small  */
/* object free lists and reclaim lists unless merely reporting leaks,   */
/* then applies GC_reclaim_block to every heap block.  Kinds with       */
/* unconditional marking get an eager sweep via                         */
/* GC_reclaim_unconditionally_marked when EAGER_SWEEP is off.           */
/* NOTE(review): braces, several #if/#endif lines, and statements       */
/* inside the free-list-clearing loop are elided in this extract.       */
641 * Perform GC_reclaim_block on the entire heap, after first clearing
642 * small object free lists (if we are not just looking for leaks).
644 GC_INNER void GC_start_reclaim(GC_bool report_if_found)
648 # if defined(PARALLEL_MARK)
649 GC_ASSERT(0 == GC_fl_builder_count);
651 /* Reset in use counters. GC_reclaim_block recomputes them. */
652 GC_composite_in_use = 0;
653 GC_atomic_in_use = 0;
654 /* Clear reclaim- and free-lists */
655 for (kind = 0; kind < GC_n_kinds; kind++) {
656 struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
657 GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
659 if (rlist == 0) continue; /* This kind not used. */
660 if (!report_if_found) {
662 void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
664 for (fop = GC_obj_kinds[kind].ok_freelist;
665 (word)fop < (word)lim; (*(word **)&fop)++) {
667 if (should_clobber) {
668 GC_clear_fl_links(fop);
674 } /* otherwise free list objects are marked, */
675 /* and its safe to leave them */
676 BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
680 /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
681 /* or enqueue the block for later processing. */
682 GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
685 /* This is a very stupid thing to do. We make it possible anyway, */
686 /* so that you can convince yourself that it really is very stupid. */
687 GC_reclaim_all((GC_stop_func)0, FALSE);
688 # elif defined(ENABLE_DISCLAIM)
689 /* However, make sure to clear reclaimable objects of kinds with */
690 /* unconditional marking enabled before we do any significant */
692 GC_reclaim_unconditionally_marked();
694 # if defined(PARALLEL_MARK)
695 GC_ASSERT(0 == GC_fl_builder_count);
/* Lazy-sweep driver: pops blocks of the given kind off its reclaim     */
/* list and sweeps them one at a time, presumably stopping once the     */
/* free list for the requested granule size becomes nonempty (the       */
/* stopping condition itself is among the elided lines).                */
/* NOTE(review): braces, local declarations (hbp, hhdr), the reclaim-   */
/* list indexing by sz, and the loop exit test are elided here.         */
701 * Sweep blocks of the indicated object size and kind until either the
702 * appropriate free list is nonempty, or there are no more blocks to
705 GC_INNER void GC_continue_reclaim(word sz /* granules */, int kind)
709 struct obj_kind * ok = &(GC_obj_kinds[kind]);
710 struct hblk ** rlh = ok -> ok_reclaim_list;
711 void **flh = &(ok -> ok_freelist[sz]);
713 if (rlh == 0) return; /* No blocks of this kind. */
715 while ((hbp = *rlh) != 0) {
717 *rlh = hhdr -> hb_next;
718 GC_reclaim_small_nonempty_block(hbp, hhdr -> hb_sz, FALSE);
/* Drains every kind's reclaim list, sweeping each block (or, with      */
/* ignore_old, only recently-reclaimed blocks) and honoring an          */
/* optional stop function; reports elapsed time when verbose logging    */
/* is enabled.                                                          */
/* NOTE(review): braces, several local declarations (kind, sz, hbp,     */
/* hhdr, rlh), the FALSE return on stop, the ignore_old condition's     */
/* first clause, and the final TRUE return are elided in this extract.  */
725 * Reclaim all small blocks waiting to be reclaimed.
726 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
727 * If this returns TRUE, then it's safe to restart the world
728 * with incorrectly cleared mark bits.
729 * If ignore_old is TRUE, then reclaim only blocks that have been
730 * recently reclaimed, and discard the rest.
731 * Stop_func may be 0.
733 GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
739 struct obj_kind * ok;
743 CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
745 if (GC_print_stats == VERBOSE)
746 GET_TIME(start_time);
749 for (kind = 0; kind < GC_n_kinds; kind++) {
750 ok = &(GC_obj_kinds[kind]);
751 rlp = ok -> ok_reclaim_list;
752 if (rlp == 0) continue;
753 for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
755 while ((hbp = *rlh) != 0) {
756 if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
760 *rlh = hhdr -> hb_next;
762 || (word)hhdr->hb_last_reclaimed == GC_gc_no - 1) {
763 /* It's likely we'll need it this time, too */
764 /* It's been touched recently, so this */
765 /* shouldn't trigger paging. */
766 GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
772 if (GC_print_stats == VERBOSE) {
773 CLOCK_TYPE done_time;
776 GC_verbose_log_printf(
777 "Disposing of reclaim lists took %lu ms %lu ns\n",
778 MS_TIME_DIFF(done_time, start_time),
779 NS_FRAC_TIME_DIFF(done_time, start_time));
785 #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
786 /* We do an eager sweep on heap blocks where unconditional marking has */
787 /* been enabled, so that any reclaimable objects have been reclaimed */
788 /* before we start marking. This is a simplified GC_reclaim_all */
789 /* restricted to kinds where ok_mark_unconditionally is true. */
/* NOTE(review): braces, the continue after the unconditional-marking   */
/* test, local declarations, and the rlh indexing by sz are elided in   */
/* this extract.                                                        */
790 STATIC void GC_reclaim_unconditionally_marked(void)
796 struct obj_kind * ok;
800 for (kind = 0; kind < GC_n_kinds; kind++) {
801 ok = &(GC_obj_kinds[kind]);
802 if (!ok->ok_mark_unconditionally)
804 rlp = ok->ok_reclaim_list;
807 for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
809 while ((hbp = *rlh) != 0) {
811 *rlh = hhdr->hb_next;
812 GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
817 #endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
/* Closure passed through GC_apply_to_all_blocks as a word: the user    */
/* callback plus its client_data (the latter field is elided in this    */
/* extract, but is referenced by the enumerator below).                 */
819 struct enumerate_reachable_s {
820 GC_reachable_object_proc proc;
/* Per-block callback: skips empty blocks, then invokes the user proc   */
/* on every marked object in the block, passing the object's address    */
/* and size.                                                            */
/* NOTE(review): braces, the declarations of p/plim/bit_no, the large-  */
/* object special case body, and the p initialization are elided here.  */
824 STATIC void GC_do_enumerate_reachable_objects(struct hblk *hbp, word ped)
826 struct hblkhdr *hhdr = HDR(hbp);
827 size_t sz = (size_t)hhdr->hb_sz;
831 if (GC_block_empty(hhdr)) {
836 if (sz > MAXOBJBYTES) { /* one big object */
839 plim = hbp->hb_body + HBLKSIZE - sz;
841 /* Go through all words in block. */
842 for (bit_no = 0; p <= plim; bit_no += MARK_BIT_OFFSET(sz), p += sz) {
843 if (mark_bit_from_hdr(hhdr, bit_no)) {
844 ((struct enumerate_reachable_s *)ped)->proc(p, sz,
845 ((struct enumerate_reachable_s *)ped)->client_data);
/* Public entry point (lock held by caller, per the assertion): packs   */
/* the callback and client data into an enumerate_reachable_s and       */
/* walks all heap blocks with GC_do_enumerate_reachable_objects.        */
/* NOTE(review): this definition runs past the end of the extract; the  */
/* opening brace, the ed.proc assignment, and the closing brace are     */
/* not visible here.                                                    */
850 GC_API void GC_CALL GC_enumerate_reachable_objects_inner(
851 GC_reachable_object_proc proc,
854 struct enumerate_reachable_s ed;
856 GC_ASSERT(I_HOLD_LOCK());
858 ed.client_data = client_data;
859 GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, (word)&ed);