2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_priv.h"
19 #ifdef ENABLE_DISCLAIM
20 # include "gc_disclaim.h"
25 GC_INNER signed_word GC_bytes_found = 0;
26 /* Number of bytes of memory reclaimed */
27 /* minus the number of bytes originally */
28 /* on free lists which we had to drop. */
30 #if defined(PARALLEL_MARK)
31 GC_INNER word GC_fl_builder_count = 0;
32 /* Number of threads currently building free lists without */
33 /* holding GC lock. It is not safe to collect if this is */
/* nonzero. */
35 #endif /* PARALLEL_MARK */
37 /* We defer printing of leaked objects until we're done with the GC */
38 /* cycle, since the routine for printing objects needs to run outside */
39 /* the collector, e.g. without the allocation lock. */
41 # define MAX_LEAKED 40
/* Bounded table of leaked-object addresses recorded during a cycle */
/* (filled by GC_add_leaked, drained by GC_print_all_errors). */
43 STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
44 STATIC unsigned GC_n_leaked = 0;
/* Set once any leaked (or smashed) objects have been recorded; */
/* tested and consumed by GC_print_all_errors. */
46 GC_INNER GC_bool GC_have_errors = FALSE;
48 #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
/* Forward declaration; the definition appears at the end of file. */
49 STATIC void GC_reclaim_unconditionally_marked(void);
/* Record a leaked object's address for deferred reporting and set */
/* its mark bit so the object survives (is not reused during) the */
/* current collection cycle. */
52 GC_INLINE void GC_add_leaked(ptr_t leaked)
54 # ifndef SHORT_DBG_HDRS
/* NOTE(review): this guard presumably returns early for objects */
/* whose free was deliberately deferred (not true leaks); the */
/* early-return statement is not visible in this chunk — confirm. */
55 if (GC_findleak_delay_free && !GC_check_leaked(leaked))
59 GC_have_errors = TRUE;
60 /* FIXME: Prevent adding an object while printing leaked ones. */
/* Silently drop entries beyond the fixed-size table. */
61 if (GC_n_leaked < MAX_LEAKED) {
62 GC_leaked[GC_n_leaked++] = leaked;
63 /* Make sure it's not reclaimed this cycle */
64 GC_set_mark_bit(leaked);
68 /* Print all objects on the list after printing any smashed objects. */
69 /* Clear both lists. Called without the allocation lock held. */
70 GC_INNER void GC_print_all_errors(void)
72 static GC_bool printing_errors = FALSE;
/* Reentrancy guard: if another invocation is already printing, */
/* bail out rather than interleave output. */
78 if (printing_errors) {
82 have_errors = GC_have_errors;
83 printing_errors = TRUE;
86 if (GC_debugging_started) {
87 GC_print_all_smashed();
/* Report each recorded leak, distinguishing pointer-free (atomic) */
/* objects from pointer-containing (composite) ones. */
92 for (i = 0; i < GC_n_leaked; ++i) {
93 ptr_t p = GC_leaked[i];
94 if (HDR(p) -> hb_obj_kind == PTRFREE) {
95 GC_err_printf("Leaked atomic object at ");
97 GC_err_printf("Leaked composite object at ");
/* Without GC_ABORT_ON_LEAK compiled in, abort only when the */
/* like-named environment variable is set. */
108 # ifndef GC_ABORT_ON_LEAK
109 && GETENV("GC_ABORT_ON_LEAK") != NULL
112 ABORT("Leaked or smashed objects encountered");
/* Release the reentrancy guard. */
115 printing_errors = FALSE;
124 /* Test whether a block is completely empty, i.e. contains no marked */
125 /* objects. This does not require the block to be in physical memory. */
126 GC_INNER GC_bool GC_block_empty(hdr *hhdr)
128 return (hhdr -> hb_n_marks == 0);
131 STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
133 return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
136 /* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
137 /* and USE_MARK_BITS cases. */
140 * Restore unmarked small objects in h of size sz to the object
141 * free list. Returns the new list.
142 * Clears unmarked objects. Sz is in bytes.
144 STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
145 ptr_t list, signed_word *count)
149 signed_word n_bytes_found = 0;
151 GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
152 GC_ASSERT(sz == hhdr -> hb_sz);
153 GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
154 p = (word *)(hbp->hb_body);
/* plim is the last valid object start position in the block. */
155 plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
157 /* go through all words in block */
158 while ((word)p <= (word)plim) {
159 if (mark_bit_from_hdr(hhdr, bit_no)) {
/* Marked (live) object: just step over it. */
160 p = (word *)((ptr_t)p + sz);
163 /* object is available - put on list */
166 /* Clear object, advance p to next object in the process */
167 q = (word *)((ptr_t)p + sz);
168 # ifdef USE_MARK_BYTES
/* The assertion fragment below checks 2*word alignment of p. */
170 && !((word)p & (2 * sizeof(word) - 1)));
173 while ((word)p < (word)q) {
178 p++; /* Skip link field */
/* Zero the remainder of the freed object word by word. */
179 while ((word)p < (word)q) {
184 bit_no += MARK_BIT_OFFSET(sz);
/* Fold the bytes reclaimed from this block into caller's counter. */
186 *count += n_bytes_found;
190 /* The same thing, but don't clear objects: */
/* (Used by GC_reclaim_generic for pointer-free blocks, where stale */
/* contents cannot be misread as pointers.) */
191 STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
192 ptr_t list, signed_word *count)
196 signed_word n_bytes_found = 0;
198 GC_ASSERT(sz == hhdr -> hb_sz);
199 p = (word *)(hbp->hb_body);
/* Last valid object start position in the block. */
200 plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
202 /* go through all words in block */
203 while ((word)p <= (word)plim) {
/* Unmarked object: thread it onto the free list, uncleared. */
204 if (!mark_bit_from_hdr(hhdr, bit_no)) {
206 /* object is available - put on list */
210 p = (word *)((ptr_t)p + sz);
211 bit_no += MARK_BIT_OFFSET(sz);
213 *count += n_bytes_found;
217 #ifdef ENABLE_DISCLAIM
218 /* Call reclaim notifier for block's kind on each unmarked object in */
219 /* block, all within a pair of corresponding enter/leave callbacks. */
220 STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, size_t sz,
221 ptr_t list, signed_word *count)
225 signed_word n_bytes_found = 0;
/* The disclaim notifier registered for this block's object kind. */
226 struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
227 int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;
229 GC_ASSERT(sz == hhdr -> hb_sz);
230 p = (word *)(hbp -> hb_body);
231 plim = (word *)((ptr_t)p + HBLKSIZE - sz);
233 while ((word)p <= (word)plim) {
234 int marked = mark_bit_from_hdr(hhdr, bit_no);
/* Unmarked object whose disclaim call returns nonzero is NOT */
/* reclaimed: resurrect it by counting it as marked this cycle. */
235 if (!marked && (*disclaim)(p)) {
236 hhdr -> hb_n_marks++;
240 p = (word *)((ptr_t)p + sz);
243 /* object is available - put on list */
246 /* Clear object, advance p to next object in the process */
247 q = (word *)((ptr_t)p + sz);
248 # ifdef USE_MARK_BYTES
249 GC_ASSERT((sz & 1) == 0);
250 GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
253 while ((word)p < (word)q) {
258 p++; /* Skip link field */
/* Zero the remainder of the freed object. */
259 while ((word)p < (word)q) {
264 bit_no += MARK_BIT_OFFSET(sz);
266 *count += n_bytes_found;
271 /* Don't really reclaim objects, just check for unmarked ones: */
/* (Leak-detection path: used via GC_reclaim_small_nonempty_block */
/* when report_if_found is set.) */
272 STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
276 GC_ASSERT(sz == hhdr -> hb_sz);
278 /* go through all words in block */
280 plim = p + HBLKSIZE - sz;
281 for (bit_no = 0; (word)p <= (word)plim;
282 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
/* NOTE(review): an unmarked object here is presumably reported */
/* via GC_add_leaked; the call is not visible in this chunk. */
283 if (!mark_bit_from_hdr(hhdr, bit_no)) {
290 * Generic procedure to rebuild a free list in hbp.
291 * Also called directly from GC_malloc_many.
292 * Sz is now in bytes.
294 GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
295 GC_bool init, ptr_t list,
300 GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
301 # ifndef GC_DISABLE_INCREMENTAL
/* Drop any incremental-GC write protection before writing to it. */
302 GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
304 # ifdef ENABLE_DISCLAIM
/* Kinds with a disclaim notifier take the disclaim sweep path. */
305 if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
306 result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
/* Objects must be cleared if the kind requests initialization or */
/* debugging is active; otherwise the block must be pointer-free. */
309 /* else */ if (init || GC_debugging_started) {
310 result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
312 GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
313 result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
/* Uncollectible kinds: re-set all header marks so their objects */
/* are never treated as garbage. */
315 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
320 * Restore unmarked small objects in the block pointed to by hbp
321 * to the appropriate object free list.
322 * If entirely empty blocks are to be completely deallocated, then
323 * caller should perform that check.
325 STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
326 GC_bool report_if_found)
328 hdr *hhdr = HDR(hbp);
329 size_t sz = hhdr -> hb_sz;
330 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
/* Free-list head for this kind and object size (in granules). */
331 void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
/* Record the GC cycle in which this block was last swept. */
333 hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
/* Leak-detection mode only reports; otherwise actually sweep the */
/* block and splice the recovered objects onto the free list. */
335 if (report_if_found) {
336 GC_reclaim_check(hbp, hhdr, sz);
338 *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
339 *flh, &GC_bytes_found);
343 #ifdef ENABLE_DISCLAIM
/* Sweep a small-object block whose kind has a disclaim notifier. */
/* If no objects remain marked afterwards (nothing was resurrected), */
/* the whole block can be released rather than kept on a free list. */
344 STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
346 hdr *hhdr = HDR(hbp);
347 size_t sz = hhdr -> hb_sz;
348 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
349 void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
352 hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
353 flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
354 *flh, &GC_bytes_found);
/* Nonzero hb_n_marks here means some objects were resurrected by */
/* the disclaim notifier; the block must be retained. */
355 if (hhdr -> hb_n_marks)
/* Entire block freed: count it all as reclaimed space. */
358 GC_bytes_found += HBLKSIZE;
365 * Restore an unmarked large object or an entirely empty blocks of small objects
366 * to the heap block free list.
367 * Otherwise enqueue the block for later processing
368 * by GC_reclaim_small_nonempty_block.
369 * If report_if_found is TRUE, then process any block immediately, and
370 * simply report free objects; do not actually reclaim them.
372 STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
374 hdr * hhdr = HDR(hbp);
375 size_t sz = hhdr -> hb_sz; /* size of objects in current block */
376 struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
379 if( sz > MAXOBJBYTES ) { /* 1 big object */
380 if( !mark_bit_from_hdr(hhdr, 0) ) {
/* Leak-detection mode: report the dead object, don't reclaim. */
381 if (report_if_found) {
382 GC_add_leaked((ptr_t)hbp);
386 # ifdef ENABLE_DISCLAIM
387 if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
388 struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
389 if ((*ok->ok_disclaim_proc)(hbp)) {
390 /* Not disclaimed => resurrect the object. */
391 set_mark_bit_from_hdr(hhdr, 0);
/* Dead large object: account for the released blocks. */
396 blocks = OBJ_SZ_TO_BLOCKS(sz);
398 GC_large_allocd_bytes -= blocks * HBLKSIZE;
400 GC_bytes_found += sz;
404 # ifdef ENABLE_DISCLAIM
/* Live large object: update the in-use statistics by whether the */
/* block contains pointers (composite) or not (atomic). */
407 if (hhdr -> hb_descr != 0) {
408 GC_composite_in_use += sz;
410 GC_atomic_in_use += sz;
/* Small-object block from here on. */
414 GC_bool empty = GC_block_empty(hhdr);
415 # ifdef PARALLEL_MARK
416 /* Count can be low or one too high because we sometimes */
417 /* have to ignore decrements. Objects can also potentially */
418 /* be repeatedly marked by each marker. */
419 /* Here we assume two markers, but this is extremely */
420 /* unlikely to fail spuriously with more. And if it does, it */
421 /* should be looked at. */
422 GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
424 GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
426 if (report_if_found) {
427 GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
429 # ifdef ENABLE_DISCLAIM
430 if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
431 GC_disclaim_and_reclaim_or_free_small_block(hbp);
/* Entirely empty block: all HBLKSIZE bytes become reclaimable. */
435 GC_bytes_found += HBLKSIZE;
438 } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
439 /* group of smaller objects, enqueue the real work */
440 rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
441 hhdr -> hb_next = *rlh;
443 } /* else not worth salvaging. */
444 /* We used to do the nearly_full check later, but we */
445 /* already have the right cache context here. Also */
446 /* doing it here avoids some silly lock contention in */
447 /* GC_malloc_many. */
/* Update in-use statistics from the surviving (marked) objects. */
449 if (hhdr -> hb_descr != 0) {
450 GC_composite_in_use += sz * hhdr -> hb_n_marks;
452 GC_atomic_in_use += sz * hhdr -> hb_n_marks;
457 #if !defined(NO_DEBUGGING)
458 /* Routines to gather and print heap block info */
459 /* intended for debugging. Otherwise should be called */
/* Accumulator filled per-block by GC_print_block_descr and */
/* printed as totals by GC_print_block_list below. */
464 size_t number_of_blocks;
468 #ifdef USE_MARK_BYTES
470 /* Return the number of set mark bits in the given header. */
471 /* Remains externally visible as used by GNU GCJ currently. */
472 int GC_n_set_marks(hdr *hhdr)
476 size_t sz = hhdr -> hb_sz;
/* With one mark byte per object, stride through the mark array at */
/* the per-object offset and sum the 0/1 entries. */
477 int offset = (int)MARK_BIT_OFFSET(sz);
478 int limit = (int)FINAL_MARK_BIT(sz);
480 for (i = 0; i < limit; i += offset) {
481 result += hhdr -> hb_marks[i];
/* The final (one-past-last) mark entry is expected to be set. */
483 GC_ASSERT(hhdr -> hb_marks[limit]);
489 /* Number of set bits in a word. Not performance critical. */
/* NOTE(review): function body is not visible in this chunk; */
/* expected to be a simple shift-and-test population count. */
490 static int set_bits(word n)
/* Bit-per-word variant: sum population counts over the mark words. */
502 int GC_n_set_marks(hdr *hhdr)
507 # ifdef MARK_BIT_PER_OBJ
508 int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);
/* A single huge object still occupies one mark bit. */
510 if (0 == n_objs) n_objs = 1;
/* Number of words needed for one mark bit per object, rounded up. */
511 n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
512 # else /* MARK_BIT_PER_GRANULE */
513 n_mark_words = MARK_BITS_SZ;
/* Sum the population counts of all but the last mark word. */
515 for (i = 0; i < n_mark_words - 1; i++) {
516 result += set_bits(hhdr -> hb_marks[i]);
518 # ifdef MARK_BIT_PER_OBJ
/* Shift off the unused high bits of the final, partial mark word */
/* so they are not counted. */
519 result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
520 << (n_mark_words * WORDSZ - n_objs));
522 result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
527 #endif /* !USE_MARK_BYTES */
/* Print one heap block's kind, object size and mark count, and add */
/* its size into the Print_stats accumulator passed via raw_ps. */
529 STATIC void GC_print_block_descr(struct hblk *h,
530 word /* struct PrintStats */ raw_ps)
533 size_t bytes = hhdr -> hb_sz;
534 struct Print_stats *ps;
535 unsigned n_marks = GC_n_set_marks(hhdr);
/* Flag any mismatch between the cached mark count and a recount. */
537 if (hhdr -> hb_n_marks != n_marks) {
538 GC_printf("(%u:%u,%u!=%u)\n", hhdr->hb_obj_kind, (unsigned)bytes,
539 (unsigned)hhdr->hb_n_marks, n_marks);
541 GC_printf("(%u:%u,%u)\n", hhdr->hb_obj_kind,
542 (unsigned)bytes, n_marks);
/* Convert to a whole number of heap blocks (the preceding */
/* round-up addition is not visible in this chunk). */
545 bytes &= ~(HBLKSIZE-1);
547 ps = (struct Print_stats *)raw_ps;
548 ps->total_bytes += bytes;
549 ps->number_of_blocks++;
552 void GC_print_block_list(void)
554 struct Print_stats pstats;
556 GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n");
557 pstats.number_of_blocks = 0;
558 pstats.total_bytes = 0;
559 GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
560 GC_printf("blocks= %lu, bytes= %lu\n",
561 (unsigned long)pstats.number_of_blocks,
562 (unsigned long)pstats.total_bytes);
565 /* Currently for debugger use only: */
/* Print each entry on the free list for the given object kind and */
/* size class (in granules), with its containing heap block. */
566 void GC_print_free_list(int kind, size_t sz_in_granules)
568 struct obj_kind * ok = &GC_obj_kinds[kind];
/* Head of the requested free list. */
569 ptr_t flh = ok -> ok_freelist[sz_in_granules];
/* Walk the linked free list, printing each object with its index. */
572 for (n = 0; flh; n++) {
573 struct hblk *block = HBLKPTR(flh);
574 GC_printf("Free object in heap block %p [%d]: %p\n",
575 (void *)block, n, flh);
580 #endif /* !NO_DEBUGGING */
583 * Clear all obj_link pointers in the list of free objects *flp.
585 * This must be done before dropping a list of free gcj-style objects,
586 * since may otherwise end up with dangling "descriptor" pointers.
587 * It may help for other pointer-containing objects.
589 STATIC void GC_clear_fl_links(void **flp)
/* Advance to the link field of the next free object on the list. */
595 flp = &(obj_link(next));
601 * Perform GC_reclaim_block on the entire heap, after first clearing
602 * small object free lists (if we are not just looking for leaks).
604 GC_INNER void GC_start_reclaim(GC_bool report_if_found)
608 # if defined(PARALLEL_MARK)
/* No thread may be building free lists while we reset them. */
609 GC_ASSERT(0 == GC_fl_builder_count);
611 /* Reset in use counters. GC_reclaim_block recomputes them. */
612 GC_composite_in_use = 0;
613 GC_atomic_in_use = 0;
614 /* Clear reclaim- and free-lists */
615 for (kind = 0; kind < GC_n_kinds; kind++) {
618 struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
/* Pointer-containing kinds must have their free-list links */
/* cleared before the lists are dropped (see GC_clear_fl_links). */
619 GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
621 if (rlist == 0) continue; /* This kind not used. */
622 if (!report_if_found) {
/* Drop every small-object free list of this kind. */
623 lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
624 for (fop = GC_obj_kinds[kind].ok_freelist;
625 (word)fop < (word)lim; fop++) {
627 if (should_clobber) {
628 GC_clear_fl_links(fop);
634 } /* otherwise free list objects are marked, */
635 /* and its safe to leave them */
636 BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
640 /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
641 /* or enqueue the block for later processing. */
642 GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
645 /* This is a very stupid thing to do. We make it possible anyway, */
646 /* so that you can convince yourself that it really is very stupid. */
647 GC_reclaim_all((GC_stop_func)0, FALSE);
648 # elif defined(ENABLE_DISCLAIM)
649 /* However, make sure to clear reclaimable objects of kinds with */
650 /* unconditional marking enabled before we do any significant */
652 GC_reclaim_unconditionally_marked();
654 # if defined(PARALLEL_MARK)
655 GC_ASSERT(0 == GC_fl_builder_count);
661 * Sweep blocks of the indicated object size and kind until either the
662 * appropriate free list is nonempty, or there are no more blocks to
665 GC_INNER void GC_continue_reclaim(size_t sz /* granules */, int kind)
669 struct obj_kind * ok = &(GC_obj_kinds[kind]);
670 struct hblk ** rlh = ok -> ok_reclaim_list;
/* Free list to be replenished for this size class. */
671 void **flh = &(ok -> ok_freelist[sz]);
673 if (rlh == 0) return; /* No blocks of this kind. */
/* Pop queued blocks off the reclaim list and sweep them lazily. */
675 while ((hbp = *rlh) != 0) {
677 *rlh = hhdr -> hb_next;
678 GC_reclaim_small_nonempty_block(hbp, FALSE);
/* Stop as soon as a sweep yielded at least one free object. */
679 if (*flh != 0) break;
684 * Reclaim all small blocks waiting to be reclaimed.
685 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
686 * If this returns TRUE, then it's safe to restart the world
687 * with incorrectly cleared mark bits.
688 * If ignore_old is TRUE, then reclaim only blocks that have been
689 * recently reclaimed, and discard the rest.
690 * Stop_func may be 0.
692 GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
698 struct obj_kind * ok;
701 # ifndef SMALL_CONFIG
702 CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
703 CLOCK_TYPE done_time;
705 if (GC_print_stats == VERBOSE)
706 GET_TIME(start_time);
/* Sweep every kind and every small-object size class. */
709 for (kind = 0; kind < GC_n_kinds; kind++) {
710 ok = &(GC_obj_kinds[kind]);
711 rlp = ok -> ok_reclaim_list;
712 if (rlp == 0) continue;
713 for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
715 while ((hbp = *rlh) != 0) {
/* Honor the caller-supplied stop predicate, if any. */
716 if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
720 *rlh = hhdr -> hb_next;
/* With ignore_old, only sweep blocks touched last cycle. */
721 if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
722 /* It's likely we'll need it this time, too */
723 /* It's been touched recently, so this */
724 /* shouldn't trigger paging. */
725 GC_reclaim_small_nonempty_block(hbp, FALSE);
730 # ifndef SMALL_CONFIG
731 if (GC_print_stats == VERBOSE) {
733 GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
734 MS_TIME_DIFF(done_time,start_time));
740 #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
741 /* We do an eager sweep on heap blocks where unconditional marking has */
742 /* been enabled, so that any reclaimable objects have been reclaimed */
743 /* before we start marking. This is a simplified GC_reclaim_all */
744 /* restricted to kinds where ok_mark_unconditionally is true. */
745 STATIC void GC_reclaim_unconditionally_marked(void)
751 struct obj_kind * ok;
755 for (kind = 0; kind < GC_n_kinds; kind++) {
756 ok = &(GC_obj_kinds[kind]);
/* Skip kinds without unconditional marking enabled. */
757 if (!ok->ok_mark_unconditionally)
759 rlp = ok->ok_reclaim_list;
/* Eagerly sweep every queued block of every small size class. */
762 for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
764 while ((hbp = *rlh) != 0) {
766 *rlh = hhdr->hb_next;
767 GC_reclaim_small_nonempty_block(hbp, FALSE);