2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
17 #if defined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
18 && defined(__i386__) /* cannot use macros from gcconfig.h */
19 /* Otherwise the EXCEPTION_REGISTRATION type declaration from winnt.h */
20 /* might be used. That declaration has a "handler" callback with the */
21 /* NTAPI attribute. The proper type (with a "handler" field compatible */
22 /* with GC mark_ex_handler) is declared in excpt.h. The given macro is */
23 /* defined here, before any system header is included. */
24 # define __MINGW_EXCPT_DEFINE_PSDK 1
27 #include "private/gc_pmark.h"
31 #if defined(MSWIN32) && defined(__GNUC__)
35 /* Make arguments appear live to compiler. Put here to minimize the */
36 /* risk of inlining. Used to minimize junk left in registers. */
38 void GC_noop6(word arg1 GC_ATTR_UNUSED, word arg2 GC_ATTR_UNUSED,
39 word arg3 GC_ATTR_UNUSED, word arg4 GC_ATTR_UNUSED,
40 word arg5 GC_ATTR_UNUSED, word arg6 GC_ATTR_UNUSED)
42 /* Prevent GC_noop6 calls from being optimized away. */
43 # if defined(AO_HAVE_compiler_barrier) && !defined(BASE_ATOMIC_OPS_EMULATED)
44 AO_compiler_barrier(); /* to serve as a special side-effect */
50 volatile word GC_noop_sink;
52 /* Single argument version, robust against whole program analysis. */
53 GC_ATTR_NO_SANITIZE_THREAD
54 GC_API void GC_CALL GC_noop1(word x)
59 /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
61 GC_INNER unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
63 /* Initialize GC_obj_kinds and the standard free lists properly. */
64 /* This must be done statically since they may be accessed before */
65 /* GC_init is called. */
66 /* It's done here, since we need to deal with mark descriptors. */
67 GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
68 /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
69 /* 0 | */ GC_DS_LENGTH, FALSE, FALSE
70 /*, */ OK_DISCLAIM_INITZ },
71 /* NORMAL */ { &GC_objfreelist[0], 0,
72 /* 0 | */ GC_DS_LENGTH,
73 /* adjusted in GC_init for EXTRA_BYTES */
74 TRUE /* add length to descr */, TRUE
75 /*, */ OK_DISCLAIM_INITZ },
77 { &GC_uobjfreelist[0], 0,
78 /* 0 | */ GC_DS_LENGTH, TRUE /* add length to descr */, TRUE
79 /*, */ OK_DISCLAIM_INITZ },
80 # ifdef GC_ATOMIC_UNCOLLECTABLE
81 { &GC_auobjfreelist[0], 0,
82 /* 0 | */ GC_DS_LENGTH, FALSE /* add length to descr */, FALSE
83 /*, */ OK_DISCLAIM_INITZ },
87 GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
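/* A note on the mark descriptors used above (a sketch; gc_mark.h has  */
/* the authoritative encoding): a descriptor tagged GC_DS_LENGTH is    */
/* simply the number of bytes to scan conservatively from the object   */
/* base. For PTRFREE objects the length stays zero, so nothing is      */
/* scanned; for kinds with the "add length to descr" flag set, each    */
/* object's size is folded into its descriptor, so the whole object is */
/* scanned for pointers.                                               */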
89 # ifndef INITIAL_MARK_STACK_SIZE
90 # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
91 /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
92 /* multiple of HBLKSIZE. */
93 /* The incremental collector actually likes a larger */
94 /* size, since it wants to push all marked dirty */
95 /* objects before marking anything new. Currently we */
96 /* let it grow dynamically. */
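/* As an illustration (typical values, not guarantees): with a 4 KiB  */
/* HBLKSIZE and a two-word mse of 16 bytes on a 64-bit target, the    */
/* initial stack holds 4096 entries and occupies 64 KiB, which is a   */
/* whole number of heap blocks as required above.                     */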
99 #if !defined(GC_DISABLE_INCREMENTAL)
100 STATIC word GC_n_rescuing_pages = 0;
101 /* Number of dirty pages we marked from, */
102 /* excluding ptrfree pages, etc. */
103 /* Used for logging only. */
106 GC_INNER size_t GC_mark_stack_size = 0;
109 STATIC volatile AO_t GC_first_nonempty = 0;
110 /* Lowest entry on mark stack */
111 /* that may be nonempty. */
112 /* Updated only by the initiating thread. */
115 GC_INNER GC_bool GC_parallel_mark_disabled = FALSE;
118 GC_INNER mark_state_t GC_mark_state = MS_NONE;
120 GC_INNER GC_bool GC_mark_stack_too_small = FALSE;
122 static struct hblk * scan_ptr;
124 STATIC GC_bool GC_objects_are_marked = FALSE;
125 /* Are there collectible marked objects in the heap? */
127 /* Is a collection in progress? Note that this can return true in the */
128 /* non-incremental case, if a collection has been abandoned and the */
129 /* mark state is now MS_INVALID. */
130 GC_INNER GC_bool GC_collection_in_progress(void)
132 return(GC_mark_state != MS_NONE);
135 /* Clear all mark bits in the header. */
136 GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
141 /* Atomic access is used to avoid racing with GC_realloc. */
142 last_bit = FINAL_MARK_BIT((size_t)AO_load((volatile AO_t *)&hhdr->hb_sz));
144 /* No race as GC_realloc holds the lock while updating hb_sz. */
145 last_bit = FINAL_MARK_BIT((size_t)hhdr->hb_sz);
148 BZERO(hhdr -> hb_marks, sizeof(hhdr->hb_marks));
149 set_mark_bit_from_hdr(hhdr, last_bit);
150 hhdr -> hb_n_marks = 0;
153 /* Set all mark bits in the header. Used for uncollectible blocks. */
154 GC_INNER void GC_set_hdr_marks(hdr *hhdr)
157 size_t sz = (size_t)hhdr->hb_sz;
158 unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);
160 # ifdef USE_MARK_BYTES
161 for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
162 hhdr -> hb_marks[i] = 1;
165 for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
166 hhdr -> hb_marks[i] = GC_WORD_MAX;
169 # ifdef MARK_BIT_PER_OBJ
170 hhdr -> hb_n_marks = n_marks;
172 hhdr -> hb_n_marks = HBLK_OBJS(sz);
176 /* Clear all mark bits associated with block h. */
177 static void clear_marks_for_block(struct hblk *h, word dummy GC_ATTR_UNUSED)
181 if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
182 /* Mark bit for these is cleared only once the object is */
183 /* explicitly deallocated. This either frees the block, or */
184 /* the bit is cleared once the object is on the free list. */
185 GC_clear_hdr_marks(hhdr);
188 /* Slow but general routines for setting/clearing/asking about mark bits. */
189 GC_API void GC_CALL GC_set_mark_bit(const void *p)
191 struct hblk *h = HBLKPTR(p);
193 word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
195 if (!mark_bit_from_hdr(hhdr, bit_no)) {
196 set_mark_bit_from_hdr(hhdr, bit_no);
197 ++hhdr -> hb_n_marks;
201 GC_API void GC_CALL GC_clear_mark_bit(const void *p)
203 struct hblk *h = HBLKPTR(p);
205 word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
207 if (mark_bit_from_hdr(hhdr, bit_no)) {
208 size_t n_marks = hhdr -> hb_n_marks;
210 GC_ASSERT(n_marks != 0);
211 clear_mark_bit_from_hdr(hhdr, bit_no);
213 # ifdef PARALLEL_MARK
214 if (n_marks != 0 || !GC_parallel)
215 hhdr -> hb_n_marks = n_marks;
216 /* Don't decrement to zero. The counts are approximate due to */
217 /* concurrency issues, but we need to ensure that a count of */
218 /* zero implies an empty block. */
220 hhdr -> hb_n_marks = n_marks;
225 GC_API int GC_CALL GC_is_marked(const void *p)
227 struct hblk *h = HBLKPTR(p);
229 word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
231 return (int)mark_bit_from_hdr(hhdr, bit_no); /* 0 or 1 */
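/* Usage note (a sketch, not a specification): the three routines above */
/* form the public mark-bit interface. A caller that already knows p    */
/* points into a valid, allocated heap block may query or toggle its    */
/* mark bit, e.g.                                                        */
/*   if (!GC_is_marked(p)) GC_set_mark_bit(p);                           */
/* The block's mark count is kept roughly consistent, but p itself is   */
/* not validated, so passing an arbitrary address here is undefined.    */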
234 /* Clear mark bits in all allocated heap blocks. This invalidates the */
235 /* marker invariant, and sets GC_mark_state to reflect this. (This */
236 /* implicitly starts marking to reestablish the invariant.) */
237 GC_INNER void GC_clear_marks(void)
239 GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
240 GC_objects_are_marked = FALSE;
241 GC_mark_state = MS_INVALID;
245 /* Initiate a garbage collection. Initiates a full collection if the */
246 /* mark state is invalid. */
247 GC_INNER void GC_initiate_gc(void)
249 GC_ASSERT(I_HOLD_LOCK());
250 # ifndef GC_DISABLE_INCREMENTAL
251 if (GC_incremental) {
253 GC_read_dirty(FALSE);
256 GC_read_dirty(GC_mark_state == MS_INVALID);
259 GC_n_rescuing_pages = 0;
261 if (GC_mark_state == MS_NONE) {
262 GC_mark_state = MS_PUSH_RESCUERS;
263 } else if (GC_mark_state != MS_INVALID) {
264 ABORT("Unexpected state");
265 } /* Else this is really a full collection, and mark bits are invalid. */
270 STATIC void GC_do_parallel_mark(void); /* Initiate parallel marking. */
271 #endif /* PARALLEL_MARK */
273 #ifdef GC_DISABLE_INCREMENTAL
274 # define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
276 STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h);
277 /* Invoke GC_push_marked on next dirty block above h. */
278 /* Return a pointer just past the end of this block. */
279 #endif /* !GC_DISABLE_INCREMENTAL */
280 STATIC struct hblk * GC_push_next_marked(struct hblk *h);
281 /* Ditto, but also mark from clean pages. */
282 STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
283 /* Ditto, but mark only from uncollectible pages. */
285 static void alloc_mark_stack(size_t);
287 /* Perform a small amount of marking. */
288 /* We try to touch roughly a page of memory. */
289 /* Return TRUE if we just finished a mark phase. */
290 /* Cold_gc_frame is an address inside a GC frame that */
291 /* remains valid until all marking is complete. */
292 /* A zero value indicates that it's OK to miss some */
293 /* register values. */
294 /* We hold the allocation lock. In the case of */
295 /* incremental collection, the world may not be stopped.*/
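/* A minimal driver sketch (illustrative only; the real caller in       */
/* alloc.c also honors time limits and custom stop functions): with the */
/* allocation lock held, marking can be driven to completion by         */
/*   while (!GC_mark_some((ptr_t)0)) { }                                 */
/* where the zero cold_gc_frame means it is acceptable to miss some     */
/* register values.                                                      */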
296 #ifdef WRAP_MARK_SOME
297 /* For win32, this is called after we establish a structured */
298 /* exception handler, in case Windows unmaps one of our root */
299 /* segments. See below. In either case, we acquire the */
300 /* allocator lock long before we get here. */
301 STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
303 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
306 switch(GC_mark_state) {
310 case MS_PUSH_RESCUERS:
311 if ((word)GC_mark_stack_top
312 >= (word)(GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2)) {
313 /* Go ahead and mark, even though that might cause us to */
314 /* see more marked dirty objects later on. Avoid this in the future. */
316 GC_mark_stack_too_small = TRUE;
317 MARK_FROM_MARK_STACK();
320 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
322 # if !defined(GC_DISABLE_INCREMENTAL)
323 GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n",
324 (unsigned long)GC_n_rescuing_pages);
326 GC_push_roots(FALSE, cold_gc_frame);
327 GC_objects_are_marked = TRUE;
328 if (GC_mark_state != MS_INVALID) {
329 GC_mark_state = MS_ROOTS_PUSHED;
335 case MS_PUSH_UNCOLLECTABLE:
336 if ((word)GC_mark_stack_top
337 >= (word)(GC_mark_stack + GC_mark_stack_size/4)) {
338 # ifdef PARALLEL_MARK
339 /* Avoid this, since we don't parallelize the marker here. */
341 if (GC_parallel) GC_mark_stack_too_small = TRUE;
343 MARK_FROM_MARK_STACK();
346 scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
348 GC_push_roots(TRUE, cold_gc_frame);
349 GC_objects_are_marked = TRUE;
350 if (GC_mark_state != MS_INVALID) {
351 GC_mark_state = MS_ROOTS_PUSHED;
357 case MS_ROOTS_PUSHED:
358 # ifdef PARALLEL_MARK
359 /* Eventually, incremental marking should run */
360 /* asynchronously in multiple threads, without grabbing */
361 /* the allocation lock. */
362 /* For now, parallel marker is disabled if there is */
363 /* a chance that marking could be interrupted by */
364 /* a client-supplied time limit or custom stop function. */
365 if (GC_parallel && !GC_parallel_mark_disabled) {
366 GC_do_parallel_mark();
367 GC_ASSERT((word)GC_mark_stack_top < (word)GC_first_nonempty);
368 GC_mark_stack_top = GC_mark_stack - 1;
369 if (GC_mark_stack_too_small) {
370 alloc_mark_stack(2*GC_mark_stack_size);
372 if (GC_mark_state == MS_ROOTS_PUSHED) {
373 GC_mark_state = MS_NONE;
379 if ((word)GC_mark_stack_top >= (word)GC_mark_stack) {
380 MARK_FROM_MARK_STACK();
383 GC_mark_state = MS_NONE;
384 if (GC_mark_stack_too_small) {
385 alloc_mark_stack(2*GC_mark_stack_size);
391 case MS_PARTIALLY_INVALID:
392 if (!GC_objects_are_marked) {
393 GC_mark_state = MS_PUSH_UNCOLLECTABLE;
396 if ((word)GC_mark_stack_top >= (word)GC_mark_stack) {
397 MARK_FROM_MARK_STACK();
400 if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
401 /* About to start a heap scan for marked objects. */
402 /* Mark stack is empty. OK to reallocate. */
403 if (GC_mark_stack_too_small) {
404 alloc_mark_stack(2*GC_mark_stack_size);
406 GC_mark_state = MS_PARTIALLY_INVALID;
408 scan_ptr = GC_push_next_marked(scan_ptr);
409 if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
410 GC_push_roots(TRUE, cold_gc_frame);
411 GC_objects_are_marked = TRUE;
412 if (GC_mark_state != MS_INVALID) {
413 GC_mark_state = MS_ROOTS_PUSHED;
419 ABORT("GC_mark_some: bad state");
424 #ifdef WRAP_MARK_SOME
426 # if (defined(MSWIN32) || defined(MSWINCE)) && defined(__GNUC__)
429 EXCEPTION_REGISTRATION ex_reg;
433 static EXCEPTION_DISPOSITION mark_ex_handler(
434 struct _EXCEPTION_RECORD *ex_rec,
436 struct _CONTEXT *context,
437 void *disp_ctxt GC_ATTR_UNUSED)
439 if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
440 ext_ex_regn *xer = (ext_ex_regn *)est_frame;
442 /* Unwind from the inner function assuming the standard */
443 /* function prologue. */
444 /* Assumes code has not been compiled with */
445 /* -fomit-frame-pointer. */
446 context->Esp = context->Ebp;
447 context->Ebp = *((DWORD *)context->Esp);
448 context->Esp = context->Esp - 8;
450 /* Resume execution at the "real" handler within the */
451 /* wrapper function. */
452 context->Eip = (DWORD )(xer->alt_path);
454 return ExceptionContinueExecution;
457 return ExceptionContinueSearch;
460 # endif /* __GNUC__ && MSWIN32 */
462 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
466 # if defined(MSWIN32) || defined(MSWINCE)
468 /* Windows 98 appears to asynchronously create and remove */
469 /* writable memory mappings, for reasons we haven't yet */
470 /* understood. Since we look for writable regions to */
471 /* determine the root set, we may try to mark from an */
472 /* address range that disappeared since we started the */
473 /* collection. Thus we have to recover from faults here. */
474 /* This code does not appear to be necessary for Windows */
475 /* 95/NT/2000+. Note that this code should never generate */
476 /* an incremental GC write fault. */
477 /* This code seems to be necessary for WinCE (at least in */
478 /* the case we'd decide to add MEM_PRIVATE sections to */
479 /* data roots in GC_register_dynamic_libraries()). */
480 /* It's conceivable that this is the same issue with */
481 /* terminating threads that we see with Linux and */
482 /* USE_PROC_FOR_LIBRARIES. */
485 ret_val = GC_mark_some_inner(cold_gc_frame);
486 } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
487 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
490 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
491 /* With DllMain-based thread tracking, a thread may have */
492 /* started while we were marking. This is logically equivalent */
493 /* to the exception case; our results are invalid and we have */
494 /* to start over. This cannot be prevented since we can't */
495 /* block in DllMain. */
496 if (GC_started_thread_while_stopped()) goto handle_ex;
501 # else /* __GNUC__ */
503 /* Manually install an exception handler since GCC does */
504 /* not yet support Structured Exception Handling (SEH) on Win32. */
509 # if GC_GNUC_PREREQ(4, 7) || GC_CLANG_PREREQ(3, 3)
510 # pragma GCC diagnostic push
511 /* Suppress "taking the address of label is non-standard" warning. */
512 # if defined(__clang__) || GC_GNUC_PREREQ(6, 4)
513 # pragma GCC diagnostic ignored "-Wpedantic"
515 /* GCC before ~4.8 does not accept "-Wpedantic" quietly. */
516 # pragma GCC diagnostic ignored "-pedantic"
518 er.alt_path = &&handle_ex;
519 # pragma GCC diagnostic pop
520 # elif !defined(CPPCHECK) /* pragma diagnostic is not supported */
521 er.alt_path = &&handle_ex;
523 er.ex_reg.handler = mark_ex_handler;
524 __asm__ __volatile__ ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
525 __asm__ __volatile__ ("movl %0, %%fs:0" : : "r" (&er));
526 ret_val = GC_mark_some_inner(cold_gc_frame);
527 /* Prevent GCC from considering the following code unreachable */
528 /* and thus eliminating it. */
529 if (er.alt_path == 0)
531 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
532 if (GC_started_thread_while_stopped())
536 /* Uninstall the exception handler. */
537 __asm__ __volatile__ ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
540 # endif /* __GNUC__ */
541 # else /* !MSWIN32 */
542 /* Here we are handling the case in which /proc is used for root */
543 /* finding, and we have threads. We may find a stack for a */
544 /* thread that is in the process of exiting, and disappears */
545 /* while we are marking it. This seems extremely difficult to */
546 /* avoid otherwise. */
547 if (GC_incremental) {
548 WARN("Incremental GC incompatible with /proc roots\n", 0);
549 /* I'm not sure if this could still work ... */
551 GC_setup_temporary_fault_handler();
552 if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
553 ret_val = GC_mark_some_inner(cold_gc_frame);
555 GC_reset_fault_handler();
558 # endif /* !MSWIN32 */
561 /* Exception handler starts here for all cases. */
563 static word warned_gc_no;
565 /* Warn about it at most once per collection. */
566 if (warned_gc_no != GC_gc_no) {
567 WARN("Caught ACCESS_VIOLATION in marker;"
568 " memory mapping disappeared\n", 0);
569 warned_gc_no = GC_gc_no;
572 /* We have bad roots on the stack. Discard mark stack. */
573 /* Rescan from marked objects. Redetermine roots. */
574 # ifdef REGISTER_LIBRARIES_EARLY
576 GC_cond_register_dynamic_libraries();
579 GC_invalidate_mark_state();
583 goto rm_handler; /* Back to platform-specific code. */
585 #endif /* WRAP_MARK_SOME */
587 GC_INNER void GC_invalidate_mark_state(void)
589 GC_mark_state = MS_INVALID;
590 GC_mark_stack_top = GC_mark_stack-1;
593 GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp)
595 GC_mark_state = MS_INVALID;
596 # ifdef PARALLEL_MARK
597 /* We are using a local_mark_stack in parallel mode, so */
598 /* do not signal the global mark stack to be resized. */
599 /* That will be done if required in GC_return_mark_stack. */
601 GC_mark_stack_too_small = TRUE;
603 GC_mark_stack_too_small = TRUE;
605 GC_COND_LOG_PRINTF("Mark stack overflow; current size = %lu entries\n",
606 (unsigned long)GC_mark_stack_size);
607 return(msp - GC_MARK_STACK_DISCARDS);
611 * Mark objects pointed to by the regions described by
612 * mark stack entries between mark_stack and mark_stack_top,
613 * inclusive. Assumes the upper limit of a mark stack entry
614 * is never 0. A mark stack entry never has size 0.
615 * We try to traverse on the order of a hblk of memory before we return.
616 * Caller is responsible for calling this until the mark stack is empty.
617 * Note that this is the most performance critical routine in the
618 * collector. Hence it contains all sorts of ugly hacks to speed
619 * things up. In particular, we avoid procedure calls on the common
620 * path, we take advantage of peculiarities of the mark descriptor
621 * encoding, we optionally maintain a cache for the block address to
622 * header mapping, we prefetch when an object is "grayed", etc.
624 GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY GC_ATTR_NO_SANITIZE_THREAD
625 GC_INNER mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack,
626 mse *mark_stack_limit)
628 signed_word credit = HBLKSIZE; /* Remaining credit for marking work. */
629 ptr_t current_p; /* Pointer to current candidate ptr. */
630 word current; /* Candidate pointer. */
631 ptr_t limit = 0; /* (Incl) limit of current candidate range. */
633 ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
634 ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
637 # define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
639 GC_objects_are_marked = TRUE;
641 # ifdef OS2 /* Use untweaked version to circumvent compiler problem. */
642 while ((word)mark_stack_top >= (word)mark_stack && credit >= 0)
644 while (((((word)mark_stack_top - (word)mark_stack) | (word)credit)
648 current_p = mark_stack_top -> mse_start;
649 descr = mark_stack_top -> mse_descr.w;
651 /* current_p and descr describe the current object. */
652 /* (*mark_stack_top) is vacant. */
653 /* The following is 0 only for small objects described by a simple */
654 /* length descriptor. For many applications this is the common */
655 /* case, so we try to detect it quickly. */
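    /* The two low "tag" bits select how descr is interpreted (see       */
    /* gc_mark.h): GC_DS_LENGTH - a byte length to scan conservatively;  */
    /* GC_DS_BITMAP - a bitmap over the first WORDSZ-2 words of the      */
    /* object; GC_DS_PROC - an index of a client mark procedure plus its */
    /* environment; GC_DS_PER_OBJECT - the real descriptor is stored in, */
    /* or reached through, the object itself.                            */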
656 if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
657 word tag = descr & GC_DS_TAGS;
659 GC_STATIC_ASSERT(GC_DS_TAGS == 0x3);
663 /* Process part of the range to avoid pushing too much on the mark stack. */
665 GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
666 - (word)GC_least_plausible_heap_addr
667 || (word)(current_p + descr)
668 <= (word)GC_least_plausible_heap_addr
669 || (word)current_p >= (word)GC_greatest_plausible_heap_addr);
670 # ifdef PARALLEL_MARK
671 # define SHARE_BYTES 2048
672 if (descr > SHARE_BYTES && GC_parallel
673 && (word)mark_stack_top < (word)(mark_stack_limit - 1)) {
674 word new_size = (descr/2) & ~(word)(sizeof(word)-1);
676 mark_stack_top -> mse_start = current_p;
677 mark_stack_top -> mse_descr.w = new_size + sizeof(word);
678 /* Makes sure we handle */
679 /* misaligned pointers. */
682 if ((word)GC_trace_addr >= (word)current_p
683 && (word)GC_trace_addr < (word)(current_p + descr)) {
684 GC_log_printf("GC #%u: large section; start %p, len %lu,"
685 " splitting (parallel) at %p\n",
686 (unsigned)GC_gc_no, (void *)current_p,
687 (unsigned long)descr,
688 (void *)(current_p + new_size));
691 current_p += new_size;
695 # endif /* PARALLEL_MARK */
696 mark_stack_top -> mse_start =
697 limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
698 mark_stack_top -> mse_descr.w =
699 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
701 if ((word)GC_trace_addr >= (word)current_p
702 && (word)GC_trace_addr < (word)(current_p + descr)) {
703 GC_log_printf("GC #%u: large section; start %p, len %lu,"
704 " splitting at %p\n",
705 (unsigned)GC_gc_no, (void *)current_p,
706 (unsigned long)descr, (void *)limit);
709 /* Make sure that pointers overlapping the two ranges are considered. */
711 limit += sizeof(word) - ALIGNMENT;
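          /* A small worked example (illustrative numbers): on a 64-bit   */
          /* target with SPLIT_RANGE_WORDS of 128, a 64 KiB simple-length */
          /* range is not traced in one go; the stack entry is rewritten  */
          /* to cover everything past the first 127 words, and only those */
          /* first 127 words (plus the small overlap added just above, so */
          /* pointers straddling the split are still seen) are scanned    */
          /* now.                                                         */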
716 if ((word)GC_trace_addr >= (word)current_p
717 && (word)GC_trace_addr < (word)(current_p
718 + WORDS_TO_BYTES(WORDSZ-2))) {
719 GC_log_printf("GC #%u: tracing from %p bitmap descr %lu\n",
720 (unsigned)GC_gc_no, (void *)current_p,
721 (unsigned long)descr);
723 # endif /* ENABLE_TRACE */
724 descr &= ~GC_DS_TAGS;
725 credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
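        /* Bitmap descriptors are consumed one bit per word: the sign bit */
        /* (SIGNB) of descr corresponds to the word at current_p, and     */
        /* descr is shifted left as current_p advances, so only words     */
        /* whose bit is set are fetched and checked as candidate          */
        /* pointers.                                                      */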
727 if ((descr & SIGNB) != 0) {
728 current = *(word *)current_p;
729 FIXUP_POINTER(current);
730 if (current >= (word)least_ha && current < (word)greatest_ha) {
731 PREFETCH((ptr_t)current);
733 if (GC_trace_addr == current_p) {
734 GC_log_printf("GC #%u: considering(3) %p -> %p\n",
735 (unsigned)GC_gc_no, (void *)current_p,
738 # endif /* ENABLE_TRACE */
739 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
740 mark_stack_limit, current_p);
744 current_p += sizeof(word);
750 if ((word)GC_trace_addr >= (word)current_p
751 && GC_base(current_p) != 0
752 && GC_base(current_p) == GC_base(GC_trace_addr)) {
753 GC_log_printf("GC #%u: tracing from %p, proc descr %lu\n",
754 (unsigned)GC_gc_no, (void *)current_p,
755 (unsigned long)descr);
757 # endif /* ENABLE_TRACE */
758 credit -= GC_PROC_BYTES;
759 mark_stack_top = (*PROC(descr))((word *)current_p, mark_stack_top,
760 mark_stack_limit, ENV(descr));
762 case GC_DS_PER_OBJECT:
763 if ((signed_word)descr >= 0) {
764 /* Descriptor is in the object. */
765 descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
767 /* Descriptor is in type descriptor pointed to by first */
768 /* word in object. */
769 ptr_t type_descr = *(ptr_t *)current_p;
770 /* type_descr is either a valid pointer to the descriptor */
771 /* structure, or this object was on a free list. */
772 /* If it was anything but the last object on the free list, */
773 /* we will misinterpret the next object on the free list as */
774 /* the type descriptor, and get a 0 GC descriptor, which */
775 /* is ideal. Unfortunately, we need to check for the last */
776 /* object case explicitly. */
777 if (EXPECT(0 == type_descr, FALSE)) {
781 descr = *(word *)(type_descr
782 - ((signed_word)descr + (GC_INDIR_PER_OBJ_BIAS
783 - GC_DS_PER_OBJECT)));
786 /* Can happen either because we generated a 0 descriptor */
787 /* or we saw a pointer to a free object. */
794 /* Small object with length descriptor. */
796 # ifndef SMALL_CONFIG
797 if (descr < sizeof(word))
801 if ((word)GC_trace_addr >= (word)current_p
802 && (word)GC_trace_addr < (word)(current_p + descr)) {
803 GC_log_printf("GC #%u: small object; start %p, len %lu\n",
804 (unsigned)GC_gc_no, (void *)current_p,
805 (unsigned long)descr);
808 limit = current_p + (word)descr;
810 /* The simple case in which we're scanning a range. */
811 GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
812 credit -= limit - current_p;
813 limit -= sizeof(word);
817 # ifndef SMALL_CONFIG
820 /* Try to prefetch the next pointer to be examined ASAP. */
821 /* Empirically, this also seems to help slightly without */
822 /* prefetches, at least on Linux/x86. Presumably this loop */
823 /* ends up with less register pressure, and gcc thus ends up */
824 /* generating slightly better code. Overall gcc code quality */
825 /* for this loop is still not great. */
827 PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
828 GC_ASSERT((word)limit >= (word)current_p);
829 deferred = *(word *)limit;
830 FIXUP_POINTER(deferred);
832 if (deferred >= (word)least_ha && deferred < (word)greatest_ha) {
833 PREFETCH((ptr_t)deferred);
836 if ((word)current_p > (word)limit) goto next_object;
837 /* Unroll once, so we don't do too many of the prefetches */
838 /* based on limit. */
839 deferred = *(word *)limit;
840 FIXUP_POINTER(deferred);
842 if (deferred >= (word)least_ha && deferred < (word)greatest_ha) {
843 PREFETCH((ptr_t)deferred);
846 if ((word)current_p > (word)limit) goto next_object;
850 while ((word)current_p <= (word)limit) {
851 /* Empirically, unrolling this loop doesn't help a lot. */
852 /* Since PUSH_CONTENTS expands to a lot of code, we don't unroll it. */
854 current = *(word *)current_p;
855 FIXUP_POINTER(current);
856 PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
857 if (current >= (word)least_ha && current < (word)greatest_ha) {
858 /* Prefetch the contents of the object we just pushed. It's */
859 /* likely we will need them soon. */
860 PREFETCH((ptr_t)current);
862 if (GC_trace_addr == current_p) {
863 GC_log_printf("GC #%u: considering(1) %p -> %p\n",
864 (unsigned)GC_gc_no, (void *)current_p,
867 # endif /* ENABLE_TRACE */
868 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
869 mark_stack_limit, current_p);
871 current_p += ALIGNMENT;
874 # ifndef SMALL_CONFIG
875 /* We still need to mark the entry we previously prefetched. */
876 /* We already know that it passes the preliminary pointer validity test. */
879 if (GC_trace_addr == current_p) {
880 GC_log_printf("GC #%u: considering(2) %p -> %p\n",
881 (unsigned)GC_gc_no, (void *)current_p,
884 # endif /* ENABLE_TRACE */
885 PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
886 mark_stack_limit, current_p);
891 return mark_stack_top;
896 STATIC GC_bool GC_help_wanted = FALSE; /* Protected by mark lock. */
897 STATIC unsigned GC_helper_count = 0; /* Number of running helpers. */
898 /* Protected by mark lock. */
899 STATIC unsigned GC_active_count = 0; /* Number of active helpers. */
900 /* Protected by mark lock. */
901 /* May increase and decrease */
902 /* within each mark cycle. But */
903 /* once it returns to 0, it */
904 /* stays zero for the cycle. */
906 GC_INNER word GC_mark_no = 0;
908 static mse *main_local_mark_stack;
911 # define LOCAL_MARK_STACK_SIZE (HBLKSIZE / 8)
913 # define LOCAL_MARK_STACK_SIZE HBLKSIZE
914 /* Under normal circumstances, this is big enough to guarantee */
915 /* we don't overflow half of it in a single call to GC_mark_from. */
919 /* Wait for all markers to finish initialization (i.e. store */
920 /* marker_[b]sp, marker_mach_threads, GC_marker_Id). */
921 GC_INNER void GC_wait_for_markers_init(void)
925 if (GC_markers_m1 == 0)
928 /* Allocate the local mark stack for the thread that holds GC lock. */
929 # ifndef CAN_HANDLE_FORK
930 GC_ASSERT(NULL == main_local_mark_stack);
932 if (NULL == main_local_mark_stack)
935 size_t bytes_to_get =
936 ROUNDUP_PAGESIZE_IF_MMAP(LOCAL_MARK_STACK_SIZE * sizeof(mse));
937 main_local_mark_stack = (mse *)GET_MEM(bytes_to_get);
938 if (NULL == main_local_mark_stack)
939 ABORT("Insufficient memory for main local_mark_stack");
940 GC_add_to_our_memory((ptr_t)main_local_mark_stack, bytes_to_get);
943 /* Reuse marker lock and builders count to synchronize */
944 /* marker thread startup. */
945 GC_acquire_mark_lock();
946 GC_fl_builder_count += GC_markers_m1;
947 count = GC_fl_builder_count;
948 GC_release_mark_lock();
950 GC_ASSERT(count > 0);
951 GC_wait_for_reclaim();
955 /* Steal mark stack entries starting at mse low into mark stack local */
956 /* until we either steal mse high, or we have max entries. */
957 /* Return a pointer to the top of the local mark stack. */
958 /* (*next) is replaced by a pointer to the next unscanned mark stack entry. */
960 STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
961 unsigned max, mse **next)
964 mse *top = local - 1;
967 GC_ASSERT((word)high >= (word)(low - 1)
968 && (word)(high - low + 1) <= GC_mark_stack_size);
969 for (p = low; (word)p <= (word)high && i <= max; ++p) {
970 word descr = (word)AO_load(&p->mse_descr.ao);
972 /* Must be ordered after read of descr: */
973 AO_store_release_write(&p->mse_descr.ao, 0);
974 /* More than one thread may get this entry, but that's only */
975 /* a minor performance problem. */
977 top -> mse_descr.w = descr;
978 top -> mse_start = p -> mse_start;
979 GC_ASSERT((descr & GC_DS_TAGS) != GC_DS_LENGTH
980 || descr < (word)GC_greatest_plausible_heap_addr
981 - (word)GC_least_plausible_heap_addr
982 || (word)(p->mse_start + descr)
983 <= (word)GC_least_plausible_heap_addr
984 || (word)p->mse_start
985 >= (word)GC_greatest_plausible_heap_addr);
986 /* If this is a big object, count it as */
987 /* size/256 + 1 objects. */
989 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
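      /* E.g. (illustrative): stealing a simple-length entry describing a */
      /* 4 KiB region advances i by roughly 16 beyond the usual 1, so one */
      /* helper does not walk off with several huge ranges at once.       */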
996 /* Copy back a local mark stack. */
997 /* low and high are inclusive bounds. */
998 STATIC void GC_return_mark_stack(mse * low, mse * high)
1004 if ((word)high < (word)low) return;
1005 stack_size = high - low + 1;
1006 GC_acquire_mark_lock();
1007 my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
1008 my_start = my_top + 1;
1009 if ((word)(my_start - GC_mark_stack + stack_size)
1010 > (word)GC_mark_stack_size) {
1011 GC_COND_LOG_PRINTF("No room to copy back mark stack\n");
1012 GC_mark_state = MS_INVALID;
1013 GC_mark_stack_too_small = TRUE;
1014 /* We drop the local mark stack. We'll fix things later. */
1016 BCOPY(low, my_start, stack_size * sizeof(mse));
1017 GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
1019 AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
1020 (AO_t)(my_top + stack_size));
1021 /* Ensures visibility of previously written stack contents. */
1023 GC_release_mark_lock();
1024 GC_notify_all_marker();
1027 #ifndef N_LOCAL_ITERS
1028 # define N_LOCAL_ITERS 1
1031 /* This function is only called when the local */
1032 /* and the main mark stacks are both empty. */
1033 static GC_bool has_inactive_helpers(void)
1037 GC_acquire_mark_lock();
1038 res = GC_active_count < GC_helper_count;
1039 GC_release_mark_lock();
1043 /* Mark from the local mark stack. */
1044 /* On return, the local mark stack is empty. */
1045 /* But this may be achieved by copying the */
1046 /* local mark stack back into the global one. */
1047 /* We do not hold the mark lock. */
1048 STATIC void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
1053 for (n = 0; n < N_LOCAL_ITERS; ++n) {
1054 local_top = GC_mark_from(local_top, local_mark_stack,
1055 local_mark_stack + LOCAL_MARK_STACK_SIZE);
1056 if ((word)local_top < (word)local_mark_stack) return;
1057 if ((word)(local_top - local_mark_stack)
1058 >= LOCAL_MARK_STACK_SIZE / 2) {
1059 GC_return_mark_stack(local_mark_stack, local_top);
1063 if ((word)AO_load((volatile AO_t *)&GC_mark_stack_top)
1064 < (word)AO_load(&GC_first_nonempty)
1065 && (word)local_top > (word)(local_mark_stack + 1)
1066 && has_inactive_helpers()) {
1067 /* Try to share the load, since the main stack is empty, */
1068 /* and helper threads are waiting for a refill. */
1069 /* The entries near the bottom of the stack are likely */
1070 /* to require more work. Thus we return those, even though it costs an extra copy. */
1072 mse * new_bottom = local_mark_stack
1073 + (local_top - local_mark_stack)/2;
1074 GC_ASSERT((word)new_bottom > (word)local_mark_stack
1075 && (word)new_bottom < (word)local_top);
1076 GC_return_mark_stack(local_mark_stack, new_bottom - 1);
1077 memmove(local_mark_stack, new_bottom,
1078 (local_top - new_bottom + 1) * sizeof(mse));
1079 local_top -= (new_bottom - local_mark_stack);
1084 #ifndef ENTRIES_TO_GET
1085 # define ENTRIES_TO_GET 5
1088 /* Mark using the local mark stack until the global mark stack is empty */
1089 /* and there are no active workers. Update GC_first_nonempty to reflect */
1090 /* progress. Caller holds the mark lock. */
1091 /* Caller has already incremented GC_helper_count. We decrement it, */
1092 /* and maintain GC_active_count. */
1093 STATIC void GC_mark_local(mse *local_mark_stack, int id)
1095 mse * my_first_nonempty;
1098 my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1099 GC_ASSERT((word)GC_mark_stack <= (word)my_first_nonempty);
1100 GC_ASSERT((word)my_first_nonempty
1101 <= (word)AO_load((volatile AO_t *)&GC_mark_stack_top) + sizeof(mse));
1102 GC_VERBOSE_LOG_PRINTF("Starting mark helper %d\n", id);
1103 GC_release_mark_lock();
1109 mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
1111 GC_ASSERT((word)my_first_nonempty >= (word)GC_mark_stack &&
1112 (word)my_first_nonempty <=
1113 (word)AO_load((volatile AO_t *)&GC_mark_stack_top)
1115 GC_ASSERT((word)global_first_nonempty >= (word)GC_mark_stack);
1116 if ((word)my_first_nonempty < (word)global_first_nonempty) {
1117 my_first_nonempty = global_first_nonempty;
1118 } else if ((word)global_first_nonempty < (word)my_first_nonempty) {
1119 (void)AO_compare_and_swap(&GC_first_nonempty,
1120 (AO_t)global_first_nonempty,
1121 (AO_t)my_first_nonempty);
1122 /* If this fails, we just go ahead, without updating */
1123 /* GC_first_nonempty. */
1125 /* Perhaps we should also update GC_first_nonempty, if it */
1126 /* is less. But that would require using atomic updates. */
1127 my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
1128 if ((word)my_top < (word)my_first_nonempty) {
1129 GC_acquire_mark_lock();
1130 my_top = GC_mark_stack_top;
1131 /* Asynchronous modification impossible here, */
1132 /* since we hold mark lock. */
1133 n_on_stack = my_top - my_first_nonempty + 1;
1134 if (0 == n_on_stack) {
1136 GC_ASSERT(GC_active_count <= GC_helper_count);
1137 /* Other markers may redeposit objects on the stack. */
1139 if (0 == GC_active_count) GC_notify_all_marker();
1140 while (GC_active_count > 0
1141 && (word)AO_load(&GC_first_nonempty)
1142 > (word)GC_mark_stack_top) {
1143 /* We will be notified if either GC_active_count */
1144 /* reaches zero, or if more objects are pushed on */
1145 /* the global mark stack. */
1148 if (GC_active_count == 0
1149 && (word)AO_load(&GC_first_nonempty)
1150 > (word)GC_mark_stack_top) {
1151 GC_bool need_to_notify = FALSE;
1152 /* The above conditions can't be falsified while we */
1153 /* hold the mark lock, since neither */
1154 /* GC_active_count nor GC_mark_stack_top can */
1155 /* change. GC_first_nonempty can only be */
1156 /* incremented asynchronously. Thus we know that */
1157 /* both conditions actually held simultaneously. */
1159 if (0 == GC_helper_count) need_to_notify = TRUE;
1160 GC_VERBOSE_LOG_PRINTF("Finished mark helper %d\n", id);
1161 if (need_to_notify) GC_notify_all_marker();
1164 /* Else there's something on the stack again, or */
1165 /* another helper may push something. */
1167 GC_ASSERT(GC_active_count > 0);
1168 GC_release_mark_lock();
1171 GC_release_mark_lock();
1174 n_on_stack = my_top - my_first_nonempty + 1;
1176 n_to_get = ENTRIES_TO_GET;
1177 if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
1178 local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
1179 local_mark_stack, n_to_get,
1180 &my_first_nonempty);
1181 GC_ASSERT((word)my_first_nonempty >= (word)GC_mark_stack &&
1182 (word)my_first_nonempty <=
1183 (word)AO_load((volatile AO_t *)&GC_mark_stack_top)
1185 GC_do_local_mark(local_mark_stack, local_top);
1189 /* Perform parallel mark. */
1190 /* We hold the GC lock, not the mark lock. */
1191 /* Currently runs until the mark stack is empty. */
1193 STATIC void GC_do_parallel_mark(void)
1195 GC_acquire_mark_lock();
1196 GC_ASSERT(I_HOLD_LOCK());
1197 /* This could be a GC_ASSERT, but it seems safer to keep it on */
1198 /* all the time, especially since it's cheap. */
1199 if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
1200 ABORT("Tried to start parallel mark in bad state");
1201 GC_VERBOSE_LOG_PRINTF("Starting marking for mark phase number %lu\n",
1202 (unsigned long)GC_mark_no);
1203 GC_first_nonempty = (AO_t)GC_mark_stack;
1204 GC_active_count = 0;
1205 GC_helper_count = 1;
1206 GC_help_wanted = TRUE;
1207 GC_notify_all_marker();
1208 /* Wake up potential helpers. */
1209 GC_mark_local(main_local_mark_stack, 0);
1210 GC_help_wanted = FALSE;
1211 /* Done; clean up. */
1212 while (GC_helper_count > 0) {
1215 /* GC_helper_count cannot be incremented while GC_help_wanted is false. */
1216 GC_VERBOSE_LOG_PRINTF("Finished marking for mark phase number %lu\n",
1217 (unsigned long)GC_mark_no);
1219 GC_release_mark_lock();
1220 GC_notify_all_marker();
1224 /* Try to help out the marker, if it's running. */
1225 /* We do not hold the GC lock, but the requestor does. */
1226 /* And we hold the mark lock. */
1227 GC_INNER void GC_help_marker(word my_mark_no)
1229 # define my_id my_id_mse.mse_descr.w
1230 mse my_id_mse; /* align local_mark_stack explicitly */
1231 mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
1232 /* Note: local_mark_stack is quite big (up to 128 KiB). */
1234 GC_ASSERT(GC_parallel);
1235 while (GC_mark_no < my_mark_no
1236 || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
1239 my_id = GC_helper_count;
1240 if (GC_mark_no != my_mark_no || my_id > (unsigned)GC_markers_m1) {
1241 /* Second test is useful only if original threads can also */
1242 /* act as helpers. Under Linux they can't. */
1245 GC_helper_count = (unsigned)my_id + 1;
1246 GC_mark_local(local_mark_stack, (int)my_id);
1247 /* GC_mark_local decrements GC_helper_count. */
1251 #endif /* PARALLEL_MARK */
1253 GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
1256 size_t page_offset = (word)ptr & (GC_page_size - 1);
1258 size_t recycled_bytes;
1260 GC_ASSERT(bytes != 0);
1261 GC_ASSERT(GC_page_size != 0);
1262 /* TODO: Assert correct memory flags if GWW_VDB */
1263 if (page_offset != 0)
1264 displ = GC_page_size - page_offset;
1265 recycled_bytes = (bytes - displ) & ~(GC_page_size - 1);
1266 GC_COND_LOG_PRINTF("Recycle %lu scratch-allocated bytes at %p\n",
1267 (unsigned long)recycled_bytes, ptr);
1268 if (recycled_bytes > 0)
1269 GC_add_to_heap((struct hblk *)((word)ptr + displ), recycled_bytes);
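/* A quick example of the rounding above (illustrative, assuming a 4 KiB */
/* GC_page_size): recycling 10000 bytes starting 256 bytes into a page   */
/* skips displ = 4096 - 256 = 3840 bytes, then returns                   */
/* (10000 - 3840) & ~4095 = 4096 bytes, i.e. exactly one whole page, to  */
/* the heap; the unaligned head and tail are simply dropped.             */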
1273 /* Allocate or reallocate space for mark stack of size n entries. */
1274 /* May silently fail. */
1275 static void alloc_mark_stack(size_t n)
1277 mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
1279 /* Don't recycle a stack segment obtained with the wrong flags. */
1280 /* Win32 GetWriteWatch requires the right kind of memory. */
1281 static GC_bool GC_incremental_at_stack_alloc = FALSE;
1282 GC_bool recycle_old = !GC_auto_incremental
1283 || GC_incremental_at_stack_alloc;
1285 GC_incremental_at_stack_alloc = GC_auto_incremental;
1287 # define recycle_old TRUE
1290 GC_mark_stack_too_small = FALSE;
1291 if (GC_mark_stack != NULL) {
1292 if (new_stack != 0) {
1294 /* Recycle old space. */
1295 GC_scratch_recycle_inner(GC_mark_stack,
1296 GC_mark_stack_size * sizeof(struct GC_ms_entry));
1298 GC_mark_stack = new_stack;
1299 GC_mark_stack_size = n;
1300 /* FIXME: Do we need some way to reset GC_mark_stack_size? */
1301 GC_mark_stack_limit = new_stack + n;
1302 GC_COND_LOG_PRINTF("Grew mark stack to %lu frames\n",
1303 (unsigned long)GC_mark_stack_size);
1305 WARN("Failed to grow mark stack to %" WARN_PRIdPTR " frames\n", n);
1307 } else if (NULL == new_stack) {
1308 GC_err_printf("No space for mark stack\n");
1311 GC_mark_stack = new_stack;
1312 GC_mark_stack_size = n;
1313 GC_mark_stack_limit = new_stack + n;
1315 GC_mark_stack_top = GC_mark_stack-1;
1318 GC_INNER void GC_mark_init(void)
1320 alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
1324 * Push all locations between b and t onto the mark stack.
1325 * b is the first location to be checked. t is one past the last
1326 * location to be checked.
1327 * Should only be used if there is no possibility of mark stack overflow.
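 * For instance (illustrative, assuming ALIGNMENT of 8): a request to
 * push the range [0x1003, 0x1065) is trimmed to the aligned range
 * [0x1008, 0x1060), which becomes a single mark stack entry with a
 * simple length descriptor.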
1330 GC_API void GC_CALL GC_push_all(void *bottom, void *top)
1334 bottom = (void *)(((word)bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1335 top = (void *)((word)top & ~(ALIGNMENT-1));
1336 if ((word)bottom >= (word)top) return;
1338 GC_mark_stack_top++;
1339 if ((word)GC_mark_stack_top >= (word)GC_mark_stack_limit) {
1340 ABORT("Unexpected mark stack overflow");
1342 length = (word)top - (word)bottom;
1343 # if GC_DS_TAGS > ALIGNMENT - 1
1344 length += GC_DS_TAGS;
1345 length &= ~GC_DS_TAGS;
1347 GC_mark_stack_top -> mse_start = (ptr_t)bottom;
1348 GC_mark_stack_top -> mse_descr.w = length;
1351 #ifndef GC_DISABLE_INCREMENTAL
1353 /* Analogous to the above, but push only those pages h with */
1354 /* dirty_fn(h) != 0. We use GC_push_all to actually push the block. */
1355 /* Used both to selectively push dirty pages, or to push a block in */
1356 /* piecemeal fashion, to allow for more marking concurrency. */
1357 /* Will not overflow mark stack if GC_push_all pushes a small fixed */
1358 /* number of entries. (This is invoked only if GC_push_all pushes */
1359 /* a single entry, or if it marks each object before pushing it, thus */
1360 /* ensuring progress in the event of a stack overflow.) */
1361 STATIC void GC_push_selected(ptr_t bottom, ptr_t top,
1362 GC_bool (*dirty_fn)(struct hblk *))
1366 bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1367 top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
1368 if ((word)bottom >= (word)top) return;
1370 h = HBLKPTR(bottom + HBLKSIZE);
1371 if ((word)top <= (word)h) {
1372 if ((*dirty_fn)(h-1)) {
1373 GC_push_all(bottom, top);
1377 if ((*dirty_fn)(h-1)) {
1378 if ((word)(GC_mark_stack_top - GC_mark_stack)
1379 > 3 * GC_mark_stack_size / 4) {
1380 GC_push_all(bottom, top);
1383 GC_push_all(bottom, h);
1386 while ((word)(h+1) <= (word)top) {
1387 if ((*dirty_fn)(h)) {
1388 if ((word)(GC_mark_stack_top - GC_mark_stack)
1389 > 3 * GC_mark_stack_size / 4) {
1390 /* Danger of mark stack overflow. */
1391 GC_push_all(h, top);
1394 GC_push_all(h, h + 1);
1400 if ((ptr_t)h != top && (*dirty_fn)(h)) {
1401 GC_push_all(h, top);
1405 GC_API void GC_CALL GC_push_conditional(void *bottom, void *top, int all)
1408 GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_dirty);
1411 if (GC_auto_incremental) {
1412 /* Pages that were never dirtied cannot contain pointers. */
1413 GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_ever_dirty);
1417 GC_push_all(bottom, top);
1422 GC_API void GC_CALL GC_push_conditional(void *bottom, void *top,
1423 int all GC_ATTR_UNUSED)
1425 GC_push_all(bottom, top);
1427 #endif /* GC_DISABLE_INCREMENTAL */
1429 #if defined(MSWIN32) || defined(MSWINCE)
1430 void __cdecl GC_push_one(word p)
1432 void GC_push_one(word p)
1435 GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
1438 GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
1439 mse *mark_stack_ptr,
1440 mse *mark_stack_limit,
1441 void ** src GC_ATTR_UNUSED)
1447 if ((EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)
1448 && (!GC_all_interior_pointers
1449 || NULL == (hhdr = GC_find_header((ptr_t)GC_base(obj)))))
1450 || EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
1451 GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
1452 return mark_stack_ptr;
1454 return GC_push_contents_hdr((ptr_t)obj, mark_stack_ptr, mark_stack_limit,
1455 (ptr_t)src, hhdr, TRUE);
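/* An illustrative (hypothetical) client mark procedure built on the     */
/* routine above; my_mark_proc and its single-pointer object layout are  */
/* assumptions for the example, not part of the library:                 */
/*                                                                        */
/*   struct GC_ms_entry *my_mark_proc(GC_word *addr,                      */
/*                                    struct GC_ms_entry *mark_sp,        */
/*                                    struct GC_ms_entry *mark_limit,     */
/*                                    GC_word env)                        */
/*   {                                                                    */
/*     void *field = (void *)addr[0];  (assumed pointer slot)             */
/*     return GC_mark_and_push(field, mark_sp, mark_limit,                */
/*                             (void **)addr);                            */
/*   }                                                                    */
/*                                                                        */
/* Such a procedure would be registered with GC_new_proc() and wired     */
/* into object descriptors via GC_MAKE_PROC().                            */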
1458 /* Mark and push (i.e. gray) a single object p onto the main */
1459 /* mark stack. Consider p to be valid if it is an interior pointer. */
1461 /* The object p has passed a preliminary pointer validity */
1462 /* test, but we do not definitely know whether it is valid. */
1463 /* Mark bits are NOT atomically updated. Thus this must be the */
1464 /* only thread setting them. */
1465 # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1466 GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source)
1468 GC_INNER void GC_mark_and_push_stack(ptr_t p)
1469 # define source ((ptr_t)0)
1477 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)
1479 || (r = (ptr_t)GC_base(p)) == NULL
1480 || (hhdr = HDR(r)) == NULL)) {
1481 GC_ADD_TO_BLACK_LIST_STACK(p, source);
1484 if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
1485 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
1489 /* Pointer is on the stack. We may have dirtied the object */
1490 /* it points to, but have not called GC_dirty yet. */
1491 GC_dirty(p); /* entire object */
1493 GC_mark_stack_top = GC_push_contents_hdr(r, GC_mark_stack_top,
1494 GC_mark_stack_limit,
1495 source, hhdr, FALSE);
1496 /* We silently ignore pointers to near the end of a block, */
1497 /* which is very mildly suboptimal. */
1498 /* FIXME: We should probably add a header word to address this. */
1505 # ifndef TRACE_ENTRIES
1506 # define TRACE_ENTRIES 1000
1509 struct trace_entry {
1515 } GC_trace_buf[TRACE_ENTRIES] = { { NULL, 0, 0, 0, 0 } };
1517 int GC_trace_buf_ptr = 0;
1519 void GC_add_trace_entry(char *kind, word arg1, word arg2)
1521 GC_trace_buf[GC_trace_buf_ptr].kind = kind;
1522 GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
1523 GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
1524 GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
1525 GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
1527 if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
1530 GC_API void GC_CALL GC_print_trace_inner(word gc_no)
1534 for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
1535 struct trace_entry *p;
1537 if (i < 0) i = TRACE_ENTRIES-1;
1538 p = GC_trace_buf + i;
1539 if (p -> gc_no < gc_no || p -> kind == 0) {
1542 GC_printf("Trace:%s (gc:%u, bytes:%lu) 0x%lX, 0x%lX\n",
1543 p -> kind, (unsigned)p -> gc_no,
1544 (unsigned long)p -> bytes_allocd,
1545 (long)p->arg1 ^ 0x80000000L, (long)p->arg2 ^ 0x80000000L);
1547 GC_printf("Trace incomplete\n");
1550 GC_API void GC_CALL GC_print_trace(word gc_no)
1555 GC_print_trace_inner(gc_no);
1559 #endif /* TRACE_BUF */
1561 /* A version of GC_push_all that treats all interior pointers as valid */
1562 /* and scans the entire region immediately, in case the contents change.*/
1563 GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY GC_ATTR_NO_SANITIZE_THREAD
1564 GC_API void GC_CALL GC_push_all_eager(void *bottom, void *top)
1566 word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1567 word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
1570 REGISTER ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
1571 REGISTER ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
1572 # define GC_greatest_plausible_heap_addr greatest_ha
1573 # define GC_least_plausible_heap_addr least_ha
1575 if (top == 0) return;
1577 /* Check all pointers in range and push if they appear to be valid. */
1578 lim = t - 1 /* longword */;
1579 for (p = b; (word)p <= (word)lim;
1580 p = (word *)(((ptr_t)p) + ALIGNMENT)) {
1581 REGISTER word q = *p;
1583 GC_PUSH_ONE_STACK(q, p);
1585 # undef GC_greatest_plausible_heap_addr
1586 # undef GC_least_plausible_heap_addr
1589 GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
1591 # ifndef NEED_FIXUP_POINTER
1592 if (GC_all_interior_pointers
1593 # if defined(THREADS) && defined(MPROTECT_VDB)
1594 && !GC_auto_incremental
1596 && (word)GC_mark_stack_top
1597 < (word)(GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/8)) {
1598 GC_push_all(bottom, top);
1602 GC_push_all_eager(bottom, top);
1606 #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
1607 /* Similar to GC_push_conditional but scans the whole region immediately. */
1608 GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY
1609 GC_ATTR_NO_SANITIZE_THREAD
1610 GC_INNER void GC_push_conditional_eager(void *bottom, void *top,
1613 word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
1614 word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
1617 REGISTER ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
1618 REGISTER ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
1619 # define GC_greatest_plausible_heap_addr greatest_ha
1620 # define GC_least_plausible_heap_addr least_ha
1624 (void)all; /* TODO: If !all then scan only dirty pages. */
1627 for (p = b; (word)p <= (word)lim; p = (word *)((ptr_t)p + ALIGNMENT)) {
1628 REGISTER word q = *p;
1630 GC_PUSH_ONE_HEAP(q, p, GC_mark_stack_top);
1632 # undef GC_greatest_plausible_heap_addr
1633 # undef GC_least_plausible_heap_addr
1635 #endif /* WRAP_MARK_SOME && PARALLEL_MARK */
1637 #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
1638 defined(MARK_BIT_PER_GRANULE)
1639 # if GC_GRANULE_WORDS == 1
1640 # define USE_PUSH_MARKED_ACCELERATORS
1641 # define PUSH_GRANULE(q) \
1643 word qcontents = (q)[0]; \
1644 GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
1646 # elif GC_GRANULE_WORDS == 2
1647 # define USE_PUSH_MARKED_ACCELERATORS
1648 # define PUSH_GRANULE(q) \
1650 word qcontents = (q)[0]; \
1651 GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
1652 qcontents = (q)[1]; \
1653 GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
1655 # elif GC_GRANULE_WORDS == 4
1656 # define USE_PUSH_MARKED_ACCELERATORS
1657 # define PUSH_GRANULE(q) \
1659 word qcontents = (q)[0]; \
1660 GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
1661 qcontents = (q)[1]; \
1662 GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
1663 qcontents = (q)[2]; \
1664 GC_PUSH_ONE_HEAP(qcontents, (q)+2, GC_mark_stack_top); \
1665 qcontents = (q)[3]; \
1666 GC_PUSH_ONE_HEAP(qcontents, (q)+3, GC_mark_stack_top); \
1669 #endif /* !USE_MARK_BYTES && MARK_BIT_PER_GRANULE */
1671 #ifdef USE_PUSH_MARKED_ACCELERATORS
1672 /* Push all objects reachable from marked objects in the given block */
1673 /* containing objects of size 1 granule. */
1674 STATIC void GC_push_marked1(struct hblk *h, hdr *hhdr)
1676 word * mark_word_addr = &(hhdr->hb_marks[0]);
1680 /* Allow registers to be used for some frequently accessed */
1681 /* global variables. Otherwise aliasing issues are likely */
1682 /* to prevent that. */
1683 ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
1684 ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
1685 mse * mark_stack_top = GC_mark_stack_top;
1686 mse * mark_stack_limit = GC_mark_stack_limit;
1688 # undef GC_mark_stack_top
1689 # undef GC_mark_stack_limit
1690 # define GC_mark_stack_top mark_stack_top
1691 # define GC_mark_stack_limit mark_stack_limit
1692 # define GC_greatest_plausible_heap_addr greatest_ha
1693 # define GC_least_plausible_heap_addr least_ha
1695 p = (word *)(h->hb_body);
1696 plim = (word *)(((word)h) + HBLKSIZE);
1698 /* Go through all words in block. */
1699 while ((word)p < (word)plim) {
1700 word mark_word = *mark_word_addr++;
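        /* Each bit of mark_word corresponds to one granule in this part */
        /* of the block; bits are peeled off the low end, and the        */
        /* granule's contents are pushed whenever its bit is set.        */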
1703 while(mark_word != 0) {
1704 if (mark_word & 1) {
1707 q += GC_GRANULE_WORDS;
1710 p += WORDSZ*GC_GRANULE_WORDS;
1713 # undef GC_greatest_plausible_heap_addr
1714 # undef GC_least_plausible_heap_addr
1715 # undef GC_mark_stack_top
1716 # undef GC_mark_stack_limit
1717 # define GC_mark_stack_limit GC_arrays._mark_stack_limit
1718 # define GC_mark_stack_top GC_arrays._mark_stack_top
1719 GC_mark_stack_top = mark_stack_top;
1723 #ifndef UNALIGNED_PTRS
1725 /* Push all objects reachable from marked objects in the given block */
1726 /* containing objects of size 2 granules. */
1727 STATIC void GC_push_marked2(struct hblk *h, hdr *hhdr)
1729 word * mark_word_addr = &(hhdr->hb_marks[0]);
1733 ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
1734 ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
1735 mse * mark_stack_top = GC_mark_stack_top;
1736 mse * mark_stack_limit = GC_mark_stack_limit;
1738 # undef GC_mark_stack_top
1739 # undef GC_mark_stack_limit
1740 # define GC_mark_stack_top mark_stack_top
1741 # define GC_mark_stack_limit mark_stack_limit
1742 # define GC_greatest_plausible_heap_addr greatest_ha
1743 # define GC_least_plausible_heap_addr least_ha
1745 p = (word *)(h->hb_body);
1746 plim = (word *)(((word)h) + HBLKSIZE);
1748 /* Go through all words in block. */
1749 while ((word)p < (word)plim) {
1750 word mark_word = *mark_word_addr++;
1753 while(mark_word != 0) {
1754 if (mark_word & 1) {
1756 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1758 q += 2 * GC_GRANULE_WORDS;
1761 p += WORDSZ*GC_GRANULE_WORDS;
1764 # undef GC_greatest_plausible_heap_addr
1765 # undef GC_least_plausible_heap_addr
1766 # undef GC_mark_stack_top
1767 # undef GC_mark_stack_limit
1768 # define GC_mark_stack_limit GC_arrays._mark_stack_limit
1769 # define GC_mark_stack_top GC_arrays._mark_stack_top
1770 GC_mark_stack_top = mark_stack_top;
1773 # if GC_GRANULE_WORDS < 4
1774 /* Push all objects reachable from marked objects in the given block */
1775 /* containing objects of size 4 granules. */
1776 /* There is a risk of mark stack overflow here. But we handle that. */
1777 /* And only unmarked objects get pushed, so it's not very likely. */
1778 STATIC void GC_push_marked4(struct hblk *h, hdr *hhdr)
1780 word * mark_word_addr = &(hhdr->hb_marks[0]);
1784 ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
1785 ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
1786 mse * mark_stack_top = GC_mark_stack_top;
1787 mse * mark_stack_limit = GC_mark_stack_limit;
1789 # undef GC_mark_stack_top
1790 # undef GC_mark_stack_limit
1791 # define GC_mark_stack_top mark_stack_top
1792 # define GC_mark_stack_limit mark_stack_limit
1793 # define GC_greatest_plausible_heap_addr greatest_ha
1794 # define GC_least_plausible_heap_addr least_ha
1796 p = (word *)(h->hb_body);
1797 plim = (word *)(((word)h) + HBLKSIZE);
1799 /* Go through all words in block. */
1800 while ((word)p < (word)plim) {
1801 word mark_word = *mark_word_addr++;
1804 while(mark_word != 0) {
1805 if (mark_word & 1) {
1807 PUSH_GRANULE(q + GC_GRANULE_WORDS);
1808 PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
1809 PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
1811 q += 4 * GC_GRANULE_WORDS;
1814 p += WORDSZ*GC_GRANULE_WORDS;
1816 # undef GC_greatest_plausible_heap_addr
1817 # undef GC_least_plausible_heap_addr
1818 # undef GC_mark_stack_top
1819 # undef GC_mark_stack_limit
1820 # define GC_mark_stack_limit GC_arrays._mark_stack_limit
1821 # define GC_mark_stack_top GC_arrays._mark_stack_top
1822 GC_mark_stack_top = mark_stack_top;
1825 #endif /* GC_GRANULE_WORDS < 4 */
1827 #endif /* UNALIGNED_PTRS */
1829 #endif /* USE_PUSH_MARKED_ACCELERATORS */
1831 /* Push all objects reachable from marked objects in the given block. */
1832 STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
1834 word sz = hhdr -> hb_sz;
1835 word descr = hhdr -> hb_descr;
1839 mse * GC_mark_stack_top_reg;
1840 mse * mark_stack_limit = GC_mark_stack_limit;
1842 /* Some quick shortcuts: */
1843 if ((/* 0 | */ GC_DS_LENGTH) == descr) return;
1844 if (GC_block_empty(hhdr)/* nothing marked */) return;
1845 # if !defined(GC_DISABLE_INCREMENTAL)
1846 GC_n_rescuing_pages++;
1848 GC_objects_are_marked = TRUE;
1849 if (sz > MAXOBJBYTES) {
1852 lim = (ptr_t)((word)(h + 1)->hb_body - sz);
1855 switch(BYTES_TO_GRANULES(sz)) {
1856 # if defined(USE_PUSH_MARKED_ACCELERATORS)
1858 GC_push_marked1(h, hhdr);
1860 # if !defined(UNALIGNED_PTRS)
1862 GC_push_marked2(h, hhdr);
1864 # if GC_GRANULE_WORDS < 4
1866 GC_push_marked4(h, hhdr);
1871 case 1: /* to suppress "switch statement contains no case" warning */
1874 GC_mark_stack_top_reg = GC_mark_stack_top;
1875 for (p = h -> hb_body, bit_no = 0; (word)p <= (word)lim;
1876 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
1877 if (mark_bit_from_hdr(hhdr, bit_no)) {
1878 /* Mark from fields inside the object. */
1879 GC_mark_stack_top_reg = GC_push_obj(p, hhdr, GC_mark_stack_top_reg,
1883 GC_mark_stack_top = GC_mark_stack_top_reg;
1887 #ifdef ENABLE_DISCLAIM
1888 /* Unconditionally mark from all objects which have not been reclaimed. */
1889 /* This is useful in order to retain pointers which are reachable from */
1890 /* the disclaim notifiers. */
1891 /* To determine whether an object has been reclaimed, we require that */
1892 /* any live object has at least one of the two lowest bits of its first */
1893 /* word set. On the other hand, a reclaimed object is a member of a free */
1894 /* list, and thus contains a word-aligned next pointer as its first word. */
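/* For instance: a live object whose first word has either of its two   */
/* low bits set (e.g. a tagged immediate ending in binary 01) passes    */
/* the (*(word *)p & 0x3) != 0 test below, while a reclaimed object     */
/* whose first word is a word-aligned free-list link (low bits 00) is   */
/* skipped.                                                              */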
1896 STATIC void GC_push_unconditionally(struct hblk *h, hdr *hhdr)
1898 word sz = hhdr -> hb_sz;
1899 word descr = hhdr -> hb_descr;
1902 mse * GC_mark_stack_top_reg;
1903 mse * mark_stack_limit = GC_mark_stack_limit;
1905 if ((/* 0 | */ GC_DS_LENGTH) == descr)
1908 # if !defined(GC_DISABLE_INCREMENTAL)
1909 GC_n_rescuing_pages++;
1911 GC_objects_are_marked = TRUE;
1912 if (sz > MAXOBJBYTES)
1915 lim = (ptr_t)((word)(h + 1)->hb_body - sz);
1917 GC_mark_stack_top_reg = GC_mark_stack_top;
1918 for (p = h -> hb_body; (word)p <= (word)lim; p += sz)
1919 if ((*(word *)p & 0x3) != 0)
1920 GC_mark_stack_top_reg = GC_push_obj(p, hhdr, GC_mark_stack_top_reg,
1922 GC_mark_stack_top = GC_mark_stack_top_reg;
1924 #endif /* ENABLE_DISCLAIM */
1926 #ifndef GC_DISABLE_INCREMENTAL
1927 /* Test whether any page in the given block is dirty. */
1928 STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
1930 word sz = hhdr -> hb_sz;
1932 if (sz <= MAXOBJBYTES) {
1933 return(GC_page_was_dirty(h));
1936 while ((word)p < (word)h + sz) {
1937 if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
1943 #endif /* GC_DISABLE_INCREMENTAL */
1945 /* Similar to GC_push_marked, but skip over unallocated blocks */
1946 /* and return address of next plausible block. */
1947 STATIC struct hblk * GC_push_next_marked(struct hblk *h)
1949 hdr * hhdr = HDR(h);
1951 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) || HBLK_IS_FREE(hhdr), FALSE)) {
1952 h = GC_next_used_block(h);
1953 if (h == 0) return(0);
1954 hhdr = GC_find_header((ptr_t)h);
1957 if (NULL == h) ABORT("Bad HDR() definition");
1960 GC_push_marked(h, hhdr);
1961 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
1964 #ifndef GC_DISABLE_INCREMENTAL
1965 /* Identical to above, but mark only from dirty pages. */
1966 STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h)
1968 hdr * hhdr = HDR(h);
1970 if (!GC_incremental) ABORT("Dirty bits not set up");
1972 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
1973 || HBLK_IS_FREE(hhdr), FALSE)) {
1974 h = GC_next_used_block(h);
1975 if (h == 0) return(0);
1976 hhdr = GC_find_header((ptr_t)h);
1979 if (NULL == h) ABORT("Bad HDR() definition");
1982 if (GC_block_was_dirty(h, hhdr))
1984 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
1987 # ifdef ENABLE_DISCLAIM
1988 if ((hhdr -> hb_flags & MARK_UNCONDITIONALLY) != 0) {
1989 GC_push_unconditionally(h, hhdr);
1991 /* Then we may ask, why not also add the MARK_UNCONDITIONALLY */
1992 /* case to GC_push_next_marked, which is also applied to */
1993 /* uncollectible blocks? But it seems to me that the function */
1994 /* does not need to scan uncollectible (and unconditionally */
1995 /* marked) blocks since those are already handled in the */
1996 /* MS_PUSH_UNCOLLECTABLE phase. */
2000 GC_push_marked(h, hhdr);
2002 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
2004 #endif /* !GC_DISABLE_INCREMENTAL */
2006 /* Similar to above, but for uncollectible pages. Needed since we */
2007 /* do not clear marks for such pages, even for full collections. */
2008 STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
2010 hdr * hhdr = HDR(h);
2013 if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
2014 || HBLK_IS_FREE(hhdr), FALSE)) {
2015 h = GC_next_used_block(h);
2016 if (h == 0) return(0);
2017 hhdr = GC_find_header((ptr_t)h);
2020 if (NULL == h) ABORT("Bad HDR() definition");
2023 if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
2024 GC_push_marked(h, hhdr);
2027 # ifdef ENABLE_DISCLAIM
2028 if ((hhdr -> hb_flags & MARK_UNCONDITIONALLY) != 0) {
2029 GC_push_unconditionally(h, hhdr);
2033 h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
2036 return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));