/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"
/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
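/* (Duplicates are detected by start address: GC_roots_present() below  */
/* hashes on r_start, so re-adding an interval with the same start      */
/* finds the existing entry and at most extends it.)                    */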
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/
int GC_no_dls = 0;      /* Register dynamic library data segments.      */

static int n_root_sets = 0;
        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
#if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
  /* Should return the same value as GC_root_size.      */
  GC_INNER word GC_compute_root_size(void)
  {
    int i;
    word size = 0;

    for (i = 0; i < n_root_sets; i++) {
      size += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    return size;
  }
#endif /* !NO_DEBUGGING || GC_ASSERTIONS */
#if !defined(NO_DEBUGGING)
  void GC_print_static_roots(void)
  {
    int i;
    word size;

    for (i = 0; i < n_root_sets; i++) {
      GC_printf("From %p to %p%s\n",
                (void *)GC_static_roots[i].r_start,
                (void *)GC_static_roots[i].r_end,
                GC_static_roots[i].r_tmp ? " (temporary)" : "");
    }
    GC_printf("GC_root_size: %lu\n", (unsigned long)GC_root_size);

    if ((size = GC_compute_root_size()) != GC_root_size)
      GC_err_printf("GC_root_size incorrect!! Should be: %lu\n",
                    (unsigned long)size);
  }
#endif /* !NO_DEBUGGING */
#ifndef THREADS
  /* Primarily for debugging support:                                   */
  /* Is the address p in one of the registered static root sections?    */
  GC_INNER GC_bool GC_is_static_root(void *p)
  {
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && (word)p >= (word)GC_static_roots[last_root_set].r_start
        && (word)p < (word)GC_static_roots[last_root_set].r_end)
      return TRUE;
    for (i = 0; i < n_root_sets; i++) {
      if ((word)p >= (word)GC_static_roots[i].r_start
          && (word)p < (word)GC_static_roots[i].r_end) {
        last_root_set = i;
        return TRUE;
      }
    }
    return FALSE;
  }
#endif /* !THREADS */
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/
GC_INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
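/* (Example: with CPP_WORDSZ == 64 and LOG_RT_SIZE == 6 the address is  */
/* XOR-folded by shifts of 48, 24, 12 and 6 bits, so every bit of the   */
/* address influences the resulting 6-bit index into GC_root_index.)    */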
/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NULL.                                            */
GC_INNER void * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return NULL;
}
/* Add the given root structure to the index. */
GC_INLINE void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}
#endif /* !MSWIN32 && !MSWINCE && !CYGWIN32 */
GC_INNER word GC_root_size = 0;

GC_API void GC_CALL GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}
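/* (Illustrative usage sketch, not part of this file: a client with a  */
/* statically allocated pointer table could register it as follows.    */
/*   static void *my_refs[256];  -- hypothetical application array     */
/*   GC_add_roots(my_refs, (char *)my_refs + sizeof(my_refs));         */
/* The end pointer is exclusive, i.e. the interval is [b,e).)          */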
/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast no-op, and hence benign.  We do not handle     */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                    */
/* Tmp specifies that the interval may be deleted before               */
/* re-registering dynamic libraries.                                   */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    GC_ASSERT((word)b <= (word)e);
    b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
                                        /* round b up to word boundary */
    e = (ptr_t)((word)e & ~(word)(sizeof(word) - 1));
                                        /* round e down to word boundary */
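    /* (E.g., with 64-bit words, b = 0x100f rounds up to 0x1010 and    */
    /* e = 0x2009 rounds down to 0x2008: only whole words lying        */
    /* entirely inside the original interval remain registered.)       */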
    if ((word)b >= (word)e) return;  /* nothing to do */
#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping        */
      /* or adjacent intervals.                                        */
      /* This could be done faster with e.g. a                         */
      /* balanced tree.  But the execution time here is                */
      /* virtually guaranteed to be dominated by the time it           */
      /* takes to scan the roots.                                      */
      {
        int i;
        struct roots * old = NULL; /* initialized to prevent warning. */

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((word)b <= (word)old->r_end
                && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
              other = GC_static_roots + i;
              b = other -> r_start;
              e = other -> r_end;
              if ((word)b <= (word)old->r_end
                  && (word)e >= (word)old->r_start) {
                  if ((word)b < (word)old->r_start) {
                      GC_root_size += old->r_start - b;
                      old -> r_start = b;
                  }
                  if ((word)e > (word)old->r_end) {
                      GC_root_size += e - old->r_end;
                      old -> r_end = e;
                  }
                  old -> r_tmp &= other -> r_tmp;
                  /* Delete this entry. */
                  GC_root_size -= (other -> r_end - other -> r_start);
                  other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                  other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                  n_root_sets--;
              }
            }
            return;
        }
      }
#   else
      {
        struct roots * old = (struct roots *)GC_roots_present(b);

        if (old != 0) {
            if ((word)e <= (word)old->r_end) {
                old -> r_tmp &= tmp;
                return; /* already there */
            }
            if (old -> r_tmp == tmp || !tmp) {
                /* Extend the existing root. */
                GC_root_size += e - old -> r_end;
                old -> r_end = e;
                old -> r_tmp = tmp;
                return;
            }
            b = old -> r_end;
        }
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets");
    }

#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Adding data root section %d: %p .. %p%s\n",
                    n_root_sets, (void *)b, (void *)e,
                    tmp ? " (temporary)" : "");
#   endif
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}
static GC_bool roots_were_cleared = FALSE;

GC_API void GC_CALL GC_clear_roots(void)
{
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      BZERO(GC_root_index, RT_SIZE * sizeof(void *));
#   endif
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Clear all data root sections\n");
#   endif
    UNLOCK();
}
/* Internal use only; lock held.        */
STATIC void GC_remove_root_at_pos(int i)
{
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Remove data root section at %d: %p .. %p%s\n",
                    i, (void *)GC_static_roots[i].r_start,
                    (void *)GC_static_roots[i].r_end,
                    GC_static_roots[i].r_tmp ? " (temporary)" : "");
#   endif
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
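/* (Removal is O(1): the last table entry is moved into slot i and the */
/* table shrinks by one.  The order of GC_static_roots changes, which  */
/* is why callers rebuild the hash index afterwards when necessary.)   */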
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  STATIC void GC_rebuild_root_index(void)
  {
    int i;

    BZERO(GC_root_index, RT_SIZE * sizeof(void *));
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
  }
#endif
#if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
    || defined(PCR) || defined(CYGWIN32)
/* Internal use only; lock held.        */
STATIC void GC_remove_tmp_roots(void)
{
    int i;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      int old_n_roots = n_root_sets;
#   endif

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      if (n_root_sets < old_n_roots)
        GC_rebuild_root_index();
#   endif
}
#endif
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e);

  GC_API void GC_CALL GC_remove_roots(void *b, void *e)
  {
    DCL_LOCK_STATE;

    /* Quick check whether there is nothing to do.      */
    if ((((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1)) >=
        ((word)e & ~(word)(sizeof(word) - 1)))
      return;
    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
  }
  /* Should only be called when the lock is held.       */
  STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    for (i = 0; i < n_root_sets; ) {
        if ((word)GC_static_roots[i].r_start >= (word)b
            && (word)GC_static_roots[i].r_end <= (word)e) {
            GC_remove_root_at_pos(i);
            rebuild = TRUE;
        } else {
            i++;
        }
    }
    if (rebuild)
        GC_rebuild_root_index();
  }
#endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */
#ifdef USE_PROC_FOR_LIBRARIES
  /* Exchange the elements of the roots table.  Requires rebuild of    */
  /* the roots index table after the swap.                             */
  GC_INLINE void swap_static_roots(int i, int j)
  {
    ptr_t r_start = GC_static_roots[i].r_start;
    ptr_t r_end = GC_static_roots[i].r_end;
    GC_bool r_tmp = GC_static_roots[i].r_tmp;

    GC_static_roots[i].r_start = GC_static_roots[j].r_start;
    GC_static_roots[i].r_end = GC_static_roots[j].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[j].r_tmp;
    /* No need to swap r_next values.   */
    GC_static_roots[j].r_start = r_start;
    GC_static_roots[j].r_end = r_end;
    GC_static_roots[j].r_tmp = r_tmp;
  }
  /* Remove given range from every static root which intersects with   */
  /* the range.  It is assumed GC_remove_tmp_roots is called before    */
  /* this function is called repeatedly by GC_register_map_entries.    */
  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
    for (i = 0; i < n_root_sets; i++) {
      ptr_t r_start, r_end;

      if (GC_static_roots[i].r_tmp) {
        /* The remaining roots are skipped as they are all temporary. */
#       ifdef GC_ASSERTIONS
          int j;

          for (j = i + 1; j < n_root_sets; j++) {
            GC_ASSERT(GC_static_roots[j].r_tmp);
          }
#       endif
        break;
      }
      r_start = GC_static_roots[i].r_start;
      r_end = GC_static_roots[i].r_end;
      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
                        (void *)b, (void *)e,
                        i, (void *)r_start, (void *)r_end);
#       endif
        if ((word)r_start < (word)b) {
          GC_root_size -= r_end - b;
          GC_static_roots[i].r_end = b;
          /* No need to rebuild as hash does not use r_end value. */
          if ((word)e < (word)r_end) {
            int j;

            if (rebuild) {
              GC_rebuild_root_index();
              rebuild = FALSE;
            }
            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
            for (j = i + 1; j < n_root_sets; j++)
              if (GC_static_roots[j].r_tmp)
                break;
            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
              /* Exchange the roots to have all temporary ones at the end. */
              swap_static_roots(j, n_root_sets - 1);
              rebuild = TRUE;
            }
          }
        } else {
          if ((word)e < (word)r_end) {
            GC_root_size -= e - r_start;
            GC_static_roots[i].r_start = e;
          } else {
            GC_remove_root_at_pos(i);
            if (i < n_root_sets - 1 && GC_static_roots[i].r_tmp
                && !GC_static_roots[i + 1].r_tmp) {
              int j;

              for (j = i + 2; j < n_root_sets; j++)
                if (GC_static_roots[j].r_tmp)
                  break;
              /* Exchange the roots to have all temporary ones at the end. */
              swap_static_roots(i, j - 1);
            }
            i--;
          }
          rebuild = TRUE;
        }
      }
    }
    if (rebuild)
      GC_rebuild_root_index();
  }
#endif /* USE_PROC_FOR_LIBRARIES */
#if !defined(NO_DEBUGGING)
  /* For debugging purposes only.                                       */
  /* Workaround for the OS mapping and unmapping behind our back:       */
  /* Is the address p in one of the temporary static root sections?     */
  GC_API int GC_CALL GC_is_tmp_root(void *p)
  {
    static int last_root_set = MAX_ROOT_SETS;
    int i;

    if (last_root_set < n_root_sets
        && (word)p >= (word)GC_static_roots[last_root_set].r_start
        && (word)p < (word)GC_static_roots[last_root_set].r_end)
      return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
      if ((word)p >= (word)GC_static_roots[i].r_start
          && (word)p < (word)GC_static_roots[i].r_end) {
        last_root_set = i;
        return GC_static_roots[i].r_tmp;
      }
    }
    return FALSE;
  }
#endif /* !NO_DEBUGGING */
GC_INNER ptr_t GC_approx_sp(void)
{
    volatile word sp;
#   if defined(CPPCHECK) || (__GNUC__ >= 4 /* GC_GNUC_PREREQ(4, 0) */ \
                             && !defined(STACK_NOT_SCANNED))
      /* TODO: Use GC_GNUC_PREREQ after fixing a bug in cppcheck.       */
      sp = (word)__builtin_frame_address(0);
#   else
      sp = (word)&sp;
#   endif
      /* Also force stack to grow if necessary.  Otherwise the          */
      /* later accesses might cause the kernel to think we're           */
      /* doing something wrong.                                         */
    return((ptr_t)sp);
}
/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/

STATIC size_t GC_excl_table_entries = 0;/* Number of entries in use. */
/* Return the first exclusion range that includes an address >= start_addr. */
/* Assumes the exclusion table contains at least one entry (namely the      */
/* GC data structures).                                                     */
STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;

    while (high > low) {
        size_t mid = (low + high) >> 1;

        /* low <= mid < high    */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
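/* (This is a binary search over the exclusion table, which is kept in */
/* ascending address order: each step preserves the invariant that the */
/* first entry with e_end > start_addr, if any, lies in [low, high].)  */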
/* Should only be called when the lock is held.  The range boundaries   */
/* should be properly aligned and valid.                                */
GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index;

    GC_ASSERT((word)start % sizeof(word) == 0);
    GC_ASSERT((word)start < (word)finish);

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
      size_t i;

      if ((word)(next -> e_start) < (word) finish) {
        /* incomplete error check. */
        ABORT("Exclusion ranges overlap");
      }
      if ((word)(next -> e_start) == (word) finish) {
        /* extend old range backwards */
        next -> e_start = (ptr_t)start;
        return;
      }
      next_index = next - GC_excl_table;
      for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
      }
    } else {
      next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
GC_API void GC_CALL GC_exclude_static_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (b == e) return;  /* nothing to exclude? */

    /* Round boundaries (in direction reverse to that of GC_add_roots). */
    b = (void *)((word)b & ~(word)(sizeof(word) - 1));
    e = (void *)(((word)e + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
    if (NULL == e)
      e = (void *)(~(word)(sizeof(word) - 1)); /* handle overflow */

    LOCK();
    GC_exclude_static_roots_inner(b, e);
    UNLOCK();
}
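/* (The rounding direction is the opposite of GC_add_roots: exclusions */
/* grow to whole words, e.g. with 64-bit words b = 0x1005 becomes      */
/* 0x1000 and e = 0x2001 becomes 0x2008, so no partially excluded word */
/* is ever scanned.)                                                   */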
#if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
# define GC_PUSH_CONDITIONAL(b, t, all) \
                (GC_parallel \
                    ? GC_push_conditional_eager(b, t, all) \
                    : GC_push_conditional(b, t, all))
#elif defined(GC_DISABLE_INCREMENTAL)
# define GC_PUSH_CONDITIONAL(b, t, all) GC_push_all(b, t)
#else
# define GC_PUSH_CONDITIONAL(b, t, all) GC_push_conditional(b, t, all)
                        /* Do either of GC_push_all or GC_push_selected */
                        /* depending on the third arg.                  */
#endif
/* Invoke push_conditional on ranges that are not excluded. */
STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
                                                GC_bool all GC_ATTR_UNUSED)
{
    while ((word)bottom < (word)top) {
        struct exclusion *next = GC_next_exclusion(bottom);
        ptr_t excl_start;

        if (0 == next
            || (word)(excl_start = next -> e_start) >= (word)top) {
          GC_PUSH_CONDITIONAL(bottom, top, all);
          break;
        }
        if ((word)excl_start > (word)bottom)
          GC_PUSH_CONDITIONAL(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
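/* (Each iteration pushes the unexcluded prefix [bottom, excl_start)    */
/* and then resumes scanning just past the exclusion, at next -> e_end.) */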
#ifdef IA64
  /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
  GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
                  int eager, struct GC_traced_stack_sect_s *traced_stack_sect)
  {
    while (traced_stack_sect != NULL) {
        ptr_t frame_bs_lo = traced_stack_sect -> backing_store_end;
        GC_ASSERT((word)frame_bs_lo <= (word)bs_hi);
        if (eager) {
            GC_push_all_eager(frame_bs_lo, bs_hi);
        } else {
            GC_push_all_stack(frame_bs_lo, bs_hi);
        }
        bs_hi = traced_stack_sect -> saved_backing_store_ptr;
        traced_stack_sect = traced_stack_sect -> prev;
    }
    GC_ASSERT((word)bs_lo <= (word)bs_hi);
    if (eager) {
        GC_push_all_eager(bs_lo, bs_hi);
    } else {
        GC_push_all_stack(bs_lo, bs_hi);
    }
  }
#endif /* IA64 */
#ifdef THREADS

GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
                        struct GC_traced_stack_sect_s *traced_stack_sect)
{
    while (traced_stack_sect != NULL) {
        GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect);
#       ifdef STACK_GROWS_UP
            GC_push_all_stack((ptr_t)traced_stack_sect, lo);
#       else /* STACK_GROWS_DOWN */
            GC_push_all_stack(lo, (ptr_t)traced_stack_sect);
#       endif
        lo = traced_stack_sect -> saved_stack_ptr;
        GC_ASSERT(lo != NULL);
        traced_stack_sect = traced_stack_sect -> prev;
    }
    GC_ASSERT(!((word)hi HOTTER_THAN (word)lo));
#   ifdef STACK_GROWS_UP
        /* We got them backwards! */
        GC_push_all_stack(hi, lo);
#   else /* STACK_GROWS_DOWN */
        GC_push_all_stack(lo, hi);
#   endif
}

#else /* !THREADS */
                        /* Similar to GC_push_all_eager, but only the   */
                        /* part hotter than cold_gc_frame is scanned    */
                        /* immediately.  Needed to ensure that callee-  */
                        /* save registers are not missed.               */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 * We don't need to worry about the manual VDB case here, since this
 * is only called in the single-threaded case.  We assume that we
 * cannot collect between an assignment and the corresponding
 * GC_dirty() call.
 */
STATIC void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
                                              ptr_t cold_gc_frame)
{
#ifndef NEED_FIXUP_POINTER
  if (GC_all_interior_pointers) {
    /* Push the hot end of the stack eagerly, so that register values   */
    /* saved inside GC frames are marked before they disappear.         */
    /* The rest of the marking can be deferred until later.             */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
    GC_ASSERT((word)bottom <= (word)cold_gc_frame
              && (word)cold_gc_frame <= (word)top);
#   ifdef STACK_GROWS_DOWN
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
        GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
        GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else
#endif
  /* else */ {
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
    GC_add_trace_entry("GC_push_all_stack", (word)bottom, (word)top);
# endif
}
/* Similar to GC_push_all_stack_sections() but also uses cold_gc_frame. */
STATIC void GC_push_all_stack_part_eager_sections(ptr_t lo, ptr_t hi,
        ptr_t cold_gc_frame, struct GC_traced_stack_sect_s *traced_stack_sect)
{
    GC_ASSERT(traced_stack_sect == NULL || cold_gc_frame == NULL ||
              (word)cold_gc_frame HOTTER_THAN (word)traced_stack_sect);

    while (traced_stack_sect != NULL) {
        GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect);
#       ifdef STACK_GROWS_UP
            GC_push_all_stack_partially_eager((ptr_t)traced_stack_sect, lo,
                                              cold_gc_frame);
#       else /* STACK_GROWS_DOWN */
            GC_push_all_stack_partially_eager(lo, (ptr_t)traced_stack_sect,
                                              cold_gc_frame);
#       endif
        lo = traced_stack_sect -> saved_stack_ptr;
        GC_ASSERT(lo != NULL);
        traced_stack_sect = traced_stack_sect -> prev;
        cold_gc_frame = NULL; /* Use at most once.      */
    }

    GC_ASSERT(!((word)hi HOTTER_THAN (word)lo));
#   ifdef STACK_GROWS_UP
        /* We got them backwards! */
        GC_push_all_stack_partially_eager(hi, lo, cold_gc_frame);
#   else /* STACK_GROWS_DOWN */
        GC_push_all_stack_partially_eager(lo, hi, cold_gc_frame);
#   endif
}

#endif /* !THREADS */
                        /* Push enough of the current stack eagerly to  */
                        /* ensure that callee-save registers saved in   */
                        /* GC frames are scanned.                       */
                        /* In the non-threads case, schedule entire     */
                        /* stack for scanning.                          */
                        /* The second argument is a pointer to the      */
                        /* (possibly null) thread context, for          */
                        /* (currently hypothetical) more precise        */
                        /* stack scanning.                              */
/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * TODO: Merge it with per-thread stuff.
 */
STATIC void GC_push_current_stack(ptr_t cold_gc_frame,
                                  void * context GC_ATTR_UNUSED)
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled        */
        /* in the thread-specific code.                                 */
#     else
        GC_push_all_eager(cold_gc_frame, GC_approx_sp());
#     endif
#   else
      GC_push_all_stack_part_eager_sections(GC_approx_sp(), GC_stackbottom,
                                      cold_gc_frame, GC_traced_stack_sect);
#     ifdef IA64
        /* We also need to push the register stack backing store.       */
        /* This should really be done in the same way as the            */
        /* regular stack.  For now we fudge it a bit.                   */
        /* Note that the backing store grows up, so we can't use        */
        /* GC_push_all_stack_partially_eager.                           */
        {
          ptr_t bsp = GC_save_regs_ret_val;
          ptr_t cold_gc_bs_pointer = bsp - 2048;

          if (GC_all_interior_pointers
              && (word)cold_gc_bs_pointer > (word)BACKING_STORE_BASE) {
            /* Adjust cold_gc_bs_pointer if below our innermost          */
            /* "traced stack section" in backing store.                  */
            if (GC_traced_stack_sect != NULL
                && (word)cold_gc_bs_pointer
                   < (word)GC_traced_stack_sect->backing_store_end)
              cold_gc_bs_pointer =
                        GC_traced_stack_sect->backing_store_end;
            GC_push_all_register_sections(BACKING_STORE_BASE,
                        cold_gc_bs_pointer, FALSE, GC_traced_stack_sect);
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
          } else {
            GC_push_all_register_sections(BACKING_STORE_BASE, bsp,
                        TRUE /* eager */, GC_traced_stack_sect);
          }
          /* All values should be sufficiently aligned that we           */
          /* don't have to worry about the boundary.                     */
        }
#     endif /* IA64 */
#   endif /* !THREADS */
}
GC_INNER void (*GC_push_typed_structures)(void) = 0;
                        /* Push GC internal roots.  These are normally  */
                        /* included in the static data segment, and     */
                        /* thus implicitly pushed.  But we must do this */
                        /* explicitly if normal root processing is      */
                        /* disabled.                                    */
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
STATIC void GC_push_gc_structures(void)
{
#   ifndef GC_NO_FINALIZATION
      GC_push_finalizer_structures();
#   endif
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
    if (GC_push_typed_structures)
      GC_push_typed_structures();
}
GC_INNER void GC_cond_register_dynamic_libraries(void)
{
#   if (defined(DYNAMIC_LOADING) && !defined(MSWIN_XBOX1)) \
       || defined(CYGWIN32) || defined(MSWIN32) || defined(MSWINCE) \
       || defined(PCR)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}
STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame)
{
    GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}
/*
 * Call the mark routines (GC_push_one for a single pointer,
 * GC_push_conditional on groups of pointers) on every top level
 * accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame GC_ATTR_UNUSED)
{
    int i;
    unsigned kind;
    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Re-register dynamic libraries, in case one got added.            */
    /* There is some argument for doing this as late as possible,       */
    /* especially on win32, where it can change asynchronously.         */
    /* In those cases, we do it here.  But on other platforms, it's     */
    /* not safe with the world stopped, so we do it earlier.            */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif
    /* Mark everything in static data areas.                            */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
                             GC_static_roots[i].r_start,
                             GC_static_roots[i].r_end, all);
    }
    /* Mark all free list header blocks, if those were allocated from   */
    /* the garbage collected heap.  This makes sure they don't          */
    /* disappear if we are not marking from static data.  It also       */
    /* saves us the trouble of scanning them, and possibly that of      */
    /* marking the freelists.                                           */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);

        if (base != NULL) {
            GC_set_mark_bit(base);
        }
    }
    /* Mark from GC internal roots if those might otherwise have        */
    /* been excluded.                                                   */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }
    /* Mark thread local free lists, even if their mark        */
    /* descriptor excludes the link field.                     */
    /* If the world is not stopped, this is unsafe.  It is     */
    /* also unnecessary, since we will do this again with the  */
    /* world stopped.                                          */
#   if defined(THREAD_LOCAL_ALLOC)
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif
    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
#   ifndef STACK_NOT_SCANNED
      GC_push_regs_and_stack(cold_gc_frame);
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}