gcc -shared -o liblinuxgc.so $(OBJS) dyn_load.o -lo
ln liblinuxgc.so libgc.so
+# Alternative Linux rule. This is preferable, but is likely to break the
+# Makefile for some non-linux platforms.
+# LIBOBJS= $(patsubst %.o, %.lo, $(OBJS))
+#
+#.SUFFIXES: .lo $(SUFFIXES)
+#
+#.c.lo:
+# $(CC) $(CFLAGS) $(CPPFLAGS) -fPIC -c $< -o $@
+#
+# liblinuxgc.so: $(LIBOBJS) dyn_load.lo
+# gcc -shared -Wl,-soname=libgc.so.0 -o libgc.so.0 $(LIBOBJS) dyn_load.lo
+# touch liblinuxgc.so
+
mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s $(srcdir)/mips_ultrix_mach_dep.s $(srcdir)/rs6000_mach_dep.s $(UTILS)
rm -f mach_dep.o
./if_mach MIPS IRIX5 $(AS) -o mach_dep.o $(srcdir)/mips_sgi_mach_dep.s
gzip gc.tar
lint: $(CSRCS) test.c
- lint -DLINT $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit|sbrk|mprotect|syscall"
+ lint -DLINT $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit|sbrk|mprotect|syscall|change in ANSI|improper alignment"
# BTL: added to test shared library version of collector.
# Currently works only under SunOS5. Requires GC_INIT call from statically
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.14alpha1 of a conservative garbage collector for C and C++.
+This is version 4.14alpha2 of a conservative garbage collector for C and C++.
You might find a more recent version of this at
Early versions of this collector were developed as a part of research
projects supported in part by the National Science Foundation
and the Defense Advanced Research Projects Agency.
-Much of the code was rewritten by Hans-J. Boehm at Xerox PARC.
+Much of the code was rewritten by Hans-J. Boehm at Xerox PARC
+and is now maintained by him at SGI (boehm@sgi.com).
+
+Some other contributors:
+
+More recent contributors are mentioned in the modification history at the
+end of this file. My apologies for any omissions.
+
The SPARC specific code was contributed by Mark Weiser
(weiser@parc.xerox.com). The Encore Multimax modifications were supplied by
Kevin Kenny (kenny@m.cs.uiuc.edu). The adaptation to the RT is largely due
(Blame for misinstallation of these modifications goes to the first author,
however.)
-Credits for some more recent modifications are given in the modification
-history at the end of this file.
+OVERVIEW
This is intended to be a general purpose, garbage collecting storage
allocator. The algorithms used are described in:
Implementation.
(Both are also available from
-http://reality.sgi.com/employees/boehm_mti/papers/, among other places.)
+http://reality.sgi.com/boehm/papers/, among other places.)
Unlike the collector described in the second reference, this collector
operates either with the mutator stopped during the entire collection
doesn't cut it.
Some C optimizers may lose the last undisguised pointer to a memory
object as a consequence of clever optimizations. This has almost
-never been observed in practice. Send mail to boehm@mti.sgi.com
+never been observed in practice. Send mail to boehm@sgi.com
for suggestions on how to fix your compiler.
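The following contrived fragment (an illustration only, not code from the
distribution) shows the kind of situation involved: the only live value
referring to an object may end up being a derived pointer, such as one
past its end, which the collector does not by default treat as a reference.

    #include "gc.h"

    long sum(int n)
    {
        int i;
        long total = 0;
        int *x = (int *)GC_MALLOC(n * sizeof(int));  /* collectable object */

        for (i = 0; i < n; i++) x[i] = i;
        /* An aggressive optimizer could, in principle, rewrite the next   */
        /* loop to keep only x + n and a counter.  If no other copy of x   */
        /* survives, the object becomes invisible to a conservative scan.  */
        for (i = 0; i < n; i++) total += x[i];
        return total;
    }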
This is not a real-time collector. In the standard configuration,
percentage of time required for collection should be constant across
per MB of accessible memory that needs to be scanned. Your mileage
may vary.) The incremental/generational collection facility helps,
but is portable only if "stubborn" allocation is used.
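As a rough sketch (again an illustration, not code from the distribution;
GC_enable_incremental, GC_MALLOC_STUBBORN, GC_END_STUBBORN_CHANGE, and
GC_CHANGE_STUBBORN are the interfaces declared in gc.h, while make_cons is
just an example), a client requests incremental collection and uses
stubborn allocation roughly as follows:

    #include "gc.h"

    struct cons { struct cons *cdr; int car; };

    struct cons *make_cons(int car, struct cons *cdr)
    {
        struct cons *p =
            (struct cons *)GC_MALLOC_STUBBORN(sizeof(struct cons));
        p->car = car;
        p->cdr = cdr;
        GC_END_STUBBORN_CHANGE(p);  /* object will not be written again;   */
                                    /* call GC_CHANGE_STUBBORN(p) first if */
                                    /* it must be modified later.          */
        return p;
    }

    int main(void)
    {
        GC_enable_incremental();    /* request incremental/generational mode */
        make_cons(1, make_cons(2, 0));
        return 0;
    }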
- Please address bug reports to boehm@mti.sgi.com. If you are
+ Please address bug reports to boehm@sgi.com. If you are
contemplating a major addition, you might also send mail to ask whether
it's already been done (or whether we tried and discarded it).
after the relevant frame was overwritten, and the new save location
might be outside the scanned area. Fixed by more eager stack scanning.)
- PRINT_BLACK_LIST had some problems. A few source addresses were garbage.
- - Removed a prototype in code that shouldn't assume prototype support.
- Replaced Makefile.dj and added -I flags to cord make targets.
(Thanks to Gary Leavens.)
- GC_try_to_collect was broken with the nonincremental collector.
- gc_cleanup destructors could pass the wrong address to
GC_register_finalizer_ignore_self in the presence of multiple
inheritance. (Thanks to Darrell Schiebel.)
+ - Changed PowerPC Linux stack finding code.
+
+Since 4.14alpha1
+ - -DSMALL_CONFIG did not work reliably with large (> 4K) pages.
+ Recycling the mark stack during expansion could result in a size
+ zero heap segment, which confused things. (This was probably also an
+ issue with the normal config and huge pages.)
+ - Did more work to make sure that callee-save registers were scanned
+ completely, even with the setjmp-based code. Added USE_GENERIC_PUSH_REGS
+ macro to facilitate testing on machines I have access to.
+ - Added code to explicitly push register contents for win32 threads.
+ This seems to be necessary. (Thanks to Pierre de Rop.)
To do:
if (size_avail != size_needed
&& !GC_incremental
&& (word)size_needed <= GC_max_hblk_size/2
- && GC_in_last_heap_sect(hbp) && GC_should_collect()) {
+ && GC_in_last_heap_sect((ptr_t)hbp)
+ && GC_should_collect()) {
continue;
}
# endif
if (GC_incremental && GC_collection_in_progress()) {
for (i = GC_deficit; i < GC_RATE*n; i++) {
- if (GC_mark_some()) {
+ if (GC_mark_some((ptr_t)0)) {
/* Need to finish a collection */
# ifdef SAVE_CALL_CHAIN
GC_save_callers(GC_last_stack);
GC_stop_func stop_func;
{
register int i;
+ int dummy;
# ifdef PRINTSTATS
CLOCK_TYPE start_time, current_time;
# endif
START_WORLD();
return(FALSE);
}
- if (GC_mark_some()) break;
+ if (GC_mark_some((ptr_t)(&dummy))) break;
}
GC_gc_no++;
/* than normal pause times for incremental collection. However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
+/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
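/* Purely for illustration (client code, not part of gc.h): one plausible   */
/* use of the GC_stop_func argument is a deadline-based predicate.  The     */
/* names gc_deadline, past_deadline and collect_with_time_budget below are  */
/* hypothetical.                                                            */

    #include <time.h>
    #include "gc.h"

    static time_t gc_deadline;

    static int past_deadline(void)        /* matches GC_stop_func           */
    {
        return time(NULL) > gc_deadline;  /* nonzero aborts the collection  */
    }

    int collect_with_time_budget(long seconds)
    {
        gc_deadline = time(NULL) + seconds;
        return GC_try_to_collect(past_deadline);
               /* 0: aborted, no useful work done; 1: collection completed  */
    }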
{
register oh * ohdr = (oh *)GC_base(p);
- GC_err_printf1("0x%lx (", (unsigned long)ohdr + sizeof(oh));
+ GC_err_printf1("0x%lx (", ((unsigned long)ohdr + sizeof(oh)));
GC_err_puts(ohdr -> oh_string);
GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
(unsigned long)(ohdr -> oh_sz));
if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
|| ohdr -> oh_string == 0) {
GC_err_printf1("<smashed>, appr. sz = %ld)\n",
- GC_size((ptr_t)ohdr) - DEBUG_BYTES);
+ (GC_size((ptr_t)ohdr) - DEBUG_BYTES));
} else {
if (ohdr -> oh_string[0] == '\0') {
GC_err_puts("EMPTY(smashed?)");
#include <errno.h>
extern void * GC_roots_present();
+ /* The type is a lie, since the real type doesn't make sense here, */
+ /* and we only test for NULL. */
extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena */
if (fd < 0) {
sprintf(buf, "/proc/%d", getpid());
+ /* The above generates a lint complaint, since pid_t varies. */
+ /* It's unclear how to improve this. */
fd = open(buf, O_RDONLY);
if (fd < 0) {
ABORT("/proc open failed");
if (needed_sz >= current_sz) {
current_sz = needed_sz * 2 + 1;
/* Expansion, plus room for 0 record */
- addr_map = (prmap_t *)GC_scratch_alloc(current_sz * sizeof(prmap_t));
+ addr_map = (prmap_t *)GC_scratch_alloc((word)
+ (current_sz * sizeof(prmap_t)));
}
if (ioctl(fd, PIOCMAP, addr_map) < 0) {
GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
/* than normal pause times for incremental collection. However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
+/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
while (!GC_mark_stack_empty()) GC_mark_from_mark_stack(); \
if (GC_mark_state != MS_NONE) { \
GC_set_mark_bit(real_ptr); \
- while (!GC_mark_some()); \
+ while (!GC_mark_some((ptr_t)0)); \
} \
}
/* Return after about one pages worth of */
/* work. */
GC_bool GC_mark_stack_empty();
-GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_some(/* cold_gc_frame */);
+ /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
+void GC_push_all_eager(/*b,t*/); /* Same as GC_push_all_stack, but */
+ /* ensures that stack is scanned */
+ /* immediately, not just scheduled */
+ /* for scanning. */
+#ifndef THREADS
+ void GC_push_all_stack_partially_eager(/* bottom, top, cold_gc_frame */);
+ /* Similar to GC_push_all_eager, but only the */
+ /* part hotter than cold_gc_frame is scanned */
+ /* immediately. Needed to ensure that callee- */
+ /* save registers are not missed. */
+#else
+ /* In the threads case, we push part of the current thread stack */
+ /* with GC_push_all_eager when we push the registers. This gets the */
+ /* callee-save registers that may disappear. The remainder of the */
+ /* stacks are scheduled for scanning in *GC_push_other_roots, which */
+ /* is thread-package-specific. */
+#endif
+void GC_push_current_stack(/* ptr_t cold_gc_frame */);
+ /* Push enough of the current stack eagerly to */
+ /* ensure that callee-save registers saved in */
+ /* GC frames are scanned. */
+ /* In the non-threads case, schedule entire */
+ /* stack for scanning. */
+void GC_push_roots(/* GC_bool all, ptr_t cold_gc_frame */);
+ /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
# define HP
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(m68k)
+# define M68K
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
# define NEXT
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(i386)
+# define I386
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__FreeBSD__) && defined(i386)
# define I386
# define FREEBSD
# ifdef M68K
# define MACH_TYPE "M68K"
# define ALIGNMENT 2
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
# ifdef NETBSD
# define OS_TYPE "NETBSD"
# define HEURISTIC2
+ _stklen))
/* This may not be right. */
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# endif
# ifdef FREEBSD
# define OS_TYPE "FREEBSD"
# define MPROTECT_VDB
# ifdef BSDI
# define OS_TYPE "BSDI"
# endif
-# if defined(FREEBSD) || defined(NETBSD) \
+# if defined(OPENBSD) || defined(FREEBSD) || defined(NETBSD) \
|| defined(THREE86BSD) || defined(BSDI)
# define HEURISTIC2
extern char etext;
# define THREADS
# endif
+# if defined(HP_PA) || defined(M88K) || defined(POWERPC) \
+ || (defined(I386) && defined(OS2)) || defined(UTS4) || defined(LINT)
+ /* Use setjmp based hack to mark from callee-save registers. */
+# define USE_GENERIC_PUSH_REGS
+# endif
# if defined(SPARC) && !defined(LINUX)
# define SAVE_CALL_CHAIN
# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
void GC_init_headers()
{
- register int i;
+ register unsigned i;
GC_all_nils = (bottom_index *)GC_scratch_alloc((word)sizeof(bottom_index));
BZERO(GC_all_nils, sizeof(bottom_index));
/* than normal pause times for incremental collection. However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
+/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
/* Return after about one pages worth of */
/* work. */
GC_bool GC_mark_stack_empty();
-GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_some(/* cold_gc_frame */);
+ /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
+void GC_push_all_eager(/*b,t*/); /* Same as GC_push_all_stack, but */
+ /* ensures that stack is scanned */
+ /* immediately, not just scheduled */
+ /* for scanning. */
+#ifndef THREADS
+ void GC_push_all_stack_partially_eager(/* bottom, top, cold_gc_frame */);
+ /* Similar to GC_push_all_eager, but only the */
+ /* part hotter than cold_gc_frame is scanned */
+ /* immediately. Needed to ensure that callee- */
+ /* save registers are not missed. */
+#else
+ /* In the threads case, we push part of the current thread stack */
+ /* with GC_push_all_eager when we push the registers. This gets the */
+ /* callee-save registers that may disappear. The remainder of the */
+ /* stacks are scheduled for scanning in *GC_push_other_roots, which */
+ /* is thread-package-specific. */
+#endif
+void GC_push_current_stack(/* ptr_t cold_gc_frame */);
+ /* Push enough of the current stack eagerly to */
+ /* ensure that callee-save registers saved in */
+ /* GC frames are scanned. */
+ /* In the non-threads case, schedule entire */
+ /* stack for scanning. */
+void GC_push_roots(/* GC_bool all, ptr_t cold_gc_frame */);
+ /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
# define HP
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(m68k)
+# define M68K
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
# define NEXT
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(i386)
+# define I386
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__FreeBSD__) && defined(i386)
# define I386
# define FREEBSD
# ifdef M68K
# define MACH_TYPE "M68K"
# define ALIGNMENT 2
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
# ifdef NETBSD
# define OS_TYPE "NETBSD"
# define HEURISTIC2
+ _stklen))
/* This may not be right. */
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# endif
# ifdef FREEBSD
# define OS_TYPE "FREEBSD"
# define MPROTECT_VDB
# ifdef BSDI
# define OS_TYPE "BSDI"
# endif
-# if defined(FREEBSD) || defined(NETBSD) \
+# if defined(OPENBSD) || defined(FREEBSD) || defined(NETBSD) \
|| defined(THREE86BSD) || defined(BSDI)
# define HEURISTIC2
extern char etext;
# define THREADS
# endif
+# if defined(HP_PA) || defined(M88K) || defined(POWERPC) \
+ || (defined(I386) && defined(OS2)) || defined(UTS4) || defined(LINT)
+ /* Use setjmp based hack to mark from callee-save registers. */
+# define USE_GENERIC_PUSH_REGS
+# endif
# if defined(SPARC) && !defined(LINUX)
# define SAVE_CALL_CHAIN
# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
/* on your architecture. Run the test_setjmp program to see whether */
/* there is any chance it will work. */
+#ifndef USE_GENERIC_PUSH_REGS
void GC_push_regs()
{
# ifdef RT
# endif /* M68K/SYSV */
-# if defined(HP_PA) || defined(M88K) || defined(POWERPC) || (defined(I386) && (defined(OS2) || defined(USE_GENERIC))) || defined(UTS4)
+ /* other machines... */
+# if !(defined M68K) && !(defined VAX) && !(defined RT)
+# if !(defined SPARC) && !(defined I386) && !(defined NS32K)
+# if !defined(POWERPC) && !defined(UTS4)
+ --> bad news <--
+# endif
+# endif
+# endif
+}
+#endif /* !USE_GENERIC_PUSH_REGS */
+
+#if defined(USE_GENERIC_PUSH_REGS)
+void GC_generic_push_regs(cold_gc_frame)
+ptr_t cold_gc_frame;
+{
/* Generic code */
/* The idea is due to Parag Patel at HP. */
/* We're not sure whether he would like */
# else
(void) _setjmp(regs);
# endif
- GC_push_all_stack((ptr_t)regs, lim);
+ GC_push_current_stack(cold_gc_frame);
}
-# endif
-
- /* other machines... */
-# if !(defined M68K) && !(defined VAX) && !(defined RT)
-# if !(defined SPARC) && !(defined I386) && !(defined NS32K)
-# if !defined(HP_PA) && !defined(M88K) && !defined(POWERPC)
-# if !defined(UTS4)
- --> bad news <--
-# endif
-# endif
-# endif
-# endif
}
+#endif /* USE_GENERIC_PUSH_REGS */
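/* For illustration only (a standalone sketch, not part of the collector):  */
/* the setjmp hack relies on setjmp() typically recording the callee-save   */
/* registers in a jmp_buf, and a jmp_buf declared as a local lives in the   */
/* current stack frame, so a conservative scan of that frame also sees the  */
/* saved register contents.  scan_range is a hypothetical stand-in for the  */
/* collector's stack scan.                                                  */

    #include <setjmp.h>
    #include <stdio.h>

    static void scan_range(void *lo, void *hi)
    {
        printf("would scan %p .. %p conservatively\n", lo, hi);
    }

    static void push_regs_via_setjmp(void)
    {
        jmp_buf regs;               /* lives in this stack frame            */
        (void) setjmp(regs);        /* spills callee-save registers into it */
        scan_range((void *)&regs, (void *)(&regs + 1));
    }

    int main(void)
    {
        push_regs_via_setjmp();
        return 0;
    }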
/* On register window machines, we need a way to force registers into */
/* the stack. Return sp. */
ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
ptr_t GC_generic_malloc_words_small(lw, k)
- register size_t lw;
+ register word lw;
register int k;
#endif
{
GC_init_inner();
}
if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
- op = GC_clear_stack(GC_allocobj(lw, k));
+ op = GC_clear_stack(GC_allocobj((word)lw, k));
}
if (op == 0) {
UNLOCK();
/* Perform a small amount of marking. */
/* We try to touch roughly a page of memory. */
/* Return TRUE if we just finished a mark phase. */
-GC_bool GC_mark_some()
+/* Cold_gc_frame is an address inside a GC frame that */
+/* remains valid until all marking is complete. */
+/* A zero value indicates that it's OK to miss some */
+/* register values. */
+GC_bool GC_mark_some(cold_gc_frame)
+ptr_t cold_gc_frame;
{
switch(GC_mark_state) {
case MS_NONE:
GC_printf1("Marked from %lu dirty pages\n",
(unsigned long)GC_n_rescuing_pages);
# endif
- GC_push_roots(FALSE);
+ GC_push_roots(FALSE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
GC_mark_state = MS_ROOTS_PUSHED;
} else {
scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
if (scan_ptr == 0) {
- GC_push_roots(TRUE);
+ GC_push_roots(TRUE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
GC_mark_state = MS_ROOTS_PUSHED;
}
scan_ptr = GC_push_next_marked(scan_ptr);
if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
- GC_push_roots(TRUE);
+ GC_push_roots(TRUE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
GC_mark_state = MS_ROOTS_PUSHED;
if (GC_mark_stack_size != 0) {
if (new_stack != 0) {
word displ = (word)GC_mark_stack & (GC_page_size - 1);
- word size = GC_mark_stack_size * sizeof(struct ms_entry);
+ signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);
/* Recycle old space */
if (0 != displ) displ = GC_page_size - displ;
size = (size - displ) & ~(GC_page_size - 1);
- GC_add_to_heap((struct hblk *)
- ((word)GC_mark_stack + displ), size);
+ if (size > 0) {
+ GC_add_to_heap((struct hblk *)
+ ((word)GC_mark_stack + displ), (word)size);
+ }
GC_mark_stack = new_stack;
GC_mark_stack_size = n;
# ifdef PRINTSTATS
# undef GC_least_plausible_heap_addr
}
+#ifndef THREADS
/*
* A version of GC_push_all that treats all interior pointers as valid
* and scans part of the area immediately, to make sure that saved
* register values are not lost.
+ * Cold_gc_frame delimits the stack section that must be scanned
+ * eagerly. A zero value indicates that no eager scanning is needed.
*/
-void GC_push_all_stack(bottom, top)
+void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
+ptr_t cold_gc_frame;
{
# ifdef ALL_INTERIOR_POINTERS
# define EAGER_BYTES 1024
/* Push the hot end of the stack eagerly, so that register values */
/* saved inside GC frames are marked before they disappear. */
/* The rest of the marking can be deferred until later. */
- ptr_t mid;
+ if (0 == cold_gc_frame) {
+ GC_push_all_stack(bottom, top);
+ return;
+ }
# ifdef STACK_GROWS_DOWN
- mid = bottom + 1024;
- if (mid < top) {
- GC_push_all_eager(bottom, mid);
- GC_push_all(mid - sizeof(ptr_t), top);
- } else {
- GC_push_all_eager(bottom, top);
- }
+ GC_push_all_eager(bottom, cold_gc_frame);
+ GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
# else /* STACK_GROWS_UP */
- mid = top - 1024;
- if (mid > bottom) {
- GC_push_all_eager(mid, top);
- GC_push_all(bottom, mid + sizeof(ptr_t));
- } else {
- GC_push_all_eager(bottom, top);
- }
+ GC_push_all_eager(cold_gc_frame, top);
+ GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
# endif /* STACK_GROWS_UP */
# else
GC_push_all_eager(bottom, top);
GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
+#endif /* !THREADS */
+
+void GC_push_all_stack(bottom, top)
+ptr_t bottom;
+ptr_t top;
+{
+# ifdef ALL_INTERIOR_POINTERS
+ GC_push_all(bottom, top);
+# else
+ GC_push_all_eager(bottom, top);
+# endif
+}
#ifndef SMALL_CONFIG
/* Push all objects reachable from marked objects in the given block */
}
}
+/*
+ * In the absence of threads, push the stack contents.
+ * In the presence of threads, push enough of the current stack
+ * to ensure that callee-save registers saved in collector frames have been
+ * seen.
+ */
+void GC_push_current_stack(cold_gc_frame)
+ptr_t cold_gc_frame;
+{
+# if defined(THREADS)
+ if (0 == cold_gc_frame) return;
+# ifdef STACK_GROWS_DOWN
+ GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
+# else
+ GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
+# endif
+# else
+# ifdef STACK_GROWS_DOWN
+ GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
+ cold_gc_frame );
+# else
+ GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
+ cold_gc_frame );
+# endif
+# endif /* !THREADS */
+}
+
/*
* Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
* on groups of pointers) on every top level accessible pointer.
* If all is FALSE, arrange to push only possibly altered values.
+ * Cold_gc_frame is an address inside a GC frame that
+ * remains valid until all marking is complete.
+ * A zero value indicates that it's OK to miss some
+ * register values.
*/
-
-void GC_push_roots(all)
+void GC_push_roots(all, cold_gc_frame)
GC_bool all;
+ptr_t cold_gc_frame;
{
register int i;
* push registers - i.e., call GC_push_one(r) for each
* register contents r.
*/
+# ifdef USE_GENERIC_PUSH_REGS
+ GC_generic_push_regs(cold_gc_frame);
+# else
GC_push_regs(); /* usually defined in machine_dep.c */
+# endif
/*
* Next push static data. This must happen early on, since it's
/*
* Now traverse stacks.
*/
-# ifndef THREADS
- /* Mark everything on the stack. */
-# ifdef STACK_GROWS_DOWN
- GC_push_all_stack( GC_approx_sp(), GC_stackbottom );
-# else
- GC_push_all_stack( GC_stackbottom, GC_approx_sp() );
-# endif
+# if !defined(USE_GENERIC_PUSH_REGS)
+ GC_push_current_stack(cold_gc_frame);
+ /* In the threads case, this only pushes collector frames. */
+ /* In the USE_GENERIC_PUSH_REGS case, this is done inside */
+ /* GC_push_regs, so that we catch callee-save registers saved */
+ /* inside the GC_push_regs frame. */
# endif
if (GC_push_other_roots != 0) (*GC_push_other_roots)();
/* In the threads case, this also pushes thread stacks. */
/* make sure the former gets defined to be the latter if appropriate. */
# include <features.h>
# if 2 <= __GLIBC__
-# include <sigcontext.h>
+# if 0 == __GLIBC_MINOR__
+ /* glibc 2.1 no longer has sigcontext.h. But signal.h */
+ /* has the right declaration for glibc 2.1. */
+# include <sigcontext.h>
+# endif /* 0 == __GLIBC_MINOR__ */
# else /* not 2 <= __GLIBC__ */
/* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
/* one. Check LINUX_VERSION_CODE to see which we should reference. */
(GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
#define GENERAL_MALLOC_IOP(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc_ignore_off_page((word)lb, k))
+ (GC_PTR)GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
#if defined(__STDC__) || defined(__cplusplus)
void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
- op = (ptr_t)GENERAL_MALLOC_IOP((word)lb, GC_explicit_kind);
+ op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
# ifdef MERGE_SIZES
lw = GC_size_map[lb]; /* May have been uninitialized. */
# endif
FASTUNLOCK();
}
} else {
- op = (ptr_t)GENERAL_MALLOC_IOP((word)lb, GC_explicit_kind);
+ op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
if (op != NULL)
lw = BYTES_TO_WORDS(GC_size(op));
}
#define GC_VERSION_MAJOR 4
#define GC_VERSION_MINOR 14
-#define GC_ALPHA_VERSION 1
+#define GC_ALPHA_VERSION 2
# define GC_NOT_ALPHA 0xff
if (thread_table[i].context.Esp >= (DWORD)thread_table[i].stack
|| thread_table[i].context.Esp < (DWORD)bottom)
ABORT("Thread stack pointer out of range");
+ GC_push_one ((word) thread_table[i].context.Edi);
+ GC_push_one ((word) thread_table[i].context.Esi);
+ GC_push_one ((word) thread_table[i].context.Ebx);
+ GC_push_one ((word) thread_table[i].context.Edx);
+ GC_push_one ((word) thread_table[i].context.Ecx);
+ GC_push_one ((word) thread_table[i].context.Eax);
GC_push_all_stack(thread_table[i].context.Esp, thread_table[i].stack);
}
}