--- /dev/null
+# Primary targets:
+# gc.a - builds basic library
+# c++ - adds C++ interface to library and include directory
+# cords - adds cords (heavyweight strings) to library and include directory
+# test - prints porting information, then builds basic version of gc.a, and runs
+# some tests of collector and cords. Does not add cords or c++ interface to gc.a
+# cord/de - builds dumb editor based on cords.
+CC= cc
+CXX=g++
+# Needed only for "make c++", which adds the c++ interface
+
+CFLAGS= -O -DALL_INTERIOR_POINTERS -DSILENT
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
+# -DSILENT disables statistics printing, and improves performance.
+# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+# altered stubborn objects, at substantial performance cost.
+# -DFIND_LEAK causes the collector to assume that all inaccessible
+# objects should have been explicitly deallocated, and reports exceptions
+# -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
+# (Clients should also define SOLARIS_THREADS and then include
+# gc.h before performing thr_ or GC_ operations.)
+# -DALL_INTERIOR_POINTERS allows all pointers to the interior
+# of objects to be recognized. (See gc_private.h for consequences.)
+# -DSMALL_CONFIG tries to tune the collector for small heap sizes,
+# usually causing it to use less space in such situations.
+# Incremental collection no longer works in this case.
+# -DDONT_ADD_BYTE_AT_END is meaningful only with
+# -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
+# causes all objects to be padded so that pointers just past the end of
+# an object can be recognized. This can be expensive. (The padding
+# is normally more than one byte due to alignment constraints.)
+# -DDONT_ADD_BYTE_AT_END disables the padding.
+
+AR= ar
+RANLIB= ranlib
+
+
+# Redefining srcdir allows object code for the nonPCR version of the collector
+# to be generated in different directories
+srcdir = .
+VPATH = $(srcdir)
+
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dyn_load.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o typd_mlc.o
+
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c typd_mlc.c
+
+CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC
+
+CORD_OBJS= cord/cordbscs.o cord/cordxtra.o cord/cordprnt.o
+
+SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s sparc_mach_dep.s gc.h gc_typed.h gc_hdrs.h gc_priv.h gc_private.h config.h gc_mark.h gc_inl.h gc_inline.h gc.man if_mach.c if_not_there.c gc_c++.cc gc_c++.h $(CORD_SRCS)
+
+OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE \
+ README test.c setjmp_t.c SMakefile.amiga SCoptions.amiga \
+ README.amiga README.win32 cord/README include/gc.h \
+ include/gc_typed.h README.QUICK callprocs pc_excludes \
+ barrett_diagram README.OS2
+
+CORD_INCLUDE_FILES= $(srcdir)/gc.h $(srcdir)/cord/cord.h $(srcdir)/cord/ec.h \
+ $(srcdir)/cord/cord_pos.h
+
+# Libraries needed for curses applications. Only needed for de.
+CURSES= -lcurses -ltermlib
+
+# The following is irrelevant on most systems. But a few
+# versions of make otherwise fork the shell specified in
+# the SHELL environment variable.
+SHELL= /bin/sh
+
+SPECIALCFLAGS =
+# Alternative flags to the C compiler for mach_dep.c.
+# Mach_dep.c often doesn't like optimization, and it's
+# not time-critical anyway.
+# Set SPECIALCFLAGS to -q nodirect_code on Encore.
+
+ALPHACFLAGS = -non_shared
+# Extra flags for compiling and linking on a DEC Alpha
+
+all: gc.a gctest
+
+pcr: PCR-Makefile gc_private.h gc_hdrs.h gc.h config.h mach_dep.o $(SRCS)
+ make -f PCR-Makefile depend
+ make -f PCR-Makefile
+
+$(OBJS) test.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
+ $(srcdir)/config.h $(srcdir)/gc_typed.h Makefile
+# The dependency on Makefile is needed. Changing
+# options such as -DSILENT affects the size of GC_arrays,
+# invalidating all .o files that rely on gc_priv.h
+
+mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
+
+gc.a: $(OBJS)
+ $(AR) ru gc.a $(OBJS)
+ $(RANLIB) gc.a || cat /dev/null
+# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
+
+cords: $(CORD_OBJS) cord/cordtest
+ $(AR) ru gc.a $(CORD_OBJS)
+ $(RANLIB) gc.a || cat /dev/null
+ cp $(srcdir)/cord/cord.h include/cord.h
+ cp $(srcdir)/cord/ec.h include/ec.h
+ cp $(srcdir)/cord/cord_pos.h include/cord_pos.h
+
+gc_c++.o: $(srcdir)/gc_c++.cc $(srcdir)/gc_c++.h
+ $(CXX) -c -O $(srcdir)/gc_c++.cc
+
+c++: gc_c++.o $(srcdir)/gc_c++.h
+ $(AR) ru gc.a gc_c++.o
+ $(RANLIB) gc.a || cat /dev/null
+ cp $(srcdir)/gc_c++.h include/gc_c++.h
+
+mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_mach_dep.s $(srcdir)/rs6000_mach_dep.s if_mach if_not_there
+ rm -f mach_dep.o
+ ./if_mach MIPS "" as -o mach_dep.o $(srcdir)/mips_mach_dep.s
+ ./if_mach RS6000 "" as -o mach_dep.o $(srcdir)/rs6000_mach_dep.s
+ ./if_mach ALPHA "" as -o mach_dep.o $(srcdir)/alpha_mach_dep.s
+ ./if_mach SPARC SUNOS5 as -o mach_dep.o $(srcdir)/sparc_mach_dep.s
+ ./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
+
+mark_rts.o: $(srcdir)/mark_rts.c if_mach if_not_there
+ rm -f mark_rts.o
+ ./if_mach ALPHA "" $(CC) -c $(CFLAGS) -Wo,-notail $(srcdir)/mark_rts.c
+ ./if_not_there mark_rts.o $(CC) -c $(CFLAGS) $(srcdir)/mark_rts.c
+# work-around for DEC optimizer tail recursion elimination bug
+
+cord/cordbscs.o: $(srcdir)/cord/cordbscs.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordbscs.c
+ mv cordbscs.o cord/cordbscs.o
+# not all compilers understand -o filename
+
+cord/cordxtra.o: $(srcdir)/cord/cordxtra.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordxtra.c
+ mv cordxtra.o cord/cordxtra.o
+
+cord/cordprnt.o: $(srcdir)/cord/cordprnt.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordprnt.c
+ mv cordprnt.o cord/cordprnt.o
+
+cord/cordtest: $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
+ rm -f cord/cordtest
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -lthread
+ ./if_not_there cord/cordtest $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
+
+cord/de: $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a
+ rm -f cord/de
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a $(CURSES) -lthread
+ ./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a -lcurses
+ ./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a $(CURSES)
+
+if_mach: $(srcdir)/if_mach.c $(srcdir)/config.h
+ $(CC) $(CFLAGS) -o if_mach $(srcdir)/if_mach.c
+
+if_not_there: $(srcdir)/if_not_there.c
+ $(CC) $(CFLAGS) -o if_not_there $(srcdir)/if_not_there.c
+
+clean:
+ rm -f gc.a test.o gctest output-local output-diff $(OBJS) \
+ setjmp_test mon.out gmon.out a.out core if_not_there if_mach \
+ $(CORD_OBJS) cord/cordtest cord/de
+ -rm -f *~
+
+gctest: test.o gc.a if_mach if_not_there
+ rm -f gctest
+ ./if_mach ALPHA "" $(CC) $(CFLAGS) -o gctest $(ALPHACFLAGS) test.o gc.a
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o gctest test.o gc.a -lthread
+ ./if_not_there gctest $(CC) $(CFLAGS) -o gctest test.o gc.a
+
+# If an optimized setjmp_test generates a segmentation fault,
+# odds are your compiler is broken. Gctest may still work.
+# Try compiling setjmp_t.c unoptimized.
+setjmp_test: $(srcdir)/setjmp_t.c $(srcdir)/gc.h if_mach if_not_there
+ rm -f setjmp_test
+ ./if_mach ALPHA "" $(CC) $(CFLAGS) -o setjmp_test $(ALPHACFLAGS) $(srcdir)/setjmp_t.c
+ ./if_not_there setjmp_test $(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
+
+test: setjmp_test gctest
+ ./setjmp_test
+ ./gctest
+ make cord/cordtest
+ cord/cordtest
+
+gc.tar: $(SRCS) $(OTHER_FILES)
+ tar cvf gc.tar $(SRCS) $(OTHER_FILES)
+
+pc_gc.tar: $(SRCS) $(OTHER_FILES)
+ tar cvfX pc_gc.tar pc_excludes $(SRCS) $(OTHER_FILES)
+
+floppy: pc_gc.tar
+ -mmd a:/cord
+ -mmd a:/include
+ mkdir /tmp/pc_gc
+ cat pc_gc.tar | (cd /tmp/pc_gc; tar xvf -)
+ -mcopy -tmn /tmp/pc_gc/* a:
+ -mcopy -tmn /tmp/pc_gc/cord/* a:/cord
+ -mcopy -mn /tmp/pc_gc/cord/de_win.ICO a:/cord
+ -mcopy -tmn /tmp/pc_gc/include/* a:/include
+ rm -r /tmp/pc_gc
+
+gc.tar.Z: gc.tar
+ compress gc.tar
+
+lint: $(CSRCS) test.c
+ lint -DLINT $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit|sbrk|mprotect|syscall"
--- /dev/null
+# Makefile for Windows NT. Assumes Microsoft compiler, and a single thread.
+# DLLs are included in the root set under NT, but not under win32S.
+# Use "nmake nodebug=1 all" for optimized versions of library, gctest and editor.
+
+!include <ntwin32.mak>
+
+# We also haven't figured out how to do partial links or build static libraries. Hence a
+# client currently needs to link against all of the following:
+
+OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj dyn_load.obj typd_mlc.obj
+
+all: gctest.exe cord\de.exe
+
+.c.obj:
+ $(cc) $(cdebug) $(cflags) $(cvars) -DSMALL_CONFIG -DSILENT -DALL_INTERIOR_POINTERS $*.c /Fo$*.obj
+
+$(OBJS) test.obj: gc_priv.h gc_hdrs.h gc.h
+
+gc.lib: $(OBJS)
+ lib32 /MACHINE:i386 /out:gc.lib $(OBJS)
+
+gctest.exe: test.obj gc.lib
+# The following works for win32 debugging. For win32s debugging use debugtype:coff
+# and add mapsympe line.
+ $(link) -debug:full -debugtype:cv $(guiflags) -stack:131072 -out:$*.exe test.obj $(conlibs) gc.lib
+# mapsympe -n -o gctest.sym gctest.exe
+
+cord\de_win.rbj: cord\de_win.res
+ cvtres -$(CPU) cord\de_win.res -o cord\de_win.rbj
+
+cord\de.obj cord\de_win.obj: cord\cord.h cord\cord_pos.h cord\de_win.h cord\de_cmds.h
+
+cord\de_win.res: cord\de_win.rc cord\de_win.h cord\de_cmds.h
+ $(rc) $(rcvars) -r -fo cord\de_win.res $(cvars) cord\de_win.rc
+
+cord\de.exe: cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj cord\de_win.rbj gc.lib
+ $(link) -debug:full -debugtype:cv $(guiflags) -stack:16384 -out:cord\de.exe cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj cord\de_win.rbj gc.lib $(guilibs)
\ No newline at end of file
--- /dev/null
+# Makefile for OS/2. Assumes IBM's compiler, static linking, and a single thread.
+# Adding dynamic linking support seems easy, but takes a little bit of work.
+# Adding thread support may be nontrivial, since we haven't yet figured out how to
+# look at another thread's registers.
+
+# We also haven't figured out how to do partial links or build static libraries. Hence a
+# client currently needs to link against all of the following:
+
+OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj typd_mlc.obj
+
+CORDOBJS= cord\cordbscs.obj cord\cordxtra.obj cord\cordprnt.obj
+
+CC= icc
+CFLAGS= /O /Q /DSILENT /DSMALL_CONFIG /DALL_INTERIOR_POINTERS
+# Use /Ti instead of /O for debugging
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
+
+all: $(OBJS) gctest.exe cord\cordtest.exe
+
+$(OBJS) test.obj: gc_priv.h gc_hdrs.h gc.h
+
+mach_dep.obj: mach_dep.c
+ $(CC) $(CFLAGS) /C mach_dep.c
+
+gctest.exe: test.obj $(OBJS)
+ $(CC) $(CFLAGS) /B"/STACK:524288" /Fegctest test.obj $(OBJS)
+
+cord\cordbscs.obj: cord\cordbscs.c cord\cord.h cord\cord_pos.h
+ $(CC) $(CFLAGS) /C /Focord\cordbscs cord\cordbscs.c
+
+cord\cordxtra.obj: cord\cordxtra.c cord\cord.h cord\cord_pos.h cord\ec.h
+ $(CC) $(CFLAGS) /C /Focord\cordxtra cord\cordxtra.c
+
+cord\cordprnt.obj: cord\cordprnt.c cord\cord.h cord\cord_pos.h cord\ec.h
+ $(CC) $(CFLAGS) /C /Focord\cordprnt cord\cordprnt.c
+
+cord\cordtest.exe: cord\cordtest.c cord\cord.h cord\cord_pos.h cord\ec.h $(CORDOBJS)
+ $(CC) $(CFLAGS) /B"/STACK:65536" /Fecord\cordtest cord\cordtest.c $(OBJS) $(CORDOBJS)
\ No newline at end of file
--- /dev/null
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o pcr_interface.o blacklst.o finalize.o new_hblk.o real_malloc.o dyn_load.o dbg_mlc.o malloc.o stubborn.o
+
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c
+
+SHELL= /bin/sh
+
+# Fix to point to local pcr installation directory.
+PCRDIR= /project/ppcr/dev
+CC= gcc
+CFLAGS= -g -DPCR -I$(PCRDIR) -I$(PCRDIR)/ansi -I$(PCRDIR)/posix
+
+# We assume that mach_dep.o has already been built by top level makefile. It doesn't
+# care about pcr vs UNIX, and we don't want to repeat that cruft.
+
+default: gc.o
+
+all: gc.o test.o gcpcr
+
+gcpcr: gc.o test.o $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o
+ $(CC) -o gcpcr $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o gc.o test.o -ldl
+
+gc.o: $(OBJS)
+ -ld -r -o gc.o $(OBJS)
+
+#
+# Dependency construction
+#
+# NOTE: the makefile must include "# DO NOT DELETE THIS LINE" after the
+# last target. "make depend" will replace everything following that line
+# by a newly-constructed list of dependencies.
+#
+depend: $(CSRCS)
+ rm -f makedep eddep ; \
+ $(CC) -M $(CFLAGS) $(CSRCS) \
+ | sed -e '/:$$/d' > makedep ; \
+ echo '/^# DO NOT DELETE THIS LINE/+1,$$d' >eddep ; \
+ echo '$$r makedep' >>eddep ; \
+ echo 'w' >>eddep ; \
+ cp PCR-Makefile PCR-Makefile.bak ; \
+ ex - PCR-Makefile < eddep ; \
+ rm -f eddep makedep
+ touch depend
+
+# DO NOT DELETE THIS LINE
+
+
--- /dev/null
+Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program
+for any purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is granted,
+provided the above notices are retained, and a notice that the code was
+modified is included with the above copyright notice.
+
+This is version 4.1 of a conservative garbage collector for C and C++.
+
+HISTORY -
+
+ Early versions of this collector were developed as a part of research
+projects supported in part by the National Science Foundation
+and the Defense Advanced Research Projects Agency.
+Much of the code was rewritten by Hans-J. Boehm at Xerox PARC.
+The SPARC specific code was contributed by Mark Weiser
+(weiser@parc.xerox.com). The Encore Multimax modifications were supplied by
+Kevin Kenny (kenny@m.cs.uiuc.edu). The adaptation to the RT is largely due
+to Vernon Lee (scorpion@rice.edu), on machines made available by IBM.
+Much of the HP specific code and a number of good suggestions for improving the
+generic code are due to Walter Underwood (wunder@hp-ses.sde.hp.com).
+Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
+Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
+subsequently provided updates and information on variation between ULTRIX
+systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
+Jesper Peterson (jep@mtiame.mtia.oz.au) supplied the Amiga port.
+Thomas Funke (thf@zelator.in-berlin.de(?)) supplied the NeXT port.
+Bill Janssen (janssen@parc.xerox.com) supplied the SunOS dynamic loader
+specific code. Manuel Serrano (serrano@cornas.inria.fr) supplied linux and
+Sony News specific code. Al Dosser provided Alpha/OSF/1 code. He and
+Dave Detlefs(detlefs@src.dec.com) also provided several generic bug fixes.
+Alistair G. Crooks (agc@uts.amdahl.com) supplied the NetBSD and 386BSD ports.
+Jeffrey Hsu (hsu@soda.berkeley.edu) provided the FreeBSD port.
+Brent Benson (brent@jade.ssd.csd.harris.com) ported the collector to
+a Motorola 88K processor running CX/UX (Harris NightHawk).
+Ari Huttunen (Ari.Huttunen@hut.fi) generalized the OS/2 port to
+nonIBM development environments (a nontrivial task).
+David Chase, then at Olivetti Research, suggested several improvements.
+Scott Schwartz (schwartz@groucho.cse.psu.edu) supplied some of the
+code to save and print call stacks for leak detection on a SPARC.
+Jesse Hull and John Ellis supplied the C++ interface code.
+Zhong Shao performed much of the experimentation that led to the
+current typed allocation facility. (His dynamic type inference code hasn't
+made it into the released version of the collector, yet.)
+(Blame for misinstallation of these modifications goes to the first author,
+however.)
+
+ This is intended to be a general purpose, garbage collecting storage
+allocator. The algorithms used are described in:
+
+Boehm, H., and M. Weiser, "Garbage Collection in an Uncooperative Environment",
+Software Practice & Experience, September 1988, pp. 807-820.
+
+Boehm, H., A. Demers, and S. Shenker, "Mostly Parallel Garbage Collection",
+Proceedings of the ACM SIGPLAN '91 Conference on Programming Language Design
+and Implementation, SIGPLAN Notices 26, 6 (June 1991), pp. 157-164.
+
+Boehm, H., "Space Efficient Conservative Garbage Collection", Proceedings
+of the ACM SIGPLAN '91 Conference on Programming Language Design and
+Implementation, SIGPLAN Notices 28, 6 (June 1993), pp. 197-206.
+
+ Unlike the collector described in the second reference, this collector
+operates either with the mutator stopped during the entire collection
+(default) or incrementally during allocations. (The latter is supported
+on only a few machines.) It does not rely on threads, but is intended
+to be thread-safe.
+
+ Some of the ideas underlying the collector have previously been explored
+by others. (Doug McIlroy wrote a vaguely similar collector that is part of
+version 8 UNIX (tm).) However none of this work appears to have been widely
+disseminated.
+
+ Rudimentary tools for use of the collector as a leak detector are included, as
+is a fairly sophisticated string package "cord" that makes use of the collector.
+(See cord/README.)
+
+
+GENERAL DESCRIPTION
+
+ This is a garbage collecting storage allocator that is intended to be
+used as a plug-in replacement for C's malloc.
+
+ Since the collector does not require pointers to be tagged, it does not
+attempt to ensure that all inaccessible storage is reclaimed. However,
+in our experience, it is typically more successful at reclaiming unused
+memory than most C programs using explicit deallocation. Unlike manually
+introduced leaks, the amount of unreclaimed memory typically stays
+bounded.
+
+ In the following, an "object" is defined to be a region of memory allocated
+by the routines described below.
+
+ Any objects not intended to be collected must be pointed to either
+from other such accessible objects, or from the registers,
+stack, data, or statically allocated bss segments. Pointers from
+the stack or registers may point to anywhere inside an object.
+However, it is usually assumed that all pointers originating in the
+heap point to the beginning of an object. (This does
+not disallow interior pointers; it simply requires that there must be a
+pointer to the beginning of every accessible object, in addition to any
+interior pointers.) There are two facilities for altering this behavior.
+The macro ALL_INTERIOR_POINTERS may be defined in gc_private.h to
+cause any pointer into an object (or one past the end) to retain the
+object. A routine GC_register_displacement is provided to allow for
+more controlled interior pointer use in the heap. Defining
+ALL_INTERIOR_POINTERS is somewhat dangerous, in that it can result
+in unnecessary memory retention. However, this is much less of a
+problem than with older collector versions. The routine
+GC_register_displacement is described in gc.h.
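+
+ As an illustration, here is a minimal sketch (the 4 byte offset and
+the variable names are invented for this example) of a client that
+keeps pointers just past a one-word header:
+
+#include "gc.h"
+
+int main()
+{
+    char *base, *inner;
+
+    GC_register_displacement(4); /* best done before any allocation  */
+    base = (char *)GC_malloc(100);
+    inner = base + 4;   /* inner alone now keeps the object live     */
+    base = 0;
+    GC_gcollect();      /* object retained via the interior pointer  */
+    return 0;
+}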
+
+ Note that pointers inside memory allocated by the standard "malloc" are not
+seen by the garbage collector. Thus objects pointed to only from such a
+region may be prematurely deallocated. It is thus suggested that the
+standard "malloc" be used only for memory regions, such as I/O buffers, that
+are guaranteed not to contain pointers. Pointers in C language automatic,
+static, or register variables, are correctly recognized. (Note that
+GC_malloc_uncollectable has semantics similar to standard malloc,
+but allocates objects that are traced by the collector.)
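+
+ To illustrate both the hazard and the safe idiom (a sketch only; the
+names here are invented):
+
+#include <stdlib.h>
+#include "gc.h"
+
+char **hidden;
+
+void hazardous(void)
+{
+    hidden = malloc(sizeof(char *));         /* not scanned by the GC */
+    *hidden = (char *)GC_malloc(100); /* may be prematurely reclaimed */
+}
+
+void safe(void)
+{
+    hidden = (char **)GC_malloc_uncollectable(sizeof(char *));
+    *hidden = (char *)GC_malloc(100); /* traced, so the target stays  */
+}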
+
+ The collector does not generally know how to find pointers in data
+areas that are associated with dynamic libraries. This is easy to
+remedy IF you know how to find those data areas on your operating
+system (see GC_add_roots). Code for doing this under SunOS and IRIX 5.X is
+included (see dyn_load.c).
+
+ Note that the garbage collector does not need to be informed of shared
+read-only data. However if the shared library mechanism can introduce
+discontiguous data areas that may contain pointers, then the collector does
+need to be informed.
+
+ Signal processing for most signals is normally deferred during collection,
+and during uninterruptible parts of the allocation process. Unlike
+standard ANSI C mallocs, it is intended to be safe to invoke malloc
+from a signal handler while another malloc is in progress, provided
+the original malloc is not restarted. (Empirically, many UNIX
+applications already assume this.) Even this modest level of signal-
+safety may be too expensive on some systems. If so, ENABLE_SIGNALS
+and DISABLE_SIGNALS may be redefined to the empty statement in gc_private.h.
+
+ The allocator/collector can also be configured for thread-safe operation.
+(Full signal safety can also be achieved, but only at the cost of two system
+calls per malloc, which is usually unacceptable.)
+
+INSTALLATION AND PORTABILITY
+
+ As distributed, the macro SILENT is defined in Makefile.
+In the event of problems, this can be removed to obtain a moderate
+amount of descriptive output for each collection.
+(The given statistics exhibit a few peculiarities.
+Things don't appear to add up for a variety of reasons, most notably
+fragmentation losses. These are probably much more significant for the
+contrived program "test.c" than for your application.)
+
+ Note that typing "make test" will automatically build the collector
+and then run setjmp_test and gctest. Setjmp_test will give you information
+about configuring the collector, which is useful primarily if you have
+a machine that's not already supported. Gctest is a somewhat superficial
+test of collector functionality. Failure is indicated by a core dump or
+a message to the effect that the collector is broken. Gctest takes about
+35 seconds to run on a SPARCstation 2. On a slower machine,
+expect it to take a while. It may use up to 8 MB of memory. (The
+multi-threaded version will use more.) "Make test" will also, as
+its last step, attempt to build and test the "cord" string library.
+This will fail without an ANSI C compiler.
+
+ The Makefile will generate a library gc.a which you should link against.
+Typing "make cords" will add the cord library to gc.a.
+Note that this requires an ANSI C compiler.
+
+ It is suggested that if you need to replace a piece of the collector
+(e.g. mark_rts.c) you simply list your version ahead of gc.a on the
+ld command line, rather than replacing the one in gc.a. (This will
+generate numerous warnings under some versions of AIX, but it still
+works.)
+
+ All include files that need to be used by clients will be put in the
+include subdirectory. (Normally this is just gc.h. "Make cords" adds
+"cord.h" and "ec.h".)
+
+ The collector currently is designed to run essentially unmodified on
+the following machines (most of the operating systems mentioned are
+trademarks of their respective holders):
+
+ Sun 3
+ Sun 4 under SunOS 4.X or Solaris2.X (with or without threads)
+ Vax under 4.3BSD, Ultrix
+ Intel 386 or 486 under many operating systems, but not MSDOS.
+ (Win32S is somewhat supported, so it is possible to
+ build applications for Windows 3.1)
+ Sequent Symmetry (single threaded)
+ Encore Multimax (single threaded)
+ MIPS M/120 (and presumably M/2000) (RISC/os 4.0 with BSD libraries)
+ IBM PC/RT (Berkeley UNIX)
+ IBM RS/6000
+ HP9000/300
+ HP9000/700
+ DECstations under Ultrix
+ DEC Alpha running OSF/1
+ SGI workstations under IRIX
+ Sony News
+ Apple Macintosh under A/UX
+ Commodore Amiga (see README.amiga)
+ NeXT machines
+
+ In a few cases (Amiga, OS/2, Win32) a separate makefile is supplied.
+
+ Dynamic libraries are completely supported only under SunOS
+(and even that support is not functional on the last Sun 3 release),
+IRIX 5, Win32 (not Win32S) and OSF/1 on DEC AXP machines.
+On other machines we recommend that you do one of the following:
+
+ 1) Add dynamic library support (and send us the code).
+ 2) Use static versions of the libraries.
+ 3) Arrange for dynamic libraries to use the standard malloc.
+ This is still dangerous if the library stores a pointer to a
+ garbage collected object. But nearly all standard interfaces
+ prohibit this, because they deal correctly with pointers
+ to stack allocated objects. (Strtok is an exception. Don't
+ use it.)
+
+ In all cases we assume that pointer alignment is consistent with that
+enforced by the standard C compilers. If you use a nonstandard compiler
+you may have to adjust the alignment parameters defined in gc_private.h.
+
+ A port to a machine that is not byte addressed, or does not use 32 bit
+addresses will require a major effort. (Parts of the code try to anticipate
+64 bit addresses. Others will need to be rewritten, since different data
+structures are needed.) A port to MSDOS is hopeless, unless you are willing
+to assume an 80386 or better, and that only flat 32 bit pointers will ever be
+used.
+
+ For machines not already mentioned, or for nonstandard compilers, the
+following are likely to require change:
+
+1. The parameters at the top of gc_private.h.
+ The parameters that will usually require adjustment are
+ STACKBOTTOM, ALIGNMENT and DATASTART. Setjmp_test
+ prints its guesses of the first two.
+ DATASTART should be an expression for computing the
+ address of the beginning of the data segment. This can often be
+ &etext. But some memory management units require that there be
+ some unmapped space between the text and the data segment. Thus
+ it may be more complicated. On UNIX systems, this is rarely
+ documented. But the adb "$m" command may be helpful. (Note
+ that DATASTART will usually be a function of &etext. Thus a
+ single experiment is usually insufficient.)
+ STACKBOTTOM is used to initialize GC_stackbottom, which
+ should be a sufficient approximation to the coldest stack address.
+ On some machines, it is difficult to obtain such a value that is
+ valid across a variety of MMUs, OS releases, etc. A number of
+ alternatives exist for using the collector in spite of this. See the
+ discussion in config.h immediately preceding the various
+ definitions of STACKBOTTOM.
+
+2. mach_dep.c.
+ The most important routine here is one to mark from registers.
+ The distributed file includes a generic hack (based on setjmp) that
+ happens to work on many machines, and may work on yours. Try
+ compiling and running setjmp_test.c to see whether it has a chance of
+ working. (This is not correct C, so don't blame your compiler if it
+ doesn't work. Based on limited experience, register window machines
+ are likely to cause trouble. If your version of setjmp claims that
+ all accessible variables, including registers, have the value they
+ had at the time of the longjmp, it also will not work. Vanilla 4.2 BSD
+ makes such a claim. SunOS does not.)
+ If your compiler does not allow in-line assembly code, or if you prefer
+ not to use such a facility, mach_dep.c may be replaced by a .s file
+ (as we did for the MIPS machine and the PC/RT).
+
+3. mark_rts.c.
+ These are the top level mark routines that determine which sections
+ of memory the collector should mark from. This is normally not
+ architecture specific (aside from the macros defined in gc_private.h and
+ referenced here), but it can be programming language and compiler
+ specific. The supplied routine should work for most C compilers
+ running under UNIX. Calls to GC_add_roots may sometimes be used
+ for similar effect.
+
+4. The sigsetmask call does not appear to exist under early system V UNIX.
+ It is used by the collector to block and unblock signals at times at
+ which an asynchronous allocation inside a signal handler could not
+ be tolerated. Under system V, it is possible to remove these calls,
+ provided no storage allocation is done by signal handlers. The
+ alternative is to issue a sequence of system V system calls, one per
+ signal that is actually used. This may be a bit slow.
+
+ For different versions of Berkeley UN*X or different machines using the
+Motorola 68000, Vax, SPARC, 80386, NS 32000, PC/RT, or MIPS architecture,
+it should frequently suffice to change definitions in gc_private.h.
+
+
+THE C INTERFACE TO THE ALLOCATOR
+
+ The following routines are intended to be directly called by the user.
+Note that usually only GC_malloc is necessary. GC_clear_roots and GC_add_roots
+calls may be required if the collector has to trace from nonstandard places
+(e.g. from dynamic library data areas on a machine on which the
+collector doesn't already understand them.) On some machines, it may
+be desirable to set GC_stackbottom to a good approximation of the stack base.
+(This enhances code portability on HP PA machines, since there is no
+good way for the collector to compute this value.) Client code may include
+"gc.h", which defines all of the following, plus a few others.
+
+1) GC_malloc(nbytes)
+ - allocate an object of size nbytes. Unlike malloc, the object is
+ cleared before being returned to the user. Gc_malloc will
+ invoke the garbage collector when it determines this to be appropriate.
+ GC_malloc may return 0 if it is unable to acquire sufficient
+ space from the operating system. This is the most probable
+ consequence of running out of space. Other possible consequences
+ are that a function call will fail due to lack of stack space,
+ or that the collector will fail in other ways because it cannot
+ maintain its internal data structures, or that a crucial system
+ process will fail and take down the machine. Most of these
+ possibilities are independent of the malloc implementation.
+
+2) GC_malloc_atomic(nbytes)
+ - allocate an object of size nbytes that is guaranteed not to contain any
+ pointers. The returned object is not guaranteed to be cleared.
+ (Can always be replaced by GC_malloc, but results in faster collection
+ times. The collector will probably run faster if large character
+ arrays, etc. are allocated with GC_malloc_atomic than if they are
+ statically allocated.)
+
+3) GC_realloc(object, new_size)
+ - change the size of object to be new_size. Returns a pointer to the
+ new object, which may, or may not, be the same as the pointer to
+ the old object. The new object is taken to be atomic iff the old one
+ was. If the new object is composite and larger than the original object,
+ then the newly added bytes are cleared (we hope). This is very likely
+ to allocate a new object, unless MERGE_SIZES is defined in gc_private.h.
+ Even then, it is likely to recycle the old object only if the object
+ is grown in small additive increments (which, we claim, is generally bad
+ coding practice.)
+
+4) GC_free(object)
+ - explicitly deallocate an object returned by GC_malloc or
+ GC_malloc_atomic. Not necessary, but can be used to minimize
+ collections if performance is critical.
+
+5) GC_expand_hp(number_of_bytes)
+ - Explicitly increase the heap size. (This is normally done automatically
+ if a garbage collection failed to reclaim enough memory. Explicit
+ calls to GC_expand_hp may prevent unnecessarily frequent collections at
+ program startup.)
+
+6) GC_clear_roots()
+ - Reset the collector's idea of where static variables containing pointers
+ may be located to the empty set of locations. No statically allocated
+ variables will be traced from after this call, unless there are
+ intervening GC_add_roots calls. The collector will still trace from
+ registers and the program stack.
+
+7) GC_add_roots(low_address, high_address_plus_1)
+ - Add [low_address, high_address) as an area that may contain root pointers
+ and should be traced by the collector. The static data and bss segments
+ are considered by default, and should not be added unless GC_clear_roots
+ has been called. The number of root areas is currently limited to 50.
+ This is intended as a way to register data areas for dynamic libraries,
+ or to replace the entire data and bss segments by smaller areas that are
+ known to contain all the roots.
+
+8) Several routines to allow for registration of finalization code.
+ User supplied finalization code may be invoked when an object becomes
+ unreachable. To call (*f)(obj, x) when obj becomes inaccessible, use
+ GC_register_finalizer(obj, f, x, 0, 0);
+ For more sophisticated uses, and for finalization ordering issues,
+ see gc.h.
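+
+ The following sketch ties together items 1, 3 and 8 above. (The
+finalizer's argument types here follow the (*f)(obj, x) convention
+described in item 8; the exact declarations are in gc.h.)
+
+#include <stdio.h>
+#include "gc.h"
+
+void note_death(void *obj, void *client_data)
+{
+    fprintf(stderr, "finalized: %s\n", (char *)client_data);
+}
+
+int main()
+{
+    int *a = (int *)GC_malloc(10 * sizeof(int)); /* cleared, collectable */
+
+    a = (int *)GC_realloc(a, 20 * sizeof(int));  /* may move the object  */
+    GC_register_finalizer(a, note_death, "a", 0, 0);
+    a = 0;                                       /* drop the last pointer */
+    GC_gcollect();    /* a subsequent collection will detect that the
+                         object is inaccessible and run the finalizer   */
+    return 0;
+}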
+
+ The global variable GC_free_space_divisor may be adjusted up from its
+default value of 4 to use less space and more collection time, or down for
+the opposite effect. Setting it to 1 or 0 will effectively disable collections
+and cause all allocations to simply grow the heap.
+
+ The variable GC_non_gc_bytes, which is normally 0, may be changed to reflect
+the amount of memory allocated by the above routines that should not be
+considered as a candidate for collection. Careless use may, of course, result
+in excessive memory consumption.
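+
+ For instance, a client that prefers a compact heap might do the
+following (a sketch; the particular values are arbitrary):
+
+#include "gc.h"
+
+void tune(void)
+{
+    GC_free_space_divisor = 8;  /* up from 4: less space, more GC time  */
+    GC_non_gc_bytes += 100000;  /* roughly 100 KB of our allocations
+                                   should not be counted as candidates
+                                   for collection                       */
+}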
+
+ Some additional tuning is possible through the parameters defined
+near the top of gc_private.h.
+
+ If only GC_malloc is intended to be used, it might be appropriate to define:
+
+#define malloc(n) GC_malloc(n)
+#define calloc(m,n) GC_malloc((m)*(n))
+
+ For small pieces of VERY allocation intensive code, gc_inl.h
+includes some allocation macros that may be used in place of GC_malloc
+and friends.
+
+ All externally visible names in the garbage collector start with "GC_".
+To avoid name conflicts, client code should avoid this prefix, except when
+accessing garbage collector routines or variables.
+
+ There are provisions for allocation with explicit type information.
+This is rarely necessary. Details can be found in gc_typed.h.
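+
+ For example, a sketch using the descriptor-based interface declared
+in gc_typed.h (the structure here is invented):
+
+#include "gc_typed.h"
+
+struct pair { struct pair *next; GC_word value; };
+
+struct pair *alloc_pair(void)
+{
+    static GC_descr d = 0;
+
+    if (d == 0) {
+        GC_word bitmap = 0;
+
+        GC_set_bit(&bitmap, 0);    /* only word 0 may hold a pointer */
+        d = GC_make_descriptor(&bitmap, 2); /* the layout is 2 words */
+    }
+    return (struct pair *)
+        GC_malloc_explicitly_typed(sizeof(struct pair), d);
+}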
+
+
+USE AS LEAK DETECTOR:
+
+ The collector may be used to track down leaks in C programs that are
+intended to run with malloc/free (e.g. code with extreme real-time or
+portability constraints). To do so define FIND_LEAK somewhere in
+gc_priv.h. This will cause the collector to invoke the report_leak
+routine defined near the top of reclaim.c whenever an inaccessible
+object is found that has not been explicitly freed.
+ Productive use of this facility normally involves redefining report_leak
+to do something more intelligent. This typically requires annotating
+objects with additional information (e.g. creation time stack trace) that
+identifies their origin. Such code is typically not very portable, and is
+not included here, except on SPARC machines.
+ If all objects are allocated with GC_DEBUG_MALLOC (see next section),
+then the default version of report_leak will report the source file
+and line number at which the leaked object was allocated. This may
+sometimes be sufficient. (On SPARC/SUNOS4 machines, it will also report
+a cryptic stack trace. This can often be turned into a symbolic stack
+trace by invoking program "foo" with "callprocs foo". Callprocs is
+a short shell script that invokes adb to expand program counter values
+to symbolic addresses. It was largely supplied by Scott Schwartz.)
+ Note that the debugging facilities described in the next section can
+sometimes be slightly LESS effective in leak finding mode, since in
+leak finding mode, GC_debug_free actually results in reuse of the object.
+(Otherwise the object is simply marked invalid.)
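+
+ A trivial example of the kind of leak this reports (a sketch; build
+the collector with -DFIND_LEAK, and define GC_DEBUG in the client so
+that file and line are recorded):
+
+#define GC_DEBUG
+#include "gc.h"
+
+int main()
+{
+    char *p = (char *)GC_MALLOC(100);
+
+    p = 0;          /* the object is now inaccessible, but was never
+                       explicitly freed: report_leak should flag it  */
+    GC_gcollect();
+    return 0;
+}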
+
+DEBUGGING FACILITIES:
+
+ The routines GC_debug_malloc, GC_debug_malloc_atomic, GC_debug_realloc,
+and GC_debug_free provide an alternate interface to the collector, which
+provides some help with memory overwrite errors, and the like.
+Objects allocated in this way are annotated with additional
+information. Some of this information is checked during garbage
+collections, and detected inconsistencies are reported to stderr.
+
+ Simple cases of writing past the end of an allocated object should
+be caught if the object is explicitly deallocated, or if the
+collector is invoked while the object is live. The first deallocation
+of an object will clear the debugging info associated with an
+object, so accidentally repeated calls to GC_debug_free will report the
+deallocation of an object without debugging information. Out of
+memory errors will be reported to stderr, in addition to returning
+NIL.
+
+ GC_debug_malloc checking during garbage collection is enabled
+with the first call to GC_debug_malloc. This will result in some
+slowdown during collections. If frequent heap checks are desired,
+this can be achieved by explicitly invoking GC_gcollect, e.g. from
+the debugger.
+
+ GC_debug_malloc allocated objects should not be passed to GC_realloc
+or GC_free, and conversely. It is however acceptable to allocate only
+some objects with GC_debug_malloc, and to use GC_malloc for other objects,
+provided the two pools are kept distinct. In this case, there is a very
+low probability that GC_malloc allocated objects may be misidentified as
+having been overwritten. This should happen with probability at most
+one in 2**32. This probability is zero if GC_debug_malloc is never called.
+
+ GC_debug_malloc, GC_debug_malloc_atomic, and GC_debug_realloc take two
+additional trailing arguments, a string and an integer. These are not
+interpreted by the allocator. They are stored in the object (the string is
+not copied). If an error involving the object is detected, they are printed.
+
+ The macros GC_MALLOC, GC_MALLOC_ATOMIC, GC_REALLOC, GC_FREE, and
+GC_REGISTER_FINALIZER are also provided. These require the same arguments
+as the corresponding (nondebugging) routines. If gc.h is included
+with GC_DEBUG defined, they call the debugging versions of these
+functions, passing the current file name and line number as the two
+extra arguments, where appropriate. If gc.h is included without GC_DEBUG
+defined, then all these macros will instead be defined to their nondebugging
+equivalents. (GC_REGISTER_FINALIZER is necessary, since pointers to
+objects with debugging information are really pointers to a displacement
+of 16 bytes from the object beginning, and some translation is necessary
+when finalization routines are invoked. For details about what's stored
+in the header, see the definition of the type oh in dbg_mlc.c)
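+
+ A sketch of typical use (the out-of-bounds store is deliberate):
+
+#define GC_DEBUG     /* before gc.h: GC_MALLOC et al. now expand to
+                        the debugging versions, passing __FILE__ and
+                        __LINE__                                      */
+#include "gc.h"
+
+int main()
+{
+    char *p = (char *)GC_MALLOC(16);
+
+    p[16] = 'x';     /* write one byte past the end of the object    */
+    GC_FREE(p);      /* the smashed object should be reported here,
+                        or at a collection while it is still live    */
+    return 0;
+}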
+
+INCREMENTAL/GENERATIONAL COLLECTION:
+
+The collector normally interrupts client code for the duration of
+a garbage collection mark phase. This may be unacceptable if interactive
+response is needed for programs with large heaps. The collector
+can also run in a "generational" mode, in which it usually attempts to
+collect only objects allocated since the last garbage collection.
+Furthermore, in this mode, garbage collections run mostly incrementally,
+with a small amount of work performed in response to each of a large number of
+GC_malloc requests.
+
+This mode is enabled by a call to GC_enable_incremental().
+
+Incremental and generational collection is effective in reducing
+pause times only if the collector has some way to tell which objects
+or pages have been recently modified. The collector uses two sources
+of information:
+
+1. Information provided by the VM system. This may be provided in
+one of several forms. Under Solaris 2.X (and potentially under other
+similar systems) information on dirty pages can be read from the
+/proc file system. Under other systems (currently SunOS4.X) it is
+possible to write-protect the heap, and catch the resulting faults.
+On these systems we require that system calls writing to the heap
+(other than read) be handled specially by client code.
+See os_dep.c for details.
+
+2. Information supplied by the programmer. We define "stubborn"
+objects to be objects that are rarely changed. Such an object
+can be allocated (and enabled for writing) with GC_malloc_stubborn.
+Once it has been initialized, the collector should be informed with
+a call to GC_end_stubborn_change. Subsequent writes that store
+pointers into the object must be preceded by a call to
+GC_change_stubborn.
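+
+A sketch of the intended usage pattern, for a program that has called
+GC_enable_incremental() (the node type is invented for this example):
+
+#include "gc.h"
+
+struct node { struct node *left, *right; };
+
+struct node *make_node(struct node *l, struct node *r)
+{
+    struct node *n =
+        (struct node *)GC_malloc_stubborn(sizeof(struct node));
+
+    n->left = l;                 /* object is writable when allocated */
+    n->right = r;
+    GC_end_stubborn_change(n);   /* initialization is complete        */
+    return n;
+}
+
+void relink(struct node *n, struct node *l)
+{
+    GC_change_stubborn(n);       /* reopen before storing a pointer   */
+    n->left = l;
+    GC_end_stubborn_change(n);
+}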
+
+This mechanism performs best for objects that are written only for
+initialization, and such that only one stubborn object is writable
+at once. It is typically not worth using for short-lived
+objects. Stubborn objects are treated less efficiently than pointerfree
+(atomic) objects.
+
+A rough rule of thumb is that, in the absence of VM information, garbage
+collection pauses are proportional to the amount of pointerful storage
+plus the amount of modified "stubborn" storage that is reachable during
+the collection.
+
+Initial allocation of stubborn objects takes longer than allocation
+of other objects, since other data structures need to be maintained.
+
+We recommend against random use of stubborn objects in client
+code, since bugs caused by inappropriate writes to stubborn objects
+are likely to be very infrequently observed and hard to trace.
+However, their use may be appropriate in a few carefully written
+library routines that do not make the objects themselves available
+for writing by client code.
+
+
+BUGS:
+
+ Any memory that does not have a recognizable pointer to it will be
+reclaimed. Exclusive-or'ing forward and backward links in a list
+doesn't cut it.
+ Some C optimizers may lose the last undisguised pointer to a memory
+object as a consequence of clever optimizations. This has almost
+never been observed in practice. Send mail to boehm@parc.xerox.com
+for suggestions on how to fix your compiler.
+ This is not a real-time collector. In the standard configuration,
+percentage of time required for collection should be constant across
+heap sizes. But collection pauses will increase for larger heaps.
+(On SPARCstation 2s collection times will be on the order of 300 msecs
+per MB of accessible memory that needs to be scanned. Your mileage
+may vary.) The incremental/generational collection facility helps,
+but is portable only if "stubborn" allocation is used.
+ Please address bug reports to boehm@parc.xerox.com. If you are
+contemplating a major addition, you might also send mail to ask whether
+it's already been done.
+
+RECENT VERSIONS:
+
+ Version 1.3 and immediately preceding versions contained spurious
+assembly language assignments to TMP_SP. Only the assignment in the PC/RT
+code is necessary. On other machines, with certain compiler options,
+the assignments can lead to an unsaved register being overwritten.
+Known to cause problems under SunOS 3.5 WITHOUT the -O option. (With
+-O the compiler recognizes it as dead code. It probably shouldn't,
+but that's another story.)
+
+ Version 1.4 and earlier versions used compile time determined values
+for the stack base. This no longer works on Sun 3s, since Sun 3/80s use
+a different stack base. We now use a straightforward heuristic on all
+machines on which it is known to work (incl. Sun 3s) and compile-time
+determined values for the rest. There should really be library calls
+to determine such values.
+
+ Version 1.5 and earlier did not ensure 8 byte alignment for objects
+allocated on a sparc based machine.
+
+ Version 1.8 added ULTRIX support in gc_private.h.
+
+ Version 1.9 fixed a major bug in gc_realloc.
+
+ Version 2.0 introduced a consistent naming convention for collector
+routines and added support for registering dynamic library data segments
+in the standard mark_roots.c. Most of the data structures were revamped.
+The treatment of interior pointers was completely changed. Finalization
+was added. Support for locking was added. Object kinds were added.
+We added a black listing facility to avoid allocating at addresses known
+to occur as integers somewhere in the address space. Much of this
+was accomplished by adapting ideas and code from the PCR collector.
+The test program was changed and expanded.
+
+ Version 2.1 was the first stable version since 1.9, and added support
+for PPCR.
+
+ Version 2.2 added debugging allocation, and fixed various bugs. Among them:
+- GC_realloc could fail to extend the size of the object for certain large object sizes.
+- A blatant subscript range error in GC_printf, which unfortunately
+ wasn't exercised on machines with sufficient stack alignment constraints.
+- GC_register_displacement did the wrong thing if it was called after
+ any allocation had taken place.
+- The leak finding code would eventually break after 2048 byte
+ objects leaked.
+- interface.c didn't compile.
+- The heap size remained much too small for large stacks.
+- The stack clearing code behaved badly for large stacks, and perhaps
+ on HP/PA machines.
+
+ Version 2.3 added ALL_INTERIOR_POINTERS and fixed the following bugs:
+- Missing declaration of etext in the A/UX version.
+- Some PCR root-finding problems.
+- Blacklisting was not 100% effective, because the plausible future
+ heap bounds were being miscalculated.
+- GC_realloc didn't handle out-of-memory correctly.
+- GC_base could return a nonzero value for addresses inside free blocks.
+- test.c wasn't really thread safe, and could erroneously report failure
+ in a multithreaded environment. (The locking primitives need to be
+ replaced for other threads packages.)
+- GC_CONS was thoroughly broken.
+- On a SPARC with dynamic linking, signals stayed disabled while the
+ client code was running.
+ (Thanks to Manuel Serrano at INRIA for reporting the last two.)
+
+ Version 2.4 added GC_free_space_divisor as a tuning knob, added
+ support for OS/2 and linux, and fixed the following bugs:
+- On machines with unaligned pointers (e.g. Sun 3), every 128th word could
+ fail to be considered for marking.
+- Dynamic_load.c erroneously added 4 bytes to the length of the data and
+ bss sections of the dynamic library. This could result in a bad memory
+ reference if the actual length was a multiple of a page. (Observed on
+ Sun 3. Can probably also happen on a Sun 4.)
+ (Thanks to Robert Brazile for pointing out that the Sun 3 version
+ was broken. Dynamic library handling is still broken on Sun 3s
+ under 4.1.1U1, but apparently not 4.1.1. If you have such a machine,
+ use -Bstatic.)
+
+ Version 2.5 fixed the following bugs:
+- Removed an explicit call to exit(1)
+- Fixed calls to GC_printf and GC_err_printf, so the correct number of
+ arguments are always supplied. The OS/2 C compiler gets confused if
+ the number of actuals and the number of formals differ. (ANSI C
+ doesn't require this to work. The ANSI sanctioned way of doing things
+ causes too many compatibility problems.)
+
+ Version 3.0 added generational/incremental collection and stubborn
+ objects.
+
+ Version 3.1 added the following features:
+- A workaround for a SunOS 4.X SPARC C compiler
+ misfeature that caused problems when the collector was turned into
+ a dynamic library.
+- A fix for a bug in GC_base that could result in a memory fault.
+- A fix for a performance bug (and several other misfeatures) pointed
+ out by Dave Detlefs and Al Dosser.
+- Use of dirty bit information for static data under Solaris 2.X.
+- DEC Alpha/OSF1 support (thanks to Al Dosser).
+- Incremental collection on more platforms.
+- A more refined heap expansion policy. Less space usage by default.
+- Various minor enhancements to reduce space usage, and to reduce
+ the amount of memory scanned by the collector.
+- Uncollectable allocation without per object overhead.
+- More conscientious handling of out-of-memory conditions.
+- Fixed a bug in debugging stubborn allocation.
+- Fixed a bug that resulted in occasional erroneous reporting of smashed
+ objects with debugging allocation.
+- Fixed bogus leak reports of size 4096 blocks with FIND_LEAK.
+
+ Version 3.2 fixed a serious and not entirely repeatable bug in
+ the incremental collector. It appeared only when dirty bit info
+ on the roots was available, which is normally only under Solaris.
+ It also added GC_general_register_disappearing_link, and some
+ testing code. Interface.c disappeared.
+
+ Version 3.3 fixes several bugs and adds new ports:
+- PCR-specific bugs.
+- Missing locking in GC_free, redundant FASTUNLOCK
+ in GC_malloc_stubborn, and 2 bugs in
+ GC_unregister_disappearing_link.
+ All of the above were pointed out by Neil Sharman
+ (neil@cs.mu.oz.au).
+- Common symbols allocated by the SunOS4.X dynamic loader
+ were not included in the root set.
+- Bug in GC_finalize (reported by Brian Beuning and Al Dosser)
+- Merged Amiga port from Jesper Peterson (untested)
+- Merged NeXT port from Thomas Funke (significantly
+ modified and untested)
+
+ Version 3.4:
+- Fixed a performance bug in GC_realloc.
+- Updated the amiga port.
+- Added NetBSD and 386BSD ports.
+- Added cord library.
+- Added trivial performance enhancement for
+ ALL_INTERIOR_POINTERS. (Don't scan last word.)
+
+ Version 3.5
+- Minor collections now mark from roots only once, if that
+ doesn't cause an excessive pause.
+- The stack clearing heuristic was refined to prevent anomalies
+ with very heavily recursive programs and sparse stacks.
+- Fixed a bug that prevented mark stack growth in some cases.
+ GC_objects_are_marked should be set to TRUE after a call
+ to GC_push_roots and as part of GC_push_marked, since
+ both can now set mark bits. I think this is only a performance
+ bug, but I wouldn't bet on it. It's certainly very hard to argue
+ that the old version was correct.
+- Fixed an incremental collection bug that prevented it from
+ working at all when HBLKSIZE != getpagesize()
+- Changed dynamic_loading.c to include gc_private.h before testing
+ DYNAMIC_LOADING. SunOS dynamic library scanning
+ must have been broken in 3.4.
+- Object size rounding now adapts to program behavior.
+- Added a workaround (provided by Manuel Serrano and
+ colleagues) to a long-standing SunOS 4.X (and 3.X?) ld bug
+ that I had incorrectly assumed to have been squished.
+ The collector was broken if the text segment size was within
+ 32 bytes of a multiple of 8K bytes, and if the beginning of
+ the data segment contained interesting roots. The workaround
+ assumes a demand-loadable executable. The original may have
+ "worked" in some other cases.
+- Added dynamic library support under IRIX5.
+- Added support for EMX under OS/2 (thanks to Ari Huttunen).
+
+Version 3.6:
+- fixed a bug in the mark stack growth code that was introduced
+ in 3.4.
+- fixed Makefile to work around DEC AXP compiler tail recursion
+ bug.
+
+Version 3.7:
+- Added a workaround for an HP/UX compiler bug.
+- Fixed another stack clearing performance bug. Reworked
+ that code once more.
+
+Version 4.0:
+- Added support for Solaris threads (which was possible
+ only by reimplementing some fraction of Solaris threads,
+ since Sun doesn't currently make the thread debugging
+ interface available).
+- Added non-threads win32 and win32S support.
+- (Grudgingly, with suitable muttering of obscenities) renamed
+ files so that the collector distribution could live on a FAT
+ file system. Files that are guaranteed to be useless on
+ a PC still have long names. Gc_inline.h and gc_private.h
+ still exist, but now just include gc_inl.h and gc_priv.h.
+- Fixed a really obscure bug in finalization that could cause
+ undetected mark stack overflows. (I would be surprised if
+ any real code ever tickled this one.)
+- Changed finalization code to dynamically resize the hash
+ tables it maintains. (This probably does not matter for
+ well-written code. It no doubt does for C++ code that overuses
+ destructors.)
+- Added typed allocation primitives. Rewrote the marker to
+ accommodate them with more reasonable efficiency. This
+ change should also speed up marking for GC_malloc allocated
+ objects a little. See gc_typed.h for new primitives.
+- Improved debugging facilities slightly. Allocation time
+ stack traces are now kept by default on SPARC/SUNOS4.
+ (Thanks to Scott Schwartz.)
+- Added better support for small heap applications.
+- Significantly extended cord package. Fixed a bug in the
+ implementation of lazily read files. Printf and friends now
+ have cord variants. Cord traversals are a bit faster.
+- Made ALL_INTERIOR_POINTERS recognition the default.
+- Fixed de so that it can run in constant space, independent
+ of file size. Added simple string searching to cords and de.
+- Added the Hull-Ellis C++ interface.
+- Added dynamic library support for OSF/1.
+ (Thanks to Al Dosser and Tim Bingham at DEC.)
+- Changed argument to GC_expand_hp to be expressed
+ in units of bytes instead of heap blocks. (Necessary
+ since the heap block size now varies depending on
+ configuration. The old version was never very clean.)
+- Added GC_get_heap_size(). The previous "equivalent"
+ was broken.
+- Restructured the Makefile a bit.
+
+Since version 4.0:
+- Changed finalization implementation to guarantee that
+ finalization procedures are called outside of the allocation
+ lock, making direct use of the interface a little less dangerous.
+ MAY BREAK EXISTING CLIENTS that assume finalizers
+ are protected by a lock. Since there seem to be few multithreaded
+ clients that use finalization, this is hopefully not much of
+ a problem.
+- Fixed a gross bug in CORD_prev.
+- Fixed a bug in blacklst.c that could result in unbounded
+ heap growth during startup on machines that do not clear
+ memory obtained from the OS (e.g. win32S).
+- Ported de editor to win32/win32S. (This is now the only
+ version with a mouse-sensitive UI.)
+- Added GC_malloc_ignore_off_page to allocate large arrays
+ in the presence of ALL_INTERIOR_POINTERS.
+- Changed GC_call_with_alloc_lock to not disable signals in
+ the single-threaded case.
+- Reduced retry count in GC_collect_or_expand for garbage
+ collecting when out of memory.
+- Made uncollectable allocations bypass black-listing, as they
+ should.
+- Fixed a bug in typed_test in test.c that could cause (legitimate)
+ GC crashes.
+- Fixed some potential synchronization problems in finalize.c
+- Fixed a real locking problem in typd_mlc.c.
+- Worked around an AIX 3.2 compiler feature that results in
+ out of bounds memory references.
+- Partially worked around an IRIX5.2 beta problem (which may
+ or may not persist to the final release).
+- Fixed a bug in the heap integrity checking code that could
+ result in explicitly deallocated objects being identified as
+ smashed. Fixed a bug in the dbg_mlc stack saving code
+ that caused old argument pointers to be considered live.
+- Fixed a bug in CORD_ncmp (and hence CORD_str).
+- Repaired the OS2 port, which had suffered from bit rot
+ in 4.0. Worked around what appears to be CSet/2 V1.0
+ optimizer bug.
+- Fixed a Makefile bug for target "c++".
--- /dev/null
+The code assumes static linking, and a single thread. The editor de has
+not been ported. The cord test program has. The supplied OS2_MAKEFILE
+assumes the IBM C Set/2 environment, but the code shouldn't.
+
+Since we haven't figured out how to perform partial links or to build static
+libraries, clients currently need to link against a long list of object files.
--- /dev/null
+Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program
+for any purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is granted,
+provided the above notices are retained, and a notice that the code was
+modified is included with the above copyright notice.
+
+
+For more details and the names of other contributors, see the
+README file and gc.h. This file describes typical use of
+the collector on a machine that is already supported.
+
+INSTALLATION:
+Under UN*X, type "make test". Under OS/2 or Windows NT, copy the
+appropriate makefile to MAKEFILE, read it, and type "nmake test".
+Read the machine specific README if one exists. The only way to
+develop code with the collector for Windows 3.1 is to develop under
+Windows NT, and then to use win32S.
+
+If you wish to use the cord (structured string) library type
+"make cords". (This requires an ANSI C compiler. You may need
+to redefine CC in the Makefile.)
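+
+For example, client code can then build strings without worrying
+about storage management (a minimal sketch; CORD_cat and CORD_len
+are declared in cord/cord.h):
+
+    # include "cord.h"
+
+    CORD s = CORD_cat("hello, ", "world");
+    size_t n = CORD_len(s);    /* 12 */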
+
+If you wish to use the collector from C++, type
+"make c++". Each of these targets adds further files to gc.a and to the
+include subdirectory. See cord/cord.h and gc_c++.h.
+
+TYPICAL USE:
+Include "gc.h" from this directory. Link against the appropriate library
+("gc.a" under UN*X). Replace calls to malloc by calls to GC_MALLOC,
+and calls to realloc by calls to GC_REALLOC. If the object is known
+to never contain pointers, use GC_MALLOC_ATOMIC instead of
+GC_MALLOC.
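+
+For example (a minimal sketch; the loop bound is arbitrary):
+
+    # include "gc.h"
+    # include <stdio.h>
+
+    int main()
+    {
+        int i;
+
+        for (i = 0; i < 10000; i++) {
+            /* Pointer-containing object: use GC_MALLOC. */
+            int **p = (int **)GC_MALLOC(sizeof(int *));
+
+            /* Pointer-free object: GC_MALLOC_ATOMIC tells the collector */
+            /* not to scan its contents for pointers.                    */
+            *p = (int *)GC_MALLOC_ATOMIC(sizeof(int));
+            **p = i;
+            /* No GC_FREE needed; unreachable objects are reclaimed. */
+        }
+        printf("Final heap size is %lu bytes\n",
+               (unsigned long)GC_get_heap_size());
+        return(0);
+    }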
+
--- /dev/null
+
+ADDITIONAL NOTES FOR AMIGA PORT
+
+These notes assume some familiarity with Amiga internals.
+
+WHY I PORTED TO THE AMIGA
+
+The sole reason why I made this port was as a first step in getting
+the Sather(*) language on the Amiga. A port of this language will
+be done as soon as the Sather 1.0 sources are made available to me.
+Given this motivation, the garbage collection (GC) port is rather
+minimal.
+
+(*) For information on Sather read the comp.lang.sather newsgroup.
+
+LIMITATIONS
+
+This port assumes that the startup code linked with target programs
+is that supplied with SAS/C versions 6.0 or later. This allows
+assumptions to be made about where to find the stack base pointer
+and data segments when programs are run from WorkBench, as opposed
+to running from the CLI. The compiler-dependent code is all in the
+GC_get_stack_base() and GC_register_data_segments() functions, but
+may spread as I add Amiga-specific features.
+
+Given that SAS/C was assumed, the port is set up to be built with
+"smake" using the "SMakefile". Compiler options in "SCoptions" can
+be set with "scopts" program. Both "smake" and "scopts" are part of
+the SAS/C commercial development system.
+
+In keeping with the porting philosophy outlined above, this port
+will not behave well with Amiga-specific code; in particular, it
+does not support interprocess communication via messages, or setting
+up public structures like Intuition objects or anything else in the
+system lists. For the time being, use of this library is limited to
+single-threaded ANSI/POSIX-compliant or near-compliant code (i.e.
+stick to stdio for now). Given this limitation there is currently no
+mechanism for allocating "CHIP" or "PUBLIC" memory under the garbage
+collector. I'll add this after giving it considerable thought. The
+major problem is that the entire physical address space may have to
+be scanned, since there is no telling whom we may have passed memory to.
+
+If you allocate your own stack in client code, you will have to
+assign the pointer plus stack size to GC_stackbottom.
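+
+A minimal sketch of that assignment (the stack name and size are
+made up for illustration; GC_stackbottom and ptr_t are declared in
+gc_private.h):
+
+    # include "gc_private.h"
+
+    #define MY_STACK_SIZE 16384L
+    extern char *my_stack;    /* allocated by client startup code */
+
+    void register_my_stack()
+    {
+        /* The stack grows down, so its bottom is the high end:  */
+        /* the allocated pointer plus the stack size.            */
+        GC_stackbottom = (ptr_t)(my_stack + MY_STACK_SIZE);
+    }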
+
+The initial stack size of the target program can be compiled in by
+setting the __stack symbol (see the SAS documentation). It can be
+overridden from the CLI by running the AmigaDOS "stack" program, or
+from the WorkBench by setting the stack size in the tool types window.
+
+SAS/C COMPILER OPTIONS (SCoptions)
+
+You may wish to check that the "CPU" code option is appropriate for
+your intended target system.
+
+Under no circumstances set the "StackExtend" code option when compiling
+either the library or *ANY* client code.
+
+All benign compiler warnings have been suppressed. These mainly
+involve lack of prototypes in the code, and dead assignments
+detected by the optimizer.
+
+THE GOOD NEWS
+
+The library as it stands is compatible with the GigaMem commercial
+virtual memory software, and probably similar PD software.
+
+The performance of "gctest" on an Amiga 2630 (68030 @ 25MHz)
+compares favourably with that of an HP9000 with a similar architecture
+(a 325 with a 68030, I think).
+
+-----------------------------------------------------------------------
+
+The Amiga port has been brought to you by:
+
+Jesper Peterson.
+
+jep@mtiame.mtia.oz.au (preferred, but 1 week turnaround)
+jep@orca1.vic.design.telecom.au (that's orca<one>, 1 day turnaround)
+
+At least one of these addresses should be around for a while, even
+though I don't work for either of the companies involved.
+
--- /dev/null
+The collector currently does not handle multiple threads. There
+is good reason to believe this is fixable. (SRC M3 works with
+NT threads.)
+
+The collector has only been compiled under Windows NT, with the
+Microsoft tools.
+
+It runs under both win32s and win32, but with different semantics.
+Under win32, all writable pages outside of the heaps and stack are
+scanned for roots. Thus the collector sees pointers in DLL data
+segments. Under win32s, only the main data segment is scanned.
+Thus all accessible objects should be reachable from local variables
+or variables in the main data segment. Alternatively, other data
+segments (e.g. in DLLs) may be registered with the collector by
+calling GC_init() and then GC_register_root_section(a), where
+a is the address of some variable inside the data segment. (Duplicate
+registrations are ignored, but not terribly quickly.)
+
+(There are two reasons for this. We didn't want to see many 16:16
+pointers. And the VirtualQuery call has different semantics under
+the two systems.)
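+
+A hedged sketch of that registration sequence (the variable name is
+hypothetical, and GC_register_root_section is assumed to take a plain
+data pointer):
+
+    # include "gc.h"
+
+    static int anchor;    /* any variable in this DLL's data segment */
+
+    void register_this_dll()
+    {
+        GC_init();
+        GC_register_root_section((char *)&anchor);
+    }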
+
+The collector test program "gctest" is linked as a GUI application,
+but does not open any windows. Its output appears in the file
+"gc.log". It may be started from the file manager. The hour glass
+cursor will appear as long as it's running.
+
+The cord test program has not been ported (but should port
+easily). A toy editor (cord/de.exe) based on cords (heavyweight
+strings represented as trees) has been ported and is included.
+It runs fine under either win32 or win32S. It serves as an example
+of a true Windows application, except that it was written by a
+nonexpert Windows programmer. (There are some peculiarities
+in the way files are displayed. The <cr> is displayed explicitly
+for standard DOS text files. As in the UNIX version, control
+characters are displayed explicitly, but in this case as red text.
+This may be suboptimal for some tastes and/or sets of default
+window colors.)
+
--- /dev/null
+CPU=68030
+NOSTACKCHECK
+ERRORREXX
+OPTIMIZE
+MAPHUNK
+NOVERSION
+NOICONS
+OPTIMIZERTIME
+DEFINE SILENT
+IGNORE=105
+IGNORE=304
+IGNORE=154
+IGNORE=85
+IGNORE=100
+IGNORE=161
--- /dev/null
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o malloc.o stubborn.o checksums.o
+
+INC= gc_private.h gc_headers.h gc.h config.h
+
+all: gctest setjmp_test
+
+alloc.o : alloc.c $(INC)
+reclaim.o : reclaim.c $(INC)
+allochblk.o : allochblk.c $(INC)
+misc.o : misc.c $(INC)
+os_dep.o : os_dep.c $(INC)
+mark_roots.o : mark_roots.c $(INC)
+headers.o : headers.c $(INC)
+mark.o : mark.c $(INC)
+obj_map.o : obj_map.c $(INC)
+black_list.o : black_list.c $(INC)
+finalize.o : finalize.c $(INC)
+new_hblk.o : new_hblk.c $(INC)
+real_malloc.o : real_malloc.c $(INC)
+dynamic_load.o : dynamic_load.c $(INC)
+debug_malloc.o : debug_malloc.c $(INC)
+malloc.o : malloc.c $(INC)
+stubborn.o : stubborn.c $(INC)
+checksums.o : checksums.c $(INC)
+test.o : test.c $(INC)
+
+mach_dep.o : mach_dep.c $(INC)
+ sc noopt mach_dep.c # optimizer mangles reg save hack
+
+gc.lib: $(OBJS)
+ oml gc.lib r $(OBJS)
+
+clean:
+ delete gc.lib gctest setjmp_test \#?.o
+
+gctest: gc.lib test.o
+ slink LIB:c.o test.o to $@ lib gc.lib LIB:sc.lib LIB:scm.lib
+
+setjmp_test: setjmp_test.c gc.h
+ sc setjmp_test.c
+ slink LIB:c.o $@.o to $@ lib LIB:sc.lib
+
+test: setjmp_test gctest
+ setjmp_test
+ gctest
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:55 pm PDT */
+
+#define DEBUG
+#undef DEBUG
+#include <stdio.h>
+#include "gc_priv.h"
+
+
+/*
+ * Allocate/free routines for heap blocks.
+ * Note that everything called from outside the garbage collector
+ * should be prepared to abort at any point as the result of a signal.
+ */
+
+/*
+ * Free heap blocks are kept on a list sorted by address.
+ * The hb_hdr.hbh_sz field of a free heap block contains the length
+ * (in bytes) of the entire block.
+ * Neighbors are coalesced.
+ */
+
+# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
+ /* largest block we will allocate starting on a black */
+ /* listed block. Must be >= HBLKSIZE. */
+
+struct hblk * GC_hblkfreelist = 0;
+
+struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
+ /* block to be examined by */
+ /* GC_allochblk. */
+
+void GC_print_hblkfreelist()
+{
+ struct hblk * h = GC_hblkfreelist;
+ word total_free = 0;
+ hdr * hhdr = HDR(h);
+ word sz;
+
+ while (h != 0) {
+ sz = hhdr -> hb_sz;
+ GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ total_free += sz;
+ if (GC_is_black_listed(h, HBLKSIZE) != 0) {
+ GC_printf0("start black listed\n");
+ } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
+ GC_printf0("partially black listed\n");
+ } else {
+ GC_printf0("not black listed\n");
+ }
+ h = hhdr -> hb_next;
+ hhdr = HDR(h);
+ }
+ GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
+}
+
+/* Initialize hdr for a block containing the indicated size and */
+/* kind of objects. */
+/* Return FALSE on failure. */
+static bool setup_header(hhdr, sz, kind, flags)
+register hdr * hhdr;
+word sz; /* object size in words */
+int kind;
+unsigned char flags;
+{
+ register word descr;
+
+ /* Add description of valid object pointers */
+ if (!GC_add_map_entry(sz)) return(FALSE);
+ hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];
+
+ /* Set size, kind and mark proc fields */
+ hhdr -> hb_sz = sz;
+ hhdr -> hb_obj_kind = kind;
+ hhdr -> hb_flags = flags;
+ descr = GC_obj_kinds[kind].ok_descriptor;
+ if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
+ hhdr -> hb_descr = descr;
+
+ /* Clear mark bits */
+ GC_clear_hdr_marks(hhdr);
+
+ hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
+ return(TRUE);
+}
+
+/*
+ * Allocate (and return pointer to) a heap block
+ * for objects of size sz words.
+ *
+ * NOTE: We set obj_map field in header correctly.
+ * Caller is responsible for building an object freelist in block.
+ *
+ * We clear the block if it is destined for large objects, and if
+ * kind requires that newly allocated objects be cleared.
+ */
+struct hblk *
+GC_allochblk(sz, kind, flags)
+word sz;
+int kind;
+unsigned char flags;
+{
+ register struct hblk *thishbp;
+ register hdr * thishdr; /* Header corr. to thishbp */
+ register struct hblk *hbp;
+ register hdr * hhdr; /* Header corr. to hbp */
+ struct hblk *prevhbp;
+ register hdr * phdr; /* Header corr. to prevhbp */
+ signed_word size_needed; /* number of bytes in requested objects */
+ signed_word size_avail; /* bytes available in this block */
+ bool first_time = TRUE;
+
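+    /* Bytes needed, rounded up to a whole number of heap blocks. */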
+ size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
+
+ /* search for a big enough block in free list */
+ hbp = GC_savhbp;
+ hhdr = HDR(hbp);
+ for(;;) {
+
+ prevhbp = hbp;
+ phdr = hhdr;
+ hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
+ hhdr = HDR(hbp);
+
+ if( prevhbp == GC_savhbp && !first_time) {
+ return(0);
+ }
+
+ first_time = FALSE;
+
+ if( hbp == 0 ) continue;
+
+ size_avail = hhdr->hb_sz;
+ if (size_avail < size_needed) continue;
+ /* If the next heap block is obviously better, go on. */
+ /* This prevents us from disassembling a single large block */
+ /* to get tiny blocks. */
+ {
+ signed_word next_size;
+
+ thishbp = hhdr -> hb_next;
+ if (thishbp == 0) thishbp = GC_hblkfreelist;
+ thishdr = HDR(thishbp);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
+ && next_size >= size_needed
+ && !GC_is_black_listed(thishbp, (word)size_needed)) {
+ continue;
+ }
+ }
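+      /* Unless the request is for an uncollectable or (small)          */
+      /* pointer-free object, avoid black-listed portions of the block: */
+      /* skip past any black-listed prefix, splitting the block if a    */
+      /* usable remainder is found, and occasionally drop completely    */
+      /* black-listed single blocks from the free list.                 */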
+ if ( kind != UNCOLLECTABLE &&
+ (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
+ struct hblk * lasthbp = hbp;
+ ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
+ signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
+ HBLKSIZE
+ : size_needed);
+
+
+ while ((ptr_t)lasthbp <= search_end
+ && (thishbp = GC_is_black_listed(lasthbp,
+ (word)eff_size_needed))) {
+ lasthbp = thishbp;
+ }
+ size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
+ thishbp = lasthbp;
+ if (size_avail >= size_needed && thishbp != hbp
+ && GC_install_header(thishbp)) {
+ /* Split the block at thishbp */
+ thishdr = HDR(thishbp);
+ /* GC_invalidate_map not needed, since we will */
+ /* allocate this block. */
+ thishdr -> hb_next = hhdr -> hb_next;
+ thishdr -> hb_sz = size_avail;
+ hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
+ hhdr -> hb_next = thishbp;
+ /* Advance to thishbp */
+ prevhbp = hbp;
+ phdr = hhdr;
+ hbp = thishbp;
+ hhdr = thishdr;
+ } else if (size_avail == 0
+ && size_needed == HBLKSIZE
+ && prevhbp != 0) {
+# ifndef FIND_LEAK
+ static unsigned count = 0;
+
+ /* The block is completely blacklisted. We need */
+ /* to drop some such blocks, since otherwise we spend */
+ /* all our time traversing them if pointerfree */
+ /* blocks are unpopular. */
+ /* A dropped block will be reconsidered at next GC. */
+ if ((++count & 3) == 0) {
+ /* Allocate and drop the block */
+ if (GC_install_counts(hbp, hhdr->hb_sz)) {
+ phdr -> hb_next = hhdr -> hb_next;
+ (void) setup_header(
+ hhdr,
+ BYTES_TO_WORDS(hhdr->hb_sz - HDR_BYTES),
+                    PTRFREE, 0);    /* Can't fail */
+ if (GC_debugging_started) {
+ BZERO(hbp + HDR_BYTES, hhdr->hb_sz - HDR_BYTES);
+ }
+ if (GC_savhbp == hbp) GC_savhbp = prevhbp;
+ }
+ /* Restore hbp to point at free block */
+ hbp = prevhbp;
+ hhdr = phdr;
+ if (hbp == GC_savhbp) first_time = TRUE;
+ }
+# endif
+ }
+ }
+ if( size_avail >= size_needed ) {
+ /* found a big enough block */
+ /* let thishbp --> the block */
+ /* set prevhbp, hbp to bracket it */
+ thishbp = hbp;
+ thishdr = hhdr;
+ if( size_avail == size_needed ) {
+ hbp = hhdr->hb_next;
+ hhdr = HDR(hbp);
+ } else {
+ hbp = (struct hblk *)
+ (((word)thishbp) + size_needed);
+ if (!GC_install_header(hbp)) continue;
+ hhdr = HDR(hbp);
+ GC_invalidate_map(hhdr);
+ hhdr->hb_next = thishdr->hb_next;
+ hhdr->hb_sz = size_avail - size_needed;
+ }
+ /* remove *thishbp from hblk freelist */
+ if( prevhbp == 0 ) {
+ GC_hblkfreelist = hbp;
+ } else {
+ phdr->hb_next = hbp;
+ }
+ /* save current list search position */
+ GC_savhbp = hbp;
+ break;
+ }
+ }
+
+ /* Notify virtual dirty bit implementation that we are about to write. */
+ GC_write_hint(thishbp);
+
+ /* Add it to map of valid blocks */
+ if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+ /* This leaks memory under very rare conditions. */
+
+ /* Set up header */
+ if (!setup_header(thishdr, sz, kind, flags)) {
+ GC_remove_counts(thishbp, (word)size_needed);
+ return(0); /* ditto */
+ }
+
+ /* Clear block if necessary */
+    if (GC_debugging_started
+        || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
+ BZERO(thishbp + HDR_BYTES, size_needed - HDR_BYTES);
+ }
+
+ return( thishbp );
+}
+
+struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
+
+/*
+ * Free a heap block.
+ *
+ * Coalesce the block with its neighbors if possible.
+ *
+ * All mark words are assumed to be cleared.
+ */
+void
+GC_freehblk(p)
+register struct hblk *p;
+{
+register hdr *phdr; /* Header corresponding to p */
+register struct hblk *hbp, *prevhbp;
+register hdr *hhdr, *prevhdr;
+register signed_word size;
+
+ /* GC_savhbp may become invalid due to coalescing. Clear it. */
+ GC_savhbp = (struct hblk *)0;
+
+ phdr = HDR(p);
+ size = phdr->hb_sz;
+ size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
+ GC_remove_counts(p, (word)size);
+ phdr->hb_sz = size;
+ GC_invalidate_map(phdr);
+ prevhbp = 0;
+
+ /* The following optimization was suggested by David Detlefs. */
+ /* Note that the header cannot be NIL, since there cannot be an */
+ /* intervening call to GC_freehblk without resetting */
+ /* GC_freehblk_ptr. */
+ if (GC_freehblk_ptr != 0 &&
+ HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
+ (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
+ hbp = GC_freehblk_ptr;
+    } else {
+        hbp = GC_hblkfreelist;
+    }
+ hhdr = HDR(hbp);
+
+ while( (hbp != 0) && (hbp < p) ) {
+ prevhbp = hbp;
+ prevhdr = hhdr;
+ hbp = hhdr->hb_next;
+ hhdr = HDR(hbp);
+ }
+ GC_freehblk_ptr = prevhbp;
+
+ /* Check for duplicate deallocation in the easy case */
+    if ((hbp != 0 && (ptr_t)p + size > (ptr_t)hbp)
+        || (prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p)) {
+ GC_printf1("Duplicate large block deallocation of 0x%lx\n",
+ (unsigned long) p);
+ GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
+ (unsigned long) prevhbp, (unsigned long) hbp);
+ }
+
+ /* Coalesce with successor, if possible */
+ if( (((word)p)+size) == ((word)hbp) ) {
+ phdr->hb_next = hhdr->hb_next;
+ phdr->hb_sz += hhdr->hb_sz;
+ GC_remove_header(hbp);
+ } else {
+ phdr->hb_next = hbp;
+ }
+
+
+ if( prevhbp == 0 ) {
+ GC_hblkfreelist = p;
+ } else if( (((word)prevhbp) + prevhdr->hb_sz)
+ == ((word)p) ) {
+ /* Coalesce with predecessor */
+ prevhdr->hb_next = phdr->hb_next;
+ prevhdr->hb_sz += phdr->hb_sz;
+ GC_remove_header(p);
+ } else {
+ prevhdr->hb_next = p;
+ }
+}
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+/* Boehm, May 19, 1994 2:02 pm PDT */
+
+
+# include <stdio.h>
+# include <signal.h>
+# include <sys/types.h>
+# include "gc_priv.h"
+
+/*
+ * Separate free lists are maintained for different sized objects
+ * up to MAXOBJSZ.
+ * The call GC_allocobj(i,k) ensures that the freelist for
+ * kind k objects of size i points to a non-empty
+ * free list. It returns a pointer to the first entry on the free list.
+ * In a single-threaded world, GC_allocobj may be called to allocate
+ * an object of (small) size i as follows:
+ *
+ * opp = &(GC_objfreelist[i]);
+ * if (*opp == 0) GC_allocobj(i, NORMAL);
+ * ptr = *opp;
+ * *opp = obj_link(ptr);
+ *
+ * Note that this is very fast if the free list is non-empty; it should
+ * only involve the execution of 4 or 5 simple instructions.
+ * All composite objects on freelists are cleared, except for
+ * their first word.
+ */
+
+/*
+ * The allocator uses GC_allochblk to allocate large chunks of objects.
+ * These chunks all start on addresses which are multiples of
+ * HBLKSZ. Each allocated chunk has an associated header,
+ * which can be located quickly based on the address of the chunk.
+ * (See headers.c for details.)
+ * This makes it possible to check quickly whether an
+ * arbitrary address corresponds to an object administered by the
+ * allocator.
+ */
+
+word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
+
+word GC_gc_no = 0;
+
+int GC_incremental = 0; /* By default, stop the world. */
+
+int GC_full_freq = 4; /* Every 5th collection is a full */
+ /* collection. */
+
+char * GC_copyright[] =
+{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers",
+"Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.",
+"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
+" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK."};
+
+
+/* some more variables */
+
+extern signed_word GC_mem_found; /* Number of reclaimed longwords */
+ /* after garbage collection */
+
+bool GC_dont_expand = 0;
+
+word GC_free_space_divisor = 4;
+
+/* Return the minimum number of words that must be allocated between */
+/* collections to amortize the collection cost. */
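+/* For example (assuming 32-bit words and the default                  */
+/* GC_free_space_divisor of 4): with a 4 MB heap, negligible root      */
+/* size, and non-incremental collection, this comes to about 256K      */
+/* words, i.e. roughly 1 MB of allocation between collections.         */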
+static word min_words_allocd()
+{
+ int dummy;
+# ifdef THREADS
+ /* We punt, for now. */
+ register signed_word stack_size = 10000;
+# else
+ register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
+# endif
+ register word total_root_size; /* includes double stack size, */
+ /* since the stack is expensive */
+ /* to scan. */
+
+ if (stack_size < 0) stack_size = -stack_size;
+ total_root_size = 2 * stack_size + GC_root_size;
+ if (GC_incremental) {
+ return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
+ / (2 * GC_free_space_divisor));
+ } else {
+ return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
+ / GC_free_space_divisor);
+ }
+}
+
+/* Return the number of words allocated, adjusted for explicit storage */
+/* management, etc. This number is used in deciding when to trigger */
+/* collections. */
+word GC_adj_words_allocd()
+{
+ register signed_word result;
+ register signed_word expl_managed =
+ BYTES_TO_WORDS((long)GC_non_gc_bytes
+ - (long)GC_non_gc_bytes_at_gc);
+
+ /* Don't count what was explicitly freed, or newly allocated for */
+ /* explicit management. Note that deallocating an explicitly */
+ /* managed object should not alter result, assuming the client */
+ /* is playing by the rules. */
+ result = (signed_word)GC_words_allocd
+ - (signed_word)GC_mem_freed - expl_managed;
+ if (result > (signed_word)GC_words_allocd) result = GC_words_allocd;
+ /* probably client bug or unfortunate scheduling */
+ result += GC_words_wasted;
+ /* This doesn't reflect useful work. But if there is lots of */
+ /* new fragmentation, the same is probably true of the heap, */
+ /* and the collection will be correspondingly cheaper. */
+ if (result < (signed_word)(GC_words_allocd >> 2)) {
+ /* Always count at least 1/8 of the allocations. We don't want */
+ /* to collect too infrequently, since that would inhibit */
+ /* coalescing of free storage blocks. */
+ /* This also makes us partially robust against client bugs. */
+ return(GC_words_allocd >> 3);
+ } else {
+ return(result);
+ }
+}
+
+
+/* Clear up a few frames worth of garbage left at the top of the stack. */
+/* This is used to prevent us from accidentally treating garbage left */
+/* on the stack by other parts of the collector as roots. This */
+/* differs from the code in misc.c, which actually tries to keep the */
+/* stack clear of long-lived, client-generated garbage. */
+void GC_clear_a_few_frames()
+{
+# define NWORDS 64
+ word frames[NWORDS];
+ register int i;
+
+ for (i = 0; i < NWORDS; i++) frames[i] = 0;
+}
+
+/* Have we allocated enough to amortize a collection? */
+bool GC_should_collect()
+{
+ return(GC_adj_words_allocd() >= min_words_allocd());
+}
+
+/*
+ * Initiate a garbage collection if appropriate.
+ * Choose judiciously
+ * between partial, full, and stop-world collections.
+ * Assumes lock held, signals disabled.
+ */
+void GC_maybe_gc()
+{
+ static int n_partial_gcs = 0;
+ if (GC_should_collect()) {
+ if (!GC_incremental) {
+ GC_gcollect_inner();
+ n_partial_gcs = 0;
+ } else if (n_partial_gcs >= GC_full_freq) {
+ GC_initiate_full();
+ n_partial_gcs = 0;
+ } else {
+ /* We try to mark with the world stopped. */
+ /* If we run out of time, this turns into */
+ /* incremental marking. */
+ if (GC_stopped_mark(FALSE)) GC_finish_collection();
+ n_partial_gcs++;
+ }
+ }
+}
+
+/*
+ * Stop the world garbage collection. Assumes lock held, signals disabled.
+ */
+void GC_gcollect_inner()
+{
+# ifdef PRINTSTATS
+ GC_printf2(
+ "Initiating full world-stop collection %lu after %ld allocd bytes\n",
+ (unsigned long) GC_gc_no+1,
+ (long)WORDS_TO_BYTES(GC_words_allocd));
+# endif
+ GC_promote_black_lists();
+ /* GC_reclaim_or_delete_all(); -- not needed: no intervening allocation */
+ GC_clear_marks();
+ (void) GC_stopped_mark(TRUE);
+ GC_finish_collection();
+}
+
+/*
+ * Perform n units of garbage collection work. A unit is intended to touch
+ * roughly GC_RATE pages. Every once in a while, we do more than that.
+ */
+# define GC_RATE 8
+
+int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
+ /* that we have made. */
+ /* Negative values are equivalent to 0. */
+extern bool GC_collection_in_progress();
+
+void GC_collect_a_little(n)
+int n;
+{
+ register int i;
+
+ if (GC_collection_in_progress()) {
+ for (i = GC_deficit; i < GC_RATE*n; i++) {
+ if (GC_mark_some()) {
+ /* Need to finish a collection */
+ (void) GC_stopped_mark(TRUE);
+ GC_finish_collection();
+ break;
+ }
+ }
+ if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
+ } else {
+ GC_maybe_gc();
+ }
+}
+
+/*
+ * Assumes lock is held, signals are disabled.
+ * We stop the world.
+ * If final is TRUE, then we finish the collection, no matter how long
+ * it takes.
+ * Otherwise we may fail and return FALSE if this takes too long.
+ * Increment GC_gc_no if we succeed.
+ */
+bool GC_stopped_mark(final)
+bool final;
+{
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE current_time;
+ unsigned long time_diff;
+ register int i;
+
+ GET_TIME(start_time);
+ STOP_WORLD();
+# ifdef PRINTSTATS
+ GC_printf1("--> Marking for collection %lu ",
+ (unsigned long) GC_gc_no + 1);
+ GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
+ (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
+ (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
+# endif
+
+ /* Mark from all roots. */
+ /* Minimize junk left in my registers and on the stack */
+ GC_clear_a_few_frames();
+ GC_noop(0,0,0,0,0,0);
+ GC_initiate_partial();
+ for(i = 0;;i++) {
+ if (GC_mark_some()) break;
+ if (final) continue;
+ if ((i & 3) == 0) {
+ GET_TIME(current_time);
+ time_diff = MS_TIME_DIFF(current_time,start_time);
+ if (time_diff >= TIME_LIMIT) {
+ START_WORLD();
+# ifdef PRINTSTATS
+ GC_printf0("Abandoning stopped marking after ");
+ GC_printf2("%lu iterations and %lu msecs\n",
+ (unsigned long)i,
+ (unsigned long)time_diff);
+# endif
+ GC_deficit = i; /* Give the mutator a chance. */
+ return(FALSE);
+ }
+ }
+ }
+
+ GC_gc_no++;
+# ifdef PRINTSTATS
+ GC_printf2("Collection %lu reclaimed %ld bytes",
+ (unsigned long) GC_gc_no - 1,
+ (long)WORDS_TO_BYTES(GC_mem_found));
+ GC_printf1(" ---> heapsize = %lu bytes\n",
+ (unsigned long) GC_heapsize);
+ /* Printf arguments may be pushed in funny places. Clear the */
+ /* space. */
+ GC_printf0("");
+# endif
+
+ /* Check all debugged objects for consistency */
+ if (GC_debugging_started) {
+ (*GC_check_heap)();
+ }
+
+# ifdef PRINTTIMES
+ GET_TIME(current_time);
+ GC_printf1("World-stopped marking took %lu msecs\n",
+ MS_TIME_DIFF(current_time,start_time));
+# endif
+ START_WORLD();
+ return(TRUE);
+}
+
+
+/* Finish up a collection. Assumes lock is held, signals are disabled, */
+/* but the world is otherwise running. */
+void GC_finish_collection()
+{
+# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE finalize_time;
+ CLOCK_TYPE done_time;
+
+ GET_TIME(start_time);
+ finalize_time = start_time;
+# endif
+
+# ifdef GATHERSTATS
+ GC_mem_found = 0;
+# endif
+# ifdef FIND_LEAK
+ /* Mark all objects on the free list. All objects should be */
+ /* marked when we're done. */
+ {
+ register word size; /* current object size */
+ register ptr_t p; /* pointer to current object */
+ register struct hblk * h; /* pointer to block containing *p */
+ register hdr * hhdr;
+       register int word_no;           /* "index" of *p in *h */
+ int kind;
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ for (size = 1; size <= MAXOBJSZ; size++) {
+ for (p= GC_obj_kinds[kind].ok_freelist[size];
+ p != 0; p=obj_link(p)){
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ word_no = (((word *)p) - ((word *)h));
+ set_mark_bit_from_hdr(hhdr, word_no);
+ }
+ }
+ }
+ }
+ /* Check that everything is marked */
+ GC_start_reclaim(TRUE);
+# else
+
+ GC_finalize();
+# ifdef STUBBORN_ALLOC
+ GC_clean_changing_list();
+# endif
+
+# ifdef PRINTTIMES
+ GET_TIME(finalize_time);
+# endif
+
+ /* Clear free list mark bits, in case they got accidentally marked */
+ /* Note: HBLKPTR(p) == pointer to head of block containing *p */
+ /* Also subtract memory remaining from GC_mem_found count. */
+ /* Note that composite objects on free list are cleared. */
+ /* Thus accidentally marking a free list is not a problem; only */
+ /* objects on the list itself will be marked, and that's fixed here. */
+ {
+ register word size; /* current object size */
+ register ptr_t p; /* pointer to current object */
+ register struct hblk * h; /* pointer to block containing *p */
+ register hdr * hhdr;
+       register int word_no;           /* "index" of *p in *h */
+ int kind;
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ for (size = 1; size <= MAXOBJSZ; size++) {
+ for (p= GC_obj_kinds[kind].ok_freelist[size];
+ p != 0; p=obj_link(p)){
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ word_no = (((word *)p) - ((word *)h));
+ clear_mark_bit_from_hdr(hhdr, word_no);
+# ifdef GATHERSTATS
+ GC_mem_found -= size;
+# endif
+ }
+ }
+ }
+ }
+
+
+# ifdef PRINTSTATS
+ GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
+ (long)WORDS_TO_BYTES(GC_mem_found));
+# endif
+
+ /* Reconstruct free lists to contain everything not marked */
+ GC_start_reclaim(FALSE);
+
+# endif /* !FIND_LEAK */
+
+# ifdef PRINTSTATS
+ GC_printf2(
+ "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
+ (long)WORDS_TO_BYTES(GC_mem_found),
+ (unsigned long)GC_heapsize);
+ GC_printf2("%lu (atomic) + %lu (composite) bytes in use\n",
+ (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
+ (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
+# endif
+
+ /* Reset or increment counters for next cycle */
+ GC_words_allocd_before_gc += GC_words_allocd;
+ GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
+ GC_words_allocd = 0;
+ GC_words_wasted = 0;
+ GC_mem_freed = 0;
+
+# ifdef PRINTTIMES
+ GET_TIME(done_time);
+ GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
+ MS_TIME_DIFF(finalize_time,start_time),
+ MS_TIME_DIFF(done_time,finalize_time));
+# endif
+}
+
+/* Externally callable routine to invoke full, stop-world collection */
+void GC_gcollect()
+{
+ DCL_LOCK_STATE;
+
+ GC_invoke_finalizers();
+ DISABLE_SIGNALS();
+ LOCK();
+ if (!GC_is_initialized) GC_init_inner();
+ /* Minimize junk left in my registers */
+ GC_noop(0,0,0,0,0,0);
+ GC_gcollect_inner();
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_invoke_finalizers();
+}
+
+word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
+
+/*
+ * Use the chunk of memory starting at p, of size bytes, as part of the heap.
+ * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
+ */
+void GC_add_to_heap(p, bytes)
+struct hblk *p;
+word bytes;
+{
+ word words;
+
+ if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
+ ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
+ }
+ if (!GC_install_header(p)) {
+ /* This is extremely unlikely. Can't add it. This will */
+ /* almost certainly result in a 0 return from the allocator, */
+ /* which is entirely appropriate. */
+ return;
+ }
+ GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
+ GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
+ GC_n_heap_sects++;
+ words = BYTES_TO_WORDS(bytes - HDR_BYTES);
+ HDR(p) -> hb_sz = words;
+ GC_freehblk(p);
+ GC_heapsize += bytes;
+ if ((ptr_t)p <= GC_least_plausible_heap_addr
+ || GC_least_plausible_heap_addr == 0) {
+ GC_least_plausible_heap_addr = (ptr_t)p - sizeof(word);
+ /* Making it a little smaller than necessary prevents */
+ /* us from getting a false hit from the variable */
+ /* itself. There's some unintentional reflection */
+ /* here. */
+ }
+ if ((ptr_t)p + bytes >= GC_greatest_plausible_heap_addr) {
+ GC_greatest_plausible_heap_addr = (ptr_t)p + bytes;
+ }
+}
+
+ptr_t GC_least_plausible_heap_addr = (ptr_t)ONES;
+ptr_t GC_greatest_plausible_heap_addr = 0;
+
+ptr_t GC_max(x,y)
+ptr_t x, y;
+{
+ return(x > y? x : y);
+}
+
+ptr_t GC_min(x,y)
+ptr_t x, y;
+{
+ return(x < y? x : y);
+}
+
+/*
+ * This explicitly increases the size of the heap. It is used
+ * internally, but may also be invoked from GC_expand_hp by the user.
+ * The argument is in units of HBLKSIZE.
+ * Tiny values of n are rounded up.
+ * Returns FALSE on failure.
+ */
+bool GC_expand_hp_inner(n)
+word n;
+{
+ word bytes;
+ struct hblk * space;
+ word expansion_slop; /* Number of bytes by which we expect the */
+ /* heap to expand soon. */
+
+ if (n < MINHINCR) n = MINHINCR;
+ bytes = n * HBLKSIZE;
+ space = GET_MEM(bytes);
+ if( space == 0 ) {
+ return(FALSE);
+ }
+# ifdef PRINTSTATS
+ GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
+ (unsigned long)bytes,
+ (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
+# ifdef UNDEFINED
+ GC_printf1("Root size = %lu\n", GC_root_size);
+ GC_print_block_list(); GC_print_hblkfreelist();
+ GC_printf0("\n");
+# endif
+# endif
+ expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd());
+ if (5 * HBLKSIZE * MAXHINCR > expansion_slop) {
+ expansion_slop = 5 * HBLKSIZE * MAXHINCR;
+ }
+    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
+        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
+ /* Assume the heap is growing up */
+ GC_greatest_plausible_heap_addr =
+ GC_max(GC_greatest_plausible_heap_addr,
+ (ptr_t)space + bytes + expansion_slop);
+ } else {
+ /* Heap is growing down */
+ GC_least_plausible_heap_addr =
+ GC_min(GC_least_plausible_heap_addr,
+ (ptr_t)space - expansion_slop);
+ }
+ GC_prev_heap_addr = GC_last_heap_addr;
+ GC_last_heap_addr = (ptr_t)space;
+ GC_add_to_heap(space, bytes);
+ return(TRUE);
+}
+
+/* Really returns a bool, but it's externally visible, so that's clumsy. */
+/* Argument is in bytes. */
+int GC_expand_hp(bytes)
+size_t bytes;
+{
+ int result;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ if (!GC_is_initialized) GC_init_inner();
+ result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
+bool GC_collect_or_expand(needed_blocks)
+word needed_blocks;
+{
+ static int count = 0; /* How many failures? */
+
+ if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
+ GC_gcollect_inner();
+ } else {
+ word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
+ + needed_blocks;
+
+ if (blocks_to_get > MAXHINCR) {
+ if (needed_blocks > MAXHINCR) {
+ blocks_to_get = needed_blocks;
+ } else {
+ blocks_to_get = MAXHINCR;
+ }
+ }
+ if (!GC_expand_hp_inner(blocks_to_get)
+ && !GC_expand_hp_inner(needed_blocks)) {
+ if (count++ < 5) {
+ WARN("Out of Memory! Trying to continue ...\n");
+ GC_gcollect_inner();
+ } else {
+ WARN("Out of Memory! Returning NIL!\n");
+ return(FALSE);
+ }
+ }
+ }
+ return(TRUE);
+}
+
+/*
+ * Make sure the object free list for sz is not empty.
+ * Return a pointer to the first object on the free list.
+ * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
+ * Assumes we hold the allocator lock and signals are disabled.
+ *
+ */
+ptr_t GC_allocobj(sz, kind)
+word sz;
+int kind;
+{
+ register ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);
+
+ if (sz == 0) return(0);
+
+ while (*flh == 0) {
+ /* Do our share of marking work */
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little(1);
+ /* Sweep blocks for objects of this size */
+ GC_continue_reclaim(sz, kind);
+ if (*flh == 0) {
+ GC_new_hblk(sz, kind);
+ }
+ if (*flh == 0) {
+ if (!GC_collect_or_expand((word)1)) return(0);
+ }
+ }
+
+ return(*flh);
+}
--- /dev/null
+ # $Id: alpha_mach_dep.s,v 1.2 1993/01/18 22:54:51 dosser Exp $
+
+# define call_push(x) lda $16, 0(x); jsr $26, GC_push_one
+
+ .text
+ .align 4
+ .globl GC_push_regs
+ .ent GC_push_regs 2
+GC_push_regs:
+ ldgp $gp, 0($27)
+ lda $sp, -32($sp)
+ stq $26, 8($sp)
+ .mask 0x04000000, -8
+ .frame $sp, 16, $26, 0
+
+ # call_push($0) # expression eval and int func result
+
+ # call_push($1) # temp regs - not preserved cross calls
+ # call_push($2)
+ # call_push($3)
+ # call_push($4)
+ # call_push($5)
+ # call_push($6)
+ # call_push($7)
+ # call_push($8)
+
+ call_push($9) # Saved regs
+ call_push($10)
+ call_push($11)
+ call_push($12)
+ call_push($13)
+ call_push($14)
+
+ call_push($15) # frame ptr or saved reg
+
+ # call_push($16) # argument regs - not preserved cross calls
+ # call_push($17)
+ # call_push($18)
+ # call_push($19)
+ # call_push($20)
+ # call_push($21)
+
+ # call_push($22) # temp regs - not preserved cross calls
+ # call_push($23)
+ # call_push($24)
+ # call_push($25)
+
+ # call_push($26) # return address - expression eval
+ # call_push($27) # procedure value or temporary reg
+	# call_push($28)	# assembler temp - not preserved
+ call_push($29) # Global Pointer
+ # call_push($30) # Stack Pointer
+
+ ldgp $gp, 0($26)
+ ldq $26, 8($sp)
+ lda $sp, 32($sp)
+ ret $31, ($26), 1
+ .end GC_push_regs
--- /dev/null
+This is an ASCII diagram of the data structure used to check pointer
+validity. It was provided by Dave Barrett <barrett@asgard.cs.colorado.edu>,
+and should be of use to others attempting to understand the code.
+The data structure in GC4.X is essentially the same. -HB
+
+
+
+
+ Data Structure used by GC_base in gc3.7:
+ 21-Apr-94
+
+
+
+
+ 63 LOG_TOP_SZ[11] LOG_BOTTOM_SZ[10] LOG_HBLKSIZE[13]
+ +------------------+----------------+------------------+------------------+
+ p:| | TL_HASH(hi) | | HBLKDISPL(p) |
+ +------------------+----------------+------------------+------------------+
+ \-----------------------HBLKPTR(p)-------------------/
+ \------------hi-------------------/
+ \______ ________/ \________ _______/ \________ _______/
+ V V V
+ | | |
+ GC_top_index[] | | |
+ --- +--------------+ | | |
+ ^ | | | | |
+ | | | | | |
+ TOP +--------------+<--+ | |
+ _SZ +-<| [] | * | |
+(items)| +--------------+ if 0 < bi< HBLKSIZE | |
+ | | | | then large object | |
+ | | | | starts at the bi'th | |
+ v | | | HBLK before p. | i |
+ --- | +--------------+ | (word- |
+ v | aligned) |
+ bi= |GET_BI(p){->hash_link}->key==hi | |
+ v | |
+ | (bottom_index) \ scratch_alloc'd | |
+ | ( struct bi ) / by get_index() | |
+ --- +->+--------------+ | |
+ ^ | | | |
+ ^ | | | |
+ BOTTOM | | ha=GET_HDR_ADDR(p) | |
+_SZ(items)+--------------+<----------------------+ +-------+
+ | +--<| index[] | |
+ | | +--------------+ GC_obj_map: v
+ | | | | from / +-+-+-----+-+-+-+-+ ---
+ v | | | GC_add < 0| | | | | | | | ^
+ --- | +--------------+ _map_entry \ +-+-+-----+-+-+-+-+ |
+ | | asc_link | +-+-+-----+-+-+-+-+ MAXOBJSZ
+ | +--------------+ +-->| | | j | | | | | +1
+ | | key | | +-+-+-----+-+-+-+-+ |
+ | +--------------+ | +-+-+-----+-+-+-+-+ |
+ | | hash_link | | | | | | | | | | v
+ | +--------------+ | +-+-+-----+-+-+-+-+ ---
+ | | |<--MAX_OFFSET--->|
+ | | (bytes)
+HDR(p)| GC_find_header(p) | |<--MAP_ENTRIES-->|
+ | \ from | =HBLKSIZE/WORDSZ
+ | (hdr) (struct hblkhdr) / alloc_hdr() | (1024 on Alpha)
+ +-->+----------------------+ | (8/16 bits each)
+GET_HDR(p)| word hb_sz (words) | |
+ +----------------------+ |
+ | struct hblk *hb_next | |
+ +----------------------+ |
+ |mark_proc hb_mark_proc| |
+ +----------------------+ |
+ | char * hb_map |>-------------+
+ +----------------------+
+ | ushort hb_obj_kind |
+ +----------------------+
+ | hb_last_reclaimed |
+ --- +----------------------+
+ ^ | |
+ MARK_BITS| hb_marks[] | *if hdr is free, hb_sz + DISCARD_WORDS
+_SZ(words)| | is the size of a heap chunk (struct hblk)
+ v | | of at least MININCR*HBLKSIZE bytes (below),
+ --- +----------------------+ otherwise, size of each object in chunk.
+
+Dynamic data structures above are interleaved throughout the heap in blocks
+of size MININCR * HBLKSIZE bytes, allocated by gc_scratch_alloc, and cannot
+be freed; free lists are used (e.g. alloc_hdr). HBLKs below are collected.
+
+ (struct hblk)
+ --- +----------------------+ < HBLKSIZE --- --- DISCARD_
+ ^ |garbage[DISCARD_WORDS]| aligned ^ ^ HDR_BYTES WORDS
+ | | | | v (bytes) (words)
+ | +-----hb_body----------+ < WORDSZ | --- ---
+ | | | aligned | ^ ^
+ | | Object 0 | | hb_sz |
+ | | | i |(word- (words)|
+ | | | (bytes)|aligned) v |
+ | + - - - - - - - - - - -+ --- | --- |
+ | | | ^ | ^ |
+ n * | | j (words) | hb_sz BODY_SZ
+ HBLKSIZE | Object 1 | v v | (words)
+ (bytes) | |--------------- v MAX_OFFSET
+ | + - - - - - - - - - - -+ --- (bytes)
+ | | | !All_INTERIOR_PTRS ^ |
+ | | | sets j only for hb_sz |
+ | | Object N | valid object offsets. | |
+ v | | All objects WORDSZ v v
+ --- +----------------------+ aligned. --- ---
+
+DISCARD_WORDS is normally zero. Indeed the collector has not been tested
+with another value in ages.
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:56 pm PDT */
+# include "gc_priv.h"
+
+/*
+ * We maintain several hash tables of hblks that have had false hits.
+ * Each contains one bit per hash bucket; if any page in the bucket
+ * has had a false hit, we assume that all of them have.
+ * See the definition of page_hash_table in gc_private.h.
+ * False hits from the stack(s) are much more dangerous than false hits
+ * from elsewhere, since the former can pin a large object that spans the
+ * block, even though it does not start on the dangerous block.
+ */
+
+/*
+ * Externally callable routines are:
+ *
+ * GC_add_to_black_list_normal
+ * GC_add_to_black_list_stack
+ * GC_promote_black_lists
+ * GC_is_black_listed
+ *
+ * All require that the allocator lock is held.
+ */
+
+/* Pointers to individual tables. We replace one table by another by */
+/* switching these pointers. */
+word * GC_old_normal_bl;
+ /* Nonstack false references seen at last full */
+ /* collection. */
+word * GC_incomplete_normal_bl;
+ /* Nonstack false references seen since last */
+ /* full collection. */
+word * GC_old_stack_bl;
+word * GC_incomplete_stack_bl;
+
+void GC_clear_bl();
+
+void GC_bl_init()
+{
+# ifndef ALL_INTERIOR_POINTERS
+ GC_old_normal_bl = (word *)
+ GC_scratch_alloc((word)(sizeof (page_hash_table)));
+ GC_incomplete_normal_bl = (word *)GC_scratch_alloc
+ ((word)(sizeof(page_hash_table)));
+ if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
+ GC_err_printf0("Insufficient memory for black list\n");
+ EXIT();
+ }
+ GC_clear_bl(GC_old_normal_bl);
+ GC_clear_bl(GC_incomplete_normal_bl);
+# endif
+ GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(page_hash_table)));
+ GC_incomplete_stack_bl = (word *)GC_scratch_alloc
+ ((word)(sizeof(page_hash_table)));
+ if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
+ GC_err_printf0("Insufficient memory for black list\n");
+ EXIT();
+ }
+ GC_clear_bl(GC_old_stack_bl);
+ GC_clear_bl(GC_incomplete_stack_bl);
+}
+
+void GC_clear_bl(doomed)
+word *doomed;
+{
+ BZERO(doomed, sizeof(page_hash_table));
+}
+
+/* Signal the completion of a collection. Turn the incomplete black */
+/* lists into new black lists, etc. */
+void GC_promote_black_lists()
+{
+ word * very_old_normal_bl = GC_old_normal_bl;
+ word * very_old_stack_bl = GC_old_stack_bl;
+
+ GC_old_normal_bl = GC_incomplete_normal_bl;
+ GC_old_stack_bl = GC_incomplete_stack_bl;
+# ifndef ALL_INTERIOR_POINTERS
+ GC_clear_bl(very_old_normal_bl);
+# endif
+ GC_clear_bl(very_old_stack_bl);
+ GC_incomplete_normal_bl = very_old_normal_bl;
+ GC_incomplete_stack_bl = very_old_stack_bl;
+}
+
+# ifndef ALL_INTERIOR_POINTERS
+/* P is not a valid pointer reference, but it falls inside */
+/* the plausible heap bounds. */
+/* Add it to the normal incomplete black list if appropriate. */
+void GC_add_to_black_list_normal(p)
+word p;
+{
+ if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
+ {
+ register int index = PHT_HASH(p);
+
+ if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
+# ifdef PRINTBLACKLIST
+ if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
+ GC_printf1("Black listing (normal) 0x%lx\n",
+ (unsigned long) p);
+ }
+# endif
+ set_pht_entry_from_index(GC_incomplete_normal_bl, index);
+ } /* else this is probably just an interior pointer to an allocated */
+ /* object, and isn't worth black listing. */
+ }
+}
+# endif
+
+/* And the same for false pointers from the stack. */
+void GC_add_to_black_list_stack(p)
+word p;
+{
+ register int index = PHT_HASH(p);
+
+ if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
+# ifdef PRINTBLACKLIST
+ if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
+ GC_printf1("Black listing (stack) 0x%lx\n",
+ (unsigned long)p);
+ }
+# endif
+ set_pht_entry_from_index(GC_incomplete_stack_bl, index);
+ }
+}
+
+/*
+ * Is the block starting at h of size len bytes black listed? If so,
+ * return the address of the next plausible r such that (r, len) might not
+ * be black listed. (r may not actually be in the heap. We guarantee only
+ * that every smaller value of r after h is also black listed.)
+ * If (h,len) is not black listed, return 0.
+ * Knows about the structure of the black list hash tables.
+ */
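+/* (For example, GC_allochblk skips black-listed prefixes of candidate */
+/* free blocks by calling this repeatedly, advancing to the returned   */
+/* block each time, until it returns 0.)                                */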
+struct hblk * GC_is_black_listed(h, len)
+struct hblk * h;
+word len;
+{
+ register int index = PHT_HASH((word)h);
+ register word i;
+ word nblocks = divHBLKSZ(len);
+
+# ifndef ALL_INTERIOR_POINTERS
+ if (get_pht_entry_from_index(GC_old_normal_bl, index)
+ || get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
+ return(h+1);
+ }
+# endif
+
+ for (i = 0; ; ) {
+ if (GC_old_stack_bl[divWORDSZ(index)] == 0
+ && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
+ /* An easy case */
+ i += WORDSZ - modWORDSZ(index);
+ } else {
+ if (get_pht_entry_from_index(GC_old_stack_bl, index)
+ || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
+ return(h+i+1);
+ }
+ i++;
+ }
+ if (i >= nblocks) break;
+ index = PHT_HASH((word)(h+i));
+ }
+ return(0);
+}
+
--- /dev/null
+#!/bin/sh
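+# Run a program and symbolize the saved call chain ("##PC##=" lines)
+# in its output by feeding each PC value to adb on the binary ($1).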
+GC_DEBUG=1
+export GC_DEBUG
+$* 2>&1 | awk '{print "0x3e=c\""$0"\""};/^\t##PC##=/ {if ($2 != 0) {print $2"?i"}}' | adb $1 | sed "s/^ >/>/"
--- /dev/null
+/*
+ * Copyright (c) 1992-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:07 pm PDT */
+# ifdef CHECKSUMS
+
+# include "gc_priv.h"
+
+/* This is debugging code intended to verify the results of dirty bit */
+/* computations. Works only in a single threaded environment. */
+/* We assume that stubborn objects are changed only when they are */
+/* enabled for writing. (Certain kinds of writing are actually */
+/* safe under other conditions.) */
+# define NSUMS 2000
+
+# define OFFSET 100000
+
+typedef struct {
+ bool new_valid;
+ word old_sum;
+ word new_sum;
+ struct hblk * block; /* Block to which this refers + OFFSET */
+                                /* to hide it from collector.          */
+} page_entry;
+
+page_entry GC_sums [NSUMS];
+
+word GC_checksum(h)
+struct hblk *h;
+{
+ register word *p = (word *)h;
+ register word *lim = (word *)(h+1);
+ register word result = 0;
+
+ while (p < lim) {
+ result += *p++;
+ }
+ return(result);
+}
+
+# ifdef STUBBORN_ALLOC
+/* Check whether a stubborn object from the given block appears on */
+/* the appropriate free list. */
+bool GC_on_free_list(h)
+struct hblk *h;
+{
+ register hdr * hhdr = HDR(h);
+ register int sz = hhdr -> hb_sz;
+ ptr_t p;
+
+ if (sz > MAXOBJSZ) return(FALSE);
+ for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
+ if (HBLKPTR(p) == h) return(TRUE);
+ }
+ return(FALSE);
+}
+# endif
+
+int GC_n_dirty_errors;
+int GC_n_changed_errors;
+int GC_n_clean;
+int GC_n_dirty;
+
+void GC_update_check_page(h, index)
+struct hblk *h;
+int index;
+{
+ page_entry *pe = GC_sums + index;
+ register hdr * hhdr = HDR(h);
+
+ if (pe -> block != 0 && pe -> block != h + OFFSET) ABORT("goofed");
+ pe -> old_sum = pe -> new_sum;
+ pe -> new_sum = GC_checksum(h);
+ if (GC_page_was_dirty(h)) {
+ GC_n_dirty++;
+ } else {
+ GC_n_clean++;
+ }
+ if (pe -> new_valid && pe -> old_sum != pe -> new_sum) {
+ if (!GC_page_was_dirty(h)) {
+ /* Set breakpoint here */GC_n_dirty_errors++;
+ }
+# ifdef STUBBORN_ALLOC
+ if (!IS_FORWARDING_ADDR_OR_NIL(hhdr)
+ && hhdr -> hb_map != GC_invalid_map
+ && hhdr -> hb_obj_kind == STUBBORN
+ && !GC_page_was_changed(h)
+ && !GC_on_free_list(h)) {
+ /* if GC_on_free_list(h) then reclaim may have touched it */
+ /* without any allocations taking place. */
+ /* Set breakpoint here */GC_n_changed_errors++;
+ }
+# endif
+ }
+ pe -> new_valid = TRUE;
+ pe -> block = h + OFFSET;
+}
+
+/* Should be called immediately after GC_read_dirty and GC_read_changed. */
+void GC_check_dirty()
+{
+ register int index;
+ register int i;
+ register struct hblk *h;
+ register ptr_t start;
+
+ GC_n_dirty_errors = 0;
+ GC_n_changed_errors = 0;
+ GC_n_clean = 0;
+ GC_n_dirty = 0;
+
+ index = 0;
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ start = GC_heap_sects[i].hs_start;
+ for (h = (struct hblk *)start;
+ h < (struct hblk *)(start + GC_heap_sects[i].hs_bytes);
+ h++) {
+ GC_update_check_page(h, index);
+ index++;
+ if (index >= NSUMS) goto out;
+ }
+ }
+out:
+ GC_printf2("Checked %lu clean and %lu dirty pages\n",
+ (unsigned long) GC_n_clean, (unsigned long) GC_n_dirty);
+ if (GC_n_dirty_errors > 0) {
+ GC_printf1("Found %lu dirty bit errors\n",
+ (unsigned long)GC_n_dirty_errors);
+ }
+ if (GC_n_changed_errors > 0) {
+ GC_printf1("Found %lu changed bit errors\n",
+ (unsigned long)GC_n_changed_errors);
+ }
+}
+
+# else
+
+extern int GC_quiet;
+ /* ANSI C doesn't allow translation units to be empty. */
+ /* So we guarantee this one is nonempty. */
+
+# endif /* CHECKSUMS */
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:11 pm PDT */
+
+#ifndef CONFIG_H
+
+# define CONFIG_H
+
+/* Machine dependent parameters. Some tuning parameters can be found */
+/* near the top of gc_private.h. */
+
+/* Machine specific parts contributed by various people. See README file. */
+
+/* Determine the machine type: */
+# if defined(sun) && defined(mc68000)
+# define M68K
+# define SUNOS4
+# define mach_type_known
+# endif
+# if defined(hp9000s300)
+# define M68K
+# define HP
+# define mach_type_known
+# endif
+# if defined(vax)
+# define VAX
+# ifdef ultrix
+# define ULTRIX
+# else
+# define BSD
+# endif
+# define mach_type_known
+# endif
+# if defined(mips)
+# define MIPS
+# ifdef ultrix
+# define ULTRIX
+# else
+# ifdef _SYSTYPE_SVR4
+# define IRIX5
+# else
+# define RISCOS /* or IRIX 4.X */
+# endif
+# endif
+# define mach_type_known
+# endif
+# if defined(sequent) && defined(i386)
+# define I386
+# define SEQUENT
+# define mach_type_known
+# endif
+# if defined(sun) && defined(i386)
+# define I386
+# define SUNOS5
+# define mach_type_known
+# endif
+# if defined(__OS2__) && defined(__32BIT__)
+# define I386
+# define OS2
+# define mach_type_known
+# endif
+# if defined(ibm032)
+# define RT
+# define mach_type_known
+# endif
+# if defined(sun) && defined(sparc)
+# define SPARC
+ /* Test for SunOS 5.x */
+# include <errno.h>
+# ifdef ECHRNG
+# define SUNOS5
+# else
+# define SUNOS4
+# endif
+# define mach_type_known
+# endif
+# if defined(_IBMR2)
+# define RS6000
+# define mach_type_known
+# endif
+# if defined(SCO)
+# define I386
+# define SCO
+# define mach_type_known
+/* --> incompletely implemented */
+# endif
+# if defined(_AUX_SOURCE)
+# define M68K
+# define SYSV
+# define mach_type_known
+# endif
+# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1)
+# define HP_PA
+# define mach_type_known
+# endif
+# if defined(linux) && defined(i386)
+# define I386
+# define LINUX
+# define mach_type_known
+# endif
+# if defined(__alpha)
+# define ALPHA
+# define mach_type_known
+# endif
+# if defined(_AMIGA)
+# define AMIGA
+# define M68K
+# define mach_type_known
+# endif
+# if defined(NeXT) && defined(mc68000)
+# define M68K
+# define NEXT
+# define mach_type_known
+# endif
+# if defined(__FreeBSD__) && defined(i386)
+# define I386
+# define FREEBSD
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(i386)
+# define I386
+# define NETBSD
+# define mach_type_known
+# endif
+# if defined(bsdi) && defined(i386)
+# define I386
+# define BSDI
+# define mach_type_known
+# endif
+# if !defined(mach_type_known) && defined(__386BSD__)
+# define I386
+# define THREE86BSD
+# define mach_type_known
+# endif
+# if defined(_CX_UX) && defined(_M88K)
+# define M88K
+# define CX_UX
+# define mach_type_known
+# endif
+# if defined(_MSDOS) && ((_M_IX86 == 300) || (_M_IX86 == 400))
+# define I386
+# define MSWIN32 /* or Win32s */
+# define mach_type_known
+# endif
+
+/* Feel free to add more clauses here */
+
+/* Or manually define the machine type here. A machine type is */
+/* characterized by the architecture. Some machine types are */
+/* further subdivided by OS; in that case, we use the macros */
+/* ULTRIX, RISCOS, and BSD to distinguish among them. */
+/* Note that SGI IRIX is treated identically to RISCOS. */
+/* SYSV on an M68K actually means A/UX. */
+/* The distinction in these cases is usually the stack starting address. */
+# ifndef mach_type_known
+ --> unknown machine type
+# endif
+ /* Mapping is: M68K ==> Motorola 680X0 */
+ /* (SUNOS4,HP,NEXT, and SYSV (A/UX), */
+ /* and AMIGA variants) */
+ /* I386 ==> Intel 386 */
+ /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
+ /* FREEBSD, THREE86BSD, MSWIN32, */
+ /* BSDI, SUNOS5 variants) */
+ /* NS32K ==> Encore Multimax */
+ /* MIPS ==> R2000 or R3000 */
+ /* (RISCOS, ULTRIX variants) */
+ /* VAX ==> DEC VAX */
+ /* (BSD, ULTRIX variants) */
+ /* RS6000 ==> IBM RS/6000 AIX3.1 */
+ /* RT ==> IBM PC/RT */
+ /* HP_PA ==> HP9000/700 & /800 */
+ /* HP/UX */
+ /* SPARC ==> SPARC under SunOS */
+ /* (SUNOS4, SUNOS5 variants) */
+ /* ALPHA ==> DEC Alpha OSF/1 */
+ /* M88K ==> Motorola 88XX0 */
+ /* (CX/UX so far) */
+
+
+/*
+ * For each architecture and OS, the following need to be defined:
+ *
+ * CPP_WORD_SZ is a simple integer constant representing the word size
+ * in bits. We assume byte addressability, where a byte has 8 bits.
+ * We also assume CPP_WORD_SZ is either 32 or 64.
+ * (We care about the length of pointers, not hardware
+ * bus widths. Thus a 64 bit processor with a C compiler that uses
+ * 32 bit pointers should use CPP_WORD_SZ of 32, not 64. Default is 32.)
+ *
+ * MACH_TYPE is a string representation of the machine type.
+ * OS_TYPE is analogous for the OS.
+ *
+ * ALIGNMENT is the largest N such that
+ * all pointers are guaranteed to be aligned on N byte boundaries.
+ * Defining it to be 1 will always work, but may perform poorly.
+ *
+ * DATASTART is the beginning of the data segment.
+ * On UNIX systems, the collector will scan the area between DATASTART
+ * and &end for root pointers.
+ *
+ * STACKBOTTOM is the cool end of the stack, which is usually the
+ * highest address in the stack.
+ * Under PCR or OS/2, we have other ways of finding thread stacks.
+ * For each machine, the following should be done:
+ * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
+ * 2) define exactly one of
+ * STACKBOTTOM (should be defined to be an expression)
+ * HEURISTIC1
+ * HEURISTIC2
+ * If either of the last two macros is defined, then STACKBOTTOM is computed
+ * during collector startup using one of the following two heuristics:
+ * HEURISTIC1: Take an address inside GC_init's frame, and round it up to
+ * the next multiple of 16 MB.
+ * HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
+ * in small steps (decrement if STACK_GROWS_UP), and read the value
+ * at each location. Remember the value when the first
+ * Segmentation violation or Bus error is signalled. Round that
+ * to the nearest plausible page boundary, and use that instead
+ * of STACKBOTTOM.
+ *
+ * If no expression for STACKBOTTOM can be found, and neither of the above
+ * heuristics is usable, the collector can still be used with all of the above
+ * undefined, provided one of the following is done:
+ * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s)
+ * without reference to STACKBOTTOM. This is appropriate for use in
+ * conjunction with thread packages, since there will be multiple stacks.
+ * (Allocating thread stacks in the heap, and treating them as ordinary
+ * heap data objects is also possible as a last resort. However, this is
+ * likely to introduce significant amounts of excess storage retention
+ * unless the dead parts of the thread stacks are periodically cleared.)
+ * 2) Client code may set GC_stackbottom before calling any GC_ routines.
+ * If the author of the client code controls the main program, this is
+ * easily accomplished by introducing a new main program, setting
+ * GC_stackbottom to the address of a local variable, and then calling
+ * the original main program. The new main program would read something
+ * like:
+ *
+ * # include "gc_private.h"
+ *
+ * main(argc, argv, envp)
+ * int argc;
+ * char **argv, **envp;
+ * {
+ * int dummy;
+ *
+ * GC_stackbottom = (ptr_t)(&dummy);
+ * return(real_main(argc, argv, envp));
+ * }
+ *
+ *
+ * Each architecture may also define the style of virtual dirty bit
+ * implementation to be used:
+ * MPROTECT_VDB: Write protect the heap and catch faults.
+ * PROC_VDB: Use the SVR4 /proc primitives to read dirty bits.
+ *
+ * An architecture may define DYNAMIC_LOADING if dyn_load.c
+ * defines GC_register_dynamic_libraries() for the architecture.
+ */
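+
+/* As an illustration only: a minimal clause for a hypothetical */
+/* 32-bit UNIX-like port (MYARCH is a placeholder, not one of the */
+/* supported targets above) would supply the definitions described */
+/* above, e.g.: */
+/* */
+/* # ifdef MYARCH */
+/* # define MACH_TYPE "MYARCH" */
+/* # define ALIGNMENT 4 */
+/* extern char etext; */
+/* # define DATASTART ((ptr_t)(&etext)) */
+/* # define HEURISTIC2 */
+/* # endif */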
+
+
+# ifdef M68K
+# define MACH_TYPE "M68K"
+# define ALIGNMENT 2
+# ifdef SUNOS4
+# define OS_TYPE "SUNOS4"
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ffff) & ~0x1ffff))
+# define HEURISTIC1 /* differs */
+# define DYNAMIC_LOADING
+# endif
+# ifdef HP
+# define OS_TYPE "HP"
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t) 0xffeffffc)
+ /* empirically determined. seems to work. */
+# endif
+# ifdef SYSV
+# define OS_TYPE "SYSV"
+ extern etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0x1fff))
+ /* This only works for shared-text binaries with magic number 0413.
+ The other sorts of SysV binaries put the data at the end of the text,
+ in which case the default of &etext would work. Unfortunately,
+ handling both would require having the magic-number available.
+ -- Parag
+ */
+# define STACKBOTTOM ((ptr_t)0xFFFFFFFE)
+ /* The stack starts at the top of memory, but */
+ /* 0x0 cannot be used as setjmp_test complains */
+ /* that the stack direction is incorrect. Two */
+ /* bytes down from 0x0 should be safe enough. */
+ /* --Parag */
+# endif
+# ifdef AMIGA
+# define OS_TYPE "AMIGA"
+ /* STACKBOTTOM and DATASTART handled specially */
+ /* in os_dep.c */
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0x4000000)
+# endif
+# endif
+
+# ifdef VAX
+# define MACH_TYPE "VAX"
+# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# ifdef BSD
+# define OS_TYPE "BSD"
+# define HEURISTIC1
+ /* HEURISTIC2 may be OK, but it's hard to test. */
+# endif
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# define STACKBOTTOM ((ptr_t) 0x7fffc800)
+# endif
+# endif
+
+# ifdef RT
+# define MACH_TYPE "RT"
+# define ALIGNMENT 4
+# define DATASTART ((ptr_t) 0x10000000)
+# define STACKBOTTOM ((ptr_t) 0x1fffd800)
+# endif
+
+# ifdef SPARC
+# define MACH_TYPE "SPARC"
+# define ALIGNMENT 4 /* Required by hardware */
+ extern int etext;
+# ifdef SUNOS5
+# define OS_TYPE "SUNOS5"
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x10003) & ~0x3))
+# define PROC_VDB
+# endif
+# ifdef SUNOS4
+# define OS_TYPE "SUNOS4"
+ /* [If you have a weak stomach, don't read this.] */
+ /* We would like to use: */
+/* # define DATASTART ((ptr_t)((((word) (&etext)) + 0x1fff) & ~0x1fff)) */
+ /* This fails occasionally, due to an ancient, but very */
+ /* persistent ld bug. &etext is set 32 bytes too high. */
+ /* We instead read the text segment size from the a.out */
+ /* header, which happens to be mapped into our address space */
+ /* at the start of the text segment. The detective work here */
+ /* was done by Robert Ehrlich, Manuel Serrano, and Bernard */
+ /* Serpette of INRIA. */
+ /* This assumes ZMAGIC, i.e. demand-loadable executables. */
+# define DATASTART ((ptr_t)(*(int *)0x2004+0x2000))
+# define MPROTECT_VDB
+# endif
+# define HEURISTIC1
+# define DYNAMIC_LOADING
+# endif
+
+# ifdef I386
+# define MACH_TYPE "I386"
+# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
+# ifdef SEQUENT
+# define OS_TYPE "SEQUENT"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t) 0x3ffff000)
+# endif
+# ifdef SUNOS5
+# define OS_TYPE "SUNOS5"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1003) & ~0x3))
+ extern int _start();
+# define STACKBOTTOM ((ptr_t)(&_start))
+# define PROC_VDB
+# endif
+# ifdef SCO
+# define OS_TYPE "SCO"
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0xfff))
+# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# endif
+# ifdef OS2
+# define OS_TYPE "OS2"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. OS2 actually has the right */
+ /* system call! */
+# endif
+# ifdef MSWIN32
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+# define MPROTECT_VDB
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# endif
+# ifdef THREE86BSD
+# define OS_TYPE "THREE86BSD"
+# endif
+# ifdef BSDI
+# define OS_TYPE "BSDI"
+# endif
+# if defined(FREEBSD) || defined(NETBSD) \
+ || defined(THREE86BSD) || defined(BSDI)
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
+# endif
+
+# ifdef NS32K
+# define MACH_TYPE "NS32K"
+# define ALIGNMENT 4
+ extern char **environ;
+# define DATASTART ((ptr_t)(&environ))
+ /* hideous kludge: environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
+# endif
+
+# ifdef MIPS
+# define MACH_TYPE "MIPS"
+# define ALIGNMENT 4 /* Required by hardware */
+# define DATASTART ((ptr_t)0x10000000)
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of junk */
+# define HEURISTIC2
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# endif
+# ifdef RISCOS
+# define OS_TYPE "RISCOS"
+# endif
+# ifdef IRIX5
+# define OS_TYPE "IRIX5"
+# define MPROTECT_VDB
+# define DYNAMIC_LOADING
+# endif
+# endif
+
+# ifdef RS6000
+# define MACH_TYPE "RS6000"
+# define ALIGNMENT 4
+# define DATASTART ((ptr_t)0x20000000)
+# define STACKBOTTOM ((ptr_t)0x2ff80000)
+# endif
+
+# ifdef HP_PA
+# define MACH_TYPE "HP_PA"
+# define ALIGNMENT 4
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# define HEURISTIC2
+# define STACK_GROWS_UP
+# endif
+
+# ifdef ALPHA
+# define MACH_TYPE "ALPHA"
+# define ALIGNMENT 8
+# define DATASTART ((ptr_t) 0x140000000)
+# define HEURISTIC2
+# define CPP_WORDSZ 64
+# define MPROTECT_VDB
+# endif
+
+# ifdef M88K
+# define MACH_TYPE "M88K"
+# define ALIGNMENT 4
+ extern char etext;
+# define DATASTART ((ptr_t)((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000))
+# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
+# endif
+
+# ifndef STACK_GROWS_UP
+# define STACK_GROWS_DOWN
+# endif
+
+# ifndef CPP_WORDSZ
+# define CPP_WORDSZ 32
+# endif
+
+# ifndef OS_TYPE
+# define OS_TYPE ""
+# endif
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
+ --> bad word size
+# endif
+
+# ifdef PCR
+# undef DYNAMIC_LOADING
+# undef STACKBOTTOM
+# undef HEURISTIC1
+# undef HEURISTIC2
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# define PCR_VDB
+# endif
+
+# ifdef SRC_M3
+/* Postponed for now. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# ifdef SMALL_CONFIG
+/* Presumably not worth the space it takes. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
+# define DEFAULT_VDB
+# endif
+
+# endif
--- /dev/null
+Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program
+for any purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is granted,
+provided the above notices are retained, and a notice that the code was
+modified is included with the above copyright notice.
+
+Please send bug reports to Hans-J. Boehm (boehm@parc.xerox.com).
+
+This is a string package that uses a tree-based representation.
+See cord.h for a description of the functions provided. Ec.h describes
+"extensible cords", which are essentially output streams that write
+to a cord. These allow for efficient construction of cords without
+requiring a bound on the size of a cord.
+
+de.c is a very dumb text editor that illustrates the use of cords.
+It maintains a list of file versions. Each version is simply a
+cord representing the file contents. Nonetheless, standard
+editing operations are efficient, even on very large files.
+(Its 3 line "user manual" can be obtained by invoking it without
+arguments. Note that ^R^N and ^R^P move the cursor by
+almost a screen. It does not understand tabs, which will show
+up as highlighred "I"s. Use the UNIX "expand" program first.)
+To build the editor, type "make cord/de" in the gc directory.
+
+This package assumes an ANSI C compiler such as gcc. It will
+not compile with an old-style K&R compiler.
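+
+As a small illustration (a sketch, not part of the distribution), a
+client program that links against gc.a with the cord objects added
+might look like:
+
+ #include "cord.h"
+ #include <stdio.h>
+
+ int main()
+ {
+ CORD s = CORD_EMPTY;
+ int i;
+
+ /* Each concatenation is cheap, independent of the current length. */
+ for (i = 0; i < 1000; i++) s = CORD_cat(s, "0123456789");
+ printf("%lu\n", (unsigned long)CORD_len(s)); /* prints 10000 */
+ return 0;
+ }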
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/* Boehm, May 19, 1994 2:22 pm PDT */
+
+/*
+ * Cords are immutable character strings. A number of operations
+ * on long cords are much more efficient than their string.h counterparts.
+ * In particular, concatenation takes constant time independent of the length
+ * of the arguments. (Cords are represented as trees, with internal
+ * nodes representing concatenation and leaves consisting of either C
+ * strings or a functional description of the string.)
+ *
+ * The following are reasonable applications of cords. They would perform
+ * unacceptably if C strings were used:
+ * - A compiler that produces assembly language output by repeatedly
+ * concatenating instructions onto a cord representing the output file.
+ * - A text editor that converts the input file to a cord, and then
+ * performs editing operations by producing a new cord representing
+ * the file after each character change (and keeping the old ones in an
+ * edit history).
+ *
+ * For optimal performance, cords should be built by
+ * concatenating short sections.
+ * This interface is designed for maximum compatibility with C strings.
+ * ASCII NUL characters may be embedded in cords using CORD_from_fn.
+ * This is handled correctly, but CORD_to_char_star will produce a string
+ * with embedded NULs when given such a cord.
+ */
+# ifndef CORD_H
+
+# define CORD_H
+# include <stddef.h>
+# include <stdio.h>
+/* Cords have type const char *. This is cheating quite a bit, and not */
+/* 100% portable. But it means that nonempty character string */
+/* constants may be used as cords directly, provided the string is */
+/* never modified in place. The empty cord is represented by, and */
+/* can be written as, 0. */
+
+typedef const char * CORD;
+
+/* An empty cord is always represented as nil */
+# define CORD_EMPTY 0
+
+/* Is a nonempty cord represented as a C string? */
+#define IS_STRING(s) (*(s) != '\0')
+
+/* Concatenate two cords. If the arguments are C strings, they may */
+/* not be subsequently altered. */
+CORD CORD_cat(CORD x, CORD y);
+
+/* Concatenate a cord and a C string with known length. Except for the */
+/* empty string case, this is a special case of CORD_cat. Since the */
+/* length is known, it can be faster. */
+CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
+
+/* Compute the length of a cord */
+size_t CORD_len(CORD x);
+
+/* Cords may be represented by functions defining the ith character */
+typedef char (* CORD_fn)(size_t i, void * client_data);
+
+/* Turn a functional description into a cord. */
+CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
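+
+/* For example (an illustrative sketch; all_blanks is not part of */
+/* this interface), a cord of len blanks in constant space: */
+/* */
+/* char all_blanks(size_t i, void * client_data) { return ' '; } */
+/* ... CORD_from_fn(all_blanks, 0, len) ... */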
+
+/* Return the substring (subcord really) of x with length at most n, */
+/* starting at position i. (The initial character has position 0.) */
+CORD CORD_substr(CORD x, size_t i, size_t n);
+
+/* Return the argument, but rebalanced to allow more efficient */
+/* character retrieval, substring operations, and comparisons. */
+/* This is useful only for cords that were built using repeated */
+/* concatenation. Guarantees log time access to the result, unless */
+/* x was obtained through a large number of repeated substring ops */
+/* or the embedded functional descriptions take longer to evaluate. */
+/* May reallocate significant parts of the cord. The argument is not */
+/* modified; only the result is balanced. */
+CORD CORD_balance(CORD x);
+
+/* The following traverse a cord by applying a function to each */
+/* character. This is occasionally appropriate, especially where */
+/* speed is crucial. But, since C doesn't have nested functions, */
+/* clients of this sort of traversal are clumsy to write. Consider */
+/* the functions that operate on cord positions instead. */
+
+/* Function to iteratively apply to individual characters in cord. */
+typedef int (* CORD_iter_fn)(char c, void * client_data);
+
+/* Function to apply to substrings of a cord. Each substring is a */
+/* a C character string, not a general cord. */
+typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
+# define CORD_NO_FN ((CORD_batched_iter_fn)0)
+
+/* Apply f1 to each character in the cord, in ascending order, */
+/* starting at position i. If */
+/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by */
+/* a single call to f2. The parameter f2 is provided only to allow */
+/* some optimization by the client. This terminates when the right */
+/* end of this string is reached, or when f1 or f2 return != 0. In the */
+/* latter case CORD_iter returns != 0. Otherwise it returns 0. */
+/* The specified value of i must be < CORD_len(x). */
+int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
+ CORD_batched_iter_fn f2, void * client_data);
+
+/* A simpler version that starts at 0, and without f2: */
+int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
+# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
+
+/* Similar to CORD_iter5, but end-to-beginning. No provisions for */
+/* CORD_batched_iter_fn. */
+int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
+
+/* A simpler version that starts at the end: */
+int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
+
+/* Functions that operate on cord positions. The easy way to traverse */
+/* cords. A cord position is logically a pair consisting of a cord */
+/* and an index into that cord. But it is much faster to retrieve a */
+/* character based on a position than on an index. Unfortunately, */
+/* positions are big (on the order of a few hundred bytes), so */
+/* allocate them with caution. */
+/* Things in cord_pos.h should be treated as opaque, except as */
+/* described below. Also note that */
+/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
+/* definitions. The former may evaluate their argument more than once. */
+# include "cord_pos.h"
+
+/*
+ Visible definitions from above:
+
+ typedef <OPAQUE but fairly big> CORD_pos[1];
+
+ /* Extract the cord from a position:
+ CORD CORD_pos_to_cord(CORD_pos p);
+
+ /* Extract the current index from a position:
+ size_t CORD_pos_to_index(CORD_pos p);
+
+ /* Fetch the character located at the given position:
+ char CORD_pos_fetch(register CORD_pos p);
+
+ /* Initialize the position to refer to the given cord and index.
+ /* Note that this is the most expensive function on positions:
+ void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+ /* Advance the position to the next character.
+ /* P must be initialized and valid.
+ /* Invalidates p if past end:
+ void CORD_next(CORD_pos p);
+
+ /* Move the position to the preceding character.
+ /* P must be initialized and valid.
+ /* Invalidates p if past beginning:
+ void CORD_prev(CORD_pos p);
+
+ /* Is the position valid, i.e. inside the cord?
+ int CORD_pos_valid(CORD_pos p);
+*/
+# define CORD_FOR(pos, cord) \
+ for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
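+
+/* For example (a sketch), counting the blanks in cord x: */
+/* */
+/* CORD_pos p; */
+/* size_t blanks = 0; */
+/* */
+/* CORD_FOR(p, x) { */
+/* if (CORD_pos_fetch(p) == ' ') blanks++; */
+/* } */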
+
+
+/* An out of memory handler to call. May be supplied by client. */
+/* Must not return. */
+extern void (* CORD_oom_fn)(void);
+
+/* Dump the representation of x to stdout in an implementation defined */
+/* manner. Intended for debugging only. */
+void CORD_dump(CORD x);
+
+/* The following could easily be implemented by the client. They are */
+/* provided in cordxtra.c for convenience. */
+
+/* Concatenate a character to the end of a cord. */
+CORD CORD_cat_char(CORD x, char c);
+
+/* Return the character in CORD_substr(x, i, 1) */
+char CORD_fetch(CORD x, size_t i);
+
+/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
+int CORD_cmp(CORD x, CORD y);
+
+/* A generalization that takes both starting positions for the */
+/* comparison, and a limit on the number of characters to be compared. */
+int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
+
+/* Find the first occurrence of s in x at position start or later. */
+/* Return the position of the first character of s in x, or */
+/* CORD_NOT_FOUND if there is none. */
+size_t CORD_str(CORD x, size_t start, CORD s);
+
+/* Return a cord consisting of i copies of (possibly NUL) c. Dangerous */
+/* in conjunction with CORD_to_char_star. */
+/* The resulting representation takes constant space, independent of i. */
+CORD CORD_chars(char c, size_t i);
+# define CORD_nul(i) CORD_chars('\0', (i))
+
+/* Turn a file into a cord. The file must be seekable. Its contents */
+/* must remain constant. The file may be accessed as an immediate */
+/* result of this call and/or as a result of subsequent accesses to */
+/* the cord. Short files are likely to be immediately read, but */
+/* long files are likely to be read on demand, possibly relying on */
+/* stdio for buffering. */
+/* We must have exclusive access to the descriptor f, i.e. we may */
+/* read it at any time, and expect the file pointer to be */
+/* where we left it. Normally this should be invoked as */
+/* CORD_from_file(fopen(...)) */
+/* CORD_from_file arranges to close the file descriptor when it is no */
+/* longer needed (e.g. when the result becomes inaccessible). */
+/* The file f must be such that ftell reflects the actual character */
+/* position in the file, i.e. the number of characters that can be */
+/* or were read with fread. On UNIX systems this is always true. On */
+/* MS Windows systems, f must be opened in binary mode. */
+CORD CORD_from_file(FILE * f);
+
+/* Equivalent to the above, except that the entire file will be read */
+/* and the file will be closed immediately. */
+/* The binary mode restriction from above does not apply. */
+CORD CORD_from_file_eager(FILE * f);
+
+/* Equivalent to the above, except that the file will be read on demand.*/
+/* The binary mode restriction applies. */
+CORD CORD_from_file_lazy(FILE * f);
+
+/* Turn a cord into a C string. The result shares no structure with */
+/* x, and is thus modifiable. */
+char * CORD_to_char_star(CORD x);
+
+/* Write a cord to a file, starting at the current position. No */
+/* trailing NULs or newlines are added. */
+/* Returns EOF if a write error occurs, 1 otherwise. */
+int CORD_put(CORD x, FILE * f);
+
+/* "Not found" result for the following two functions. */
+# define CORD_NOT_FOUND ((size_t)(-1))
+
+/* A vague analog of strchr. Returns the position (an integer, not */
+/* a pointer) of the first occurrence of (char) c inside x at position */
+/* i or later. The value i must be < CORD_len(x). */
+size_t CORD_chr(CORD x, size_t i, int c);
+
+/* A vague analog of strrchr. Returns index of the last occurrence */
+/* of (char) c inside x at position i or earlier. The value i */
+/* must be < CORD_len(x). */
+size_t CORD_rchr(CORD x, size_t i, int c);
+
+
+/* The following are also not primitive, but are implemented in */
+/* cordprnt.c. They provide functionality similar to the ANSI C */
+/* functions with corresponding names, but with the following */
+/* additions and changes: */
+/* 1. A %r conversion specification specifies a CORD argument. Field */
+/* width, precision, etc. have the same semantics as for %s. */
+/* (Note that %c,%C, and %S were already taken.) */
+/* 2. The format string is represented as a CORD. */
+/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st */
+/* argument. Unlike their ANSI C versions, there is no need to guess */
+/* the correct buffer size. */
+/* 4. Most of the conversions are implemented through the native */
+/* vsprintf. Hence they are usually no faster, and */
+/* idiosyncrasies of the native printf are preserved. However, */
+/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
+/* the result shares the original structure. This may make them */
+/* very efficient in some unusual applications. */
+/* The format string is copied. */
+/* All functions return the number of characters generated or -1 on */
+/* error. This complies with the ANSI standard, but is inconsistent */
+/* with some older implementations of sprintf. */
+
+/* The implementation of these is probably less portable than the rest */
+/* of this package. */
+
+#ifndef CORD_NO_IO
+
+#include <stdarg.h>
+
+int CORD_sprintf(CORD * out, CORD format, ...);
+int CORD_vsprintf(CORD * out, CORD format, va_list args);
+int CORD_fprintf(FILE * f, CORD format, ...);
+int CORD_vfprintf(FILE * f, CORD format, va_list args);
+int CORD_printf(CORD format, ...);
+int CORD_vprintf(CORD format, va_list args);
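+
+/* For example (a sketch): */
+/* */
+/* CORD s; */
+/* */
+/* CORD_sprintf(&s, "<%r>", CORD_cat("foo", "bar")); */
+/* */
+/* leaves s representing "<foobar>"; no output buffer size had to */
+/* be guessed, and the argument cord is not copied. */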
+
+#endif /* CORD_NO_IO */
+
+# endif /* CORD_H */
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:23 pm PDT */
+# ifndef CORD_POSITION_H
+
+/* The representation of CORD_position. This is private to the */
+/* implementation, but the size is known to clients. Also */
+/* the implementation of some exported macros relies on it. */
+/* Don't use anything defined here and not in cord.h. */
+
+# define MAX_DEPTH 48
+ /* The maximum depth of a balanced cord + 1. */
+ /* We don't let cords get deeper than MAX_DEPTH. */
+
+struct CORD_pe {
+ CORD pe_cord;
+ size_t pe_start_pos;
+};
+
+/* A structure describing an entry on the path from the root */
+/* to current position. */
+typedef struct CORD_pos {
+ size_t cur_pos;
+ int path_len;
+# define CORD_POS_INVALID (0x55555555)
+ /* path_len == CORD_POS_INVALID <==> position invalid */
+ const char *cur_leaf; /* Current leaf, if it is a string. */
+ /* If the current leaf is a function, */
+ /* then this may point to function_buf */
+ /* containing the next few characters. */
+ /* Always points to a valid string */
+ /* containing the current character */
+ /* unless cur_end is 0. */
+ size_t cur_start; /* Start position of cur_leaf */
+ size_t cur_end; /* Ending position of cur_leaf */
+ /* 0 if cur_leaf is invalid. */
+ struct CORD_pe path[MAX_DEPTH + 1];
+ /* path[path_len] is the leaf corresponding to cur_pos */
+ /* path[0].pe_cord is the cord we point to. */
+# define FUNCTION_BUF_SZ 8
+ char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
+ /* from function node. */
+} CORD_pos[1];
+
+/* Extract the cord from a position: */
+CORD CORD_pos_to_cord(CORD_pos p);
+
+/* Extract the current index from a position: */
+size_t CORD_pos_to_index(CORD_pos p);
+
+/* Fetch the character located at the given position: */
+char CORD_pos_fetch(CORD_pos p);
+
+/* Initialize the position to refer to the given cord and index. */
+/* Note that this is the most expensive function on positions: */
+void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+/* Advance the position to the next character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past end: */
+void CORD_next(CORD_pos p);
+
+/* Move the position to the preceding character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past beginning: */
+void CORD_prev(CORD_pos p);
+
+/* Is the position valid, i.e. inside the cord? */
+int CORD_pos_valid(CORD_pos p);
+
+char CORD__pos_fetch(CORD_pos);
+void CORD__next(CORD_pos);
+void CORD__prev(CORD_pos);
+
+#define CORD_pos_fetch(p) \
+ (((p)[0].cur_end != 0)? \
+ (p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
+ : CORD__pos_fetch(p))
+
+#define CORD_next(p) \
+ (((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
+ (p)[0].cur_pos++ \
+ : (CORD__next(p), 0))
+
+#define CORD_prev(p) \
+ (((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
+ (p)[0].cur_pos-- \
+ : (CORD__prev(p), 0))
+
+#define CORD_pos_to_index(p) ((p)[0].cur_pos)
+
+#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
+
+#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
+
+/* Some grubby stuff for performance-critical friends: */
+#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
+ /* Number of characters in cache. <= 0 ==> none */
+
+#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
+ /* Advance position by n characters */
+ /* 0 < n < CORD_pos_chars_left(p) */
+
+#define CORD_pos_cur_char_addr(p) \
+ (p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
+ /* address of current character in cache. */
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/* Boehm, May 19, 1994 2:18 pm PDT */
+# include "../gc.h"
+# include "cord.h"
+# include <stdlib.h>
+# include <stdio.h>
+# include <string.h>
+
+/* An implementation of the cord primitives. These are the only */
+/* functions that understand the representation. We perform only */
+/* minimal checks on arguments to these functions. Out of bounds */
+/* arguments to the iteration functions may result in client functions */
+/* invoked on garbage data. In most cases, client functions should be */
+/* programmed defensively enough that this does not result in memory */
+/* smashes. */
+
+typedef void (* oom_fn)(void);
+
+oom_fn CORD_oom_fn = (oom_fn) 0;
+
+# define OUT_OF_MEMORY { if (CORD_oom_fn != (oom_fn) 0) (*CORD_oom_fn)(); \
+ ABORT("Out of memory\n"); }
+# define ABORT(msg) { fprintf(stderr, "%s\n", msg); abort(); }
+
+typedef unsigned long word;
+
+typedef union {
+ struct Concatenation {
+ char null;
+ char header;
+ char depth; /* concatenation nesting depth. */
+ unsigned char left_len;
+ /* Length of left child if it is sufficiently */
+ /* short; 0 otherwise. */
+# define MAX_LEFT_LEN 255
+ word len;
+ CORD left; /* length(left) > 0 */
+ CORD right; /* length(right) > 0 */
+ } concatenation;
+ struct Function {
+ char null;
+ char header;
+ char depth; /* always 0 */
+ char left_len; /* always 0 */
+ word len;
+ CORD_fn fn;
+ void * client_data;
+ } function;
+ struct Generic {
+ char null;
+ char header;
+ char depth;
+ char left_len;
+ word len;
+ } generic;
+ char string[1];
+} CordRep;
+
+# define CONCAT_HDR 1
+
+# define FN_HDR 4
+# define SUBSTR_HDR 6
+ /* Substring nodes are a special case of function nodes. */
+ /* The client_data field is known to point to a substr_args */
+ /* structure, and the function is either CORD_apply_access_fn */
+ /* or CORD_index_access_fn. */
+
+/* The following may be applied only to function and concatenation nodes: */
+#define IS_CONCATENATION(s) (((CordRep *)s)->generic.header == CONCAT_HDR)
+
+#define IS_FUNCTION(s) ((((CordRep *)s)->generic.header & FN_HDR) != 0)
+
+#define IS_SUBSTR(s) (((CordRep *)s)->generic.header == SUBSTR_HDR)
+
+#define LEN(s) (((CordRep *)s) -> generic.len)
+#define DEPTH(s) (((CordRep *)s) -> generic.depth)
+#define GEN_LEN(s) (IS_STRING(s) ? strlen(s) : LEN(s))
+
+#define LEFT_LEN(c) ((c) -> left_len != 0? \
+ (c) -> left_len \
+ : (IS_STRING((c) -> left) ? \
+ (c) -> len - GEN_LEN((c) -> right) \
+ : LEN((c) -> left)))
+
+#define SHORT_LIMIT (sizeof(CordRep) - 1)
+ /* Cords shorter than this are C strings */
+
+
+/* Dump the internal representation of x to stdout, with initial */
+/* indentation level n. */
+void CORD_dump_inner(CORD x, unsigned n)
+{
+ register size_t i;
+
+ for (i = 0; i < (size_t)n; i++) {
+ fputs(" ", stdout);
+ }
+ if (x == 0) {
+ fputs("NIL\n", stdout);
+ } else if (IS_STRING(x)) {
+ for (i = 0; i <= SHORT_LIMIT; i++) {
+ if (x[i] == '\0') break;
+ putchar(x[i]);
+ }
+ if (x[i] != '\0') fputs("...", stdout);
+ putchar('\n');
+ } else if (IS_CONCATENATION(x)) {
+ register struct Concatenation * conc =
+ &(((CordRep *)x) -> concatenation);
+ printf("Concatenation: %p (len: %d, depth: %d)\n",
+ x, (int)(conc -> len), (int)(conc -> depth));
+ CORD_dump_inner(conc -> left, n+1);
+ CORD_dump_inner(conc -> right, n+1);
+ } else /* function */{
+ register struct Function * func =
+ &(((CordRep *)x) -> function);
+ if (IS_SUBSTR(x)) printf("(Substring) ");
+ printf("Function: %p (len: %d): ", x, (int)(func -> len));
+ for (i = 0; i < 20 && i < func -> len; i++) {
+ putchar((*(func -> fn))(i, func -> client_data));
+ }
+ if (i < func -> len) fputs("...", stdout);
+ putchar('\n');
+ }
+}
+
+/* Dump the internal representation of x to stdout */
+void CORD_dump(CORD x)
+{
+ CORD_dump_inner(x, 0);
+ fflush(stdout);
+}
+
+CORD CORD_cat_char_star(CORD x, const char * y, size_t leny)
+{
+ register size_t result_len;
+ register size_t lenx;
+ register int depth;
+
+ if (x == CORD_EMPTY) return(y);
+ if (leny == 0) return(x);
+ if (IS_STRING(x)) {
+ lenx = strlen(x);
+ result_len = lenx + leny;
+ if (result_len <= SHORT_LIMIT) {
+ register char * result = GC_MALLOC_ATOMIC(result_len+1);
+
+ if (result == 0) OUT_OF_MEMORY;
+ memcpy(result, x, lenx);
+ memcpy(result + lenx, y, leny);
+ result[result_len] = '\0';
+ return((CORD) result);
+ } else {
+ depth = 1;
+ }
+ } else {
+ register CORD right;
+ register CORD left;
+ register char * new_right;
+ register size_t right_len;
+
+ lenx = LEN(x);
+
+ if (leny <= SHORT_LIMIT/2
+ && IS_CONCATENATION(x)
+ && IS_STRING(right = ((CordRep *)x) -> concatenation.right)) {
+ /* Merge y into right part of x. */
+ if (!IS_STRING(left = ((CordRep *)x) -> concatenation.left)) {
+ right_len = lenx - LEN(left);
+ } else if (((CordRep *)x) -> concatenation.left_len != 0) {
+ right_len = lenx - ((CordRep *)x) -> concatenation.left_len;
+ } else {
+ right_len = strlen(right);
+ }
+ result_len = right_len + leny; /* length of new_right */
+ if (result_len <= SHORT_LIMIT) {
+ new_right = GC_MALLOC_ATOMIC(result_len + 1);
+ if (new_right == 0) OUT_OF_MEMORY;
+ memcpy(new_right, right, right_len);
+ memcpy(new_right + right_len, y, leny);
+ new_right[result_len] = '\0';
+ y = new_right;
+ leny = result_len;
+ x = left;
+ lenx -= right_len;
+ /* Now fall through to concatenate the two pieces: */
+ }
+ if (IS_STRING(x)) {
+ depth = 1;
+ } else {
+ depth = DEPTH(x) + 1;
+ }
+ } else {
+ depth = DEPTH(x) + 1;
+ }
+ result_len = lenx + leny;
+ }
+ {
+ /* The general case; lenx and result_len are known: */
+ register struct Concatenation * result;
+
+ result = GC_NEW(struct Concatenation);
+ if (result == 0) OUT_OF_MEMORY;
+ result->header = CONCAT_HDR;
+ result->depth = depth;
+ if (lenx <= MAX_LEFT_LEN) result->left_len = lenx;
+ result->len = result_len;
+ result->left = x;
+ result->right = y;
+ if (depth > MAX_DEPTH) {
+ return(CORD_balance((CORD)result));
+ } else {
+ return((CORD) result);
+ }
+ }
+}
+
+
+CORD CORD_cat(CORD x, CORD y)
+{
+ register size_t result_len;
+ register int depth;
+ register size_t lenx;
+
+ if (x == CORD_EMPTY) return(y);
+ if (y == CORD_EMPTY) return(x);
+ if (IS_STRING(y)) {
+ return(CORD_cat_char_star(x, y, strlen(y)));
+ } else if (IS_STRING(x)) {
+ lenx = strlen(x);
+ depth = DEPTH(y) + 1;
+ } else {
+ register int depthy = DEPTH(y);
+
+ lenx = LEN(x);
+ depth = DEPTH(x) + 1;
+ if (depthy >= depth) depth = depthy + 1;
+ }
+ result_len = lenx + LEN(y);
+ {
+ register struct Concatenation * result;
+
+ result = GC_NEW(struct Concatenation);
+ if (result == 0) OUT_OF_MEMORY;
+ result->header = CONCAT_HDR;
+ result->depth = depth;
+ if (lenx <= MAX_LEFT_LEN) result->left_len = lenx;
+ result->len = result_len;
+ result->left = x;
+ result->right = y;
+ return((CORD) result);
+ }
+}
+
+
+
+CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len)
+{
+ if (len <= 0) return(0);
+ if (len <= SHORT_LIMIT) {
+ register char * result;
+ register size_t i;
+ char buf[SHORT_LIMIT+1];
+ register char c;
+
+ for (i = 0; i < len; i++) {
+ c = (*fn)(i, client_data);
+ if (c == '\0') goto gen_case;
+ buf[i] = c;
+ }
+ buf[i] = '\0';
+ result = GC_MALLOC_ATOMIC(len+1);
+ if (result == 0) OUT_OF_MEMORY;
+ strcpy(result, buf);
+ result[len] = '\0';
+ return((CORD) result);
+ }
+ gen_case:
+ {
+ register struct Function * result;
+
+ result = GC_NEW(struct Function);
+ if (result == 0) OUT_OF_MEMORY;
+ result->header = FN_HDR;
+ /* depth is already 0 */
+ result->len = len;
+ result->fn = fn;
+ result->client_data = client_data;
+ return((CORD) result);
+ }
+}
+
+size_t CORD_len(CORD x)
+{
+ if (x == 0) {
+ return(0);
+ } else {
+ return(GEN_LEN(x));
+ }
+}
+
+struct substr_args {
+ CordRep * sa_cord;
+ size_t sa_index;
+};
+
+char CORD_index_access_fn(size_t i, void * client_data)
+{
+ register struct substr_args *descr = (struct substr_args *)client_data;
+
+ return(((char *)(descr->sa_cord))[i + descr->sa_index]);
+}
+
+char CORD_apply_access_fn(size_t i, void * client_data)
+{
+ register struct substr_args *descr = (struct substr_args *)client_data;
+ register struct Function * fn_cord = &(descr->sa_cord->function);
+
+ return((*(fn_cord->fn))(i + descr->sa_index, fn_cord->client_data));
+}
+
+/* A version of CORD_substr that simply returns a function node, thus */
+/* postponing its work. The fourth argument is a function that may */
+/* be used for efficient access to the ith character. */
+/* Assumes i >= 0 and i + n <= length(x). */
+CORD CORD_substr_closure(CORD x, size_t i, size_t n, CORD_fn f)
+{
+ register struct substr_args * sa = GC_NEW(struct substr_args);
+ CORD result;
+
+ if (sa == 0) OUT_OF_MEMORY;
+ sa->sa_cord = (CordRep *)x;
+ sa->sa_index = i;
+ result = CORD_from_fn(f, (void *)sa, n);
+ ((CordRep *)result) -> function.header = SUBSTR_HDR;
+ return (result);
+}
+
+# define SUBSTR_LIMIT (10 * SHORT_LIMIT)
+ /* Substrings of function nodes and flat strings shorter than */
+ /* this are flat strings. Otherwise we use a functional */
+ /* representation, which is significantly slower to access. */
+
+/* A version of CORD_substr that assumes i >= 0, n > 0, and i + n <= length(x).*/
+CORD CORD_substr_checked(CORD x, size_t i, size_t n)
+{
+ if (IS_STRING(x)) {
+ if (n > SUBSTR_LIMIT) {
+ return(CORD_substr_closure(x, i, n, CORD_index_access_fn));
+ } else {
+ register char * result = GC_MALLOC_ATOMIC(n+1);
+
+ if (result == 0) OUT_OF_MEMORY;
+ strncpy(result, x+i, n);
+ result[n] = '\0';
+ return(result);
+ }
+ } else if (IS_CONCATENATION(x)) {
+ register struct Concatenation * conc
+ = &(((CordRep *)x) -> concatenation);
+ register size_t left_len;
+ register size_t right_len;
+
+ left_len = LEFT_LEN(conc);
+ right_len = conc -> len - left_len;
+ if (i >= left_len) {
+ if (n == right_len) return(conc -> right);
+ return(CORD_substr_checked(conc -> right, i - left_len, n));
+ } else if (i+n <= left_len) {
+ if (n == left_len) return(conc -> left);
+ return(CORD_substr_checked(conc -> left, i, n));
+ } else {
+ /* Need at least one character from each side. */
+ register CORD left_part;
+ register CORD right_part;
+ register size_t left_part_len = left_len - i;
+
+ if (i == 0) {
+ left_part = conc -> left;
+ } else {
+ left_part = CORD_substr_checked(conc -> left, i, left_part_len);
+ }
+ if (i + n == right_len + left_len) {
+ right_part = conc -> right;
+ } else {
+ right_part = CORD_substr_checked(conc -> right, 0,
+ n - left_part_len);
+ }
+ return(CORD_cat(left_part, right_part));
+ }
+ } else /* function */ {
+ if (n > SUBSTR_LIMIT) {
+ if (IS_SUBSTR(x)) {
+ /* Avoid nesting substring nodes. */
+ register struct Function * f = &(((CordRep *)x) -> function);
+ register struct substr_args *descr =
+ (struct substr_args *)(f -> client_data);
+
+ return(CORD_substr_closure((CORD)descr->sa_cord,
+ i + descr->sa_index,
+ n, f -> fn));
+ } else {
+ return(CORD_substr_closure(x, i, n, CORD_apply_access_fn));
+ }
+ } else {
+ char * result;
+ register struct Function * f = &(((CordRep *)x) -> function);
+ char buf[SUBSTR_LIMIT+1];
+ register char * p = buf;
+ register char c;
+ register int j;
+ register int lim = i + n;
+
+ for (j = i; j < lim; j++) {
+ c = (*(f -> fn))(j, f -> client_data);
+ if (c == '\0') {
+ return(CORD_substr_closure(x, i, n, CORD_apply_access_fn));
+ }
+ *p++ = c;
+ }
+ *p = '\0';
+ result = GC_MALLOC_ATOMIC(n+1);
+ if (result == 0) OUT_OF_MEMORY;
+ strcpy(result, buf);
+ return(result);
+ }
+ }
+}
+
+CORD CORD_substr(CORD x, size_t i, size_t n)
+{
+ register size_t len = CORD_len(x);
+
+ if (i >= len || n <= 0) return(0);
+ /* n < 0 is impossible in a correct C implementation, but */
+ /* quite possible under SunOS 4.X. */
+ if (i + n > len) n = len - i;
+ if (i < 0) ABORT("CORD_substr: second arg. negative");
+ /* Possible only if both client and C implementation are buggy. */
+ /* But empirically this happens frequently. */
+ return(CORD_substr_checked(x, i, n));
+}
+
+/* See cord.h for definition. We assume i is in range. */
+int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
+ CORD_batched_iter_fn f2, void * client_data)
+{
+ if (x == 0) return(0);
+ if (IS_STRING(x)) {
+ register const char *p = x+i;
+
+ if (*p == '\0') ABORT("2nd arg to CORD_iter5 too big");
+ if (f2 != CORD_NO_FN) {
+ return((*f2)(p, client_data));
+ } else {
+ while (*p) {
+ if ((*f1)(*p, client_data)) return(1);
+ p++;
+ }
+ return(0);
+ }
+ } else if (IS_CONCATENATION(x)) {
+ register struct Concatenation * conc
+ = &(((CordRep *)x) -> concatenation);
+
+
+ if (i > 0) {
+ register size_t left_len = LEFT_LEN(conc);
+
+ if (i >= left_len) {
+ return(CORD_iter5(conc -> right, i - left_len, f1, f2,
+ client_data));
+ }
+ }
+ if (CORD_iter5(conc -> left, i, f1, f2, client_data)) {
+ return(1);
+ }
+ return(CORD_iter5(conc -> right, 0, f1, f2, client_data));
+ } else /* function */ {
+ register struct Function * f = &(((CordRep *)x) -> function);
+ register size_t j;
+ register size_t lim = f -> len;
+
+ for (j = i; j < lim; j++) {
+ if ((*f1)((*(f -> fn))(j, f -> client_data), client_data)) {
+ return(1);
+ }
+ }
+ return(0);
+ }
+}
+
+#undef CORD_iter
+int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data)
+{
+ return(CORD_iter5(x, 0, f1, CORD_NO_FN, client_data));
+}
+
+int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data)
+{
+ if (x == 0) return(0);
+ if (IS_STRING(x)) {
+ register const char *p = x + i;
+ register char c;
+
+ while (p >= x) {
+ c = *p;
+ if (c == '\0') ABORT("2nd arg to CORD_riter4 too big");
+ if ((*f1)(c, client_data)) return(1);
+ p--;
+ }
+ return(0);
+ } else if (IS_CONCATENATION(x)) {
+ register struct Concatenation * conc
+ = &(((CordRep *)x) -> concatenation);
+ register CORD left_part = conc -> left;
+ register size_t left_len;
+
+ left_len = LEFT_LEN(conc);
+ if (i >= left_len) {
+ if (CORD_riter4(conc -> right, i - left_len, f1, client_data)) {
+ return(1);
+ }
+ return(CORD_riter4(left_part, left_len - 1, f1, client_data));
+ } else {
+ return(CORD_riter4(left_part, i, f1, client_data));
+ }
+ } else /* function */ {
+ register struct Function * f = &(((CordRep *)x) -> function);
+ register size_t j;
+
+ /* j is unsigned, so test the bottom of the range explicitly */
+ /* rather than with j >= 0, which is always true. */
+ for (j = i; ; j--) {
+ if ((*f1)((*(f -> fn))(j, f -> client_data), client_data)) {
+ return(1);
+ }
+ if (j == 0) return(0);
+ }
+ }
+}
+
+int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data)
+{
+ return(CORD_riter4(x, CORD_len(x) - 1, f1, client_data));
+}
+
+/*
+ * The following functions are concerned with balancing cords.
+ * Strategy:
+ * Scan the cord from left to right, keeping the cord scanned so far
+ * as a forest of balanced trees of exponentially decreasing length.
+ * When a new subtree needs to be added to the forest, we concatenate all
+ * shorter ones to the new tree in the appropriate order, and then insert
+ * the result into the forest.
+ * Crucial invariants:
+ * 1. The concatenation of the forest (in decreasing order) with the
+ * unscanned part of the rope is equal to the rope being balanced.
+ * 2. All trees in the forest are balanced.
+ * 3. forest[i] has depth at most i.
+ */
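+
+/*
+ * For instance (illustration only): min_len, initialized below to the
+ * Fibonacci-like sequence 1, 2, 3, 5, 8, ..., sets the minimum length
+ * acceptable at each depth, so a cord built by n repeated CORD_cat
+ * calls (a degenerate spine of depth about n) is rebuilt into a tree
+ * whose depth grows only logarithmically in n.
+ */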
+
+typedef struct {
+ CORD c;
+ size_t len; /* Actual length of c */
+} ForestElement;
+
+static size_t min_len [ MAX_DEPTH ];
+
+static int min_len_init = 0;
+
+int CORD_max_len;
+
+typedef ForestElement Forest [ MAX_DEPTH ];
+ /* forest[i].min_length = fib(i+1) */
+ /* The string is the concatenation */
+ /* of the forest in order of DECREASING */
+ /* indices. */
+
+void CORD_init_min_len()
+{
+ register int i;
+ register size_t last, previous, current;
+
+ min_len[0] = previous = 1;
+ min_len[1] = last = 2;
+ for (i = 2; i < MAX_DEPTH; i++) {
+ current = last + previous;
+ if (current < last) /* overflow */ current = last;
+ min_len[i] = current;
+ previous = last;
+ last = current;
+ }
+ CORD_max_len = last - 1;
+ min_len_init = 1;
+}
+
+
+void CORD_init_forest(ForestElement * forest, size_t max_len)
+{
+ register int i;
+
+ for (i = 0; i < MAX_DEPTH; i++) {
+ forest[i].c = 0;
+ if (min_len[i] > max_len) return;
+ }
+ ABORT("Cord too long");
+}
+
+/* Add a leaf to the appropriate level in the forest, cleaning */
+/* out lower levels as necessary. */
+/* Also works if x is a balanced tree of concatenations; however */
+/* in this case an extra concatenation node may be inserted above x; */
+/* This node should not be counted in the statement of the invariants. */
+void CORD_add_forest(ForestElement * forest, CORD x, size_t len)
+{
+ register int i = 0;
+ register CORD sum = CORD_EMPTY;
+ register size_t sum_len = 0;
+
+ while (len > min_len[i + 1]) {
+ if (forest[i].c != 0) {
+ sum = CORD_cat(forest[i].c, sum);
+ sum_len += forest[i].len;
+ forest[i].c = 0;
+ }
+ i++;
+ }
+ /* Sum has depth at most 1 greater than what would be required */
+ /* for balance. */
+ sum = CORD_cat(sum, x);
+ sum_len += len;
+ /* If x was a leaf, then sum is now balanced. To see this */
+ /* consider the two cases in which forest[i-1] either is or is */
+ /* not empty. */
+ while (sum_len >= min_len[i]) {
+ if (forest[i].c != 0) {
+ sum = CORD_cat(forest[i].c, sum);
+ sum_len += forest[i].len;
+ /* This is again balanced, since sum was balanced, and has */
+ /* allowable depth that differs from i by at most 1. */
+ forest[i].c = 0;
+ }
+ i++;
+ }
+ i--;
+ forest[i].c = sum;
+ forest[i].len = sum_len;
+}
+
+CORD CORD_concat_forest(ForestElement * forest, size_t expected_len)
+{
+ register int i = 0;
+ CORD sum = 0;
+ size_t sum_len = 0;
+
+ while (sum_len != expected_len) {
+ if (forest[i].c != 0) {
+ sum = CORD_cat(forest[i].c, sum);
+ sum_len += forest[i].len;
+ }
+ i++;
+ }
+ return(sum);
+}
+
+/* Insert the frontier of x into forest. Balanced subtrees are */
+/* treated as leaves. This potentially adds one to the depth */
+/* of the final tree. */
+void CORD_balance_insert(CORD x, size_t len, ForestElement * forest)
+{
+ register int depth;
+
+ if (IS_STRING(x)) {
+ CORD_add_forest(forest, x, len);
+ } else if (IS_CONCATENATION(x)
+ && ((depth = DEPTH(x)) >= MAX_DEPTH
+ || len < min_len[depth])) {
+ register struct Concatenation * conc
+ = &(((CordRep *)x) -> concatenation);
+ size_t left_len = LEFT_LEN(conc);
+
+ CORD_balance_insert(conc -> left, left_len, forest);
+ CORD_balance_insert(conc -> right, len - left_len, forest);
+ } else /* function or balanced */ {
+ CORD_add_forest(forest, x, len);
+ }
+}
+
+
+CORD CORD_balance(CORD x)
+{
+ Forest forest;
+ register size_t len;
+
+ if (x == 0) return(0);
+ if (IS_STRING(x)) return(x);
+ if (!min_len_init) CORD_init_min_len();
+ len = LEN(x);
+ CORD_init_forest(forest, len);
+ CORD_balance_insert(x, len, forest);
+ return(CORD_concat_forest(forest, len));
+}
+
+
+/* Position primitives */
+
+/* Private routines to deal with the hard cases only: */
+
+/* P contains a prefix of the path to cur_pos. Extend it to a full */
+/* path and set up leaf info. Marks the position as invalid if */
+/* cur_pos is past the end of the cord. */
+void CORD__extend_path(register CORD_pos p)
+{
+ register struct CORD_pe * current_pe = &(p[0].path[p[0].path_len]);
+ register CORD top = current_pe -> pe_cord;
+ register size_t pos = p[0].cur_pos;
+ register size_t top_pos = current_pe -> pe_start_pos;
+ register size_t top_len = GEN_LEN(top);
+
+ /* Fill in the rest of the path. */
+ while(!IS_STRING(top) && IS_CONCATENATION(top)) {
+ register struct Concatenation * conc =
+ &(((CordRep *)top) -> concatenation);
+ register size_t left_len;
+
+ left_len = LEFT_LEN(conc);
+ current_pe++;
+ if (pos >= top_pos + left_len) {
+ current_pe -> pe_cord = top = conc -> right;
+ current_pe -> pe_start_pos = top_pos = top_pos + left_len;
+ top_len -= left_len;
+ } else {
+ current_pe -> pe_cord = top = conc -> left;
+ current_pe -> pe_start_pos = top_pos;
+ top_len = left_len;
+ }
+ p[0].path_len++;
+ }
+ /* Fill in leaf description for fast access. */
+ if (IS_STRING(top)) {
+ p[0].cur_leaf = top;
+ p[0].cur_start = top_pos;
+ p[0].cur_end = top_pos + top_len;
+ } else {
+ p[0].cur_end = 0;
+ }
+ if (pos >= top_pos + top_len) p[0].path_len = CORD_POS_INVALID;
+}
+
+char CORD__pos_fetch(register CORD_pos p)
+{
+ /* Leaf is a function node */
+ struct CORD_pe * pe = &((p)[0].path[(p)[0].path_len]);
+ CORD leaf = pe -> pe_cord;
+ register struct Function * f = &(((CordRep *)leaf) -> function);
+
+ if (!IS_FUNCTION(leaf)) ABORT("CORD_pos_fetch: bad leaf");
+ return ((*(f -> fn))(p[0].cur_pos - pe -> pe_start_pos, f -> client_data));
+}
+
+void CORD__next(register CORD_pos p)
+{
+ register size_t cur_pos = p[0].cur_pos + 1;
+ register struct CORD_pe * current_pe = &((p)[0].path[(p)[0].path_len]);
+ register CORD leaf = current_pe -> pe_cord;
+
+ /* Leaf is not a string or we're at end of leaf */
+ p[0].cur_pos = cur_pos;
+ if (!IS_STRING(leaf)) {
+ /* Function leaf */
+ register struct Function * f = &(((CordRep *)leaf) -> function);
+ register size_t start_pos = current_pe -> pe_start_pos;
+ register size_t end_pos = start_pos + f -> len;
+
+ if (cur_pos < end_pos) {
+ /* Fill cache and return. */
+ register size_t i;
+ register size_t limit = cur_pos + FUNCTION_BUF_SZ;
+ register CORD_fn fn = f -> fn;
+ register void * client_data = f -> client_data;
+
+ if (limit > end_pos) {
+ limit = end_pos;
+ }
+ for (i = cur_pos; i < limit; i++) {
+ p[0].function_buf[i - cur_pos] =
+ (*fn)(i - start_pos, client_data);
+ }
+ p[0].cur_start = cur_pos;
+ p[0].cur_leaf = p[0].function_buf;
+ p[0].cur_end = limit;
+ return;
+ }
+ }
+ /* End of leaf */
+ /* Pop the stack until we find two concatenation nodes with the */
+ /* same start position: this implies we were in the left part. */
+ {
+ while (p[0].path_len > 0
+ && current_pe[0].pe_start_pos != current_pe[-1].pe_start_pos) {
+ p[0].path_len--;
+ current_pe--;
+ }
+ if (p[0].path_len == 0) {
+ p[0].path_len = CORD_POS_INVALID;
+ return;
+ }
+ }
+ p[0].path_len--;
+ CORD__extend_path(p);
+}
+
+void CORD__prev(register CORD_pos p)
+{
+ register struct CORD_pe * pe = &(p[0].path[p[0].path_len]);
+
+ if (p[0].cur_pos == 0) {
+ p[0].path_len = CORD_POS_INVALID;
+ return;
+ }
+ p[0].cur_pos--;
+ if (p[0].cur_pos >= pe -> pe_start_pos) return;
+
+ /* Beginning of leaf */
+
+ /* Pop the stack until we find two concatenation nodes with */
+ /* different start positions: this implies we were in the right part. */
+ {
+ register struct CORD_pe * current_pe = &((p)[0].path[(p)[0].path_len]);
+
+ while (p[0].path_len > 0
+ && current_pe[0].pe_start_pos == current_pe[-1].pe_start_pos) {
+ p[0].path_len--;
+ current_pe--;
+ }
+ }
+ p[0].path_len--;
+ CORD__extend_path(p);
+}
+
+#undef CORD_pos_fetch
+#undef CORD_next
+#undef CORD_prev
+#undef CORD_pos_to_index
+#undef CORD_pos_to_cord
+#undef CORD_pos_valid
+
+char CORD_pos_fetch(register CORD_pos p)
+{
+ if (p[0].cur_start <= p[0].cur_pos && p[0].cur_pos < p[0].cur_end) {
+ return(p[0].cur_leaf[p[0].cur_pos - p[0].cur_start]);
+ } else {
+ return(CORD__pos_fetch(p));
+ }
+}
+
+void CORD_next(CORD_pos p)
+{
+ if (p[0].cur_pos < p[0].cur_end - 1) {
+ p[0].cur_pos++;
+ } else {
+ CORD__next(p);
+ }
+}
+
+void CORD_prev(CORD_pos p)
+{
+ if (p[0].cur_end != 0 && p[0].cur_pos > p[0].cur_start) {
+ p[0].cur_pos--;
+ } else {
+ CORD__prev(p);
+ }
+}
+
+size_t CORD_pos_to_index(CORD_pos p)
+{
+ return(p[0].cur_pos);
+}
+
+CORD CORD_pos_to_cord(CORD_pos p)
+{
+ return(p[0].path[0].pe_cord);
+}
+
+int CORD_pos_valid(CORD_pos p)
+{
+ return(p[0].path_len != CORD_POS_INVALID);
+}
+
+void CORD_set_pos(CORD_pos p, CORD x, size_t i)
+{
+ if (x == CORD_EMPTY) {
+ p[0].path_len = CORD_POS_INVALID;
+ return;
+ }
+ p[0].path[0].pe_cord = x;
+ p[0].path[0].pe_start_pos = 0;
+ p[0].path_len = 0;
+ p[0].cur_pos = i;
+ CORD__extend_path(p);
+}
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* An sprintf implementation that understands cords. This is probably */
+/* not terribly portable. It assumes an ANSI stdarg.h. It further */
+/* assumes that I can make copies of va_list variables, and read */
+/* arguments repeatedly by applyting va_arg to the copies. This */
+/* could be avoided at some performance cost. */
+/* We also assume that unsigned and signed integers of various kinds */
+/* have the same sizes, and can be cast back and forth. */
+/* We assume that void * and char * have the same size. */
+/* All this cruft is needed because we want to rely on the underlying */
+/* sprintf implementation whenever possible. */
+/* Boehm, May 19, 1994 2:19 pm PDT */
+
+#include "cord.h"
+#include "ec.h"
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include "../gc.h"
+
+#define CONV_SPEC_LEN 50 /* Maximum length of a single */
+ /* conversion specification. */
+#define CONV_RESULT_LEN 50 /* Maximum length of any */
+ /* conversion with default */
+ /* width and prec. */
+
+
+static int ec_len(CORD_ec x)
+{
+ return(CORD_len(x[0].ec_cord) + (x[0].ec_bufptr - x[0].ec_buf));
+}
+
+/* Possible nonnumeric width and precision values. */
+# define NONE -1
+# define VARIABLE -2
+/* Copy the conversion specification from CORD_pos into the buffer buf */
+/* Return negative on error. */
+/* Source initially points one past the leading %. */
+/* It is left pointing at the conversion type. */
+/* Assign field width and precision to *width and *prec. */
+/* If width or prec is *, VARIABLE is assigned. */
+/* Set *left to 1 if left adjustment flag is present. */
+/* Set *long_arg to 1 if long flag ('l' or 'L') is present, or to */
+/* -1 if 'h' is present. */
+static int extract_conv_spec(CORD_pos source, char *buf,
+ int * width, int *prec, int *left, int * long_arg)
+{
+ register int result = 0;
+ register int current_number = 0;
+ register int saw_period = 0;
+ register int saw_number = 0;
+ register int chars_so_far = 0;
+ register char current;
+
+ *width = NONE;
+ buf[chars_so_far++] = '%';
+ while(CORD_pos_valid(source)) {
+ if (chars_so_far >= CONV_SPEC_LEN) return(-1);
+ current = CORD_pos_fetch(source);
+ buf[chars_so_far++] = current;
+ switch(current) {
+ case '*':
+ saw_number = 1;
+ current_number = VARIABLE;
+ break;
+ case '0':
+ if (!saw_number) {
+ /* Zero fill flag; ignore */
+ break;
+ } /* otherwise fall through: */
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ saw_number = 1;
+ current_number *= 10;
+ current_number += current - '0';
+ break;
+ case '.':
+ saw_period = 1;
+ if(saw_number) {
+ *width = current_number;
+ saw_number = 0;
+ }
+ current_number = 0;
+ break;
+ case 'l':
+ case 'L':
+ *long_arg = 1;
+ current_number = 0;
+ break;
+ case 'h':
+ *long_arg = -1;
+ current_number = 0;
+ break;
+ case ' ':
+ case '+':
+ case '#':
+ current_number = 0;
+ break;
+ case '-':
+ *left = 1;
+ current_number = 0;
+ break;
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ case 'f':
+ case 'e':
+ case 'E':
+ case 'g':
+ case 'G':
+ case 'c':
+ case 'C':
+ case 's':
+ case 'S':
+ case 'p':
+ case 'n':
+ case 'r':
+ goto done;
+ default:
+ return(-1);
+ }
+ CORD_next(source);
+ }
+ return(-1);
+ done:
+ if (saw_number) {
+ if (saw_period) {
+ *prec = current_number;
+ } else {
+ *prec = NONE;
+ *width = current_number;
+ }
+ } else {
+ *prec = NONE;
+ }
+ buf[chars_so_far] = '\0';
+ return(result);
+}
+
+int CORD_vsprintf(CORD * out, CORD format, va_list args)
+{
+ CORD_ec result;
+ register int count;
+ register char current;
+ CORD_pos pos;
+ char conv_spec[CONV_SPEC_LEN + 1];
+
+ CORD_ec_init(result);
+ for (CORD_set_pos(pos, format, 0); CORD_pos_valid(pos); CORD_next(pos)) {
+ current = CORD_pos_fetch(pos);
+ if (current == '%') {
+ CORD_next(pos);
+ if (!CORD_pos_valid(pos)) return(-1);
+ current = CORD_pos_fetch(pos);
+ if (current == '%') {
+ CORD_ec_append(result, current);
+ } else {
+ int width, prec;
+ int left_adj = 0;
+ int long_arg = 0;
+ CORD arg;
+ size_t len;
+
+ if (extract_conv_spec(pos, conv_spec,
+ &width, &prec,
+ &left_adj, &long_arg) < 0) {
+ return(-1);
+ }
+ current = CORD_pos_fetch(pos);
+ switch(current) {
+ case 'n':
+ /* Assign length to next arg */
+ if (long_arg == 0) {
+ int * pos_ptr;
+ pos_ptr = va_arg(args, int *);
+ *pos_ptr = ec_len(result);
+ } else if (long_arg > 0) {
+ long * pos_ptr;
+ pos_ptr = va_arg(args, long *);
+ *pos_ptr = ec_len(result);
+ } else {
+ short * pos_ptr;
+ pos_ptr = va_arg(args, short *);
+ *pos_ptr = ec_len(result);
+ }
+ goto done;
+ case 'r':
+ /* Append cord and any padding */
+ if (width == VARIABLE) width = va_arg(args, int);
+ if (prec == VARIABLE) prec = va_arg(args, int);
+ arg = va_arg(args, CORD);
+ len = CORD_len(arg);
+ if (prec != NONE && len > prec) {
+ if (prec < 0) return(-1);
+ arg = CORD_substr(arg, 0, prec);
+ len = prec;
+ }
+ if (width != NONE && len < width) {
+			char * blanks = GC_MALLOC_ATOMIC(width-len+1);
+
+			if (blanks == 0) return(-1);
+			memset(blanks, ' ', width-len);
+ blanks[width-len] = '\0';
+ if (left_adj) {
+ arg = CORD_cat(arg, blanks);
+ } else {
+ arg = CORD_cat(blanks, arg);
+ }
+ }
+ CORD_ec_append_cord(result, arg);
+ goto done;
+ case 'c':
+ if (width == NONE && prec == NONE) {
+		      register char c = (char)va_arg(args, int);
+				/* char is promoted to int when passed	*/
+				/* through "...", so it must be read	*/
+				/* back as int.				*/
+
+ CORD_ec_append(result, c);
+ goto done;
+ }
+ break;
+ case 's':
+ if (width == NONE && prec == NONE) {
+ char * str = va_arg(args, char *);
+ register char c;
+
+		      while ((c = *str++) != '\0') {
+ CORD_ec_append(result, c);
+ }
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
+ /* Use standard sprintf to perform conversion */
+ {
+ register char * buf;
+ int needed_sz;
+ va_list vsprintf_args = args;
+ /* The above does not appear to be sanctioned */
+ /* by the ANSI C standard. */
+ int max_size = 0;
+
+ if (width == VARIABLE) width = va_arg(args, int);
+ if (prec == VARIABLE) prec = va_arg(args, int);
+ if (width != NONE) max_size = width;
+ if (prec != NONE && prec > max_size) max_size = prec;
+ max_size += CONV_RESULT_LEN;
+			if (max_size >= CORD_BUFSZ) {
+			    buf = GC_MALLOC_ATOMIC(max_size + 1);
+			    if (buf == 0) return(-1);
+ } else {
+ if (CORD_BUFSZ - (result[0].ec_bufptr-result[0].ec_buf)
+ < max_size) {
+ CORD_ec_flush_buf(result);
+ }
+ buf = result[0].ec_bufptr;
+ }
+ switch(current) {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ case 'c':
+ if (long_arg <= 0) {
+ (void) va_arg(args, int);
+ } else if (long_arg > 0) {
+ (void) va_arg(args, long);
+ }
+ break;
+ case 's':
+ case 'p':
+ (void) va_arg(args, char *);
+ break;
+ case 'f':
+ case 'e':
+ case 'E':
+ case 'g':
+ case 'G':
+ (void) va_arg(args, double);
+ break;
+ default:
+ return(-1);
+ }
+			{
+			    int int_len = vsprintf(buf, conv_spec,
+						   vsprintf_args);
+
+			    if ((char *)int_len == buf) {
+				/* old style vsprintf returns its buffer */
+				len = strlen(buf);
+			    } else if (int_len < 0) {
+				return(-1);
+			    } else {
+				len = (size_t)int_len;
+			    }
+			}
+ if (buf != result[0].ec_bufptr) {
+ register char c;
+
+			    while ((c = *buf++) != '\0') {
+ CORD_ec_append(result, c);
+ }
+ } else {
+ result[0].ec_bufptr = buf + len;
+ }
+ }
+ done:;
+ }
+ } else {
+ CORD_ec_append(result, current);
+ }
+ }
+ count = ec_len(result);
+ *out = CORD_balance(CORD_ec_to_cord(result));
+ return(count);
+}
+
+int CORD_sprintf(CORD * out, CORD format, ...)
+{
+ va_list args;
+ int result;
+
+ va_start(args, format);
+ result = CORD_vsprintf(out, format, args);
+ va_end(args);
+ return(result);
+}
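+
+/* Usage sketch; "x" stands for any cord the caller already holds:	*/
+/*
+ *	CORD result;
+ *
+ *	if (CORD_sprintf(&result, "%d bytes in %r", 42, x) < 0) {
+ *	    ... handle conversion failure ...
+ *	}
+ */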
+
+int CORD_fprintf(FILE * f, CORD format, ...)
+{
+ va_list args;
+ int result;
+ CORD out;
+
+ va_start(args, format);
+ result = CORD_vsprintf(&out, format, args);
+ va_end(args);
+ if (result > 0) CORD_put(out, f);
+ return(result);
+}
+
+int CORD_vfprintf(FILE * f, CORD format, va_list args)
+{
+ int result;
+ CORD out;
+
+ result = CORD_vsprintf(&out, format, args);
+ if (result > 0) CORD_put(out, f);
+ return(result);
+}
+
+int CORD_printf(CORD format, ...)
+{
+ va_list args;
+ int result;
+ CORD out;
+
+ va_start(args, format);
+ result = CORD_vsprintf(&out, format, args);
+ va_end(args);
+ if (result > 0) CORD_put(out, stdout);
+ return(result);
+}
+
+int CORD_vprintf(CORD format, va_list args)
+{
+ int result;
+ CORD out;
+
+ result = CORD_vsprintf(&out, format, args);
+ if (result > 0) CORD_put(out, stdout);
+ return(result);
+}
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:21 pm PDT */
+# include "cord.h"
+# include <stdio.h>
+/* This is a very incomplete test of the cord package. It knows about */
+/* a few internals of the package (e.g. when C strings are returned) */
+/* that real clients shouldn't rely on. */
+
+# define ABORT(string) \
+{ int x = 0; fprintf(stderr, "FAILED: %s\n", string); x = 1 / x; abort(); }
+
+int count;
+
+int test_fn(char c, void * client_data)
+{
+ if (client_data != (void *)13) ABORT("bad client data");
+ if (count < 64*1024+1) {
+ if ((count & 1) == 0) {
+ if (c != 'b') ABORT("bad char");
+ } else {
+ if (c != 'a') ABORT("bad char");
+ }
+ count++;
+ return(0);
+ } else {
+ if (c != 'c') ABORT("bad char");
+ count++;
+ return(1);
+ }
+}
+
+char id_cord_fn(size_t i, void * client_data)
+{
+ return((char)i);
+}
+
+void test_basics()
+{
+ CORD x = "ab";
+ register int i;
+ char c;
+ CORD y;
+ CORD_pos p;
+
+ x = CORD_cat(x,x);
+ if (!IS_STRING(x)) ABORT("short cord should usually be a string");
+ if (strcmp(x, "abab") != 0) ABORT("bad CORD_cat result");
+
+ for (i = 1; i < 16; i++) {
+ x = CORD_cat(x,x);
+ }
+ x = CORD_cat(x,"c");
+ if (CORD_len(x) != 128*1024+1) ABORT("bad length");
+
+ count = 0;
+ if (CORD_iter5(x, 64*1024-1, test_fn, CORD_NO_FN, (void *)13) == 0) {
+ ABORT("CORD_iter5 failed");
+ }
+ if (count != 64*1024 + 2) ABORT("CORD_iter5 failed");
+
+ count = 0;
+ CORD_set_pos(p, x, 64*1024-1);
+ while(CORD_pos_valid(p)) {
+ (void) test_fn(CORD_pos_fetch(p), (void *)13);
+ CORD_next(p);
+ }
+ if (count != 64*1024 + 2) ABORT("Position based iteration failed");
+
+ y = CORD_substr(x, 1023, 5);
+ if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (strcmp(y, "babab") != 0) ABORT("bad CORD_substr result");
+
+ y = CORD_substr(x, 1024, 8);
+ if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (strcmp(y, "abababab") != 0) ABORT("bad CORD_substr result");
+
+ y = CORD_substr(x, 128*1024-1, 8);
+ if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (strcmp(y, "bc") != 0) ABORT("bad CORD_substr result");
+
+ x = CORD_balance(x);
+ if (CORD_len(x) != 128*1024+1) ABORT("bad length");
+
+ count = 0;
+ if (CORD_iter5(x, 64*1024-1, test_fn, CORD_NO_FN, (void *)13) == 0) {
+ ABORT("CORD_iter5 failed");
+ }
+ if (count != 64*1024 + 2) ABORT("CORD_iter5 failed");
+
+ y = CORD_substr(x, 1023, 5);
+ if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (strcmp(y, "babab") != 0) ABORT("bad CORD_substr result");
+ y = CORD_from_fn(id_cord_fn, 0, 13);
+ i = 0;
+ CORD_set_pos(p, y, i);
+ while(CORD_pos_valid(p)) {
+ c = CORD_pos_fetch(p);
+ if(c != i) ABORT("Traversal of function node failed");
+ CORD_next(p); i++;
+ }
+ if (i != 13) ABORT("Bad apparent length for function node");
+}
+
+void test_extras()
+{
+# ifdef __OS2__
+# define FNAME1 "tmp1"
+# define FNAME2 "tmp2"
+# else
+# define FNAME1 "/tmp/cord_test"
+# define FNAME2 "/tmp/cord_test2"
+# endif
+ register int i;
+ CORD y = "abcdefghijklmnopqrstuvwxyz0123456789";
+ CORD x = "{}";
+ CORD w, z;
+ FILE *f;
+ FILE *f1a, *f1b, *f2;
+
+ for (i = 1; i < 100; i++) {
+ x = CORD_cat(x, y);
+ }
+ z = CORD_balance(x);
+ if (CORD_cmp(x,z) != 0) ABORT("balanced string comparison wrong");
+ if (CORD_cmp(x,CORD_cat(z, CORD_nul(13))) >= 0) ABORT("comparison 2");
+ if (CORD_cmp(CORD_cat(x, CORD_nul(13)), z) <= 0) ABORT("comparison 3");
+ if (CORD_cmp(x,CORD_cat(z, "13")) >= 0) ABORT("comparison 4");
+ if ((f = fopen(FNAME1, "w")) == 0) ABORT("open failed");
+ if (CORD_put(z,f) == EOF) ABORT("CORD_put failed");
+ if (fclose(f) == EOF) ABORT("fclose failed");
+ w = CORD_from_file(f1a = fopen(FNAME1, "rb"));
+ if (CORD_len(w) != CORD_len(z)) ABORT("file length wrong");
+ if (CORD_cmp(w,z) != 0) ABORT("file comparison wrong");
+ if (CORD_cmp(CORD_substr(w, 50*36+2, 36), y) != 0)
+ ABORT("file substr wrong");
+ z = CORD_from_file_lazy(f1b = fopen(FNAME1, "rb"));
+ if (CORD_cmp(w,z) != 0) ABORT("File conversions differ");
+ if (CORD_chr(w, 0, '9') != 37) ABORT("CORD_chr failed 1");
+ if (CORD_chr(w, 3, 'a') != 38) ABORT("CORD_chr failed 2");
+ if (CORD_rchr(w, CORD_len(w) - 1, '}') != 1) ABORT("CORD_rchr failed");
+ x = y;
+ for (i = 1; i < 14; i++) {
+ x = CORD_cat(x,x);
+ }
+ if ((f = fopen(FNAME2, "w")) == 0) ABORT("2nd open failed");
+ if (CORD_put(x,f) == EOF) ABORT("CORD_put failed");
+ if (fclose(f) == EOF) ABORT("fclose failed");
+ w = CORD_from_file(f2 = fopen(FNAME2, "rb"));
+ if (CORD_len(w) != CORD_len(x)) ABORT("file length wrong");
+ if (CORD_cmp(w,x) != 0) ABORT("file comparison wrong");
+ if (CORD_cmp(CORD_substr(w, 1000*36, 36), y) != 0)
+ ABORT("file substr wrong");
+ if (strcmp(CORD_to_char_star(CORD_substr(w, 1000*36, 36)), y) != 0)
+ ABORT("char * file substr wrong");
+ if (strcmp(CORD_substr(w, 1000*36, 2), "ab") != 0)
+ ABORT("short file substr wrong");
+ if (CORD_str(x,1,"9a") != 35) ABORT("CORD_str failed 1");
+ if (CORD_str(x,0,"9abcdefghijk") != 35) ABORT("CORD_str failed 2");
+ if (CORD_str(x,0,"9abcdefghijx") != CORD_NOT_FOUND)
+ ABORT("CORD_str failed 3");
+ if (CORD_str(x,0,"9>") != CORD_NOT_FOUND) ABORT("CORD_str failed 4");
+ if (remove(FNAME1) != 0) {
+ /* On some systems, e.g. OS2, this may fail if f1 is still open. */
+ if ((fclose(f1a) == EOF) & (fclose(f1b) == EOF))
+ ABORT("fclose(f1) failed");
+ if (remove(FNAME1) != 0) ABORT("remove 1 failed");
+ }
+ if (remove(FNAME2) != 0) {
+ if (fclose(f2) == EOF) ABORT("fclose(f2) failed");
+ if (remove(FNAME2) != 0) ABORT("remove 2 failed");
+ }
+}
+
+void test_printf()
+{
+ CORD result;
+ char result2[200];
+ long l;
+ short s;
+ CORD x;
+
+ if (CORD_sprintf(&result, "%7.2f%ln", 3.14159, &l) != 7)
+ ABORT("CORD_sprintf failed 1");
+ if (CORD_cmp(result, " 3.14") != 0)ABORT("CORD_sprintf goofed 1");
+ if (l != 7) ABORT("CORD_sprintf goofed 2");
+ if (CORD_sprintf(&result, "%-7.2s%hn%c%s", "abcd", &s, 'x', "yz") != 10)
+ ABORT("CORD_sprintf failed 2");
+ if (CORD_cmp(result, "ab xyz") != 0)ABORT("CORD_sprintf goofed 3");
+ if (s != 7) ABORT("CORD_sprintf goofed 4");
+ x = "abcdefghij";
+ x = CORD_cat(x,x);
+ x = CORD_cat(x,x);
+ x = CORD_cat(x,x);
+ if (CORD_sprintf(&result, "->%-120.78r!\n", x) != 124)
+ ABORT("CORD_sprintf failed 3");
+ (void) sprintf(result2, "->%-120.78s!\n", CORD_to_char_star(x));
+ if (CORD_cmp(result, result2) != 0)ABORT("CORD_sprintf goofed 5");
+}
+
+int main()
+{
+ test_basics();
+ test_extras();
+ test_printf();
+ CORD_fprintf(stderr, "SUCCEEDED\n");
+ return(0);
+}
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/*
+ * These are functions on cords that do not need to understand their
+ * implementation. They serve also serve as example client code for
+ * cord_basics.
+ */
+/* Boehm, May 19, 1994 2:18 pm PDT */
+# include <stdio.h>
+# include <string.h>
+# include <stdlib.h>
+# include "cord.h"
+# include "ec.h"
+# define I_HIDE_POINTERS /* So we get access to allocation lock. */
+ /* We use this for lazy file reading, */
+ /* so that we remain independent */
+ /* of the threads primitives. */
+# include "../gc.h"
+
+/* The standard says these are in stdio.h, but they aren't always: */
+# ifndef SEEK_SET
+# define SEEK_SET 0
+# endif
+# ifndef SEEK_END
+# define SEEK_END 2
+# endif
+
+# define BUFSZ 2048 /* Size of stack allocated buffers when */
+ /* we want large buffers. */
+
+typedef void (* oom_fn)(void);
+
+# define OUT_OF_MEMORY { if (CORD_oom_fn != (oom_fn) 0) (*CORD_oom_fn)(); \
+ ABORT("Out of memory\n"); }
+# define ABORT(msg) { fprintf(stderr, "%s\n", msg); abort(); }
+
+CORD CORD_cat_char(CORD x, char c)
+{
+ register char * string;
+
+ if (c == '\0') return(CORD_cat(x, CORD_nul(1)));
+ string = GC_MALLOC_ATOMIC(2);
+ if (string == 0) OUT_OF_MEMORY;
+ string[0] = c;
+ string[1] = '\0';
+ return(CORD_cat_char_star(x, string, 1));
+}
+
+typedef struct {
+ size_t len;
+ size_t count;
+ char * buf;
+} CORD_fill_data;
+
+int CORD_fill_proc(char c, void * client_data)
+{
+ register CORD_fill_data * d = (CORD_fill_data *)client_data;
+ register size_t count = d -> count;
+
+ (d -> buf)[count] = c;
+ d -> count = ++count;
+ if (count >= d -> len) {
+ return(1);
+ } else {
+ return(0);
+ }
+}
+
+int CORD_batched_fill_proc(const char * s, void * client_data)
+{
+ register CORD_fill_data * d = (CORD_fill_data *)client_data;
+ register size_t count = d -> count;
+ register size_t max = d -> len;
+ register char * buf = d -> buf;
+ register const char * t = s;
+
+ while(((d -> buf)[count] = *t++) != '\0') {
+ count++;
+ if (count >= max) {
+ d -> count = count;
+ return(1);
+ }
+ }
+ d -> count = count;
+ return(0);
+}
+
+/* Fill buf with between min and max characters starting at i. */
+/* Assumes len characters are available. */
+void CORD_fill_buf(CORD x, size_t i, size_t len, char * buf)
+{
+ CORD_fill_data fd;
+
+ fd.len = len;
+ fd.buf = buf;
+ fd.count = 0;
+ (void)CORD_iter5(x, i, CORD_fill_proc, CORD_batched_fill_proc, &fd);
+}
+
+int CORD_cmp(CORD x, CORD y)
+{
+ CORD_pos xpos;
+ CORD_pos ypos;
+ register size_t avail, yavail;
+
+ if (y == CORD_EMPTY) return(x != CORD_EMPTY);
+ if (x == CORD_EMPTY) return(-1);
+ if (IS_STRING(y) && IS_STRING(x)) return(strcmp(x,y));
+ CORD_set_pos(xpos, x, 0);
+ CORD_set_pos(ypos, y, 0);
+ for(;;) {
+ if (!CORD_pos_valid(xpos)) {
+ if (CORD_pos_valid(ypos)) {
+ return(-1);
+ } else {
+ return(0);
+ }
+ }
+ if (!CORD_pos_valid(ypos)) {
+ return(1);
+ }
+ if ((avail = CORD_pos_chars_left(xpos)) <= 0
+ || (yavail = CORD_pos_chars_left(ypos)) <= 0) {
+ register char xcurrent = CORD_pos_fetch(xpos);
+ register char ycurrent = CORD_pos_fetch(ypos);
+ if (xcurrent != ycurrent) return(xcurrent - ycurrent);
+ CORD_next(xpos);
+ CORD_next(ypos);
+ } else {
+ /* process as many characters as we can */
+ register int result;
+
+ if (avail > yavail) avail = yavail;
+ result = strncmp(CORD_pos_cur_char_addr(xpos),
+ CORD_pos_cur_char_addr(ypos), avail);
+ if (result != 0) return(result);
+ CORD_pos_advance(xpos, avail);
+ CORD_pos_advance(ypos, avail);
+ }
+ }
+}
+
+int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len)
+{
+ CORD_pos xpos;
+ CORD_pos ypos;
+ register size_t count;
+ register long avail, yavail;
+
+ CORD_set_pos(xpos, x, x_start);
+ CORD_set_pos(ypos, y, y_start);
+ for(count = 0; count < len;) {
+ if (!CORD_pos_valid(xpos)) {
+ if (CORD_pos_valid(ypos)) {
+ return(-1);
+ } else {
+ return(0);
+ }
+ }
+ if (!CORD_pos_valid(ypos)) {
+ return(1);
+ }
+ if ((avail = CORD_pos_chars_left(xpos)) <= 0
+ || (yavail = CORD_pos_chars_left(ypos)) <= 0) {
+ register char xcurrent = CORD_pos_fetch(xpos);
+ register char ycurrent = CORD_pos_fetch(ypos);
+ if (xcurrent != ycurrent) return(xcurrent - ycurrent);
+ CORD_next(xpos);
+ CORD_next(ypos);
+ count++;
+ } else {
+ /* process as many characters as we can */
+ register int result;
+
+ if (avail > yavail) avail = yavail;
+ count += avail;
+ if (count > len) avail -= (count - len);
+ result = strncmp(CORD_pos_cur_char_addr(xpos),
+ CORD_pos_cur_char_addr(ypos), (size_t)avail);
+ if (result != 0) return(result);
+ CORD_pos_advance(xpos, (size_t)avail);
+ CORD_pos_advance(ypos, (size_t)avail);
+ }
+ }
+ return(0);
+}
+
+char * CORD_to_char_star(CORD x)
+{
+ register size_t len;
+ char * result;
+
+ if (x == 0) return("");
+ len = CORD_len(x);
+ result = (char *)GC_MALLOC_ATOMIC(len + 1);
+ if (result == 0) OUT_OF_MEMORY;
+ CORD_fill_buf(x, 0, len, result);
+ result[len] = '\0';
+ return(result);
+}
+
+char CORD_fetch(CORD x, size_t i)
+{
+ CORD_pos xpos;
+
+ CORD_set_pos(xpos, x, i);
+ if (!CORD_pos_valid(xpos)) ABORT("bad index?");
+ return(CORD_pos_fetch(xpos));
+}
+
+
+int CORD_put_proc(char c, void * client_data)
+{
+ register FILE * f = (FILE *)client_data;
+
+ return(putc(c, f) == EOF);
+}
+
+int CORD_batched_put_proc(const char * s, void * client_data)
+{
+ register FILE * f = (FILE *)client_data;
+
+ return(fputs(s, f) == EOF);
+}
+
+
+int CORD_put(CORD x, FILE * f)
+{
+ if (CORD_iter5(x, 0, CORD_put_proc, CORD_batched_put_proc, f)) {
+ return(EOF);
+ } else {
+ return(1);
+ }
+}
+
+typedef struct {
+ size_t pos; /* Current position in the cord */
+ char target; /* Character we're looking for */
+} chr_data;
+
+int CORD_chr_proc(char c, void * client_data)
+{
+ register chr_data * d = (chr_data *)client_data;
+
+ if (c == d -> target) return(1);
+ (d -> pos) ++;
+ return(0);
+}
+
+int CORD_rchr_proc(char c, void * client_data)
+{
+ register chr_data * d = (chr_data *)client_data;
+
+ if (c == d -> target) return(1);
+ (d -> pos) --;
+ return(0);
+}
+
+int CORD_batched_chr_proc(const char *s, void * client_data)
+{
+ register chr_data * d = (chr_data *)client_data;
+ register char * occ = strchr(s, d -> target);
+
+ if (occ == 0) {
+ d -> pos += strlen(s);
+ return(0);
+ } else {
+ d -> pos += occ - s;
+ return(1);
+ }
+}
+
+size_t CORD_chr(CORD x, size_t i, int c)
+{
+ chr_data d;
+
+ d.pos = i;
+ d.target = c;
+ if (CORD_iter5(x, i, CORD_chr_proc, CORD_batched_chr_proc, &d)) {
+ return(d.pos);
+ } else {
+ return(CORD_NOT_FOUND);
+ }
+}
+
+size_t CORD_rchr(CORD x, size_t i, int c)
+{
+ chr_data d;
+
+ d.pos = i;
+ d.target = c;
+ if (CORD_riter4(x, i, CORD_rchr_proc, &d)) {
+ return(d.pos);
+ } else {
+ return(CORD_NOT_FOUND);
+ }
+}
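+
+/* Sketch of a typical scan over every occurrence of a character;	*/
+/* x stands for any cord:						*/
+/*
+ *	size_t pos = CORD_chr(x, 0, '\n');
+ *
+ *	while (pos != CORD_NOT_FOUND) {
+ *	    ... process the newline at position pos ...
+ *	    if (pos + 1 >= CORD_len(x)) break;
+ *	    pos = CORD_chr(x, pos + 1, '\n');
+ *	}
+ */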
+
+/* Find the first occurrence of s in x at position start or later. */
+/* This uses an asymptotically poor algorithm, which should typically */
+/* perform acceptably. We compare the first few characters directly, */
+/* and call CORD_ncmp whenever there is a partial match. */
+/* This has the advantage that we allocate very little, or not at all. */
+/* It's very fast if there are few close misses. */
+size_t CORD_str(CORD x, size_t start, CORD s)
+{
+ CORD_pos xpos;
+ size_t xlen = CORD_len(x);
+ size_t slen;
+ register size_t start_len;
+ const char * s_start;
+ unsigned long s_buf = 0; /* The first few characters of s */
+ unsigned long x_buf = 0; /* Start of candidate substring. */
+ /* Initialized only to make compilers */
+ /* happy. */
+ unsigned long mask = 0;
+ register size_t i;
+ register size_t match_pos;
+
+ if (s == CORD_EMPTY) return(start);
+ if (IS_STRING(s)) {
+ s_start = s;
+ slen = strlen(s);
+ } else {
+ s_start = CORD_to_char_star(CORD_substr(s, 0, sizeof(unsigned long)));
+ slen = CORD_len(s);
+ }
+ if (xlen < start || xlen - start < slen) return(CORD_NOT_FOUND);
+ start_len = slen;
+ if (start_len > sizeof(unsigned long)) start_len = sizeof(unsigned long);
+ CORD_set_pos(xpos, x, start);
+ for (i = 0; i < start_len; i++) {
+ mask <<= 8;
+ mask |= 0xff;
+ s_buf <<= 8;
+ s_buf |= s_start[i];
+ x_buf <<= 8;
+ x_buf |= CORD_pos_fetch(xpos);
+ CORD_next(xpos);
+ }
+    for (match_pos = start; ; match_pos++) {
+	if ((x_buf & mask) == s_buf) {
+	    if (slen == start_len ||
+		CORD_ncmp(x, match_pos + start_len,
+			  s, start_len, slen - start_len) == 0) {
+		return(match_pos);
+	    }
+	}
+	/* Test the last candidate position before fetching another	*/
+	/* character; otherwise a match ending exactly at the end of x	*/
+	/* is missed, and we read past the end of the cord.		*/
+	if (match_pos == xlen - slen) return(CORD_NOT_FOUND);
+	x_buf <<= 8;
+	x_buf |= CORD_pos_fetch(xpos);
+	CORD_next(xpos);
+    }
+}
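+
+/* Usage sketch; haystack and needle stand for any cords:		*/
+/*
+ *	size_t where = CORD_str(haystack, 0, needle);
+ *
+ *	if (where != CORD_NOT_FOUND) {
+ *	    ... a copy of needle starts at position where ...
+ *	}
+ */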
+
+void CORD_ec_flush_buf(CORD_ec x)
+{
+ register size_t len = x[0].ec_bufptr - x[0].ec_buf;
+ char * s;
+
+ if (len == 0) return;
+    s = GC_MALLOC_ATOMIC(len+1);
+    if (s == 0) OUT_OF_MEMORY;
+ memcpy(s, x[0].ec_buf, len);
+ s[len] = '\0';
+ x[0].ec_cord = CORD_cat_char_star(x[0].ec_cord, s, len);
+ x[0].ec_bufptr = x[0].ec_buf;
+}
+
+void CORD_ec_append_cord(CORD_ec x, CORD s)
+{
+ CORD_ec_flush_buf(x);
+ x[0].ec_cord = CORD_cat(x[0].ec_cord, s);
+}
+
+/*ARGSUSED*/
+char CORD_nul_func(size_t i, void * client_data)
+{
+ return((char)(unsigned long)client_data);
+}
+
+
+CORD CORD_chars(char c, size_t i)
+{
+ return(CORD_from_fn(CORD_nul_func, (void *)(unsigned long)c, i));
+}
+
+CORD CORD_from_file_eager(FILE * f)
+{
+ register int c;
+ CORD_ec ecord;
+
+ CORD_ec_init(ecord);
+ for(;;) {
+ c = getc(f);
+ if (c == 0) {
+ /* Append the right number of NULs */
+	    /* Note that any string of NULs is represented in 4 words,	*/
+ /* independent of its length. */
+ register size_t count = 1;
+
+ CORD_ec_flush_buf(ecord);
+ while ((c = getc(f)) == 0) count++;
+ ecord[0].ec_cord = CORD_cat(ecord[0].ec_cord, CORD_nul(count));
+ }
+ if (c == EOF) break;
+ CORD_ec_append(ecord, c);
+ }
+ (void) fclose(f);
+ return(CORD_balance(CORD_ec_to_cord(ecord)));
+}
+
+/* The state maintained for a lazily read file consists primarily */
+/* of a large direct-mapped cache of previously read values. */
+/* We could rely more on stdio buffering. That would have 2 */
+/* disadvantages: */
+/* 1) Empirically, not all fseek implementations preserve the */
+/* buffer whenever they could. */
+/* 2) It would fail if 2 different sections of a long cord */
+/* were being read alternately. */
+/* We do use the stdio buffer for read ahead. */
+/* To guarantee thread safety in the presence of atomic pointer */
+/* writes, cache lines are always replaced, and never modified in */
+/* place. */
+
+# define LOG_CACHE_SZ 14
+# define CACHE_SZ (1 << LOG_CACHE_SZ)
+# define LOG_LINE_SZ 9
+# define LINE_SZ (1 << LOG_LINE_SZ)
+
+typedef struct {
+ size_t tag;
+ char data[LINE_SZ];
+ /* data[i%LINE_SZ] = ith char in file if tag = i/LINE_SZ */
+} cache_line;
+
+typedef struct {
+ FILE * lf_file;
+ size_t lf_current; /* Current file pointer value */
+ cache_line * volatile lf_cache[CACHE_SZ/LINE_SZ];
+} lf_state;
+
+# define MOD_CACHE_SZ(n) ((n) & (CACHE_SZ - 1))
+# define DIV_CACHE_SZ(n) ((n) >> LOG_CACHE_SZ)
+# define MOD_LINE_SZ(n) ((n) & (LINE_SZ - 1))
+# define DIV_LINE_SZ(n) ((n) >> LOG_LINE_SZ)
+# define LINE_START(n) ((n) & ~(LINE_SZ - 1))
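+
+/* For example, with LOG_LINE_SZ = 9 and LOG_CACHE_SZ = 14, file	*/
+/* position 1234 has LINE_START(1234) = 1024, occupies cache slot	*/
+/* DIV_LINE_SZ(MOD_CACHE_SZ(1234)) = 2, and carries tag		*/
+/* DIV_LINE_SZ(1234) = 2.  Position 1234 + CACHE_SZ maps to the same	*/
+/* slot but carries tag 34, so it forces the line to be refilled.	*/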
+
+typedef struct {
+ lf_state * state;
+ size_t file_pos; /* Position of needed character. */
+ cache_line * new_cache;
+} refill_data;
+
+/* Executed with allocation lock. */
+static char refill_cache(client_data)
+refill_data * client_data;
+{
+ register lf_state * state = client_data -> state;
+ register size_t file_pos = client_data -> file_pos;
+ FILE *f = state -> lf_file;
+ size_t line_start = LINE_START(file_pos);
+ size_t line_no = DIV_LINE_SZ(MOD_CACHE_SZ(file_pos));
+ cache_line * new_cache = client_data -> new_cache;
+
+ if (line_start != state -> lf_current
+ && fseek(f, line_start, SEEK_SET) != 0) {
+ ABORT("fseek failed");
+ }
+ if (fread(new_cache -> data, sizeof(char), LINE_SZ, f)
+ <= file_pos - line_start) {
+ ABORT("fread failed");
+ }
+ new_cache -> tag = DIV_LINE_SZ(file_pos);
+ /* Store barrier goes here. */
+ state -> lf_cache[line_no] = new_cache;
+ state -> lf_current = line_start + LINE_SZ;
+ return(new_cache->data[MOD_LINE_SZ(file_pos)]);
+}
+
+char CORD_lf_func(size_t i, void * client_data)
+{
+ register lf_state * state = (lf_state *)client_data;
+ register cache_line * cl = state -> lf_cache[DIV_LINE_SZ(MOD_CACHE_SZ(i))];
+
+ if (cl == 0 || cl -> tag != DIV_LINE_SZ(i)) {
+ /* Cache miss */
+ refill_data rd;
+
+ rd.state = state;
+ rd.file_pos = i;
+ rd.new_cache = GC_NEW_ATOMIC(cache_line);
+ if (rd.new_cache == 0) OUT_OF_MEMORY;
+ return((char)(GC_word)
+ GC_call_with_alloc_lock((GC_fn_type) refill_cache, &rd));
+ }
+ return(cl -> data[MOD_LINE_SZ(i)]);
+}
+
+/*ARGSUSED*/
+void CORD_lf_close_proc(void * obj, void * client_data)
+{
+ if (fclose(((lf_state *)obj) -> lf_file) != 0) {
+ ABORT("CORD_lf_close_proc: fclose failed");
+ }
+}
+
+CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
+{
+ register lf_state * state = GC_NEW(lf_state);
+ register int i;
+
+ if (state == 0) OUT_OF_MEMORY;
+ state -> lf_file = f;
+ for (i = 0; i < CACHE_SZ/LINE_SZ; i++) {
+ state -> lf_cache[i] = 0;
+ }
+ state -> lf_current = 0;
+ GC_register_finalizer(state, CORD_lf_close_proc, 0, 0, 0);
+ return(CORD_from_fn(CORD_lf_func, state, len));
+}
+
+CORD CORD_from_file_lazy(FILE * f)
+{
+    register long len;	/* long, not size_t: ftell may return -1. */
+
+    if (fseek(f, 0l, SEEK_END) != 0) {
+	ABORT("Bad fd argument - fseek failed");
+    }
+    if ((len = ftell(f)) < 0) {
+	ABORT("Bad fd argument - ftell failed");
+    }
+    rewind(f);
+    return(CORD_from_file_lazy_inner(f, (size_t)len));
+}
+
+# define LAZY_THRESHOLD (128*1024 + 1)
+
+CORD CORD_from_file(FILE * f)
+{
+    register long len;	/* long, not size_t: ftell may return -1. */
+
+    if (fseek(f, 0l, SEEK_END) != 0) {
+	ABORT("Bad fd argument - fseek failed");
+    }
+    if ((len = ftell(f)) < 0) {
+	ABORT("Bad fd argument - ftell failed");
+    }
+    rewind(f);
+    if (len < LAZY_THRESHOLD) {
+	return(CORD_from_file_eager(f));
+    } else {
+	return(CORD_from_file_lazy_inner(f, (size_t)len));
+    }
+}
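+
+/* Usage sketch (the file name is illustrative).  Both paths		*/
+/* eventually close f: the eager one directly, the lazy one via a	*/
+/* finalizer on the reader state:					*/
+/*
+ *	FILE * f = fopen("big.txt", "rb");
+ *	CORD contents = (f == 0 ? CORD_EMPTY : CORD_from_file(f));
+ */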
--- /dev/null
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/*
+ * A really simple-minded text editor based on cords.
+ * Things it does right:
+ * No size bounds.
+ * Inbounded undo.
+ * Shouldn't crash no matter what file you invoke it on (e.g. /vmunix)
+ * (Make sure /vmunix is not writable before you try this.)
+ * Scrolls horizontally.
+ * Things it does wrong:
+ * It doesn't handle tabs reasonably (use "expand" first).
+ * The command set is MUCH too small.
+ * The redisplay algorithm doesn't let curses do the scrolling.
+ * The rule for moving the window over the file is suboptimal.
+ */
+/* Boehm, May 19, 1994 2:20 pm PDT */
+#include <stdio.h>
+#include <ctype.h>	/* for iscntrl() and isdigit() */
+#include "../gc.h"
+#include "cord.h"
+#ifdef WIN32
+# include <windows.h>
+# include "de_win.h"
+#else
+# include <curses.h>
+# define de_error(s) { fprintf(stderr, s); sleep(2); }
+#endif
+#include "de_cmds.h"
+
+
+/* List of line number to position mappings, in descending order. */
+/* There may be holes. */
+typedef struct LineMapRep {
+ int line;
+ size_t pos;
+ struct LineMapRep * previous;
+} * line_map;
+
+/* List of file versions, one per edit operation */
+typedef struct HistoryRep {
+ CORD file_contents;
+ struct HistoryRep * previous;
+ line_map map; /* Invalid for first record "now" */
+} * history;
+
+history now = 0;
+CORD current; /* == now -> file_contents. */
+size_t current_len; /* Current file length. */
+line_map current_map = 0; /* Current line no. to pos. map */
+size_t current_map_size = 0; /* Number of current_map entries. */
+ /* Not always accurate, but reset */
+ /* by prune_map. */
+# define MAX_MAP_SIZE 3000
+
+/* Current display position */
+int dis_line = 0;
+int dis_col = 0;
+
+# define ALL -1
+# define NONE -2
+int need_redisplay = 0; /* Line that needs to be redisplayed. */
+
+
+/* Current cursor position. Always within file. */
+int line = 0;
+int col = 0;
+size_t file_pos = 0; /* Character position corresponding to cursor. */
+
+/* Invalidate line map for lines > i */
+void invalidate_map(int i)
+{
+ while(current_map -> line > i) {
+ current_map = current_map -> previous;
+ current_map_size--;
+ }
+}
+
+/* Reduce the number of map entries to save space for huge files. */
+/* This also affects maps in histories. */
+void prune_map()
+{
+ line_map map = current_map;
+ int start_line = map -> line;
+
+ current_map_size = 0;
+ for(; map != 0; map = map -> previous) {
+ current_map_size++;
+ if (map -> line < start_line - LINES && map -> previous != 0) {
+ map -> previous = map -> previous -> previous;
+ }
+ }
+}
+/* Add mapping entry */
+void add_map(int line, size_t pos)
+{
+ line_map new_map = GC_NEW(struct LineMapRep);
+
+ if (current_map_size >= MAX_MAP_SIZE) prune_map();
+ new_map -> line = line;
+ new_map -> pos = pos;
+ new_map -> previous = current_map;
+ current_map = new_map;
+ current_map_size++;
+}
+
+
+
+/* Return position of column *c of ith line in */
+/* current file. Adjust *c to be within the line.*/
+/* A 0 pointer is taken as 0 column. */
+/* Returns CORD_NOT_FOUND if i is too big. */
+/* Assumes i > dis_line. */
+size_t line_pos(int i, int *c)
+{
+ int j;
+ size_t cur;
+ size_t next;
+ line_map map = current_map;
+
+ while (map -> line > i) map = map -> previous;
+ if (map -> line < i - 2) /* rebuild */ invalidate_map(i);
+ for (j = map -> line, cur = map -> pos; j < i;) {
+ cur = CORD_chr(current, cur, '\n');
+ if (cur == current_len-1) return(CORD_NOT_FOUND);
+ cur++;
+ if (++j > current_map -> line) add_map(j, cur);
+ }
+ if (c != 0) {
+ next = CORD_chr(current, cur, '\n');
+ if (next == CORD_NOT_FOUND) next = current_len - 1;
+ if (next < cur + *c) {
+ *c = next - cur;
+ }
+ cur += *c;
+ }
+ return(cur);
+}
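+
+/* E.g., starting from a map holding only (0,0), line_pos(3, 0) scans	*/
+/* forward over three newlines, adding entries for lines 1, 2, and 3	*/
+/* as it goes, so a later line_pos(4, 0) resumes the scan from line 3	*/
+/* instead of rescanning the file from the beginning.			*/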
+
+void add_hist(CORD s)
+{
+ history new_file = GC_NEW(struct HistoryRep);
+
+ new_file -> file_contents = current = s;
+ current_len = CORD_len(s);
+ new_file -> previous = now;
+ if (now != 0) now -> map = current_map;
+ now = new_file;
+}
+
+void del_hist(void)
+{
+ now = now -> previous;
+ current = now -> file_contents;
+ current_map = now -> map;
+ current_len = CORD_len(current);
+}
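+
+/* Since cords are immutable, consecutive history entries share	*/
+/* almost all of their structure; a one-character edit allocates only	*/
+/* a few new cord nodes, which is what makes keeping every version	*/
+/* for undo affordable.						*/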
+
+/* Current screen_contents; a dynamically allocated array of CORDs */
+CORD * screen = 0;
+int screen_size = 0;
+
+# ifndef WIN32
+/* Replace a line in the curses stdscr. All control characters are */
+/* displayed as upper case characters in standout mode. This isn't */
+/* terribly appropriate for tabs. */
+void replace_line(int i, CORD s)
+{
+ register int c;
+ CORD_pos p;
+
+ if (screen == 0 || LINES > screen_size) {
+ screen_size = LINES;
+ screen = (CORD *)GC_MALLOC(screen_size * sizeof(CORD));
+ }
+ if (CORD_cmp(screen[i], s) != 0) {
+ move(i,0); clrtoeol();
+ /* A gross workaround for an apparent curses bug: */
+ if (i == LINES-1) s = CORD_substr(s, 0, CORD_len(s) - 1);
+ CORD_FOR (p, s) {
+ c = CORD_pos_fetch(p) & 0x7f;
+ if (iscntrl(c)) {
+ standout(); addch(c + 0x40); standend();
+ } else {
+ addch(c);
+ }
+ }
+ screen[i] = s;
+ }
+}
+#else
+# define replace_line(i,s) invalidate_line(i)
+#endif
+
+/* Return up to COLS characters of the line of s starting at pos, */
+/* returning only characters after the given column. */
+CORD retrieve_line(CORD s, size_t pos, unsigned column)
+{
+ CORD candidate = CORD_substr(s, pos, column + COLS);
+ /* avoids scanning very long lines */
+ int eol = CORD_chr(candidate, 0, '\n');
+ int len;
+
+ if (eol == CORD_NOT_FOUND) eol = CORD_len(candidate);
+ len = (int)eol - (int)column;
+ if (len < 0) len = 0;
+ return(CORD_substr(s, pos + column, len));
+}
+
+# ifdef WIN32
+# define refresh();
+
+ CORD retrieve_screen_line(int i)
+ {
+ register size_t pos;
+
+ invalidate_map(dis_line + LINES); /* Prune search */
+ pos = line_pos(dis_line + i, 0);
+ if (pos == CORD_NOT_FOUND) return(CORD_EMPTY);
+ return(retrieve_line(current, pos, dis_col));
+ }
+# endif
+
+/* Display the visible section of the current file */
+void redisplay(void)
+{
+ register int i;
+
+ invalidate_map(dis_line + LINES); /* Prune search */
+ for (i = 0; i < LINES; i++) {
+ if (need_redisplay == ALL || need_redisplay == i) {
+ register size_t pos = line_pos(dis_line + i, 0);
+
+ if (pos == CORD_NOT_FOUND) break;
+ replace_line(i, retrieve_line(current, pos, dis_col));
+ if (need_redisplay == i) goto done;
+ }
+ }
+ for (; i < LINES; i++) replace_line(i, CORD_EMPTY);
+done:
+ refresh();
+ need_redisplay = NONE;
+}
+
+int dis_granularity;
+
+/* Update dis_line, dis_col, and dis_pos to make cursor visible. */
+/* Assumes line, col, dis_line, dis_pos are in bounds. */
+void normalize_display()
+{
+ int old_line = dis_line;
+ int old_col = dis_col;
+
+ dis_granularity = 1;
+ if (LINES > 15 && COLS > 15) dis_granularity = 5;
+ while (dis_line > line) dis_line -= dis_granularity;
+ while (dis_col > col) dis_col -= dis_granularity;
+ while (line >= dis_line + LINES) dis_line += dis_granularity;
+ while (col >= dis_col + COLS) dis_col += dis_granularity;
+ if (old_line != dis_line || old_col != dis_col) {
+ need_redisplay = ALL;
+ }
+}
+
+# ifndef WIN32
+# define move_cursor(x,y) move(y,x)
+# endif
+
+/* Adjust display so that cursor is visible; move cursor into position */
+/* Update screen if necessary. */
+void fix_cursor(void)
+{
+ normalize_display();
+ if (need_redisplay != NONE) redisplay();
+ move_cursor(col - dis_col, line - dis_line);
+ refresh();
+# ifndef WIN32
+ fflush(stdout);
+# endif
+}
+
+/* Make sure line, col, and dis_pos are somewhere inside file. */
+/* Recompute file_pos. Assumes dis_pos is accurate or past eof */
+void fix_pos()
+{
+ int my_col = col;
+
+ if ((size_t)line > current_len) line = current_len;
+ file_pos = line_pos(line, &my_col);
+ if (file_pos == CORD_NOT_FOUND) {
+ for (line = current_map -> line, file_pos = current_map -> pos;
+ file_pos < current_len;
+ line++, file_pos = CORD_chr(current, file_pos, '\n') + 1);
+ line--;
+ file_pos = line_pos(line, &col);
+ } else {
+ col = my_col;
+ }
+}
+
+#ifndef WIN32
+/*
+ * beep() is part of some curses packages and not others.
+ * We try to match the type of the builtin one, if any.
+ */
+#ifdef __STDC__
+ int beep(void)
+#else
+ int beep()
+#endif
+{
+ putc('\007', stderr);
+ return(0);
+}
+#else
+# define beep() Beep(1000 /* Hz */, 300 /* msecs */)
+#endif
+
+# define NO_PREFIX -1
+# define BARE_PREFIX -2
+int repeat_count = NO_PREFIX; /* Current command prefix. */
+
+int locate_mode = 0; /* Currently between 2 ^Ls */
+CORD locate_string = CORD_EMPTY; /* Current search string. */
+
+char * arg_file_name;
+
+#ifdef WIN32
+/* Change the current position to whatever is currently displayed at */
+/* the given SCREEN coordinates. */
+void set_position(int c, int l)
+{
+ line = l + dis_line;
+ col = c + dis_col;
+ fix_pos();
+ move_cursor(col - dis_col, line - dis_line);
+}
+#endif /* WIN32 */
+
+/* Perform the command associated with character c. C may be an */
+/* integer > 256 denoting a windows command, one of the above control */
+/* characters, or another ASCII character to be used as either a */
+/* character to be inserted, a repeat count, or a search string, */
+/* depending on the current state. */
+void do_command(int c)
+{
+ int i;
+ int need_fix_pos;
+ FILE * out;
+
+ if ( c == '\r') c = '\n';
+ if (locate_mode) {
+ size_t new_pos;
+
+ if (c == LOCATE) {
+ locate_mode = 0;
+ locate_string = CORD_EMPTY;
+ return;
+ }
+ locate_string = CORD_cat_char(locate_string, (char)c);
+ new_pos = CORD_str(current, file_pos - CORD_len(locate_string) + 1,
+ locate_string);
+ if (new_pos != CORD_NOT_FOUND) {
+ need_redisplay = ALL;
+ new_pos += CORD_len(locate_string);
+ for (;;) {
+ file_pos = line_pos(line + 1, 0);
+ if (file_pos > new_pos) break;
+ line++;
+ }
+ col = new_pos - line_pos(line, 0);
+ file_pos = new_pos;
+ fix_cursor();
+ } else {
+ locate_string = CORD_substr(locate_string, 0,
+ CORD_len(locate_string) - 1);
+ beep();
+ }
+ return;
+ }
+ if (c == REPEAT) {
+ repeat_count = BARE_PREFIX; return;
+ } else if (c < 0x100 && isdigit(c)){
+ if (repeat_count == BARE_PREFIX) {
+ repeat_count = c - '0'; return;
+ } else if (repeat_count != NO_PREFIX) {
+ repeat_count = 10 * repeat_count + c - '0'; return;
+ }
+ }
+ if (repeat_count == NO_PREFIX) repeat_count = 1;
+ if (repeat_count == BARE_PREFIX && (c == UP || c == DOWN)) {
+ repeat_count = LINES - dis_granularity;
+ }
+ if (repeat_count == BARE_PREFIX) repeat_count = 8;
+ need_fix_pos = 0;
+ for (i = 0; i < repeat_count; i++) {
+ switch(c) {
+ case LOCATE:
+ locate_mode = 1;
+ break;
+ case TOP:
+ line = col = file_pos = 0;
+ break;
+ case UP:
+ if (line != 0) {
+ line--;
+ need_fix_pos = 1;
+ }
+ break;
+ case DOWN:
+ line++;
+ need_fix_pos = 1;
+ break;
+ case LEFT:
+ if (col != 0) {
+ col--; file_pos--;
+ }
+ break;
+ case RIGHT:
+ if (CORD_fetch(current, file_pos) == '\n') break;
+ col++; file_pos++;
+ break;
+ case UNDO:
+ del_hist();
+ need_redisplay = ALL; need_fix_pos = 1;
+ break;
+ case BS:
+ if (col == 0) {
+ beep();
+ break;
+ }
+ col--; file_pos--;
+ /* fall through: */
+ case DEL:
+ if (file_pos == current_len-1) break;
+ /* Can't delete trailing newline */
+ if (CORD_fetch(current, file_pos) == '\n') {
+ need_redisplay = ALL; need_fix_pos = 1;
+ } else {
+ need_redisplay = line - dis_line;
+ }
+ add_hist(CORD_cat(
+ CORD_substr(current, 0, file_pos),
+ CORD_substr(current, file_pos+1, current_len)));
+ invalidate_map(line);
+ break;
+ case WRITE:
+ if ((out = fopen(arg_file_name, "wb")) == NULL
+ || CORD_put(current, out) == EOF) {
+ de_error("Write failed\n");
+ need_redisplay = ALL;
+ } else {
+ fclose(out);
+ }
+ break;
+ default:
+ {
+ CORD left_part = CORD_substr(current, 0, file_pos);
+ CORD right_part = CORD_substr(current, file_pos, current_len);
+
+ add_hist(CORD_cat(CORD_cat_char(left_part, (char)c),
+ right_part));
+ invalidate_map(line);
+ if (c == '\n') {
+ col = 0; line++; file_pos++;
+ need_redisplay = ALL;
+ } else {
+ col++; file_pos++;
+ need_redisplay = line - dis_line;
+ }
+ break;
+ }
+ }
+ }
+ if (need_fix_pos) fix_pos();
+ fix_cursor();
+ repeat_count = NO_PREFIX;
+}
+
+/* OS independent initialization */
+void generic_init(void)
+{
+ FILE * f;
+ CORD initial;
+
+ if ((f = fopen(arg_file_name, "rb")) == NULL) {
+ initial = "\n";
+ } else {
+ initial = CORD_from_file(f);
+ if (initial == CORD_EMPTY
+ || CORD_fetch(initial, CORD_len(initial)-1) != '\n') {
+ initial = CORD_cat(initial, "\n");
+ }
+ }
+ add_map(0,0);
+ add_hist(initial);
+ now -> map = current_map;
+ now -> previous = now; /* Can't back up further: beginning of the world */
+ need_redisplay = ALL;
+ fix_cursor();
+}
+
+#ifndef WIN32
+
+int main(argc, argv)
+int argc;
+char ** argv;
+{
+    int c;
+
+ if (argc != 2) goto usage;
+ arg_file_name = argv[1];
+ setvbuf(stdout, GC_MALLOC_ATOMIC(8192), _IOFBF, 8192);
+ initscr();
+ noecho(); nonl(); cbreak();
+ generic_init();
+ while ((c = getchar()) != QUIT) {
+ do_command(c);
+ }
+    endwin();
+ exit(0);
+usage:
+ fprintf(stderr, "Usage: %s file\n", argv[0]);
+ fprintf(stderr, "Cursor keys: ^B(left) ^F(right) ^P(up) ^N(down)\n");
+ fprintf(stderr, "Undo: ^U Write: ^W Quit:^D Repeat count: ^R[n]\n");
+ fprintf(stderr, "Top: ^T Locate (search, find): ^L text ^L\n");
+ exit(1);
+}
+
+#endif /* !WIN32 */
--- /dev/null
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:24 pm PDT */
+
+#ifndef DE_CMDS_H
+
+# define DE_CMDS_H
+
+# define UP 16 /* ^P */
+# define DOWN 14 /* ^N */
+# define LEFT 2 /* ^B */
+# define RIGHT 6 /* ^F */
+# define DEL 127 /* ^? */
+# define BS 8 /* ^H */
+# define UNDO 21 /* ^U */
+# define WRITE 23 /* ^W */
+# define QUIT 4 /* ^D */
+# define REPEAT 18 /* ^R */
+# define LOCATE 12 /* ^L */
+# define TOP 20 /* ^T */
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+/* Boehm, May 13, 1994 9:50 am PDT */
+
+#include "windows.h"
+#include "de_cmds.h"
+#include "de_win.h"
+
+
+
+ABOUTBOX DIALOG 19, 21, 163, 47
+STYLE DS_MODALFRAME | WS_POPUP | WS_CAPTION | WS_SYSMENU
+CAPTION "About Demonstration Text Editor"
+BEGIN
+ ICON "DE", -1, 8, 8, 13, 13, WS_CHILD | WS_VISIBLE
+ LTEXT "Demonstration Text Editor", -1, 44, 8, 118, 8, WS_CHILD | WS_VISIBLE | WS_GROUP
+ LTEXT "Version 4.1", -1, 44, 16, 60, 8, WS_CHILD | WS_VISIBLE | WS_GROUP
+ PUSHBUTTON "OK", IDOK, 118, 27, 24, 14, WS_CHILD | WS_VISIBLE | WS_TABSTOP
+END
+
+
+DE MENU
+BEGIN
+ POPUP "&File"
+ BEGIN
+ MENUITEM "&Save\t^W", IDM_FILESAVE
+ MENUITEM "E&xit\t^D", IDM_FILEEXIT
+ END
+
+ POPUP "&Edit"
+ BEGIN
+ MENUITEM "Page &Down\t^R^N", IDM_EDITPDOWN
+ MENUITEM "Page &Up\t^R^P", IDM_EDITPUP
+ MENUITEM "U&ndo\t^U", IDM_EDITUNDO
+ MENUITEM "&Locate\t^L ... ^L", IDM_EDITLOCATE
+ MENUITEM "D&own\t^N", IDM_EDITDOWN
+ MENUITEM "U&p\t^P", IDM_EDITUP
+ MENUITEM "Le&ft\t^B", IDM_EDITLEFT
+ MENUITEM "&Right\t^F", IDM_EDITRIGHT
+ MENUITEM "Delete &Backward\tBS", IDM_EDITBS
+ MENUITEM "Delete F&orward\tDEL", IDM_EDITDEL
+ MENUITEM "&Top\t^T", IDM_EDITTOP
+ END
+
+ POPUP "&Help"
+ BEGIN
+ MENUITEM "&Contents", IDM_HELPCONTENTS
+ MENUITEM "&About...", IDM_HELPABOUT
+ END
+
+ MENUITEM "Page_&Down", IDM_EDITPDOWN
+ MENUITEM "Page_&Up", IDM_EDITPUP
+END
+
+
+DE ACCELERATORS
+BEGIN
+ "^R", IDM_EDITREPEAT
+ "^N", IDM_EDITDOWN
+ "^P", IDM_EDITUP
+ "^L", IDM_EDITLOCATE
+ "^B", IDM_EDITLEFT
+ "^F", IDM_EDITRIGHT
+ "^T", IDM_EDITTOP
+ VK_DELETE, IDM_EDITDEL, VIRTKEY
+ VK_BACK, IDM_EDITBS, VIRTKEY
+END
+
+
+DE ICON cord\de_win.ICO
+
--- /dev/null
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:21 pm PDT */
+
+/*
+ * The MS Windows specific part of de.
+ * This started as the generic Windows application template
+ * made available by Rob Haack (rhaack@polaris.unm.edu), but
+ * significant parts didn't survive to the final version.
+ *
+ * This was written by a nonexpert windows programmer.
+ */
+
+
+#include "windows.h"
+#include "gc.h"
+#include "cord.h"
+#include "de_cmds.h"
+#include "de_win.h"
+
+int LINES = 0;
+int COLS = 0;
+
+char szAppName[] = "DE";
+char FullAppName[] = "Demonstration Editor";
+
+HWND hwnd;
+
+void de_error(char *s)
+{
+ MessageBox( hwnd, (LPSTR) s,
+ (LPSTR) FullAppName,
+ MB_ICONINFORMATION | MB_OK );
+ InvalidateRect(hwnd, NULL, TRUE);
+}
+
+int APIENTRY WinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance,
+ LPSTR command_line, int nCmdShow)
+{
+ MSG msg;
+ WNDCLASS wndclass;
+ HANDLE hAccel;
+
+ if (!hPrevInstance)
+ {
+ wndclass.style = CS_HREDRAW | CS_VREDRAW;
+ wndclass.lpfnWndProc = WndProc;
+ wndclass.cbClsExtra = 0;
+ wndclass.cbWndExtra = DLGWINDOWEXTRA;
+ wndclass.hInstance = hInstance;
+ wndclass.hIcon = LoadIcon (hInstance, szAppName);
+ wndclass.hCursor = LoadCursor (NULL, IDC_ARROW);
+ wndclass.hbrBackground = GetStockObject(WHITE_BRUSH);
+ wndclass.lpszMenuName = "DE";
+ wndclass.lpszClassName = szAppName;
+
+ if (RegisterClass (&wndclass) == 0) {
+ char buf[50];
+
+ sprintf(buf, "RegisterClass: error code: 0x%X", GetLastError());
+ de_error(buf);
+ return(0);
+ }
+ }
+
+ /* Empirically, the command line does not include the command name ...
+ if (command_line != 0) {
+ while (isspace(*command_line)) command_line++;
+ while (*command_line != 0 && !isspace(*command_line)) command_line++;
+ while (isspace(*command_line)) command_line++;
+ } */
+
+ if (command_line == 0 || *command_line == 0) {
+ de_error("File name argument required");
+ return( 0 );
+ } else {
+ char *p = command_line;
+
+ while (*p != 0 && !isspace(*p)) p++;
+ arg_file_name = CORD_to_char_star(
+ CORD_substr(command_line, 0, p - command_line));
+ }
+
+ hwnd = CreateWindow (szAppName,
+ FullAppName,
+ WS_OVERLAPPEDWINDOW | WS_CAPTION, /* Window style */
+			 CW_USEDEFAULT, 0, /* default pos. */
+			 CW_USEDEFAULT, 0, /* default width, height */
+ NULL, /* No parent */
+ NULL, /* Window class menu */
+ hInstance, NULL);
+ if (hwnd == NULL) {
+ char buf[50];
+
+ sprintf(buf, "CreateWindow: error code: 0x%X", GetLastError());
+ de_error(buf);
+ return(0);
+ }
+
+ ShowWindow (hwnd, nCmdShow);
+
+ hAccel = LoadAccelerators( hInstance, szAppName );
+
+ while (GetMessage (&msg, NULL, 0, 0))
+ {
+ if( !TranslateAccelerator( hwnd, hAccel, &msg ) )
+ {
+ TranslateMessage (&msg);
+ DispatchMessage (&msg);
+ }
+ }
+ return msg.wParam;
+}
+
+/* Return the argument with all control characters replaced by blanks. */
+char * plain_chars(char * text, size_t len)
+{
+ char * result = GC_MALLOC_ATOMIC(len + 1);
+ register size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (iscntrl(text[i])) {
+ result[i] = ' ';
+ } else {
+ result[i] = text[i];
+ }
+ }
+ result[len] = '\0';
+ return(result);
+}
+
+/* Return the argument with all non-control-characters replaced by */
+/* blank, and all control characters c replaced by c + 32. */
+char * control_chars(char * text, size_t len)
+{
+ char * result = GC_MALLOC_ATOMIC(len + 1);
+ register size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (iscntrl(text[i])) {
+ result[i] = text[i] + 0x40;
+ } else {
+ result[i] = ' ';
+ }
+ }
+ result[len] = '\0';
+ return(result);
+}
+
+int char_width;
+int char_height;
+
+void get_line_rect(int line, int win_width, RECT * rectp)
+{
+ rectp -> top = line * char_height;
+ rectp -> bottom = rectp->top + char_height;
+ rectp -> left = 0;
+ rectp -> right = win_width;
+}
+
+int caret_visible = 0; /* Caret is currently visible. */
+
+int screen_was_painted = 0;/* Screen has been painted at least once. */
+
+void update_cursor(void);
+
+LRESULT CALLBACK WndProc (HWND hwnd, UINT message,
+ WPARAM wParam, LPARAM lParam)
+{
+ static FARPROC lpfnAboutBox;
+ static HANDLE hInstance;
+ HDC dc;
+ PAINTSTRUCT ps;
+ RECT client_area;
+ RECT this_line;
+ RECT dummy;
+ TEXTMETRIC tm;
+ register int i;
+ int id;
+
+ switch (message)
+ {
+ case WM_CREATE:
+ hInstance = ( (LPCREATESTRUCT) lParam)->hInstance;
+ lpfnAboutBox = MakeProcInstance( (FARPROC) AboutBox, hInstance );
+ dc = GetDC(hwnd);
+ SelectObject(dc, GetStockObject(SYSTEM_FIXED_FONT));
+ GetTextMetrics(dc, &tm);
+ ReleaseDC(hwnd, dc);
+ char_width = tm.tmAveCharWidth;
+ char_height = tm.tmHeight + tm.tmExternalLeading;
+ GetClientRect(hwnd, &client_area);
+ COLS = (client_area.right - client_area.left)/char_width;
+ LINES = (client_area.bottom - client_area.top)/char_height;
+ generic_init();
+ return(0);
+
+ case WM_CHAR:
+ if (wParam == QUIT) {
+ SendMessage( hwnd, WM_CLOSE, 0, 0L );
+ } else {
+ do_command(wParam);
+ }
+ return(0);
+
+ case WM_SETFOCUS:
+ CreateCaret(hwnd, NULL, char_width, char_height);
+ ShowCaret(hwnd);
+ caret_visible = 1;
+ update_cursor();
+ return(0);
+
+ case WM_KILLFOCUS:
+ HideCaret(hwnd);
+ DestroyCaret();
+ caret_visible = 0;
+ return(0);
+
+ case WM_LBUTTONUP:
+ {
+ unsigned xpos = LOWORD(lParam); /* From left */
+ unsigned ypos = HIWORD(lParam); /* from top */
+
+ set_position( xpos/char_width, ypos/char_height );
+ return(0);
+ }
+
+ case WM_COMMAND:
+ id = LOWORD(wParam);
+ if (id & EDIT_CMD_FLAG) {
+ if (id & REPEAT_FLAG) do_command(REPEAT);
+ do_command(CHAR_CMD(id));
+ return( 0 );
+ } else {
+ switch(id) {
+ case IDM_FILEEXIT:
+ SendMessage( hwnd, WM_CLOSE, 0, 0L );
+ return( 0 );
+
+ case IDM_HELPABOUT:
+		if( DialogBox( hInstance, "ABOUTBOX",
+			       hwnd, lpfnAboutBox ) )
+		    InvalidateRect( hwnd, NULL, TRUE );
+ return( 0 );
+ case IDM_HELPCONTENTS:
+ de_error(
+ "Cursor keys: ^B(left) ^F(right) ^P(up) ^N(down)\n"
+ "Undo: ^U Write: ^W Quit:^D Repeat count: ^R[n]\n"
+ "Top: ^T Locate (search, find): ^L text ^L\n");
+ return( 0 );
+ }
+ }
+ break;
+
+ case WM_CLOSE:
+ DestroyWindow( hwnd );
+ return 0;
+
+ case WM_DESTROY:
+ PostQuitMessage (0);
+ return 0;
+
+ case WM_PAINT:
+ dc = BeginPaint(hwnd, &ps);
+ GetClientRect(hwnd, &client_area);
+ COLS = (client_area.right - client_area.left)/char_width;
+ LINES = (client_area.bottom - client_area.top)/char_height;
+ SelectObject(dc, GetStockObject(SYSTEM_FIXED_FONT));
+ for (i = 0; i < LINES; i++) {
+ get_line_rect(i, client_area.right, &this_line);
+ if (IntersectRect(&dummy, &this_line, &ps.rcPaint)) {
+ CORD raw_line = retrieve_screen_line(i);
+ size_t len = CORD_len(raw_line);
+ char * text = CORD_to_char_star(raw_line);
+ /* May contain embedded NULLs */
+ char * plain = plain_chars(text, len);
+ char * blanks = CORD_to_char_star(CORD_chars(' ',
+ COLS - len));
+ char * control = control_chars(text, len);
+# define RED RGB(255,0,0)
+
+ SetBkMode(dc, OPAQUE);
+ SetTextColor(dc, GetSysColor(COLOR_WINDOWTEXT));
+
+ TextOut(dc, this_line.left, this_line.top,
+ plain, len);
+ TextOut(dc, this_line.left + len * char_width, this_line.top,
+ blanks, COLS - len);
+ SetBkMode(dc, TRANSPARENT);
+ SetTextColor(dc, RED);
+ TextOut(dc, this_line.left, this_line.top,
+ control, strlen(control));
+ }
+ }
+ EndPaint(hwnd, &ps);
+ screen_was_painted = 1;
+ return 0;
+ }
+ return DefWindowProc (hwnd, message, wParam, lParam);
+}
+
+int last_col;
+int last_line;
+
+void move_cursor(int c, int l)
+{
+ last_col = c;
+ last_line = l;
+
+ if (caret_visible) update_cursor();
+}
+
+void update_cursor(void)
+{
+ SetCaretPos(last_col * char_width, last_line * char_height);
+ ShowCaret(hwnd);
+}
+
+void invalidate_line(int i)
+{
+ RECT line;
+
+ if (!screen_was_painted) return;
+    /* Invalidating a rectangle before the screen is painted seems to	*/
+    /* result in a major performance problem.				*/
+ get_line_rect(i, COLS*char_width, &line);
+ InvalidateRect(hwnd, &line, FALSE);
+}
+
+LRESULT CALLBACK AboutBox( HWND hDlg, UINT message,
+ WPARAM wParam, LPARAM lParam )
+{
+ switch( message )
+ {
+ case WM_INITDIALOG:
+ SetFocus( GetDlgItem( hDlg, IDOK ) );
+ break;
+
+ case WM_COMMAND:
+ switch( wParam )
+ {
+ case IDOK:
+ EndDialog( hDlg, TRUE );
+ break;
+ }
+ break;
+
+ case WM_CLOSE:
+ EndDialog( hDlg, TRUE );
+ return TRUE;
+
+ }
+ return FALSE;
+}
+
--- /dev/null
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:25 pm PDT */
+
+/* cord.h, de_cmds.h, and windows.h should be included before this. */
+
+
+# define OTHER_FLAG 0x100
+# define EDIT_CMD_FLAG 0x200
+# define REPEAT_FLAG 0x400
+
+# define CHAR_CMD(i) ((i) & 0xff)
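+
+/* E.g. IDM_EDITPDOWN below is REPEAT_FLAG + EDIT_CMD_FLAG + DOWN:	*/
+/* the WM_COMMAND handler in de_win.c sees REPEAT_FLAG, issues a	*/
+/* REPEAT prefix, and then executes CHAR_CMD(id) == DOWN (^N),		*/
+/* yielding a page-down movement.					*/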
+
+/* MENU: DE */
+#define IDM_FILESAVE (EDIT_CMD_FLAG + WRITE)
+#define IDM_FILEEXIT (OTHER_FLAG + 1)
+#define IDM_HELPABOUT (OTHER_FLAG + 2)
+#define IDM_HELPCONTENTS (OTHER_FLAG + 3)
+
+#define IDM_EDITPDOWN (REPEAT_FLAG + EDIT_CMD_FLAG + DOWN)
+#define IDM_EDITPUP (REPEAT_FLAG + EDIT_CMD_FLAG + UP)
+#define IDM_EDITUNDO (EDIT_CMD_FLAG + UNDO)
+#define IDM_EDITLOCATE (EDIT_CMD_FLAG + LOCATE)
+#define IDM_EDITDOWN (EDIT_CMD_FLAG + DOWN)
+#define IDM_EDITUP (EDIT_CMD_FLAG + UP)
+#define IDM_EDITLEFT (EDIT_CMD_FLAG + LEFT)
+#define IDM_EDITRIGHT (EDIT_CMD_FLAG + RIGHT)
+#define IDM_EDITBS (EDIT_CMD_FLAG + BS)
+#define IDM_EDITDEL (EDIT_CMD_FLAG + DEL)
+#define IDM_EDITREPEAT (EDIT_CMD_FLAG + REPEAT)
+#define IDM_EDITTOP (EDIT_CMD_FLAG + TOP)
+
+
+
+
+/* Windows UI stuff */
+
+LRESULT CALLBACK WndProc (HWND hwnd, UINT message,
+			  WPARAM wParam, LPARAM lParam);
+
+LRESULT CALLBACK AboutBox( HWND hDlg, UINT message,
+			   WPARAM wParam, LPARAM lParam );
+
+
+/* Screen dimensions. Maintained by de_win.c. */
+extern int LINES;
+extern int COLS;
+
+/* File being edited. */
+extern char * arg_file_name;
+
+/* Current display position in file. Maintained by de.c */
+extern int dis_line;
+extern int dis_col;
+
+/* Current cursor position in file. */
+extern int line;
+extern int col;
+
+/*
+ * Calls from de_win.c to de.c
+ */
+
+CORD retrieve_screen_line(int i);
+ /* Get the contents of i'th screen line. */
+ /* Relies on COLS. */
+
+void set_position(int x, int y);
+ /* Set column, row. Upper left of window = (0,0). */
+
+void do_command(int);
+ /* Execute an editor command. */
+	/* Argument is a command character or one	*/
+ /* of the IDM_ commands. */
+
+void generic_init(void);
+ /* OS independent initialization */
+
+
+/*
+ * Calls from de.c to de_win.c
+ */
+
+void move_cursor(int column, int line);
+ /* Physically move the cursor on the display, */
+ /* so that it appears at */
+ /* (column, line). */
+
+void invalidate_line(int line);
+ /* Invalidate line i on the screen. */
+
+void de_error(char *s);
+ /* Display error message. */
\ No newline at end of file
--- /dev/null
+# ifndef EC_H
+# define EC_H
+
+# ifndef CORD_H
+# include "cord.h"
+# endif
+
+/* Extensible cords are strings that may be destructively appended to. */
+/* They allow fast construction of cords from characters that are */
+/* being read from a stream. */
+/*
+ * A client might look like:
+ *
+ * {
+ * CORD_ec x;
+ * CORD result;
+ * char c;
+ * FILE *f;
+ *
+ * ...
+ * CORD_ec_init(x);
+ * while(...) {
+ * c = getc(f);
+ * ...
+ * CORD_ec_append(x, c);
+ * }
+ *	    result = CORD_balance(CORD_ec_to_cord(x));
+ *	}
+ *
+ * If a C string is desired as the final result, the call to CORD_balance
+ * may be replaced by a call to CORD_to_char_star.
+ */
+
+# ifndef CORD_BUFSZ
+# define CORD_BUFSZ 128
+# endif
+
+typedef struct CORD_ec_struct {
+ CORD ec_cord;
+ char * ec_bufptr;
+ char ec_buf[CORD_BUFSZ+1];
+} CORD_ec[1];
+
+/* This structure represents the concatenation of ec_cord with */
+/* ec_buf[0 ... (ec_bufptr-ec_buf-1)] */
+
+/* Flush the buffer part of the extensible cord into ec_cord.	*/
+/* Note that this is almost the only real function, and it is */
+/* implemented in 6 lines in cordxtra.c */
+void CORD_ec_flush_buf(CORD_ec x);
+
+/* Convert an extensible cord to a cord. */
+# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord)
+
+/* Initialize an extensible cord. */
+# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf)
+
+/* Append a character to an extensible cord. */
+# define CORD_ec_append(x, c) \
+ { \
+ if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \
+ CORD_ec_flush_buf(x); \
+ } \
+ *((x)[0].ec_bufptr)++ = (c); \
+ }
+
+/* Append a cord to an extensible cord. Structure remains shared with */
+/* original. */
+void CORD_ec_append_cord(CORD_ec x, CORD s);
+
+# endif /* EC_H */
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:07 pm PDT */
+# include "gc_priv.h"
+
+/* Do we want to and know how to save the call stack at the time of */
+/* an allocation? How much space do we want to use in each object? */
+
+# if defined(SPARC) && defined(SUNOS4)
+# include <machine/frame.h>
+# define SAVE_CALL_CHAIN
+# define NFRAMES 5 /* Number of frames to save. */
+#   define NARGS 2	/* Number of arguments to save for each call.	*/
+# if NARGS > 6
+	--> We only know how to get the first 6 arguments
+# endif
+# endif
+
+# define START_FLAG ((word)0xfedcedcb)
+# define END_FLAG ((word)0xbcdecdef)
+ /* Stored both one past the end of user object, and one before */
+ /* the end of the object as seen by the allocator. */
+
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo {
+ word ci_pc;
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+ };
+#endif
+
+/* Object header */
+typedef struct {
+ char * oh_string; /* object descriptor string */
+ word oh_int; /* object descriptor integers */
+# ifdef SAVE_CALL_CHAIN
+ struct callinfo oh_ci[NFRAMES];
+# endif
+ word oh_sz; /* Original malloc arg. */
+ word oh_sf; /* start flag */
+} oh;
+/* The size of the above structure is assumed not to dealign things, */
+/* and to be a multiple of the word length. */
+
+#define DEBUG_BYTES (sizeof (oh) + sizeof (word))
+#undef ROUNDED_UP_WORDS
+#define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
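+/* A sketch of the layout produced by GC_store_debug_info below, */
+/* under the assumption (relied on by the code) that GC_size may */
+/* round the request sz up: */
+/* */
+/*   p:                  the oh header above, with oh_sz = sz */
+/*   p + sizeof(oh):     user data; the displaced pointer clients see */
+/*   user word ROUNDED_UP_WORDS(sz):  END_FLAG ^ (word)user_data */
+/*   last word of the GC object:      END_FLAG ^ (word)user_data */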
+
+#if defined(SPARC) && defined(SUNOS4)
+/* Fill in the pc and argument information for up to NFRAMES of my */
+/* callers. Ignore my frame and my caller's frame. */
+void GC_save_callers (info)
+struct callinfo info[NFRAMES];
+{
+ struct frame *frame;
+ struct frame *fp;
+ int nframes = 0;
+ word GC_save_regs_in_stack();
+
+ frame = (struct frame *) GC_save_regs_in_stack ();
+
+ for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
+ fp = fp -> fr_savfp, nframes++) {
+ register int i;
+
+ info[nframes].ci_pc = fp->fr_savpc;
+ for (i = 0; i < NARGS; i++) {
+ info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
+ }
+ }
+ if (nframes < NFRAMES) info[nframes].ci_pc = 0;
+}
+
+void GC_print_callers (info)
+struct callinfo info[NFRAMES];
+{
+ register int i,j;
+
+ GC_err_printf0("\tCall chain at allocation:\n");
+ for (i = 0; i < NFRAMES; i++) {
+ if (info[i].ci_pc == 0) break;
+ GC_err_printf1("\t##PC##= 0x%X\n\t\targs: ", info[i].ci_pc);
+ for (j = 0; j < NARGS; j++) {
+ if (j != 0) GC_err_printf0(", ");
+ GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
+ ~(info[i].ci_arg[j]));
+ }
+ GC_err_printf0("\n");
+ }
+}
+
+#endif /* SPARC & SUNOS4 */
+
+#ifdef SAVE_CALL_CHAIN
+# define ADD_CALL_CHAIN(base) GC_save_callers(((oh *)(base)) -> oh_ci)
+# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
+#else
+# define ADD_CALL_CHAIN(base)
+# define PRINT_CALL_CHAIN(base)
+#endif
+
+/* Check whether object with base pointer p has debugging info */
+/* p is assumed to point to a legitimate object in our part */
+/* of the heap. */
+bool GC_has_debug_info(p)
+ptr_t p;
+{
+ register oh * ohdr = (oh *)p;
+ register ptr_t body = (ptr_t)(ohdr + 1);
+ register word sz = GC_size((ptr_t) ohdr);
+
+ if (HBLKPTR((ptr_t)ohdr) != HBLKPTR((ptr_t)body)
+ || sz < sizeof (oh)) {
+ return(FALSE);
+ }
+ if (ohdr -> oh_sz == sz) {
+ /* Object may have had debug info, but has been deallocated */
+ return(FALSE);
+ }
+ if (ohdr -> oh_sf == (START_FLAG ^ (word)body)) return(TRUE);
+ if (((word *)ohdr)[BYTES_TO_WORDS(sz)-1] == (END_FLAG ^ (word)body)) {
+ return(TRUE);
+ }
+ return(FALSE);
+}
+
+/* Store debugging info into p. Return displaced pointer. */
+/* Assumes we don't hold allocation lock. */
+ptr_t GC_store_debug_info(p, sz, string, integer)
+register ptr_t p; /* base pointer */
+word sz; /* bytes */
+char * string;
+word integer;
+{
+ register word * result = (word *)((oh *)p + 1);
+ DCL_LOCK_STATE;
+
+    /* There is some argument that we should disable signals here. */
+ /* But that's expensive. And this way things should only appear */
+ /* inconsistent while we're in the handler. */
+ LOCK();
+ ((oh *)p) -> oh_string = string;
+ ((oh *)p) -> oh_int = integer;
+ ((oh *)p) -> oh_sz = sz;
+ ((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
+ ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
+ result[ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
+ UNLOCK();
+ return((ptr_t)result);
+}
+
+/* Check the object with debugging info at p. */
+/* Return NIL if it's OK. Else return the clobbered */
+/* address. */
+ptr_t GC_check_annotated_obj(ohdr)
+register oh * ohdr;
+{
+ register ptr_t body = (ptr_t)(ohdr + 1);
+ register word gc_sz = GC_size((ptr_t)ohdr);
+ if (ohdr -> oh_sz + DEBUG_BYTES > gc_sz) {
+ return((ptr_t)(&(ohdr -> oh_sz)));
+ }
+ if (ohdr -> oh_sf != (START_FLAG ^ (word)body)) {
+ return((ptr_t)(&(ohdr -> oh_sf)));
+ }
+ if (((word *)ohdr)[BYTES_TO_WORDS(gc_sz)-1] != (END_FLAG ^ (word)body)) {
+ return((ptr_t)((word *)ohdr + BYTES_TO_WORDS(gc_sz)-1));
+ }
+ if (((word *)body)[ROUNDED_UP_WORDS(ohdr -> oh_sz)]
+ != (END_FLAG ^ (word)body)) {
+ return((ptr_t)((word *)body + ROUNDED_UP_WORDS(ohdr -> oh_sz)));
+ }
+ return(0);
+}
+
+void GC_print_obj(p)
+ptr_t p;
+{
+ register oh * ohdr = (oh *)GC_base(p);
+
+ GC_err_printf1("0x%lx (", (unsigned long)ohdr + sizeof(oh));
+ GC_err_puts(ohdr -> oh_string);
+ GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
+ (unsigned long)(ohdr -> oh_sz));
+ PRINT_CALL_CHAIN(ohdr);
+}
+void GC_print_smashed_obj(p, clobbered_addr)
+ptr_t p, clobbered_addr;
+{
+ register oh * ohdr = (oh *)GC_base(p);
+
+ GC_err_printf2("0x%lx in object at 0x%lx(", (unsigned long)clobbered_addr,
+ (unsigned long)p);
+ if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
+ || ohdr -> oh_string == 0) {
+ GC_err_printf1("<smashed>, appr. sz = %ld)\n",
+ BYTES_TO_WORDS(GC_size((ptr_t)ohdr)));
+ } else {
+ if (ohdr -> oh_string[0] == '\0') {
+ GC_err_puts("EMPTY(smashed?)");
+ } else {
+ GC_err_puts(ohdr -> oh_string);
+ }
+ GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
+ (unsigned long)(ohdr -> oh_sz));
+ }
+}
+
+void GC_check_heap_proc();
+
+void GC_start_debugging()
+{
+ GC_check_heap = GC_check_heap_proc;
+ GC_debugging_started = TRUE;
+ GC_register_displacement((word)sizeof(oh));
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
+ }
+ ADD_CALL_CHAIN(result);
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+#ifdef STUBBORN_ALLOC
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_stubborn(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_stubborn(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_stubborn(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+        GC_err_printf1("GC_debug_malloc_stubborn(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
+ }
+ ADD_CALL_CHAIN(result);
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+void GC_debug_change_stubborn(p)
+extern_ptr_t p;
+{
+ register extern_ptr_t q = GC_base(p);
+ register hdr * hhdr;
+
+ if (q == 0) {
+ GC_err_printf1("Bad argument: 0x%lx to GC_debug_change_stubborn\n",
+ (unsigned long) p);
+ ABORT("GC_debug_change_stubborn: bad arg");
+ }
+ hhdr = HDR(q);
+ if (hhdr -> hb_obj_kind != STUBBORN) {
+ GC_err_printf1("GC_debug_change_stubborn arg not stubborn: 0x%lx\n",
+ (unsigned long) p);
+ ABORT("GC_debug_change_stubborn: arg not stubborn");
+ }
+ GC_change_stubborn(q);
+}
+
+void GC_debug_end_stubborn_change(p)
+extern_ptr_t p;
+{
+ register extern_ptr_t q = GC_base(p);
+ register hdr * hhdr;
+
+ if (q == 0) {
+ GC_err_printf1("Bad argument: 0x%lx to GC_debug_end_stubborn_change\n",
+ (unsigned long) p);
+ ABORT("GC_debug_end_stubborn_change: bad arg");
+ }
+ hhdr = HDR(q);
+ if (hhdr -> hb_obj_kind != STUBBORN) {
+ GC_err_printf1("debug_end_stubborn_change arg not stubborn: 0x%lx\n",
+ (unsigned long) p);
+ ABORT("GC_debug_end_stubborn_change: arg not stubborn");
+ }
+ GC_end_stubborn_change(q);
+}
+
+#endif /* STUBBORN_ALLOC */
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_atomic(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_atomic(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_atomic(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf1("GC_debug_malloc_atomic(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
+ }
+ ADD_CALL_CHAIN(result);
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_uncollectable(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_uncollectable(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf1("GC_debug_malloc_uncollectable(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
+ }
+ ADD_CALL_CHAIN(result);
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+
+# ifdef __STDC__
+ void GC_debug_free(extern_ptr_t p)
+# else
+ void GC_debug_free(p)
+ extern_ptr_t p;
+# endif
+{
+ register extern_ptr_t base = GC_base(p);
+ register ptr_t clobbered;
+
+ if (base == 0) {
+ GC_err_printf1("Attempt to free invalid pointer %lx\n",
+ (unsigned long)p);
+ if (p != 0) ABORT("free(invalid pointer)");
+ }
+ if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
+ GC_err_printf1(
+           "GC_debug_free called on pointer %lx without debugging info\n",
+ (unsigned long)p);
+ } else {
+ clobbered = GC_check_annotated_obj((oh *)base);
+ if (clobbered != 0) {
+ if (((oh *)base) -> oh_sz == GC_size(base)) {
+ GC_err_printf0(
+ "GC_debug_free: found previously deallocated (?) object at ");
+ } else {
+ GC_err_printf0("GC_debug_free: found smashed object at ");
+ }
+ GC_print_smashed_obj(p, clobbered);
+ }
+ /* Invalidate size */
+ ((oh *)base) -> oh_sz = GC_size(base);
+ }
+# ifdef FIND_LEAK
+ GC_free(base);
+# endif
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_realloc(extern_ptr_t p, size_t lb, char *s, int i)
+# else
+ extern_ptr_t GC_debug_realloc(p, lb, s, i)
+ extern_ptr_t p;
+ size_t lb;
+ char *s;
+ int i;
+# endif
+{
+ register extern_ptr_t base = GC_base(p);
+ register ptr_t clobbered;
+    register extern_ptr_t result;  /* Set according to object kind below. */
+ register size_t copy_sz = lb;
+ register size_t old_sz;
+ register hdr * hhdr;
+
+ if (p == 0) return(GC_debug_malloc(lb, s, i));
+ if (base == 0) {
+ GC_err_printf1(
+ "Attempt to free invalid pointer %lx\n", (unsigned long)p);
+ ABORT("realloc(invalid pointer)");
+ }
+ if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
+ GC_err_printf1(
+          "GC_debug_realloc called on pointer %lx without debugging info\n",
+ (unsigned long)p);
+ return(GC_realloc(p, lb));
+ }
+ hhdr = HDR(base);
+ switch (hhdr -> hb_obj_kind) {
+# ifdef STUBBORN_ALLOC
+ case STUBBORN:
+ result = GC_debug_malloc_stubborn(lb, s, i);
+ break;
+# endif
+ case NORMAL:
+ result = GC_debug_malloc(lb, s, i);
+ break;
+ case PTRFREE:
+ result = GC_debug_malloc_atomic(lb, s, i);
+ break;
+ default:
+ GC_err_printf0("GC_debug_realloc: encountered bad kind\n");
+ ABORT("bad kind");
+ }
+ clobbered = GC_check_annotated_obj((oh *)base);
+ if (clobbered != 0) {
+ GC_err_printf0("GC_debug_realloc: found smashed object at ");
+ GC_print_smashed_obj(p, clobbered);
+ }
+ old_sz = ((oh *)base) -> oh_sz;
+ if (old_sz < copy_sz) copy_sz = old_sz;
+ if (result == 0) return(0);
+ BCOPY(p, result, copy_sz);
+ return(result);
+}
+
+/* Check all marked objects in the given block for validity */
+/*ARGSUSED*/
+void GC_check_heap_block(hbp, dummy)
+register struct hblk *hbp; /* ptr to current heap block */
+word dummy;
+{
+ register struct hblkhdr * hhdr = HDR(hbp);
+ register word sz = hhdr -> hb_sz;
+ register int word_no;
+ register word *p, *plim;
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ do {
+ if( mark_bit_from_hdr(hhdr, word_no)
+ && GC_has_debug_info((ptr_t)p)) {
+ ptr_t clobbered = GC_check_annotated_obj((oh *)p);
+
+ if (clobbered != 0) {
+ GC_err_printf0(
+ "GC_check_heap_block: found smashed object at ");
+ GC_print_smashed_obj((ptr_t)p, clobbered);
+ }
+ }
+ word_no += sz;
+ p += sz;
+ } while( p <= plim );
+}
+
+
+/* This assumes that all accessible objects are marked, and that */
+/* I hold the allocation lock. Normally called by collector. */
+void GC_check_heap_proc()
+{
+ GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
+}
+
+struct closure {
+ GC_finalization_proc cl_fn;
+ extern_ptr_t cl_data;
+};
+
+# ifdef __STDC__
+ void * GC_make_closure(GC_finalization_proc fn, void * data)
+# else
+ extern_ptr_t GC_make_closure(fn, data)
+ GC_finalization_proc fn;
+ extern_ptr_t data;
+# endif
+{
+ struct closure * result =
+ (struct closure *) GC_malloc(sizeof (struct closure));
+
+ result -> cl_fn = fn;
+ result -> cl_data = data;
+ return((extern_ptr_t)result);
+}
+
+# ifdef __STDC__
+ void GC_debug_invoke_finalizer(void * obj, void * data)
+# else
+ void GC_debug_invoke_finalizer(obj, data)
+ char * obj;
+ char * data;
+# endif
+{
+ register struct closure * cl = (struct closure *) data;
+
+ (*(cl -> cl_fn))((extern_ptr_t)((char *)obj + sizeof(oh)), cl -> cl_data);
+}
+
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Original author: Bill Janssen
+ * Heavily modified by Hans Boehm and others
+ */
+/* Boehm, May 19, 1994 1:57 pm PDT */
+
+/*
+ * This is incredibly OS specific code for tracking down data sections in
+ * dynamic libraries. There appears to be no way of doing this quickly
+ * without groveling through undocumented data structures. We would argue
+ * that this is a bug in the design of the dlopen interface. THIS CODE
+ * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
+ * to let your vendor know ...
+ *
+ * None of this is safe with dlclose and incremental collection.
+ * But then not much of anything is safe in the presence of dlclose.
+ */
+#include <sys/types.h>
+#include "gc_priv.h"
+
+#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
+#if !defined(SUNOS4) && !defined(SUNOS5) && !defined(IRIX5) && !defined(MSWIN32) \
+    && !defined(ALPHA)
+ --> We only know how to find data segments of dynamic libraries under SunOS,
+ --> IRIX5, Win32 and OSF/1 on the Alpha (for which an implementation appears
+ --> below). Additional SVR4 variants might not be too hard to add.
+#endif
+
+#include <stdio.h>
+#ifdef SUNOS5
+# include <sys/elf.h>
+# include <dlfcn.h>
+# include <link.h>
+#endif
+#ifdef SUNOS4
+# include <dlfcn.h>
+# include <link.h>
+# include <a.out.h>
+ /* struct link_map field overrides */
+# define l_next lm_next
+# define l_addr lm_addr
+# define l_name lm_name
+#endif
+
+
+#ifdef SUNOS5
+
+#ifdef LINT
+ Elf32_Dyn _DYNAMIC;
+#endif
+
+static struct link_map *
+GC_FirstDLOpenedLinkMap()
+{
+ extern Elf32_Dyn _DYNAMIC;
+ Elf32_Dyn *dp;
+ struct r_debug *r;
+ static struct link_map * cachedResult = 0;
+
+ if( &_DYNAMIC == 0) {
+ return(0);
+ }
+ if( cachedResult == 0 ) {
+ int tag;
+ for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
+ if( tag == DT_DEBUG ) {
+ struct link_map *lm
+ = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
+ if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
+ break;
+ }
+ }
+ }
+ return cachedResult;
+}
+
+#endif
+
+#ifdef SUNOS4
+
+#ifdef LINT
+ struct link_dynamic _DYNAMIC;
+#endif
+
+static struct link_map *
+GC_FirstDLOpenedLinkMap()
+{
+ extern struct link_dynamic _DYNAMIC;
+
+ if( &_DYNAMIC == 0) {
+ return(0);
+ }
+ return(_DYNAMIC.ld_un.ld_1->ld_loaded);
+}
+
+/* Return the address of the ld.so allocated common symbol */
+/* with the least address, or 0 if none. */
+static ptr_t GC_first_common()
+{
+ ptr_t result = 0;
+ extern struct link_dynamic _DYNAMIC;
+ struct rtc_symb * curr_symbol;
+
+ if( &_DYNAMIC == 0) {
+ return(0);
+ }
+ curr_symbol = _DYNAMIC.ldd -> ldd_cp;
+ for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
+ if (result == 0
+ || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
+ result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
+ }
+ }
+ return(result);
+}
+
+#endif
+
+# if defined(SUNOS4) || defined(SUNOS5)
+/* Add dynamic library data sections to the root set. */
+# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
+# ifndef SRC_M3
+ --> fix mutual exclusion with dlopen
+# endif /* We assume M3 programs don't call dlopen for now */
+# endif
+
+# ifdef SOLARIS_THREADS
+ /* Redefine dlopen to guarantee mutual exclusion with */
+ /* GC_register_dynamic_libraries. */
+ /* assumes that dlopen doesn't need to call GC_malloc */
+ /* and friends. */
+# include <thread.h>
+# include <synch.h>
+
+void * GC_dlopen(const char *path, int mode)
+{
+ void * result;
+
+ mutex_lock(&GC_allocate_ml);
+ result = dlopen(path, mode);
+ mutex_unlock(&GC_allocate_ml);
+ return(result);
+}
+# endif
+
+void GC_register_dynamic_libraries()
+{
+    struct link_map *lm;
+
+ for (lm = GC_FirstDLOpenedLinkMap();
+ lm != (struct link_map *) 0; lm = lm->l_next)
+ {
+# ifdef SUNOS4
+ struct exec *e;
+
+ e = (struct exec *) lm->lm_addr;
+ GC_add_roots_inner(
+ ((char *) (N_DATOFF(*e) + lm->lm_addr)),
+ ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)));
+# endif
+# ifdef SUNOS5
+ Elf32_Ehdr * e;
+ Elf32_Phdr * p;
+ unsigned long offset;
+ char * start;
+ register int i;
+
+ e = (Elf32_Ehdr *) lm->l_addr;
+ p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
+ offset = ((unsigned long)(lm->l_addr));
+ for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
+ switch( p->p_type ) {
+ case PT_LOAD:
+ {
+ if( !(p->p_flags & PF_W) ) break;
+ start = ((char *)(p->p_vaddr)) + offset;
+ GC_add_roots_inner(
+ start,
+ start + p->p_memsz
+ );
+ }
+ break;
+ default:
+ break;
+ }
+ }
+# endif
+ }
+# ifdef SUNOS4
+ {
+ static ptr_t common_start = 0;
+ ptr_t common_end;
+ extern ptr_t GC_find_limit();
+
+ if (common_start == 0) common_start = GC_first_common();
+ if (common_start != 0) {
+ common_end = GC_find_limit(common_start, TRUE);
+ GC_add_roots_inner((char *)common_start, (char *)common_end);
+ }
+ }
+# endif
+}
+
+# endif /* SUNOS */
+
+#ifdef IRIX5
+
+#include <sys/procfs.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <elf.h>
+
+extern void * GC_roots_present();
+
+extern ptr_t GC_scratch_end_ptr; /* End of GC_scratch_alloc arena */
+
+/* We use /proc to track down all parts of the address space that are */
+/* mapped by the process, and throw out regions we know we shouldn't */
+/* worry about. This may also work under other SVR4 variants. */
+void GC_register_dynamic_libraries()
+{
+ static int fd = -1;
+ char buf[30];
+ static prmap_t * addr_map = 0;
+ static int current_sz = 0; /* Number of records currently in addr_map */
+ static int needed_sz; /* Required size of addr_map */
+ register int i;
+ register long flags;
+ register ptr_t start;
+ register ptr_t limit;
+ ptr_t heap_end = (ptr_t)DATASTART;
+
+ if (fd < 0) {
+ sprintf(buf, "/proc/%d", getpid());
+ fd = open(buf, O_RDONLY);
+ if (fd < 0) {
+ ABORT("/proc open failed");
+ }
+ }
+ if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
+ ABORT("/proc PIOCNMAP ioctl failed");
+ }
+ if (needed_sz >= current_sz) {
+ current_sz = needed_sz * 2 + 1;
+ /* Expansion, plus room for 0 record */
+ addr_map = (prmap_t *)GC_scratch_alloc(current_sz * sizeof(prmap_t));
+ }
+ if (ioctl(fd, PIOCMAP, addr_map) < 0) {
+ ABORT("/proc PIOCMAP ioctl failed");
+    }
+ if (GC_n_heap_sects > 0) {
+ heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
+ + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
+ if (heap_end < GC_scratch_end_ptr) heap_end = GC_scratch_end_ptr;
+ }
+ for (i = 0; i < needed_sz; i++) {
+ flags = addr_map[i].pr_mflags;
+ if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
+ if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
+ goto irrelevant;
+ /* The latter test is empirically useless. Other than the */
+ /* main data and stack segments, everything appears to be */
+ /* mapped readable, writable, executable, and shared(!!). */
+ /* This makes no sense to me. - HB */
+ start = (ptr_t)(addr_map[i].pr_vaddr);
+ if (GC_roots_present(start)) goto irrelevant;
+ if (start < heap_end && start >= (ptr_t)DATASTART)
+ goto irrelevant;
+ limit = start + addr_map[i].pr_size;
+ if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
+ /* Discard text segments, i.e. 0-offset mappings against */
+ /* executable files which appear to have ELF headers. */
+ caddr_t arg;
+ int obj;
+# define MAP_IRR_SZ 10
+ static ptr_t map_irr[MAP_IRR_SZ];
+ /* Known irrelevant map entries */
+ static int n_irr = 0;
+ struct stat buf;
+ register int i;
+
+ for (i = 0; i < n_irr; i++) {
+ if (map_irr[i] == start) goto irrelevant;
+ }
+ arg = (caddr_t)start;
+ obj = ioctl(fd, PIOCOPENM, &arg);
+ if (obj >= 0) {
+ fstat(obj, &buf);
+ close(obj);
+ if ((buf.st_mode & 0111) != 0) {
+ if (n_irr < MAP_IRR_SZ) {
+ map_irr[n_irr++] = start;
+ }
+ goto irrelevant;
+ }
+ }
+ }
+ GC_add_roots_inner(start, limit);
+ irrelevant: ;
+ }
+}
+
+#endif /* IRIX5 */
+
+# ifdef MSWIN32
+
+# define WIN32_LEAN_AND_MEAN
+# define NOSERVICE
+# include <windows.h>
+# include <stdlib.h>
+
+ /* We traverse the entire address space and register all segments */
+ /* that could possibly have been written to. */
+ DWORD GC_allocation_granularity;
+
+ extern bool GC_is_heap_base (ptr_t p);
+
+ void GC_cond_add_roots(char *base, char * limit)
+ {
+ char dummy;
+ char * stack_top
+ = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
+ if (base == limit) return;
+ if (limit > stack_top && base < GC_stackbottom) {
+ /* Part of the stack; ignore it. */
+ return;
+ }
+ GC_add_roots_inner(base, limit);
+ }
+
+ extern bool GC_win32s;
+
+ void GC_register_dynamic_libraries()
+ {
+ MEMORY_BASIC_INFORMATION buf;
+ SYSTEM_INFO sysinfo;
+ DWORD result;
+ DWORD protect;
+ LPVOID p;
+ char * base;
+ char * limit, * new_limit;
+
+ if (GC_win32s) return;
+ GetSystemInfo(&sysinfo);
+ base = limit = p = sysinfo.lpMinimumApplicationAddress;
+ GC_allocation_granularity = sysinfo.dwAllocationGranularity;
+ while (p < sysinfo.lpMaximumApplicationAddress) {
+ result = VirtualQuery(p, &buf, sizeof(buf));
+ if (result != sizeof(buf)) {
+ ABORT("Weird VirtualQuery result");
+ }
+ new_limit = (char *)p + buf.RegionSize;
+ protect = buf.Protect;
+ if (buf.State == MEM_COMMIT
+ && (protect == PAGE_EXECUTE_READWRITE
+ || protect == PAGE_READWRITE)
+ && !GC_is_heap_base(buf.AllocationBase)) {
+ if ((char *)p == limit) {
+ limit = new_limit;
+ } else {
+ GC_cond_add_roots(base, limit);
+ base = p;
+ limit = new_limit;
+ }
+ }
+ if (p > (LPVOID)new_limit /* overflow */) break;
+ p = (LPVOID)new_limit;
+ }
+ GC_cond_add_roots(base, limit);
+ }
+
+#endif /* MSWIN32 */
+
+#if defined(ALPHA)
+
+/* The OSF/1 loader interface is needed for ldr_next_module and friends. */
+#include <loader.h>
+
+void GC_register_dynamic_libraries()
+{
+ int status;
+ ldr_process_t mypid;
+
+ /* module */
+ ldr_module_t moduleid = LDR_NULL_MODULE;
+ ldr_module_info_t moduleinfo;
+ size_t moduleinfosize = sizeof(moduleinfo);
+ size_t modulereturnsize;
+
+ /* region */
+ ldr_region_t region;
+ ldr_region_info_t regioninfo;
+ size_t regioninfosize = sizeof(regioninfo);
+ size_t regionreturnsize;
+
+ /* Obtain id of this process */
+ mypid = ldr_my_process();
+
+ /* For each module */
+ while (TRUE) {
+
+ /* Get the next (first) module */
+ status = ldr_next_module(mypid, &moduleid);
+
+ /* Any more modules? */
+ if (moduleid == LDR_NULL_MODULE)
+ break; /* No more modules */
+
+ /* Check status AFTER checking moduleid because */
+ /* of a bug in the non-shared ldr_next_module stub */
+ if (status != 0 ) {
+ GC_printf("dynamic_load: status = %ld\n", (long)status);
+ {
+ extern char *sys_errlist[];
+ extern int sys_nerr;
+ extern int errno;
+                if (errno < sys_nerr) {
+ GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
+ } else {
+ GC_printf("dynamic_load: %d\n", errno);
+ }
+ }
+ ABORT("ldr_next_module failed");
+ }
+
+ /* Get the module information */
+ status = ldr_inq_module(mypid, moduleid, &moduleinfo,
+ moduleinfosize, &modulereturnsize);
+ if (status != 0 )
+ ABORT("ldr_inq_module failed");
+
+ /* is module for the main program (i.e. nonshared portion)? */
+ if (moduleinfo.lmi_flags & LDR_MAIN)
+ continue; /* skip the main module */
+
+# ifdef VERBOSE
+ GC_printf("---Module---\n");
+ GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
+ GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
+ GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
+ GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
+# endif
+
+ /* For each region in this module */
+ for (region = 0; region < moduleinfo.lmi_nregion; region++) {
+
+ /* Get the region information */
+            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
+                                    regioninfosize, &regionreturnsize);
+ if (status != 0 )
+ ABORT("ldr_inq_region failed");
+
+ /* only process writable (data) regions */
+ if (! (regioninfo.lri_prot & LDR_W))
+ continue;
+
+# ifdef VERBOSE
+ GC_printf("--- Region ---\n");
+ GC_printf("Region number = %16ld\n",
+ regioninfo.lri_region_no);
+ GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
+ GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
+ GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
+ GC_printf("Region size = %16ld\n", regioninfo.lri_size);
+ GC_printf("Region name = \"%s\"\n", regioninfo.lri_name);
+# endif
+
+ /* register region as a garbage collection root */
+ GC_add_roots_inner (
+ (char *)regioninfo.lri_mapaddr,
+ (char *)regioninfo.lri_mapaddr + regioninfo.lri_size);
+
+ }
+ }
+}
+#endif
+
+
+#else /* !DYNAMIC_LOADING */
+
+#ifdef PCR
+
+# include "il/PCR_IL.h"
+# include "th/PCR_ThCtl.h"
+# include "mm/PCR_MM.h"
+
+void GC_register_dynamic_libraries()
+{
+ /* Add new static data areas of dynamically loaded modules. */
+ {
+ PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
+ PCR_IL_LoadedSegment * q;
+
+	/* Skip uncommitted files */
+ while (p != NIL && !(p -> lf_commitPoint)) {
+ /* The loading of this file has not yet been committed */
+ /* Hence its description could be inconsistent. */
+ /* Furthermore, it hasn't yet been run. Hence its data */
+ /* segments can't possibly reference heap allocated */
+ /* objects. */
+ p = p -> lf_prev;
+ }
+ for (; p != NIL; p = p -> lf_prev) {
+ for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
+ if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
+ == PCR_IL_SegFlags_Traced_on) {
+ GC_add_roots_inner
+ ((char *)(q -> ls_addr),
+ (char *)(q -> ls_addr) + q -> ls_bytes);
+ }
+ }
+ }
+ }
+}
+
+
+#else /* !PCR */
+
+void GC_register_dynamic_libraries(){}
+
+int GC_no_dynamic_loading;
+
+#endif /* !PCR */
+#endif /* !DYNAMIC_LOADING */
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:08 pm PDT */
+# define I_HIDE_POINTERS
+# include "gc.h"
+# include "gc_priv.h"
+# include "gc_mark.h"
+
+# define HASH3(addr,size,log_size) \
+ ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
+ & ((size) - 1))
+#define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)
+
+struct hash_chain_entry {
+ word hidden_key;
+ struct hash_chain_entry * next;
+};
+
+unsigned GC_finalization_failures = 0;
+ /* Number of finalization requests that failed for lack of memory. */
+
+static struct disappearing_link {
+ struct hash_chain_entry prolog;
+# define dl_hidden_link prolog.hidden_key
+ /* Field to be cleared. */
+# define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
+# define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
+
+ word dl_hidden_obj; /* Pointer to object base */
+} **dl_head = 0;
+
+static signed_word log_dl_table_size = -1;
+ /* Binary log of */
+ /* current size of array pointed to by dl_head. */
+ /* -1 ==> size is 0. */
+
+word GC_dl_entries = 0; /* Number of entries currently in disappearing */
+ /* link table. */
+
+static struct finalizable_object {
+ struct hash_chain_entry prolog;
+# define fo_hidden_base prolog.hidden_key
+ /* Pointer to object base. */
+# define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
+# define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
+ GC_finalization_proc fo_fn; /* Finalizer. */
+ ptr_t fo_client_data;
+ word fo_object_size; /* In bytes. */
+} **fo_head = 0;
+
+struct finalizable_object * GC_finalize_now = 0;
+	/* List of objects that should be finalized now. */
+
+static signed_word log_fo_table_size = -1;
+
+word GC_fo_entries = 0;
+
+# ifdef SRC_M3
+void GC_push_finalizer_structures()
+{
+ GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
+ GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
+}
+# endif
+
+# define ALLOC(x, t) t *x = GC_NEW(t)
+
+/* Double the size of a hash table. *size_ptr is the log of its current */
+/* size. May be a noop. */
+/* *table is a pointer to an array of hash headers. If we succeed, we */
+/* update both *table and *log_size_ptr. */
+/* Lock is held. Signals are disabled. */
+void GC_grow_table(table, log_size_ptr)
+struct hash_chain_entry ***table;
+signed_word * log_size_ptr;
+{
+ register word i;
+ register struct hash_chain_entry *p;
+ int log_old_size = *log_size_ptr;
+ register int log_new_size = log_old_size + 1;
+ word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
+ register word new_size = 1 << log_new_size;
+ struct hash_chain_entry **new_table = (struct hash_chain_entry **)
+ GC_malloc_ignore_off_page_inner(
+ (size_t)new_size * sizeof(struct hash_chain_entry *));
+
+ if (new_table == 0) {
+	if (*table == 0) {
+ ABORT("Insufficient space for initial table allocation");
+ } else {
+ return;
+ }
+ }
+ for (i = 0; i < old_size; i++) {
+ p = (*table)[i];
+ while (p != 0) {
+ register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
+ register struct hash_chain_entry *next = p -> next;
+ register int new_hash = HASH3(real_key, new_size, log_new_size);
+
+ p -> next = new_table[new_hash];
+ new_table[new_hash] = p;
+ p = next;
+ }
+ }
+ *log_size_ptr = log_new_size;
+ *table = new_table;
+}
+
+
+int GC_register_disappearing_link(link)
+extern_ptr_t * link;
+{
+ ptr_t base;
+
+ base = (ptr_t)GC_base((extern_ptr_t)link);
+ if (base == 0)
+ ABORT("Bad arg to GC_register_disappearing_link");
+ return(GC_general_register_disappearing_link(link, base));
+}
+
+int GC_general_register_disappearing_link(link, obj)
+extern_ptr_t * link;
+extern_ptr_t obj;
+{
+ struct disappearing_link *curr_dl;
+ int index;
+ struct disappearing_link * new_dl;
+ DCL_LOCK_STATE;
+
+ if ((word)link & (ALIGNMENT-1))
+ ABORT("Bad arg to GC_general_register_disappearing_link");
+# ifdef THREADS
+ DISABLE_SIGNALS();
+ LOCK();
+# endif
+ if (log_dl_table_size == -1
+ || GC_dl_entries > ((word)1 << log_dl_table_size)) {
+# ifndef THREADS
+ DISABLE_SIGNALS();
+# endif
+ GC_grow_table((struct hash_chain_entry ***)(&dl_head),
+ &log_dl_table_size);
+# ifdef PRINTSTATS
+ GC_printf1("Grew dl table to %lu entries\n",
+ (unsigned long)(1 << log_dl_table_size));
+# endif
+# ifndef THREADS
+ ENABLE_SIGNALS();
+# endif
+ }
+ index = HASH2(link, log_dl_table_size);
+    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
+ if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
+ curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ return(1);
+ }
+ }
+# ifdef THREADS
+ new_dl = (struct disappearing_link *)
+ GC_generic_malloc_inner(sizeof(struct disappearing_link),NORMAL);
+# else
+ new_dl = GC_NEW(struct disappearing_link);
+# endif
+ if (new_dl != 0) {
+ new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
+ new_dl -> dl_hidden_link = HIDE_POINTER(link);
+ dl_set_next(new_dl, dl_head[index]);
+ dl_head[index] = new_dl;
+ GC_dl_entries++;
+ } else {
+ GC_finalization_failures++;
+ }
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ return(0);
+}
+
+int GC_unregister_disappearing_link(link)
+extern_ptr_t * link;
+{
+ struct disappearing_link *curr_dl, *prev_dl;
+ int index;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
+    index = HASH2(link, log_dl_table_size);
+ prev_dl = 0; curr_dl = dl_head[index];
+ while (curr_dl != 0) {
+ if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
+ if (prev_dl == 0) {
+ dl_head[index] = dl_next(curr_dl);
+ } else {
+ dl_set_next(prev_dl, dl_next(curr_dl));
+ }
+ GC_dl_entries--;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_free((extern_ptr_t)curr_dl);
+ return(1);
+ }
+ prev_dl = curr_dl;
+ curr_dl = dl_next(curr_dl);
+ }
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(0);
+}
+
+/* Register a finalization function. See gc.h for details. */
+/* In the nonthreads case, we try to avoid disabling signals, */
+/* since it can be expensive. Threads packages typically */
+/* make it cheaper. */
+void GC_register_finalizer(obj, fn, cd, ofn, ocd)
+extern_ptr_t obj;
+GC_finalization_proc fn;
+extern_ptr_t cd;
+GC_finalization_proc * ofn;
+extern_ptr_t * ocd;
+{
+ ptr_t base;
+ struct finalizable_object * curr_fo, * prev_fo;
+ int index;
+ struct finalizable_object *new_fo;
+ DCL_LOCK_STATE;
+
+# ifdef THREADS
+ DISABLE_SIGNALS();
+ LOCK();
+# endif
+ if (log_fo_table_size == -1
+ || GC_fo_entries > ((word)1 << log_fo_table_size)) {
+# ifndef THREADS
+ DISABLE_SIGNALS();
+# endif
+ GC_grow_table((struct hash_chain_entry ***)(&fo_head),
+ &log_fo_table_size);
+# ifdef PRINTSTATS
+ GC_printf1("Grew fo table to %lu entries\n",
+ (unsigned long)(1 << log_fo_table_size));
+# endif
+# ifndef THREADS
+ ENABLE_SIGNALS();
+# endif
+ }
+  /* In the THREADS case, signals are disabled and we hold the */
+  /* allocation lock; otherwise neither is true. Proceed carefully. */
+ base = (ptr_t)obj;
+ index = HASH2(base, log_fo_table_size);
+ prev_fo = 0; curr_fo = fo_head[index];
+ while (curr_fo != 0) {
+ if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
+ /* Interruption by a signal in the middle of this */
+ /* should be safe. The client may see only *ocd */
+ /* updated, but we'll declare that to be his */
+ /* problem. */
+ if (ocd) *ocd = (extern_ptr_t) curr_fo -> fo_client_data;
+ if (ofn) *ofn = curr_fo -> fo_fn;
+ /* Delete the structure for base. */
+ if (prev_fo == 0) {
+ fo_head[index] = fo_next(curr_fo);
+ } else {
+ fo_set_next(prev_fo, fo_next(curr_fo));
+ }
+ if (fn == 0) {
+ GC_fo_entries--;
+ /* May not happen if we get a signal. But a high */
+ /* estimate will only make the table larger than */
+ /* necessary. */
+# ifndef THREADS
+ GC_free((extern_ptr_t)curr_fo);
+# endif
+ } else {
+ curr_fo -> fo_fn = fn;
+ curr_fo -> fo_client_data = (ptr_t)cd;
+ /* Reinsert it. We deleted it first to maintain */
+ /* consistency in the event of a signal. */
+ if (prev_fo == 0) {
+ fo_head[index] = curr_fo;
+ } else {
+ fo_set_next(prev_fo, curr_fo);
+ }
+ }
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ return;
+ }
+ prev_fo = curr_fo;
+ curr_fo = fo_next(curr_fo);
+ }
+ if (ofn) *ofn = 0;
+ if (ocd) *ocd = 0;
+ if (fn == 0) {
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ return;
+ }
+# ifdef THREADS
+ new_fo = (struct finalizable_object *)
+ GC_generic_malloc_inner(sizeof(struct finalizable_object),NORMAL);
+# else
+ new_fo = GC_NEW(struct finalizable_object);
+# endif
+ if (new_fo != 0) {
+ new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
+ new_fo -> fo_fn = fn;
+ new_fo -> fo_client_data = (ptr_t)cd;
+ new_fo -> fo_object_size = GC_size(base);
+ fo_set_next(new_fo, fo_head[index]);
+ GC_fo_entries++;
+ fo_head[index] = new_fo;
+ } else {
+ GC_finalization_failures++;
+ }
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+}
+
+/* Called with world stopped. Cause disappearing links to disappear, */
+/* and invoke finalizers. */
+void GC_finalize()
+{
+ struct disappearing_link * curr_dl, * prev_dl, * next_dl;
+ struct finalizable_object * curr_fo, * prev_fo, * next_fo;
+ ptr_t real_ptr, real_link;
+ register int i;
+    int dl_size = (log_dl_table_size == -1) ? 0 : (1 << log_dl_table_size);
+    int fo_size = (log_fo_table_size == -1) ? 0 : (1 << log_fo_table_size);
+
+ /* Make disappearing links disappear */
+ for (i = 0; i < dl_size; i++) {
+ curr_dl = dl_head[i];
+ prev_dl = 0;
+ while (curr_dl != 0) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
+ real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
+ if (!GC_is_marked(real_ptr)) {
+ *(word *)real_link = 0;
+ next_dl = dl_next(curr_dl);
+ if (prev_dl == 0) {
+ dl_head[i] = next_dl;
+ } else {
+ dl_set_next(prev_dl, next_dl);
+ }
+ GC_clear_mark_bit((ptr_t)curr_dl);
+ GC_dl_entries--;
+ curr_dl = next_dl;
+ } else {
+ prev_dl = curr_dl;
+ curr_dl = dl_next(curr_dl);
+ }
+ }
+ }
+ /* Mark all objects reachable via chains of 1 or more pointers */
+ /* from finalizable objects. */
+# ifdef PRINTSTATS
+ if (GC_mark_state != MS_NONE) ABORT("Bad mark state");
+# endif
+ for (i = 0; i < fo_size; i++) {
+ for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ if (!GC_is_marked(real_ptr)) {
+ hdr * hhdr = HDR(real_ptr);
+
+ PUSH_OBJ((word *)real_ptr, hhdr, GC_mark_stack_top,
+ &(GC_mark_stack[GC_mark_stack_size]));
+ while (!GC_mark_stack_empty()) GC_mark_from_mark_stack();
+ if (GC_mark_state != MS_NONE) {
+ /* Mark stack overflowed. Very unlikely. */
+# ifdef PRINTSTATS
+ if (GC_mark_state != MS_INVALID) ABORT("Bad mark state");
+ GC_printf0("Mark stack overflowed in finalization!!\n");
+# endif
+ /* Make mark bits consistent again. Forget about */
+ /* finalizing this object for now. */
+ GC_set_mark_bit(real_ptr);
+ while (!GC_mark_some());
+ }
+ /*
+ if (GC_is_marked(real_ptr)) {
+ --> Report finalization cycle here, if desired
+ }
+ */
+ }
+
+ }
+ }
+ /* Enqueue for finalization all objects that are still */
+ /* unreachable. */
+ for (i = 0; i < fo_size; i++) {
+ curr_fo = fo_head[i];
+ prev_fo = 0;
+ while (curr_fo != 0) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ if (!GC_is_marked(real_ptr)) {
+ GC_set_mark_bit(real_ptr);
+ /* Delete from hash table */
+ next_fo = fo_next(curr_fo);
+ if (prev_fo == 0) {
+ fo_head[i] = next_fo;
+ } else {
+ fo_set_next(prev_fo, next_fo);
+ }
+ GC_fo_entries--;
+ /* Add to list of objects awaiting finalization. */
+ fo_set_next(curr_fo, GC_finalize_now);
+ GC_finalize_now = curr_fo;
+# ifdef PRINTSTATS
+ if (!GC_is_marked((ptr_t)curr_fo)) {
+ ABORT("GC_finalize: found accessible unmarked object\n");
+ }
+# endif
+ curr_fo = next_fo;
+ } else {
+ prev_fo = curr_fo;
+ curr_fo = fo_next(curr_fo);
+ }
+ }
+ }
+ /* Remove dangling disappearing links. */
+ for (i = 0; i < dl_size; i++) {
+ curr_dl = dl_head[i];
+ prev_dl = 0;
+ while (curr_dl != 0) {
+ real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
+ if (real_link != 0 && !GC_is_marked(real_link)) {
+ next_dl = dl_next(curr_dl);
+ if (prev_dl == 0) {
+ dl_head[i] = next_dl;
+ } else {
+ dl_set_next(prev_dl, next_dl);
+ }
+ GC_clear_mark_bit((ptr_t)curr_dl);
+ GC_dl_entries--;
+ curr_dl = next_dl;
+ } else {
+ prev_dl = curr_dl;
+ curr_dl = dl_next(curr_dl);
+ }
+ }
+ }
+}
+
+/* Invoke finalizers for all objects that are ready to be finalized. */
+/* Should be called without allocation lock. */
+void GC_invoke_finalizers()
+{
+ ptr_t real_ptr;
+ register struct finalizable_object * curr_fo;
+ DCL_LOCK_STATE;
+
+ while (GC_finalize_now != 0) {
+# ifdef THREADS
+ DISABLE_SIGNALS();
+ LOCK();
+# endif
+ curr_fo = GC_finalize_now;
+# ifdef THREADS
+ if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ if (curr_fo == 0) break;
+# else
+ GC_finalize_now = fo_next(curr_fo);
+# endif
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ (*(curr_fo -> fo_fn))(real_ptr, curr_fo -> fo_client_data);
+# ifndef THREADS
+ GC_free((extern_ptr_t)curr_fo);
+# endif
+ }
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_call_with_alloc_lock(GC_fn_type fn, extern_ptr_t client_data)
+# else
+ extern_ptr_t GC_call_with_alloc_lock(fn, client_data)
+ GC_fn_type fn;
+ extern_ptr_t client_data;
+# endif
+{
+ extern_ptr_t result;
+ DCL_LOCK_STATE;
+
+# ifdef THREADS
+ DISABLE_SIGNALS();
+ LOCK();
+# endif
+ result = (*fn)(client_data);
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ return(result);
+}
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:13 pm PDT */
+
+#ifndef _GC_H
+
+# define _GC_H
+
+# include <stddef.h>
+
+/* Define word and signed_word to be unsigned and signed types of the */
+/* same size as char * or void *. There seems to be no way to do this */
+/* even semi-portably. The following is probably no better/worse */
+/* than almost anything else. */
+/* The ANSI standard suggests that size_t and ptrdiff_t might be */
+/* better choices. But those appear to have incorrect definitions */
+/* on many systems. Notably "typedef int size_t" seems to be both */
+/* frequent and WRONG. */
+typedef unsigned long GC_word;
+typedef long GC_signed_word;
+
+/* Public read-only variables */
+
+extern GC_word GC_gc_no;	/* Counter incremented per collection. */
+ /* Includes empty GCs at startup. */
+
+
+/* Public R/W variables */
+
+extern int GC_quiet; /* Disable statistics output. Only matters if */
+ /* collector has been compiled with statistics */
+ /* enabled. This involves a performance cost, */
+ /* and is thus not the default. */
+
+extern int GC_dont_gc;	/* Don't collect unless explicitly requested, e.g. */
+			/* because it's not safe. */
+
+extern int GC_dont_expand;
+			/* Don't expand heap unless explicitly requested */
+ /* or forced to. */
+
+extern int GC_full_freq; /* Number of partial collections between */
+ /* full collections. Matters only if */
+ /* GC_incremental is set. */
+
+extern GC_word GC_non_gc_bytes;
+ /* Bytes not considered candidates for collection. */
+ /* Used only to control scheduling of collections. */
+
+extern GC_word GC_free_space_divisor;
+ /* We try to make sure that we allocate at */
+ /* least N/GC_free_space_divisor bytes between */
+ /* collections, where N is the heap size plus */
+ /* a rough estimate of the root set size. */
+ /* Initially, GC_free_space_divisor = 4. */
+ /* Increasing its value will use less space */
+ /* but more collection time. Decreasing it */
+ /* will appreciably decrease collection time */
+ /* at the expense of space. */
+ /* GC_free_space_divisor = 1 will effectively */
+ /* disable collections. */
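+			/* (E.g., with a 4 MB heap, a small root */
+			/* set, and the default divisor of 4, */
+			/* roughly 1 MB is allocated between */
+			/* collections.) */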
+
+
+/* Public procedures */
+/*
+ * General purpose allocation routines, with roughly malloc calling conventions.
+ * The atomic versions promise that no relevant pointers are contained
+ * in the object. The nonatomic versions guarantee that the new object
+ * is cleared. GC_malloc_stubborn promises that no changes to the object
+ * will occur after GC_end_stubborn_change has been called on the
+ * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
+ * that is scanned for pointers to collectable objects, but is not itself
+ * collectable. GC_malloc_uncollectable and GC_free called on the resulting
+ * object implicitly update GC_non_gc_bytes appropriately.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_malloc(size_t size_in_bytes);
+ extern void * GC_malloc_atomic(size_t size_in_bytes);
+ extern void * GC_malloc_uncollectable(size_t size_in_bytes);
+ extern void * GC_malloc_stubborn(size_t size_in_bytes);
+# else
+ extern char * GC_malloc(/* size_in_bytes */);
+ extern char * GC_malloc_atomic(/* size_in_bytes */);
+ extern char * GC_malloc_uncollectable(/* size_in_bytes */);
+ extern char * GC_malloc_stubborn(/* size_in_bytes */);
+# endif
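+
+/*
+ * A minimal usage sketch (struct node is a hypothetical client type).
+ * GC_malloc returns cleared storage that is scanned for pointers;
+ * GC_malloc_atomic storage is never scanned, and thus must not hold
+ * the only pointer to a collectable object:
+ *
+ *	struct node { struct node * next; int value; };
+ *
+ *	struct node * n = (struct node *)GC_malloc(sizeof(struct node));
+ *	char * buffer = (char *)GC_malloc_atomic(4096);
+ */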
+
+/* Explicitly deallocate an object. Dangerous if used incorrectly. */
+/* Requires a pointer to the base of an object. */
+/* If the argument is stubborn, it should not be changeable when freed. */
+/* An object should not be enabled for finalization when it is */
+/* explicitly deallocated. */
+/* GC_free(0) is a no-op, as required by ANSI C for free. */
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void GC_free(void * object_addr);
+# else
+ extern void GC_free(/* object_addr */);
+# endif
+
+/*
+ * Stubborn objects may be changed only if the collector is explicitly
+ * informed. The collector is implicitly informed of a coming change when
+ * such an object is first allocated. The following routines inform the
+ * collector that an object will no longer be changed, or that it will
+ * once again be changed. Only nonNIL pointer stores into the object
+ * are considered to be changes. The argument to GC_end_stubborn_change
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
+ * GC_change_stubborn. (In the second case it may be an interior pointer
+ * within 512 bytes of the beginning of the object.)
+ * There is a performance penalty for allowing more than
+ * one stubborn object to be changed at once, but it is acceptable to
+ * do so. The same applies to dropping stubborn objects that are still
+ * changeable.
+ */
+void GC_change_stubborn(/* p */);
+void GC_end_stubborn_change(/* p */);
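+
+/*
+ * For example (a sketch; q is a hypothetical pointer value):
+ *
+ *	GC_word * p = (GC_word *)GC_malloc_stubborn(10 * sizeof(GC_word));
+ *	... initialize p[0..9] ...
+ *	GC_end_stubborn_change((char *)p);
+ *	...
+ *	GC_change_stubborn((char *)p);
+ *	p[3] = (GC_word)q;
+ *	GC_end_stubborn_change((char *)p);
+ */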
+
+/* Return a pointer to the base (lowest address) of an object given */
+/* a pointer to a location within the object. */
+/* Return 0 if displaced_pointer doesn't point to within a valid */
+/* object. */
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_base(void * displaced_pointer);
+# else
+ char * GC_base(/* char * displaced_pointer */);
+# endif
+
+/* Given a pointer to the base of an object, return its size in bytes. */
+/* The returned size may be slightly larger than what was originally */
+/* requested. */
+# if defined(__STDC__) || defined(__cplusplus)
+ size_t GC_size(void * object_addr);
+# else
+ size_t GC_size(/* char * object_addr */);
+# endif
+
+/* For compatibility with C library. This is occasionally faster than */
+/* a malloc followed by a bcopy. But if you rely on that, either here */
+/* or with the standard C library, your code is broken. In my */
+/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
+/* The resulting object has the same kind as the original. */
+/* If the argument is stubborn, the result will have changes enabled. */
+/* It is an error to have changes enabled for the original object. */
+/* Follows ANSI conventions for NULL old_object. */
+# if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
+# else
+ extern char * GC_realloc(/* old_object, new_size_in_bytes */);
+# endif
+
+
+/* Explicitly increase the heap size. */
+/* Returns 0 on failure, 1 on success. */
+extern int GC_expand_hp(/* number_of_bytes */);
+
+/* Clear the set of root segments */
+extern void GC_clear_roots();
+
+/* Add a root segment */
+extern void GC_add_roots(/* low_address, high_address_plus_1 */);
+
+/* Add a displacement to the set of those considered valid by the */
+/* collector. GC_register_displacement(n) means that if p was returned */
+/* by GC_malloc, then (char *)p + n will be considered to be a valid */
+/* pointer to p. N must be small and less than the size of p. */
+/* (All pointers to the interior of objects from the stack are */
+/* considered valid in any case. This applies to heap objects and */
+/* static data.) */
+/* Preferably, this should be called before any other GC procedures. */
+/* Calling it later adds to the probability of excess memory */
+/* retention. */
+/* This is a no-op if the collector was compiled with recognition of */
+/* arbitrary interior pointers enabled, which is now the default. */
+void GC_register_displacement(/* n */);
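+
+/* For example, a client that references its objects only through */
+/* pointers past a one word header might call (a sketch): */
+/* */
+/*	GC_register_displacement(sizeof(GC_word)); */
+/* */
+/* so that p + sizeof(GC_word) is recognized as a valid pointer to p. */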
+
+/* Explicitly trigger a collection. */
+void GC_gcollect();
+
+/* Return the number of bytes in the heap. Excludes collector private */
+/* data structures. Includes empty blocks and fragmentation loss. */
+/* Includes some pages that were allocated but never written. */
+size_t GC_get_heap_size();
+
+/* Enable incremental/generational collection. */
+/* Not advisable unless dirty bits are */
+/* available or most heap objects are */
+/* pointer-free (atomic) or immutable. */
+/* Don't use in leak finding mode. */
+/* Ignored if GC_dont_gc is true. */
+void GC_enable_incremental();
+
+/* Allocate an object of size lb bytes. The client guarantees that */
+/* as long as the object is live, it will be referenced by a pointer */
+/* that points to somewhere within the first 256 bytes of the object. */
+/* (This should normally be declared volatile to prevent the compiler */
+/* from invalidating this assertion.) This routine is only useful */
+/* if a large array is being allocated. It reduces the chance of */
+/* accidentally retaining such an array as a result of scanning an */
+/* integer that happens to be an address inside the array. (Actually, */
+/* it reduces the chance of the allocator not finding space for such */
+/* an array, since it will try hard to avoid introducing such a false */
+/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
+/* for arrays likely to be larger than 100K or so. For other systems, */
+/* or if the collector is not configured to recognize all interior */
+/* pointers, the threshold is normally much higher. */
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_malloc_ignore_off_page(size_t lb);
+# else
+ char * GC_malloc_ignore_off_page(/* size_t lb */);
+# endif
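+
+/* E.g. (a sketch): */
+/* */
+/*	char * volatile big = (char *)GC_malloc_ignore_off_page(1 << 20); */
+/* */
+/* where big continues to point at the start of the array for as long */
+/* as the array is needed. */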
+
+/* Debugging (annotated) allocation. GC_gcollect will check */
+/* objects allocated in this way for overwrites, etc. */
+# if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_debug_malloc(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_uncollectable(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_stubborn(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void GC_debug_free(void * object_addr);
+ extern void * GC_debug_realloc(void * old_object,
+ size_t new_size_in_bytes,
+ char * descr_string, int descr_int);
+# else
+ extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
+ extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern char * GC_debug_malloc_uncollectable(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern char * GC_debug_malloc_stubborn(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern void GC_debug_free(/* object_addr */);
+ extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
+ descr_string, descr_int */);
+# endif
+void GC_debug_change_stubborn(/* p */);
+void GC_debug_end_stubborn_change(/* p */);
+# ifdef GC_DEBUG
+# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
+ __FILE__, __LINE__)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
+ __LINE__)
+# define GC_FREE(p) GC_debug_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
+ __LINE__)
+# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# else
+# define GC_MALLOC(sz) GC_malloc(sz)
+# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
+# define GC_REALLOC(old, sz) GC_realloc(old, sz)
+# define GC_FREE(p) GC_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
+# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# endif
+/* The following are included because they are often convenient, and */
+/* reduce the chance for a misspecified size argument. But calls may */
+/* expand to something syntactically incorrect if t is a complicated */
+/* type expression. */
+# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
+# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
+# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
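+
+/* E.g. (a sketch, where T is any client type): */
+/* */
+/*	T * t = GC_NEW(T); */
+/* */
+/* Clients that consistently use the uppercase macros can switch all */
+/* allocations to the checking versions just by compiling with */
+/* -DGC_DEBUG. */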
+
+/* Finalization. Some of these primitives are grossly unsafe. */
+/* The idea is to make them both cheap, and sufficient to build */
+/* a safer layer, closer to PCedar finalization. */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
+/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
+/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
+# if defined(__STDC__) || defined(__cplusplus)
+ typedef void (*GC_finalization_proc)(void * obj, void * client_data);
+# else
+ typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
+# endif
+
+void GC_register_finalizer(/* void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd */);
+ /* When obj is no longer accessible, invoke */
+ /* (*fn)(obj, cd). If a and b are inaccessible, and */
+ /* a points to b (after disappearing links have been */
+ /* made to disappear), then only a will be */
+ /* finalized. (If this does not create any new */
+ /* pointers to b, then b will be finalized after the */
+ /* next collection.) Any finalizable object that */
+ /* is reachable from itself by following one or more */
+ /* pointers will not be finalized (or collected). */
+ /* Thus cycles involving finalizable objects should */
+ /* be avoided, or broken by disappearing links. */
+ /* Fn should terminate as quickly as possible, and */
+ /* defer extended computation. */
+ /* All but the last finalizer registered for an object */
+ /* is ignored. */
+ /* Finalization may be removed by passing 0 as fn. */
+ /* The old finalizer and client data are stored in */
+ /* *ofn and *ocd. */
+ /* Fn is never invoked on an accessible object, */
+ /* provided hidden pointers are converted to real */
+ /* pointers only if the allocation lock is held, and */
+ /* such conversions are not performed by finalization */
+ /* routines. */
+ /* If GC_register_finalizer is aborted as a result of */
+ /* a signal, the object may be left with no */
+ /* finalization, even if neither the old nor new */
+ /* finalizer were NULL. */
+ /* Obj should be the nonNULL starting address of an */
+ /* object allocated by GC_malloc or friends. */
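+		/* A sketch of typical use (struct my_file, its fp */
+		/* field, and my_fin are hypothetical): */
+		/* */
+		/*   void my_fin(obj, cd) */
+		/*   char * obj, * cd; */
+		/*   { */
+		/*       fclose(((struct my_file *)obj) -> fp); */
+		/*   } */
+		/*   ... */
+		/*   GC_register_finalizer((char *)f, my_fin, 0, 0, 0); */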
+
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable */
+/* objects to be finalized in the correct order. Standard */
+/* use involves calling GC_register_disappearing_link(&p), */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining */
+/* finalization order. */
+int GC_register_disappearing_link(/* void ** link */);
+ /* Link should point to a field of a heap allocated */
+ /* object obj. *link will be cleared when obj is */
+ /* found to be inaccessible. This happens BEFORE any */
+ /* finalization code is invoked, and BEFORE any */
+ /* decisions about finalization order are made. */
+ /* This is useful in telling the finalizer that */
+ /* some pointers are not essential for proper */
+ /* finalization. This may avoid finalization cycles. */
+ /* Note that obj may be resurrected by another */
+ /* finalizer, and thus the clearing of *link may */
+ /* be visible to non-finalization code. */
+ /* There's an argument that an arbitrary action should */
+ /* be allowed here, instead of just clearing a pointer. */
+ /* But this causes problems if that action alters, or */
+ /* examines connectivity. */
+ /* Returns 1 if link was already registered, 0 */
+ /* otherwise. */
+ /* Only exists for backward compatibility. See below: */
+int GC_general_register_disappearing_link(/* void ** link, void * obj */);
+ /* A slight generalization of the above. *link is */
+ /* cleared when obj first becomes inaccessible. This */
+ /* can be used to implement weak pointers easily and */
+ /* safely. Typically link will point to a location */
+ /* holding a disguised pointer to obj. In this way */
+	/* soft pointers are broken before any objects		*/
+	/* reachable from them are finalized.  Each link	*/
+	/* may be registered only once, i.e. with one obj	*/
+ /* value. This was added after a long email discussion */
+ /* with John Ellis. */
+ /* Obj must be a pointer to the first word of an object */
+ /* we allocated. It is unsafe to explicitly deallocate */
+ /* the object containing link. Explicitly deallocating */
+ /* obj may or may not cause link to eventually be */
+ /* cleared. */
+int GC_unregister_disappearing_link(/* void ** link */);
+ /* Returns 0 if link was not actually registered. */
+ /* Undoes a registration by either of the above two */
+ /* routines. */
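+
+/* Example (an illustrative sketch of a weak reference; obj is		*/
+/* assumed to point to a GC_malloc'ed object, and the client is	*/
+/* assumed to compile with -DI_HIDE_POINTERS so that HIDE_POINTER,	*/
+/* declared below, is available):					*/
+/*									*/
+/*   void ** wp = (void **)GC_MALLOC_ATOMIC(sizeof(void *));		*/
+/*   *wp = (void *)HIDE_POINTER(obj);  -- disguised; obj may be	*/
+/*					-- collected.			*/
+/*   GC_general_register_disappearing_link(wp, obj);			*/
+/*   -- *wp is cleared to 0 once obj becomes inaccessible.		*/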
+
+/* Auxiliary fns to make finalization work correctly with displaced */
+/* pointers introduced by the debugging allocators. */
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_make_closure(GC_finalization_proc fn, void * data);
+ void GC_debug_invoke_finalizer(void * obj, void * data);
+# else
+ char * GC_make_closure(/* GC_finalization_proc fn, char * data */);
+ void GC_debug_invoke_finalizer(/* void * obj, void * data */);
+# endif
+
+
+/* The following is intended to be used by a higher level */
+/* (e.g. cedar-like) finalization facility. It is expected */
+/* that finalization code will arrange for hidden pointers to */
+/* disappear. Otherwise objects can be accessed after they */
+/* have been collected. */
+# ifdef I_HIDE_POINTERS
+# if defined(__STDC__) || defined(__cplusplus)
+# define HIDE_POINTER(p) (~(size_t)(p))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
+# else
+# define HIDE_POINTER(p) (~(unsigned long)(p))
+# define REVEAL_POINTER(p) ((char *)(HIDE_POINTER(p)))
+# endif
+ /* Converting a hidden pointer to a real pointer requires verifying */
+ /* that the object still exists. This involves acquiring the */
+ /* allocator lock to avoid a race with the collector. */
+
+# if defined(__STDC__) || defined(__cplusplus)
+ typedef void * (*GC_fn_type)();
+ void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
+# else
+ typedef char * (*GC_fn_type)();
+ char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
+# endif
+# endif
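+
+/* Example (an illustrative sketch; "deref_weak" and "wp" are		*/
+/* hypothetical, continuing the disappearing link sketch above):	*/
+/*									*/
+/*   void * deref_weak(void * client_data)				*/
+/*   {									*/
+/*	 size_t hidden = *(size_t *)client_data;			*/
+/*	 return (hidden == 0) ? 0 : (void *)REVEAL_POINTER(hidden);	*/
+/*   }									*/
+/*   ...								*/
+/*   void * obj = GC_call_with_alloc_lock(deref_weak, (void *)wp);	*/
+/*   -- obj is 0 if the referent was already collected.		*/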
+
+#ifdef SOLARIS_THREADS
+/* We need to intercept calls to many of the threads primitives, so */
+/* that we can locate thread stacks and stop the world. */
+/* Note also that the collector cannot see thread specific data. */
+/* Thread specific data should generally consist of pointers to */
+/* uncollectable objects, which are deallocated using the destructor */
+/* facility in thr_keycreate. */
+# include <thread.h>
+ int GC_thr_create(void *stack_base, size_t stack_size,
+ void *(*start_routine)(void *), void *arg, long flags,
+ thread_t *new_thread);
+ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
+ int GC_thr_suspend(thread_t target_thread);
+ int GC_thr_continue(thread_t target_thread);
+ void * GC_dlopen(const char *path, int mode);
+
+# define thr_create GC_thr_create
+# define thr_join GC_thr_join
+# define thr_suspend GC_thr_suspend
+# define thr_continue GC_thr_continue
+# define dlopen GC_dlopen
+
+/* This returns a list of objects, linked through their first */
+/* word. Its use can greatly reduce lock contention problems, since */
+/* the allocation lock can be acquired and released many fewer times. */
+void * GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(void **)(p)) /* Retrieve the next element */
+ /* in returned list. */
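+
+/* Example (an illustrative sketch): carve several 24 byte objects	*/
+/* out of a single locked allocation:					*/
+/*									*/
+/*   void * list = GC_malloc_many(24);					*/
+/*   while (list != 0) {						*/
+/*	 void * p = list;						*/
+/*	 list = GC_NEXT(p);						*/
+/*	 GC_NEXT(p) = 0;    -- clear the link before using p		*/
+/*	 ... use p ...							*/
+/*   }									*/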
+
+#endif /* SOLARIS_THREADS */
+
+#endif /* _GC_H */
--- /dev/null
+.TH GC_MALLOC 1L "20 April 1994"
+.SH NAME
+GC_malloc, GC_malloc_atomic, GC_free, GC_realloc, GC_enable_incremental, GC_register_finalizer \- Garbage collecting malloc replacement
+.SH SYNOPSIS
+#include "gc.h"
+.br
+# define malloc(n) GC_malloc(n)
+.br
+... malloc(...) ...
+.br
+.sp
+cc ... gc.a
+.LP
+.SH DESCRIPTION
+.I GC_malloc
+and
+.I GC_free
+are plug-in replacements for standard malloc and free. However,
+.I
+GC_malloc
+will attempt to reclaim inaccessible space automatically by invoking a conservative garbage collector at appropriate points. The collector traverses all data structures accessible by following pointers from the machine's registers, stack(s), data, and bss segments. Inaccessible structures will be reclaimed. A machine word is considered to be a valid pointer if it is an address inside an object allocated by
+.I
+GC_malloc
+or friends.
+.LP
+Unlike the standard implementations of malloc,
+.I
+GC_malloc
+clears the newly allocated storage.
+.I
+GC_malloc_atomic
+does not. Furthermore, it informs the collector that the resulting object will never contain any pointers, and should therefore not be scanned by the collector.
+.I
+GC_free
+can be used to deallocate objects, but its use is optional, and discouraged.
+.I
+GC_realloc
+has the standard realloc semantics. It preserves pointer-free-ness.
+.I
+GC_register_finalizer
+allows for registration of functions that are invoked when an object becomes inaccessible.
+.LP
+It is also possible to use the collector to find storage leaks in programs destined to be run with standard malloc/free. The collector can be compiled for thread-safe operation. Unlike standard malloc, it is safe to call malloc after a previous malloc call was interrupted by a signal, provided the original malloc call is not resumed.
+.LP
+Debugging versions of many of the above routines are provided as macros. Their names are identical to the above, but consist of all capital letters. If GC_DEBUG is defined before gc.h is included, these routines do additional checking, and allow the leak detecting version of the collector to produce slightly more useful output. Without GC_DEBUG defined, they behave exactly like the lower-case versions.
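+.LP
+For example (a sketch; the type T is hypothetical client code):
+.br
+#define GC_DEBUG
+.br
+#include "gc.h"
+.br
+struct T *p = (struct T *)GC_MALLOC(sizeof (struct T));
+.br
+p = (struct T *)GC_REALLOC(p, 2*sizeof (struct T));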
+.LP
+On some machines, collection will be performed incrementally after a call to
+.I
+GC_enable_incremental.
+This may temporarily write protect pages in the heap. See the README file for more information on how this interacts with system calls that write to the heap.
+.LP
+Other facilities not discussed here include a C++ interface, limited facilities to support incremental collection on machines without appropriate VM support, provisions for providing more explicit object layout information to the garbage collector, more direct support for ``weak'' pointers, etc.
+.LP
+.SH "SEE ALSO"
+The README and gc.h files in the distribution. More detailed definitions of the functions exported by the collector are given there. (The above list is not complete.)
+.LP
+Boehm, H., and M. Weiser, "Garbage Collection in an Uncooperative Environment",
+\fISoftware Practice & Experience\fP, September 1988, pp. 807-820.
+.LP
+The malloc(3) man page.
+.LP
+.SH AUTHOR
+Hans-J. Boehm (boehm@parc.xerox.com). Some of the code was written by others, most notably Alan Demers.
--- /dev/null
+/*************************************************************************
+
+
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to copy this code for any purpose,
+provided the above notices are retained on all copies.
+
+This implementation module for gc_c++.h provides an implementation of
+the global operators "new" and "delete" that calls the Boehm
+allocator. All objects allocated by this implementation will be
+non-collectable but part of the root set of the collector.
+
+You should ensure (using implementation-dependent techniques) that the
+linker finds this module before the library that defines the default
+built-in "new" and "delete".
+
+
+**************************************************************************/
+
+#include "gc_c++.h"
+
+void* operator new( size_t size ) {
+ return GC_MALLOC_UNCOLLECTABLE( size ); }
+
+void operator delete( void* obj ) {
+ return GC_FREE( obj ); }
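+
+/* For example, a link line of the following form (illustrative only;
+   compiler and file names vary by system) ensures that this module is
+   found before the default operators:
+
+	CC -c gc_c++.cc
+	CC my_prog.o gc_c++.o gc.a -o my_prog			      */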
+
+
+
--- /dev/null
+
+/****************************************************************************
+
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program
+for any purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is granted,
+provided the above notices are retained, and a notice that the code was
+modified is included with the above copyright notice.
+
+C++ Interface to the Boehm Collector
+
+ Jesse Hull and John Ellis
+ Last modified on Tue Feb 15 14:43:02 PST 1994 by ellis
+
+This interface provides access to the Boehm collector (versions 3.6
+and later). It is intended to provide facilities similar to those
+described in the Ellis-Detlefs proposal for C++ garbage collection.
+
+To make a class collectable, derive it from the base class "gc":
+
+ class MyClass: gc {...}
+
+Then, "new MyClass" will allocate intances that will be automatically
+garbage collected.
+
+Collected objects can be explicitly deleted with "delete", e.g.
+
+ MyClass* m = ...;
+ delete m;
+
+This will free the object's storage immediately.
+
+Collected instances of non-class types can be allocated using
+placement syntax with the argument "GC":
+
+ typedef int A[ 10 ];
+ A* a = new (GC) A;
+
+The built-in "operator new" continues to allocate non-collectible
+objects that the programmer must explicitly delete. Collected objects
+may freely point at non-collected objects, and vice versa.
+
+Object clean-up (finalization) can be specified using class
+"gc_cleanup". When an object derived from "gc_cleanup" is discovered
+to be inaccessible by the collector, or when it is explicitly deleted,
+its destructors will be invoked first.
+
+Clean-up functions for non-class types can be specified as additional
+placement arguments:
+
+ A* a = new (GC, MyCleanup) A;
+
+An object is considered "accessible" by the collector if it can be
+reached by a path of pointers from static variables, automatic
+variables of active functions, or from another object with clean-up
+enabled. This implies that if object A and B both have clean-up
+enabled, and A points at B, B will be considered accessible, and A's
+clean-up will be invoked before B's. If A points at B and B points
+back to A, forming a cycle, that's considered a storage leak, and
+neither will ever become inaccessible. See the C interface gc.h for
+low-level facilities for handling such cycles of objects with cleanup.
+
+****************************************************************************/
+
+#ifndef GC_CPP_H
+#define GC_CPP_H
+
+extern "C" {
+#include "gc.h"
+}
+
+enum GCPlacement {GC, NoGC};
+
+class gc {
+public:
+ void* operator new( size_t size );
+ void* operator new( size_t size, GCPlacement gcp );
+ void operator delete( void* obj ); };
+ /*
+    Instances of classes derived from "gc" will be allocated in the
+ collected heap by default, unless an explicit NoGC placement is
+ specified. */
+
+class gc_cleanup: public gc {
+public:
+ gc_cleanup();
+ virtual ~gc_cleanup();
+private:
+ static void cleanup( void* obj, void* clientData ); };
+ /*
+ Instances of classes derived from "gc_cleanup" will be allocated
+ in the collected heap by default. Further, when the collector
+ discovers an instance is inaccessible (see above) or when the
+ instance is explicitly deleted, its destructors will be invoked.
+ NOTE: Only one instance of "gc_cleanup" should occur in the
+    inheritance hierarchy -- i.e. it should always be a virtual
+ base. */
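+    /*
+    An illustrative sketch ("Handle" and "release" are hypothetical
+    client code).  Note the virtual base, per the note above:
+
+	class Handle: public virtual gc_cleanup {
+	public:
+	    ~Handle() { release(); }
+	    ... };
+    */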
+
+void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ void (*cleanup)( void*, void* ) = 0,
+ void* clientData = 0 );
+ /*
+ If "gcp = GC", then this "operator new" allocates in the collected
+ heap, otherwise in the non-collected heap. When the allocated
+ object "obj" becomes inaccessible, the collector will invoke the
+ function "cleanup( obj, clientData )". It is an error to specify
+ a non-null "cleanup" when "gcp = NoGC". */
+
+/****************************************************************************
+
+Inline implementation
+
+****************************************************************************/
+
+inline void* gc::operator new( size_t size ) {
+ return GC_MALLOC( size ); };
+
+inline void* gc::operator new( size_t size, GCPlacement gcp ) {
+ if (gcp == GC)
+ return GC_MALLOC( size );
+ else
+ return GC_MALLOC_UNCOLLECTABLE( size ); }
+
+inline void gc::operator delete( void* obj ) {
+ GC_FREE( obj ); };
+
+inline gc_cleanup::gc_cleanup() {
+ GC_REGISTER_FINALIZER( GC_base( this ), cleanup, this, 0, 0 ); }
+
+inline void gc_cleanup::cleanup( void* obj, void* realThis ) {
+ ((gc_cleanup*) realThis)->~gc_cleanup(); }
+
+inline gc_cleanup::~gc_cleanup() {
+ GC_REGISTER_FINALIZER( this, 0, 0, 0, 0 ); }
+
+/* Default arguments are given on the declaration above, and may not */
+/* be repeated here. */
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ void (*cleanup)( void*, void* ),
+ void* clientData )
+{
+ void* obj;
+
+ if (gcp == GC) {
+ obj = GC_MALLOC( size );
+ if (cleanup != 0)
+ GC_REGISTER_FINALIZER( obj, cleanup, clientData, 0, 0 ); }
+ else {
+ obj = GC_MALLOC_UNCOLLECTABLE( size ); };
+ return obj; }
+
+
+#endif
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:16 pm PDT */
+# ifndef GC_HEADERS_H
+# define GC_HEADERS_H
+typedef struct hblkhdr hdr;
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
+ --> Get a real machine.
+# endif
+
+/*
+ * The 2 level tree data structure that is used to find block headers.
+ * If there are more than 32 bits in a pointer, the top level is a hash
+ * table.
+ */
+
+# if CPP_WORDSZ > 32
+# define HASH_TL
+# endif
+
+/* Define appropriate out-degrees for each of the two tree levels */
+# ifdef SMALL_CONFIG
+# define LOG_BOTTOM_SZ 11
+ /* Keep top index size reasonable with smaller blocks. */
+# else
+# define LOG_BOTTOM_SZ 10
+# endif
+# ifndef HASH_TL
+# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
+# else
+# define LOG_TOP_SZ 11
+# endif
+# define TOP_SZ (1 << LOG_TOP_SZ)
+# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+
+typedef struct bi {
+ hdr * index[BOTTOM_SZ];
+ /*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block.
+ * 1 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+ struct bi * asc_link; /* All indices are linked in */
+ /* ascending order. */
+ word key; /* high order address bits. */
+# ifdef HASH_TL
+ struct bi * hash_link; /* Hash chain link. */
+# endif
+} bottom_index;
+
+/* extern bottom_index GC_all_nils; - really part of GC_arrays */
+
+/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
+ /* Each entry points to a bottom_index. */
+ /* On a 32 bit machine, it points to */
+ /* the index for a set of high order */
+ /* bits equal to the index. For longer */
+ /* addresses, we hash the high order */
+ /* bits to compute the index in */
+ /* GC_top_index, and each entry points */
+ /* to a hash chain. */
+ /* The last entry in each chain is */
+ /* GC_all_nils. */
+
+
+# define MAX_JUMP (HBLKSIZE - 1)
+
+# ifndef HASH_TL
+# define BI(p) (GC_top_index \
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+# define HDR_INNER(p) (BI(p)->index \
+ [((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# ifdef SMALL_CONFIG
+# define HDR(p) GC_find_header((ptr_t)(p))
+# else
+# define HDR(p) HDR_INNER(p)
+# endif
+# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
+# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
+# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
+# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
+# else /* hash */
+/* Hash function for tree top level */
+# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
+/* Set bottom_indx to point to the bottom index for address p */
+# define GET_BI(p, bottom_indx) \
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != &GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
+# define GET_HDR_ADDR(p, ha) \
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(bi->index[((unsigned long)(p)>>LOG_HBLKSIZE) \
+ & (BOTTOM_SZ - 1)]); \
+ }
+# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ (hhdr) = *_ha; }
+# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ *_ha = (hhdr); }
+# define HDR(p) GC_find_header((ptr_t)(p))
+# endif
+
+/* Is the result a forwarding address to someplace closer to the */
+/* beginning of the block or NIL? */
+# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
+
+/* Get an HBLKSIZE aligned address closer to the beginning of the block */
+/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
+# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
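+
+/* Typical lookup pattern (an illustrative sketch, not used verbatim	*/
+/* in the collector):							*/
+/*									*/
+/*   hdr * hhdr;							*/
+/*   struct hblk * h = HBLKPTR(p);    -- HBLKPTR is in gc_priv.h	*/
+/*   GET_HDR(p, hhdr);							*/
+/*   if (IS_FORWARDING_ADDR_OR_NIL(hhdr) && hhdr != 0) {		*/
+/*	 h = FORWARDED_ADDR(h, hhdr);					*/
+/*	 hhdr = HDR(h);							*/
+/*   }									*/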
+# endif /* GC_HEADERS_H */
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:12 pm PDT */
+
+# ifndef GC_PRIVATE_H
+# include "gc_priv.h"
+# endif
+
+/* Allocate n words (NOT BYTES). X is made to point to the result. */
+/* It is assumed that n < MAXOBJSZ, and */
+/* that n > 0. On machines requiring double word alignment of some */
+/* data, we also assume that n is 1 or even. This bypasses the */
+/* MERGE_SIZES mechanism. In order to minimize the number of distinct */
+/* free lists that are maintained, the caller should ensure that a */
+/* small number of distinct values of n are used. (The MERGE_SIZES */
+/* mechanism normally does this by ensuring that only the leading three */
+/* bits of n may be nonzero. See misc.c for details.) We really */
+/* recommend this only in cases in which n is a constant, and no */
+/* locking is required. */
+/* In that case it may allow the compiler to perform substantial */
+/* additional optimizations. */
+# define GC_MALLOC_WORDS(result,n) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_objfreelist[n]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ (result) = GC_generic_malloc_words_small((n), NORMAL); \
+ } else { \
+ *opp = obj_link(op); \
+ obj_link(op) = 0; \
+ GC_words_allocd += (n); \
+ FASTUNLOCK(); \
+ (result) = (extern_ptr_t) op; \
+ } \
+}
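+
+/* Example (an illustrative sketch): allocate a 4 word record with	*/
+/* constant n, as recommended above; "result" must be an lvalue:	*/
+/*									*/
+/*   extern_ptr_t result;						*/
+/*   GC_MALLOC_WORDS(result, 4);					*/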
+
+
+/* The same for atomic objects: */
+# define GC_MALLOC_ATOMIC_WORDS(result,n) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_aobjfreelist[n]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ (result) = GC_generic_malloc_words_small((n), PTRFREE); \
+ } else { \
+ *opp = obj_link(op); \
+ obj_link(op) = 0; \
+ GC_words_allocd += (n); \
+ FASTUNLOCK(); \
+ (result) = (extern_ptr_t) op; \
+ } \
+}
+
+/* And once more for two word initialized objects: */
+# define GC_CONS(result, first, second) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_objfreelist[2]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ op = GC_generic_malloc_words_small(2, NORMAL); \
+ } else { \
+ *opp = obj_link(op); \
+ GC_words_allocd += 2; \
+ FASTUNLOCK(); \
+ } \
+ ((word *)op)[0] = (word)(first); \
+ ((word *)op)[1] = (word)(second); \
+ (result) = (extern_ptr_t) op; \
+}
--- /dev/null
+# include "gc_inl.h"
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+/* Boehm, May 19, 1994 2:15 pm PDT */
+
+/*
+ * Declarations of mark stack. Needed by marker and client supplied mark
+ * routines. To be included after gc_priv.h.
+ */
+#ifndef GC_MARK_H
+# define GC_MARK_H
+
+/* A client supplied mark procedure. Returns new mark stack pointer. */
+/* Not currently used for predefined object kinds. */
+/* Primary effect should be to push new entries on the mark stack. */
+/* Mark stack pointer values are passed and returned explicitly. */
+/* Global variables describing mark stack are not necessarily valid.	*/
+/* (This usually saves a few cycles by keeping things in registers.) */
+/* Assumed to scan about PROC_BYTES on average. If it needs to do */
+/* much more work than that, it should do it in smaller pieces by */
+/* pushing itself back on the mark stack. */
+/* Note that it should always do some work (defined as marking some */
+/* objects) before pushing more than one entry on the mark stack. */
+/* This is required to ensure termination in the event of mark stack */
+/* overflows. */
+/* This procedure is always called with at least one empty entry on the */
+/* mark stack. */
+/* Boehm, March 15, 1994 2:38 pm PST */
+# define PROC_BYTES 100
+typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr,
+ mark_stack_limit, env */);
+
+# define LOG_MAX_MARK_PROCS 6
+# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
+extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
+extern word GC_n_mark_procs;
+
+/* Object descriptors on mark stack or in objects. Low order two */
+/* bits are tags distinguishing among the following 4 possibilities */
+/* for the high order 30 bits. */
+#define DS_TAG_BITS 2
+#define DS_TAGS ((1 << DS_TAG_BITS) - 1)
+#define DS_LENGTH 0 /* The entire word is a length in bytes that */
+ /* must be a multiple of 4. */
+#define DS_BITMAP 1 /* 30 bits are a bitmap describing pointer */
+ /* fields. The msb is 1 iff the first word */
+ /* is a pointer. */
+ /* (This unconventional ordering sometimes */
+ /* makes the marker slightly faster.) */
+ /* Zeroes indicate definite nonpointers. Ones */
+ /* indicate possible pointers. */
+ /* Only usable if pointers are word aligned. */
+# define BITMAP_BITS (WORDSZ - DS_TAG_BITS)
+#define DS_PROC 2
+ /* The objects referenced by this object can be */
+ /* pushed on the mark stack by invoking */
+ /* PROC(descr). ENV(descr) is passed as the */
+ /* last argument. */
+# define PROC(descr) \
+ (GC_mark_procs[((descr) >> DS_TAG_BITS) & (MAX_MARK_PROCS-1)])
+# define ENV(descr) \
+ ((descr) >> (DS_TAG_BITS + LOG_MAX_MARK_PROCS))
+# define MAX_ENV \
+ (((word)1 << (WORDSZ - DS_TAG_BITS - LOG_MAX_MARK_PROCS)) - 1)
+# define MAKE_PROC(proc_index, env) \
+ (((((env) << LOG_MAX_MARK_PROCS) | (proc_index)) << DS_TAG_BITS) \
+ | DS_PROC)
+#define DS_PER_OBJECT 3 /* The real descriptor is at the */
+ /* byte displacement from the beginning of the */
+ /* object given by descr & ~DS_TAGS */
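+
+/* Example (an illustrative sketch): descriptors for a 4 word object.	*/
+/* If all four words may be pointers, a length descriptor suffices:	*/
+/*   word descr = WORDS_TO_BYTES(4) | DS_LENGTH;			*/
+/* If only the first and third words may be pointers:			*/
+/*   word descr = ((word)1 << (WORDSZ-1))				*/
+/*		   | ((word)1 << (WORDSZ-3)) | DS_BITMAP;		*/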
+
+typedef struct ms_entry {
+ word * mse_start; /* First word of object */
+ word mse_descr; /* Descriptor; low order two bits are tags, */
+ /* identifying the upper 30 bits as one of the */
+ /* following: */
+} mse;
+
+extern word GC_mark_stack_size;
+
+extern mse * GC_mark_stack_top;
+
+extern mse * GC_mark_stack;
+
+word GC_find_start();
+
+mse * GC_signal_mark_stack_overflow();
+
+# ifdef GATHERSTATS
+# define ADD_TO_ATOMIC(sz) GC_atomic_in_use += (sz)
+# define ADD_TO_COMPOSITE(sz) GC_composite_in_use += (sz)
+# else
+# define ADD_TO_ATOMIC(sz)
+# define ADD_TO_COMPOSITE(sz)
+# endif
+
+/* Push the object obj with corresponding heap block header hhdr onto */
+/* the mark stack. */
+# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
+{ \
+ register word _descr = (hhdr) -> hb_descr; \
+ \
+ if (_descr == 0) { \
+ ADD_TO_ATOMIC((hhdr) -> hb_sz); \
+ } else { \
+ ADD_TO_COMPOSITE((hhdr) -> hb_sz); \
+ mark_stack_top++; \
+ if (mark_stack_top >= mark_stack_limit) { \
+ mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
+ } \
+ mark_stack_top -> mse_start = (obj); \
+ mark_stack_top -> mse_descr = _descr; \
+ } \
+}
+
+/* Push the contents of current onto the mark stack if it is a valid	*/
+/* ptr to a currently unmarked object. Mark it. */
+# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit) \
+{ \
+ register int displ; /* Displacement in block; first bytes, then words */ \
+ register hdr * hhdr; \
+ register map_entry_type map_entry; \
+ \
+ GET_HDR(current,hhdr); \
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
+ current = GC_find_start(current, hhdr); \
+ if (current == 0) continue; \
+ hhdr = HDR(current); \
+ } \
+ displ = HBLKDISPL(current); \
+ map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
+ if (map_entry == OBJ_INVALID) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current); continue; \
+ } \
+ displ = BYTES_TO_WORDS(displ); \
+ displ -= map_entry; \
+ \
+ { \
+ register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ); \
+ register word mark_word = *mark_word_addr; \
+ register word mark_bit = (word)1 << modWORDSZ(displ); \
+ \
+ if (mark_word & mark_bit) { \
+ /* Mark bit is already set */ \
+ continue; \
+ } \
+ *mark_word_addr = mark_word | mark_bit; \
+ } \
+ PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
+ mark_stack_top, mark_stack_limit) \
+}
+
+extern bool GC_mark_stack_too_small;
+ /* We need a larger mark stack. May be */
+ /* set by client supplied mark routines.*/
+
+typedef int mark_state_t; /* Current state of marking, as follows:*/
+ /* Used to remember where we are during */
+ /* concurrent marking. */
+
+ /* We say something is dirty if it was */
+ /* written since the last time we */
+ /* retrieved dirty bits. We say it's */
+ /* grungy if it was marked dirty in the */
+ /* last set of bits we retrieved. */
+
+ /* Invariant I: all roots and marked */
+				/* objects p are either dirty, or point to */
+ /* objects q that are either marked or */
+ /* a pointer to q appears in a range */
+ /* on the mark stack. */
+
+# define MS_NONE 0 /* No marking in progress. I holds. */
+ /* Mark stack is empty. */
+
+# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
+ /* being pushed. I holds, except */
+ /* that grungy roots may point to */
+ /* unmarked objects, as may marked */
+ /* grungy objects above scan_ptr. */
+
+# define MS_PUSH_UNCOLLECTABLE 2
+ /* I holds, except that marked */
+ /* uncollectable objects above scan_ptr */
+ /* may point to unmarked objects. */
+ /* Roots may point to unmarked objects */
+
+# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
+
+# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
+ /* overflow. However marked heap */
+ /* objects below scan_ptr point to */
+ /* marked or stacked objects. */
+
+# define MS_INVALID 5 /* I may not hold. */
+
+extern mark_state_t GC_mark_state;
+
+#endif /* GC_MARK_H */
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:17 pm PDT */
+
+
+# ifndef GC_PRIVATE_H
+# define GC_PRIVATE_H
+
+# ifndef GC_H
+# include "gc.h"
+# endif
+
+typedef GC_word word;
+typedef GC_signed_word signed_word;
+
+# ifndef CONFIG_H
+# include "config.h"
+# endif
+
+# ifndef HEADERS_H
+# include "gc_hdrs.h"
+# endif
+
+# ifndef bool
+ typedef int bool;
+# endif
+# define TRUE 1
+# define FALSE 0
+
+typedef char * ptr_t; /* A generic pointer to which we can add */
+			/* byte displacements.			*/
+			/* Preferably identical to caddr_t, if it 	*/
+ /* exists. */
+
+#if defined(__STDC__)
+# include <stdlib.h>
+# if !(defined( sony_news ) )
+# include <stddef.h>
+# endif
+ typedef void * extern_ptr_t;
+# define VOLATILE volatile
+#else
+# ifdef MSWIN32
+# include <stdlib.h>
+# endif
+ typedef char * extern_ptr_t;
+# define VOLATILE
+#endif
+
+#ifdef AMIGA
+# define GC_FAR __far
+#else
+# define GC_FAR
+#endif
+
+/*********************************/
+/* */
+/* Definitions for conservative */
+/* collector */
+/* */
+/*********************************/
+
+/*********************************/
+/* */
+/* Easily changeable parameters */
+/* */
+/*********************************/
+
+#define STUBBORN_ALLOC /* Define stubborn allocation primitives */
+#if defined(SRC_M3) || defined(SMALL_CONFIG)
+# undef STUBBORN_ALLOC
+#endif
+
+
+/* #define ALL_INTERIOR_POINTERS */
+ /* Forces all pointers into the interior of an */
+ /* object to be considered valid. Also causes the */
+ /* sizes of all objects to be inflated by at least */
+ /* one byte. This should suffice to guarantee */
+ /* that in the presence of a compiler that does */
+ /* not perform garbage-collector-unsafe */
+ /* optimizations, all portable, strictly ANSI */
+ /* conforming C programs should be safely usable */
+ /* with malloc replaced by GC_malloc and free */
+ /* calls removed. There are several disadvantages: */
+ /* 1. There are probably no interesting, portable, */
+ /* strictly ANSI conforming C programs. */
+ /* 2. This option makes it hard for the collector */
+ /* to allocate space that is not ``pointed to'' */
+ /* by integers, etc. Under SunOS 4.X with a */
+			/* statically linked libc, we empirically	*/
+ /* observed that it would be difficult to */
+ /* allocate individual objects larger than 100K. */
+ /* Even if only smaller objects are allocated, */
+ /* more swap space is likely to be needed. */
+ /* Fortunately, much of this will never be */
+ /* touched. */
+ /* If you can easily avoid using this option, do. */
+ /* If not, try to keep individual objects small. */
+
+#define PRINTSTATS /* Print garbage collection statistics */
+ /* For less verbose output, undefine in reclaim.c */
+
+#define PRINTTIMES /* Print the amount of time consumed by each garbage */
+ /* collection. */
+
+#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
+ /* whether the objects are atomic or composite, and */
+ /* whether or not the block was found to be empty */
+			/* during the reclaim phase.  Typically generates */
+ /* about one screenful per garbage collection. */
+#undef PRINTBLOCKS
+
+#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
+ /* cause the allocator to avoid allocating certain */
+ /* blocks in order to avoid introducing "false */
+ /* hits". */
+#undef PRINTBLACKLIST
+
+#ifdef SILENT
+# ifdef PRINTSTATS
+# undef PRINTSTATS
+# endif
+# ifdef PRINTTIMES
+# undef PRINTTIMES
+# endif
+# ifdef PRINTNBLOCKS
+# undef PRINTNBLOCKS
+# endif
+#endif
+
+#if defined(PRINTSTATS) && !defined(GATHERSTATS)
+# define GATHERSTATS
+#endif
+
+# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
+--> inconsistent configuration
+# endif
+# if defined(PCR) || defined(SRC_M3) || defined(SOLARIS_THREADS)
+# define THREADS
+# endif
+
+#ifdef SPARC
+# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
+ /* boundaries. Wasteful of memory, but */
+ /* apparently required by SPARC architecture. */
+# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
+ /* include assembly code to do it well. */
+#endif
+
+#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
+ /* free lists are actually maintained. This applies */
+ /* only to the top level routines in misc.c, not to */
+ /* user generated code that calls GC_allocobj and */
+ /* GC_allocaobj directly. */
+ /* Slows down average programs slightly. May however */
+ /* substantially reduce fragmentation if allocation */
+ /* request sizes are widely scattered. */
+ /* May save significant amounts of space for obj_map */
+ /* entries. */
+
+/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
+# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
+# define MERGE_SIZES
+# endif
+
+#if defined(ALL_INTERIOR_POINTERS) && !defined(DONT_ADD_BYTE_AT_END)
+# define ADD_BYTE_AT_END
+#endif
+
+
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+# define MAXHINCR 512 /* Maximum heap increment, in blocks */
+
+# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
+ /* this by much. In milliseconds. */
+
+/*********************************/
+/* */
+/* OS interface routines */
+/* */
+/*********************************/
+
+#include <time.h>
+#if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+ clock_t clock(); /* Not in time.h, where it belongs */
+#endif
+#if !defined(CLOCKS_PER_SEC)
+# define CLOCKS_PER_SEC 1000000
+/*
+ * This is technically a bug in the implementation. ANSI requires that
+ * CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
+ * Also note that the combination of ANSI C and POSIX is incredibly gross
+ * here. The type clock_t is used by both clock() and times(). But on
+ * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
+ * seems to apply only to clock().  Hence we use it here.  On many machines,
+ * including SunOS, clock actually uses units of microseconds (which are
+ * not really clock ticks).
+ */
+#endif
+#define CLOCK_TYPE clock_t
+#define GET_TIME(x) x = clock()
+#define MS_TIME_DIFF(a,b) ((unsigned long) \
+ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+
+/* We use bzero and bcopy internally. They may not be available. */
+# if defined(SPARC) && defined(SUNOS4)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(AMIGA)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(NEXT)
+# define BCOPY_EXISTS
+# endif
+# if defined(VAX)
+# define BCOPY_EXISTS
+# endif
+# if defined(AMIGA)
+# include <string.h>
+# define BCOPY_EXISTS
+# endif
+
+# ifndef BCOPY_EXISTS
+# include <string.h>
+# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
+# define BZERO(x,n) memset(x, 0, (size_t)(n))
+# else
+# define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
+# define BZERO(x,n) bzero((char *)(x),(int)(n))
+# endif
+
+/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
+/* space is assumed to be cleared. */
+# ifdef PCR
+ char * real_malloc();
+# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# ifdef OS2
+ void * os2_alloc(size_t bytes);
+# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# if defined(AMIGA) || defined(NEXT)
+# define GET_MEM(bytes) HBLKPTR(calloc(1, (size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# ifdef MSWIN32
+ extern ptr_t GC_win32_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
+# else
+ extern ptr_t GC_unix_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+# endif
+# endif
+# endif
+# endif
+
+/*
+ * Mutual exclusion between allocator/collector routines.
+ * Needed if there is more than one allocator thread.
+ * FASTLOCK() is assumed to try to acquire the lock in a cheap and
+ * dirty way that is acceptable for a few instructions, e.g. by
+ * inhibiting preemption. This is assumed to have succeeded only
+ * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
+ * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
+ * If signals cannot be tolerated with the FASTLOCK held, then
+ * FASTLOCK should disable signals. The code executed under
+ * FASTLOCK is otherwise immune to interruption, provided it is
+ * not restarted.
+ * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
+ * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
+ * (There is currently no equivalent for FASTLOCK.)
+ */
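+/* The intended usage pattern is the following sketch (cf. the fast	*/
+/* allocation macros in gc_inl.h):					*/
+/*									*/
+/*   DCL_LOCK_STATE;							*/
+/*									*/
+/*   FASTLOCK();							*/
+/*   if (!FASTLOCK_SUCCEEDED()) {					*/
+/*	 FASTUNLOCK();							*/
+/*	 ... fall back to a LOCK()-based slow path ...			*/
+/*   } else {								*/
+/*	 ... short critical section ...					*/
+/*	 FASTUNLOCK();							*/
+/*   }									*/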
+# ifdef THREADS
+# ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
+# include "th/PCR_Th.h"
+# include "th/PCR_ThCrSec.h"
+ extern struct PCR_Th_MLRep GC_allocate_ml;
+# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# define FASTLOCK() PCR_ThCrSec_EnterSys()
+ /* Here we cheat (a lot): */
+# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
+ /* TRUE if nobody currently holds the lock */
+# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
+# endif
+# ifdef PCR
+# include <base/PCR_Base.h>
+# include <th/PCR_Th.h>
+ extern PCR_Th_ML GC_allocate_ml;
+#    define DCL_LOCK_STATE PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
+# define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
+# define FASTUNLOCK() {\
+ if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
+# endif
+# ifdef SRC_M3
+ extern word RT0u__inCritical;
+# define LOCK() RT0u__inCritical++
+# define UNLOCK() RT0u__inCritical--
+# endif
+# ifdef SOLARIS_THREADS
+# include <thread.h>
+# include <signal.h>
+ extern mutex_t GC_allocate_ml;
+# define LOCK() mutex_lock(&GC_allocate_ml);
+# define UNLOCK() mutex_unlock(&GC_allocate_ml);
+# endif
+# else
+# define LOCK()
+# define UNLOCK()
+# endif
+
+# ifndef DCL_LOCK_STATE
+# define DCL_LOCK_STATE
+# endif
+# ifndef FASTLOCK
+# define FASTLOCK() LOCK()
+# define FASTLOCK_SUCCEEDED() TRUE
+# define FASTUNLOCK() UNLOCK()
+# endif
+
+/* Delay any interrupts or signals that may abort this thread. Data */
+/* structures are in a consistent state outside this pair of calls. */
+/* ANSI C allows both to be empty (though the standard isn't very */
+/* clear on that point). Standard malloc implementations are usually */
+/* neither interruptable nor thread-safe, and thus correspond to */
+/* empty definitions. */
+# ifdef PCR
+# define DISABLE_SIGNALS() \
+ PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
+# define ENABLE_SIGNALS() \
+ PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
+# else
+# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) || defined(MSWIN32)
+ /* Also useful for debugging, and unusually */
+ /* correct client code. */
+ /* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
+# define DISABLE_SIGNALS()
+# define ENABLE_SIGNALS()
+# else
+# define DISABLE_SIGNALS() GC_disable_signals()
+ void GC_disable_signals();
+# define ENABLE_SIGNALS() GC_enable_signals()
+ void GC_enable_signals();
+# endif
+# endif
+
+/*
+ * Stop and restart mutator threads.
+ */
+# ifdef PCR
+# include "th/PCR_ThCtl.h"
+# define STOP_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever)
+# define START_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever);
+# else
+# ifdef SOLARIS_THREADS
+# define STOP_WORLD() GC_stop_world()
+# define START_WORLD() GC_start_world()
+# else
+# define STOP_WORLD()
+# define START_WORLD()
+# endif
+# endif
+
+/* Abandon ship */
+# ifdef PCR
+ void PCR_Base_Panic(const char *fmt, ...);
+# define ABORT(s) PCR_Base_Panic(s)
+# else
+# ifdef SMALL_CONFIG
+# define ABORT(msg) abort();
+# else
+ void GC_abort();
+# define ABORT(msg) GC_abort(msg);
+# endif
+# endif
+
+/* Exit abnormally, but without making a mess (e.g. out of memory) */
+# ifdef PCR
+ void PCR_Base_Exit(int status);
+# define EXIT() PCR_Base_Exit(1)
+# else
+# define EXIT() (void)exit(1)
+# endif
+
+/* Print warning message, e.g. almost out of memory. */
+# define WARN(s) GC_printf0(s)
+
+/*********************************/
+/* */
+/* Word-size-dependent defines */
+/* */
+/*********************************/
+
+#if CPP_WORDSZ == 32
+# define WORDS_TO_BYTES(x) ((x)<<2)
+# define BYTES_TO_WORDS(x) ((x)>>2)
+# define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+#endif
+
+#if CPP_WORDSZ == 64
+# define WORDS_TO_BYTES(x) ((x)<<3)
+# define BYTES_TO_WORDS(x) ((x)>>3)
+# define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+#endif
+
+#define WORDSZ ((word)CPP_WORDSZ)
+#define SIGNB ((word)1 << (WORDSZ-1))
+#define BYTES_PER_WORD ((word)(sizeof (word)))
+#define ONES ((word)(-1))
+#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
+
+/*********************/
+/* */
+/* Size Parameters */
+/* */
+/*********************/
+
+/* heap block size, bytes. Should be power of 2 */
+
+#ifdef SMALL_CONFIG
+# define CPP_LOG_HBLKSIZE 10
+#else
+# if CPP_WORDSZ == 32
+# define CPP_LOG_HBLKSIZE 12
+# else
+# define CPP_LOG_HBLKSIZE 13
+# endif
+#endif
+#define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
+#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
+#define HBLKSIZE ((word)CPP_HBLKSIZE)
+
+
+/* max size objects supported by freelist (larger objects may be */
+/* allocated, but less efficiently) */
+
+#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_HBLKSIZE/2)
+#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
+
+# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
+
+# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
+ /* Equivalent to subtracting 2 hblk pointers. */
+ /* We do it this way because a compiler should */
+ /* find it hard to use an integer division */
+ /* instead of a shift. The bundled SunOS 4.1 */
+ /* o.w. sometimes pessimizes the subtraction to */
+ /* involve a call to .div. */
+
+# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
+
+# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
+
+# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
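+
+/* Thus, for any pointer p:						*/
+/*   p == (ptr_t)HBLKPTR(p) + HBLKDISPL(p)				*/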
+
+/* Round up byte allocation requests to integral number of words, etc. */
+# ifdef ADD_BYTE_AT_END
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
+# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) ((bytes)+1)
+# else
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1))
+# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) (bytes)
+# endif
+
+
+/*
+ * Hash table representation of sets of pages. This assumes it is
+ * OK to add spurious entries to sets.
+ * Used by black-listing code, and perhaps by dirty bit maintenance code.
+ */
+
+# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
+ /* to more than 16K hblks = 64MB. */
+ /* Each hash table occupies 2K bytes. */
+# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
+# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
+typedef word page_hash_table[PHT_SIZE];
+
+# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
+
+# define get_pht_entry_from_index(bl, index) \
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define set_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+# define clear_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
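+
+/* Example (an illustrative sketch): record, then test, the page	*/
+/* containing addr in a page_hash_table bl:				*/
+/*									*/
+/*   set_pht_entry_from_index(bl, PHT_HASH(addr));			*/
+/*   if (get_pht_entry_from_index(bl, PHT_HASH(addr))) ...		*/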
+
+
+
+/********************************************/
+/* */
+/* H e a p B l o c k s */
+/* */
+/********************************************/
+
+/* heap block header */
+#define HBLKMASK (HBLKSIZE-1)
+
+#define BITS_PER_HBLK (HBLKSIZE * 8)
+
+#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
+ /* upper bound */
+ /* We allocate 1 bit/word. Only the first word */
+ /* in each object is actually marked. */
+
+# ifdef ALIGN_DOUBLE
+# define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \
+ / (2*CPP_WORDSZ))*2)
+# else
+# define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ)
+# endif
+ /* Upper bound on number of mark words per heap block */
+
+struct hblkhdr {
+ word hb_sz; /* If in use, size in words, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ struct hblk * hb_next; /* Link field for hblk free list */
+ /* and for lists of chunks waiting to be */
+ /* reclaimed. */
+ word hb_descr; /* object descriptor for marking. See */
+ /* mark.h. */
+ char* hb_map; /* A pointer to a pointer validity map of the block. */
+ /* See GC_obj_map. */
+ /* Valid for all blocks with headers. */
+ /* Free blocks point to GC_invalid_map. */
+ unsigned char hb_obj_kind;
+ /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+ /* list headers. Sometimes called regions. */
+ unsigned char hb_flags;
+# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
+ /* point to the first page of */
+ /* this object. */
+ unsigned short hb_last_reclaimed;
+ /* Value of GC_gc_no when block was */
+ /* last allocated or swept. May wrap. */
+ word hb_marks[MARK_BITS_SZ];
+ /* Bit i in the array refers to the */
+ /* object starting at the ith word (header */
+ /* INCLUDED) in the heap block. */
+ /* The lsb of word 0 is numbered 0. */
+};
+
+/* heap block body */
+
+# define DISCARD_WORDS 0
+ /* Number of words to be dropped at the beginning of each block */
+ /* Must be a multiple of WORDSZ. May reasonably be nonzero */
+ /* on machines that don't guarantee longword alignment of */
+ /* pointers, so that the number of false hits is minimized. */
+ /* 0 and WORDSZ are probably the only reasonable values. */
+
+# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))
+
+struct hblk {
+# if (DISCARD_WORDS != 0)
+ word garbage[DISCARD_WORDS];
+# endif
+ word hb_body[BODY_SZ];
+};
+
+# define HDR_WORDS ((word)DISCARD_WORDS)
+# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))
+
+# define OBJ_SZ_TO_BLOCKS(sz) \
+ divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1)
+ /* Size of block (in units of HBLKSIZE) needed to hold objects of */
+ /* given sz (in words). */
+
+/* Object free list link */
+# define obj_link(p) (*(ptr_t *)(p))
+
+/* lists of all heap blocks and free lists */
+/* These are grouped together in a struct */
+/* so that they can be easily skipped by the */
+/* GC_mark routine. */
+/* The ordering is weird to make GC_malloc */
+/* faster by keeping the important fields */
+/* sufficiently close together that a */
+/* single load of a base register will do. */
+/* Scalars that could easily appear to */
+/* be pointers are also put here. */
+
+struct _GC_arrays {
+ word _heapsize;
+ ptr_t _last_heap_addr;
+ ptr_t _prev_heap_addr;
+ word _words_allocd_before_gc;
+ /* Number of words allocated before this */
+ /* collection cycle. */
+# ifdef GATHERSTATS
+ word _composite_in_use;
+ /* Number of words in accessible composite */
+ /* objects. */
+ word _atomic_in_use;
+ /* Number of words in accessible atomic */
+ /* objects. */
+# endif
+ word _words_allocd;
+ /* Number of words allocated during this collection cycle */
+ word _words_wasted;
+ /* Number of words wasted due to internal fragmentation */
+ /* in large objects allocated since last gc. Approximate.*/
+ word _non_gc_bytes_at_gc;
+ /* Number of explicitly managed bytes of storage */
+ /* at last collection. */
+ word _mem_freed;
+ /* Number of explicitly deallocated words of memory */
+ /* since last collection. */
+
+ ptr_t _objfreelist[MAXOBJSZ+1];
+ /* free list for objects */
+# ifdef MERGE_SIZES
+ unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
+ /* Number of words to allocate for a given allocation request in */
+ /* bytes. */
+# endif
+ ptr_t _aobjfreelist[MAXOBJSZ+1];
+ /* free list for atomic objs */
+
+ ptr_t _uobjfreelist[MAXOBJSZ+1];
+ /* uncollectable but traced objs */
+
+# ifdef STUBBORN_ALLOC
+ ptr_t _sobjfreelist[MAXOBJSZ+1];
+# endif
+ /* free list for immutable objects */
+ ptr_t _obj_map[MAXOBJSZ+1];
+ /* If not NIL, then a pointer to a map of valid */
+ /* object addresses. hbh_map[sz][i] is j if the */
+ /* address block_start+i is a valid pointer */
+ /* to an object at */
+ /* block_start+i&~3 - WORDS_TO_BYTES(j). */
+ /* (If ALL_INTERIOR_POINTERS is defined, then */
+ /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* block_start+WORDS_TO_BYTES(i) is in the */
+ /* interior of an object starting at */
+ /* block_start+WORDS_TO_BYTES(i-j)). */
+ /* It is OBJ_INVALID if */
+ /* block_start+WORDS_TO_BYTES(i) is not */
+ /* valid as a pointer to an object. */
+				/* We assume that all values of j are	*/
+				/* <= OBJ_INVALID.			*/
+ /* The zeroth entry corresponds to large objects.*/
+# ifdef ALL_INTERIOR_POINTERS
+# define map_entry_type short
+# define OBJ_INVALID 0x7fff
+# define MAP_ENTRY(map, bytes) \
+ (((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)])
+# define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE)
+# define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type))
+# define OFFSET_VALID(displ) TRUE
+# define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# else
+# define map_entry_type char
+# define OBJ_INVALID 0x7f
+# define MAP_ENTRY(map, bytes) \
+ (map)[bytes]
+# define MAP_ENTRIES HBLKSIZE
+# define MAP_SIZE MAP_ENTRIES
+# define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# define VALID_OFFSET_SZ \
+ (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
+ CPP_MAX_OFFSET+1 \
+ : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
+ char _valid_offsets[VALID_OFFSET_SZ];
+ /* GC_valid_offsets[i] == TRUE ==> i */
+ /* is registered as a displacement. */
+# define OFFSET_VALID(displ) GC_valid_offsets[displ]
+ char _modws_valid_offsets[sizeof(word)];
+ /* GC_valid_offsets[i] ==> */
+ /* GC_modws_valid_offsets[i%sizeof(word)] */
+# endif
+ struct hblk * _reclaim_list[MAXOBJSZ+1];
+ struct hblk * _areclaim_list[MAXOBJSZ+1];
+ struct hblk * _ureclaim_list[MAXOBJSZ+1];
+# ifdef STUBBORN_ALLOC
+ struct hblk * _sreclaim_list[MAXOBJSZ+1];
+ page_hash_table _changed_pages;
+	/* Stubborn object pages that were changed since last call to	*/
+ /* GC_read_changed. */
+ page_hash_table _prev_changed_pages;
+	/* Stubborn object pages that were changed before last call to	*/
+ /* GC_read_changed. */
+# endif
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ /* GC_read_dirty. */
+# endif
+# define MAX_HEAP_SECTS 256 /* Separately added heap sections. */
+ struct HeapSect {
+ ptr_t hs_start; word hs_bytes;
+ } _heap_sects[MAX_HEAP_SECTS];
+# ifdef MSWIN32
+ ptr_t _heap_bases[MAX_HEAP_SECTS];
+ /* Start address of memory regions obtained from kernel. */
+# endif
+ /* Block header index; see gc_headers.h */
+ bottom_index _all_nils;
+ bottom_index * _top_index [TOP_SZ];
+};
+
+extern GC_FAR struct _GC_arrays GC_arrays;
+
+# define GC_objfreelist GC_arrays._objfreelist
+# define GC_aobjfreelist GC_arrays._aobjfreelist
+# define GC_uobjfreelist GC_arrays._uobjfreelist
+# define GC_sobjfreelist GC_arrays._sobjfreelist
+# define GC_valid_offsets GC_arrays._valid_offsets
+# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
+# define GC_reclaim_list GC_arrays._reclaim_list
+# define GC_areclaim_list GC_arrays._areclaim_list
+# define GC_ureclaim_list GC_arrays._ureclaim_list
+# ifdef STUBBORN_ALLOC
+# define GC_sreclaim_list GC_arrays._sreclaim_list
+# define GC_changed_pages GC_arrays._changed_pages
+# define GC_prev_changed_pages GC_arrays._prev_changed_pages
+# endif
+# define GC_obj_map GC_arrays._obj_map
+# define GC_last_heap_addr GC_arrays._last_heap_addr
+# define GC_prev_heap_addr GC_arrays._prev_heap_addr
+# define GC_words_allocd GC_arrays._words_allocd
+# define GC_words_wasted GC_arrays._words_wasted
+# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
+# define GC_mem_freed GC_arrays._mem_freed
+# define GC_heapsize GC_arrays._heapsize
+# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# define GC_heap_sects GC_arrays._heap_sects
+# ifdef MSWIN32
+# define GC_heap_bases GC_arrays._heap_bases
+# endif
+# define GC_all_nils GC_arrays._all_nils
+# define GC_top_index GC_arrays._top_index
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+# define GC_grungy_pages GC_arrays._grungy_pages
+# endif
+# ifdef GATHERSTATS
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# endif
+# ifdef MERGE_SIZES
+# define GC_size_map GC_arrays._size_map
+# endif
+
+# define beginGC_arrays ((ptr_t)(&GC_arrays))
+# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
+
+
+# define MAXOBJKINDS 16
+
+/* Object kinds: */
+extern struct obj_kind {
+ ptr_t *ok_freelist; /* Array of free listheaders for this kind of object */
+ /* Point either to GC_arrays or to storage allocated */
+ /* with GC_scratch_alloc. */
+ struct hblk **ok_reclaim_list;
+ /* List headers for lists of blocks waiting to be */
+ /* swept. */
+ word ok_descriptor; /* Descriptor template for objects in this */
+ /* block. */
+ bool ok_relocate_descr;
+ /* Add object size in bytes to descriptor */
+ /* template to obtain descriptor. Otherwise */
+ /* template is used as is. */
+ bool ok_init; /* Clear objects before putting them on the free list. */
+} GC_obj_kinds[MAXOBJKINDS];
+/* Predefined kinds: */
+# define PTRFREE 0
+# define NORMAL 1
+# define UNCOLLECTABLE 2
+# define STUBBORN 3
+
+extern int GC_n_kinds;
+
+extern word GC_n_heap_sects; /* Number of separately added heap */
+ /* sections. */
+
+# ifdef MSWIN32
+extern word GC_n_heap_bases; /* See GC_heap_bases. */
+# endif
+
+extern char * GC_invalid_map;
+ /* Pointer to the nowhere valid hblk map */
+ /* Blocks pointing to this map are free. */
+
+extern struct hblk * GC_hblkfreelist;
+ /* List of completely empty heap blocks */
+ /* Linked through hb_next field of */
+ /* header structure associated with */
+ /* block. */
+
+extern bool GC_is_initialized; /* GC_init() has been run. */
+
+extern bool GC_objects_are_marked; /* There are marked objects in */
+ /* the heap. */
+
+extern int GC_incremental; /* Using incremental/generational collection. */
+
+extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+ /* either for incremental collection, */
+ /* or to limit the root set. */
+
+# ifndef PCR
+ extern ptr_t GC_stackbottom; /* Cool end of user stack */
+# endif
+
+extern word GC_root_size; /* Total size of registered root sections */
+
+extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+
+extern ptr_t GC_least_plausible_heap_addr;
+extern ptr_t GC_greatest_plausible_heap_addr;
+ /* Bounds on the heap. Guaranteed valid */
+ /* Likely to include future heap expansion. */
+
+/* Operations */
+# ifndef abs
+# define abs(x) ((x) < 0? (-(x)) : (x))
+# endif
+
+
+/* Marks are in a reserved area in */
+/* each heap block. Each word has one mark bit associated */
+/* with it. Only those corresponding to the beginning of an */
+/* object are used. */
+
+
+/* Mark bit operations */
+
+/*
+ * Retrieve, set, clear the mark bit corresponding
+ * to the nth word in a given heap block.
+ *
+ * (Recall that bit n corresponds to object beginning at word n
+ * relative to the beginning of the block, including unused words)
+ */
+
+# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
+ >> (modWORDSZ(n))) & (word)1)
+# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ |= (word)1 << modWORDSZ(n)
+
+# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ &= ~((word)1 << modWORDSZ(n))
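+
+/* For instance (an illustrative sketch, not part of the interface), */
+/* a marker holding the header hhdr for a block might test and set */
+/* the bit for the object starting at word n as follows: */
+/* */
+/* if (!mark_bit_from_hdr(hhdr, n)) { */
+/* set_mark_bit_from_hdr(hhdr, n); */
+/* ... push the object's contents onto the mark stack ... */
+/* } */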
+
+/* Important internal collector routines */
+
+void GC_apply_to_all_blocks(/*fn, client_data*/);
+ /* Invoke fn(hbp, client_data) for each */
+ /* allocated heap block. */
+struct hblk * GC_next_block(/* struct hblk * h */);
+void GC_mark_init();
+void GC_clear_marks(); /* Clear mark bits for all heap objects. */
+void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
+ /* Return after about one page's worth of */
+ /* work. */
+bool GC_mark_stack_empty();
+bool GC_mark_some(); /* Perform about one page's worth of marking */
+ /* work of whatever kind is needed. Returns */
+ /* quickly if no collection is in progress. */
+ /* Return TRUE if mark phase finished. */
+void GC_initiate_full(); /* initiate full collection. */
+void GC_initiate_partial(); /* initiate partial collection. */
+void GC_push_all(/*b,t*/); /* Push everything in a range */
+ /* onto mark stack. */
+void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
+ /* subintervals of [b,t) onto */
+ /* mark stack. */
+#ifndef SMALL_CONFIG
+ void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+#else
+# define GC_push_conditional(b, t, all) GC_push_all(b, t)
+#endif
+ /* Do either of the above, depending */
+ /* on the third arg. */
+void GC_push_all_stack(/*b,t*/); /* As above, but consider */
+ /* interior pointers as valid */
+void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+extern void (*GC_push_other_roots)();
+ /* Push system or application specific roots */
+ /* onto the mark stack. In some environments */
+ /* (e.g. threads environments) this is */
+ /* predefined to be non-zero. A client supplied */
+ /* replacement should also call the original */
+ /* function. */
+void GC_push_regs(); /* Push register contents onto mark stack. */
+void GC_remark(); /* Mark from all marked objects. Used */
+ /* only if we had to drop something. */
+void GC_push_one(/*p*/); /* If p points to an object, mark it */
+ /* and push contents on the mark stack */
+void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test */
+void GC_push_marked(/* struct hblk * h, hdr * hhdr */);
+ /* Push contents of all marked objects in h onto */
+ /* mark stack. */
+#ifdef SMALL_CONFIG
+# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
+#else
+ struct hblk * GC_push_next_marked_dirty(/* h */);
+ /* Invoke GC_push_marked on next dirty block above h. */
+ /* Return a pointer just past the end of this block. */
+#endif /* !SMALL_CONFIG */
+struct hblk * GC_push_next_marked(/* h */);
+ /* Ditto, but also mark from clean pages. */
+struct hblk * GC_push_next_marked_uncollectable(/* h */);
+ /* Ditto, but mark only from uncollectable pages. */
+bool GC_stopped_mark(); /* Stop world and mark from all roots */
+ /* and rescuers. */
+void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
+void GC_add_roots_inner();
+void GC_register_dynamic_libraries();
+ /* Add dynamic library data sections to the root set. */
+
+/* Machine dependent startup routines */
+ptr_t GC_get_stack_base();
+void GC_register_data_segments();
+
+/* Black listing: */
+void GC_bl_init();
+# ifndef ALL_INTERIOR_POINTERS
+ void GC_add_to_black_list_normal(/* bits */);
+ /* Register bits as a possible future false */
+ /* reference from the heap or static data */
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# endif
+
+void GC_add_to_black_list_stack(/* bits */);
+struct hblk * GC_is_black_listed(/* h, len */);
+ /* If there are likely to be false references */
+ /* to a block starting at h of the indicated */
+ /* length, then return the next plausible */
+ /* starting location for h that might avoid */
+ /* these false references. */
+void GC_promote_black_lists();
+ /* Declare an end to a black listing phase. */
+
+ptr_t GC_scratch_alloc(/*bytes*/);
+ /* GC internal memory allocation for */
+ /* small objects. Deallocation is not */
+ /* possible. */
+
+/* Heap block layout maps: */
+void GC_invalidate_map(/* hdr */);
+ /* Remove the object map associated */
+ /* with the block. This identifies */
+ /* the block as invalid to the mark */
+ /* routines. */
+bool GC_add_map_entry(/*sz*/);
+ /* Add a heap block map for objects of */
+ /* size sz to obj_map. */
+ /* Return FALSE on failure. */
+void GC_register_displacement_inner(/*offset*/);
+ /* Version of GC_register_displacement */
+ /* that assumes lock is already held */
+ /* and signals are already disabled. */
+
+/* hblk allocation: */
+void GC_new_hblk(/*size_in_words, kind*/);
+ /* Allocate a new heap block, and build */
+ /* a free list in it. */
+struct hblk * GC_allochblk(/*size_in_words, kind, flags*/);
+ /* Allocate a heap block; clear it if it */
+ /* is for composite objects, and inform */
+ /* the marker that the block is valid */
+ /* for objects of the indicated size. */
+ /* sz < 0 ==> atomic. */
+void GC_freehblk(); /* Deallocate a heap block and mark it */
+ /* as invalid. */
+
+/* Misc GC: */
+void GC_init_inner();
+bool GC_expand_hp_inner();
+void GC_start_reclaim(/*abort_if_found*/);
+ /* Restore unmarked objects to free */
+ /* lists, or (if abort_if_found is */
+ /* TRUE) report them. */
+ /* Sweeping of small object pages is */
+ /* largely deferred. */
+void GC_continue_reclaim(/*size, kind*/);
+ /* Sweep pages of the given size and */
+ /* kind, as long as possible, and */
+ /* as long as the corresponding free */
+ /* list is empty. */
+void GC_reclaim_or_delete_all();
+ /* Arrange for all reclaim lists to be */
+ /* empty. Judiciously choose between */
+ /* sweeping and discarding each page. */
+bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+void GC_gcollect_inner();
+ /* Collect; caller must have acquired */
+ /* lock and disabled signals. */
+void GC_finish_collection(); /* Finish collection. Mark bits are */
+ /* consistent and lock is still held. */
+bool GC_collect_or_expand(/* needed_blocks */);
+ /* Collect or expand heap in an attempt */
+ /* to make the indicated number of free */
+ /* blocks available. Should be called */
+ /* until it fails by returning FALSE. */
+void GC_init(); /* Initialize collector. */
+void GC_collect_a_little(/* n */);
+ /* Do n units worth of garbage */
+ /* collection work, if appropriate. */
+ /* A unit is an amount appropriate for */
+ /* HBLKSIZE bytes of allocation. */
+ptr_t GC_generic_malloc(/* bytes, kind */);
+ /* Allocate an object of the given */
+ /* kind. By default, there are only */
+ /* two kinds: composite, and atomic. */
+ /* We claim it's possible for clever */
+ /* client code that understands GC */
+ /* internals to add more, e.g. to */
+ /* communicate object layout info */
+ /* to the collector. */
+ptr_t GC_generic_malloc_inner(/* bytes, kind */);
+ /* Ditto, but I already hold lock, etc. */
+ptr_t GC_generic_malloc_words_small(/*words, kind*/);
+ /* As above, but size in units of words */
+ /* Bypasses MERGE_SIZES. Assumes */
+ /* words <= MAXOBJSZ. */
+ptr_t GC_malloc_ignore_off_page_inner(/* bytes */);
+ /* Allocate an object, where */
+ /* the client guarantees that there */
+ /* will always be a pointer to the */
+ /* beginning of the object while the */
+ /* object is live. */
+ptr_t GC_allocobj(/* sz_in_words, kind */);
+ /* Make the indicated */
+ /* free list nonempty, and return its */
+ /* head. */
+
+void GC_init_headers();
+bool GC_install_header(/*h*/);
+ /* Install a header for block h. */
+ /* Return FALSE on failure. */
+bool GC_install_counts(/*h, sz*/);
+ /* Set up forwarding counts for block */
+ /* h of size sz. */
+ /* Return FALSE on failure. */
+void GC_remove_header(/*h*/);
+ /* Remove the header for block h. */
+void GC_remove_counts(/*h, sz*/);
+ /* Remove forwarding counts for h. */
+hdr * GC_find_header(/*p*/); /* Debugging only. */
+
+void GC_finalize(); /* Perform all indicated finalization actions */
+ /* on unmarked objects. */
+ /* Unreachable finalizable objects are enqueued */
+ /* for processing by GC_invoke_finalizers. */
+ /* Invoked with lock. */
+void GC_invoke_finalizers(); /* Run eligible finalizers. */
+ /* Invoked without lock. */
+
+void GC_add_to_heap(/*p, bytes*/);
+ /* Add a HBLKSIZE aligned chunk to the heap. */
+
+void GC_print_obj(/* ptr_t p */);
+ /* P points to somewhere inside an object with */
+ /* debugging info. Print a human readable */
+ /* description of the object to stderr. */
+extern void (*GC_check_heap)();
+ /* Check that all objects in the heap with */
+ /* debugging info are intact. Print */
+ /* descriptions of any that are not. */
+
+/* Virtual dirty bit implementation: */
+/* Each implementation exports the following: */
+void GC_read_dirty(); /* Retrieve dirty bits. */
+bool GC_page_was_dirty(/* struct hblk * h */);
+ /* Read retrieved dirty bits. */
+bool GC_page_was_ever_dirty(/* struct hblk * h */);
+ /* Could the page contain valid heap pointers? */
+void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
+ /* Assert the region currently contains no */
+ /* valid pointers. */
+void GC_write_hint(/* struct hblk * h */);
+ /* h is about to be written. */
+void GC_dirty_init();
+
+/* Slow/general mark bit manipulation: */
+bool GC_is_marked();
+void GC_clear_mark_bit();
+void GC_set_mark_bit();
+
+/* Stubborn objects: */
+void GC_read_changed(); /* Analogous to GC_read_dirty */
+bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+void GC_clean_changing_list(); /* Collect obsolete changing list entries */
+void GC_stubborn_init();
+
+/* Debugging print routines: */
+void GC_print_block_list();
+void GC_print_hblkfreelist();
+
+/* Make arguments appear live to compiler */
+void GC_noop();
+
+/* Logging and diagnostic output: */
+void GC_printf(/* format, a, b, c, d, e, f */);
+ /* A version of printf that doesn't allocate, */
+ /* is restricted to long arguments, and */
+ /* (unfortunately) doesn't use varargs for */
+ /* portability. Restricted to 6 args and */
+ /* 1K total output length. */
+ /* (We use sprintf. Hopefully that doesn't */
+ /* allocate for long arguments.) */
+# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
+# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
+# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
+# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
+# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, 0l, 0l)
+# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, (long)e, 0l)
+# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, (long)e, (long)g)
+
+void GC_err_printf(/* format, a, b, c, d, e, f */);
+# define GC_err_printf0(f) GC_err_puts(f)
+# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
+# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
+# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
+ 0l, 0l, 0l)
+# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, 0l, 0l)
+# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, \
+ (long)e, 0l)
+# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, \
+ (long)e, (long)g)
+ /* Ditto, writes to stderr. */
+
+void GC_err_puts(/* char *s */);
+ /* Write s to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+
+# endif /* GC_PRIVATE_H */
--- /dev/null
+# include "gc_priv.h"
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Some simple primitives for allocation with explicit type information.
+ * Facilities for dynamic type inference may be added later.
+ * Should be used only for extremely performance critical applications,
+ * or if conservative collector leakage is otherwise a problem (unlikely).
+ * Note that this is implemented completely separately from the rest
+ * of the collector, and is not linked in unless referenced.
+ */
+/* Boehm, May 19, 1994 2:13 pm PDT */
+
+#ifndef _GC_TYPED_H
+# define _GC_TYPED_H
+# ifndef _GC_H
+# include "gc.h"
+# endif
+
+typedef GC_word * GC_bitmap;
+ /* The least significant bit of the first word is one if */
+ /* the first word in the object may be a pointer. */
+
+# define GC_get_bit(bm, index) \
+ (((bm)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define GC_set_bit(bm, index) \
+ (bm)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+
+typedef GC_word GC_descr;
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern GC_descr GC_make_descriptor(GC_bitmap bm, size_t len);
+#else
+ extern GC_descr GC_make_descriptor(/* GC_bitmap bm, size_t len */);
+#endif
+ /* Return a type descriptor for the object whose layout */
+ /* is described by the argument. */
+ /* The least significant bit of the first word is one */
+ /* if the first word in the object may be a pointer. */
+ /* The second argument specifies the number of */
+ /* meaningful bits in the bitmap. The actual object */
+ /* may be larger (but not smaller). Any additional */
+ /* words in the object are assumed not to contain */
+ /* pointers. */
+ /* Returns a conservative approximation in the */
+ /* (unlikely) case of insufficient memory to build */
+ /* the descriptor. Calls to GC_make_descriptor */
+ /* may consume some amount of a finite resource. This */
+ /* is intended to be called once per type, not once */
+ /* per allocation. */
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_malloc_explicitly_typed(size_t size_in_bytes, GC_descr d);
+#else
+ extern char * GC_malloc_explicitly_typed(/* size_in_bytes, descriptor */);
+#endif
+ /* Allocate an object whose layout is described by d. */
+ /* The resulting object MAY NOT BE PASSED TO REALLOC. */
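+
+/* Example (a sketch; the names here are illustrative only): */
+/* struct pair { GC_word tag; char * name; }; */
+/* GC_word bm[1] = {0}; */
+/* GC_descr d; */
+/* struct pair * p; */
+/* GC_set_bit(bm, 1); -- only the second word may hold a pointer */
+/* d = GC_make_descriptor(bm, 2); */
+/* p = (struct pair *)GC_malloc_explicitly_typed(sizeof(struct pair), d); */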
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_calloc_explicitly_typed(size_t nelements,
+ size_t element_size_in_bytes,
+ GC_descr d);
+#else
+ char * GC_calloc_explicitly_typed(/* nelements, size_in_bytes, descriptor */);
+ /* Allocate an array of nelements elements, each of the */
+ /* given size, and with the given descriptor. */
+ /* The element size must be a multiple of the byte */
+ /* alignment required for pointers. E.g. on a 32-bit */
+ /* machine with 16-bit aligned pointers, size_in_bytes */
+ /* must be a multiple of 2. */
+#endif
+
+#endif /* _GC_TYPED_H */
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:08 pm PDT */
+
+/*
+ * This implements:
+ * 1. allocation of heap block headers
+ * 2. A map from heap block addresses to heap block headers
+ *
+ * Access speed is crucial. We implement an index structure based on a 2
+ * level tree.
+ */
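+
+/* In outline (a sketch, using the same index arithmetic that */
+/* GC_next_block uses below): an address p is mapped to */
+/* hi = (word)p >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE), */
+/* which selects a bottom_index through GC_top_index[hi] (or through */
+/* a hash chain under HASH_TL), and then */
+/* j = ((word)p >> LOG_HBLKSIZE) & (BOTTOM_SZ-1) */
+/* selects the header pointer within that bottom_index. */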
+
+# include "gc_priv.h"
+
+bottom_index * GC_all_bottom_indices = 0;
+
+/* Non-macro version of header location routine */
+hdr * GC_find_header(h)
+ptr_t h;
+{
+# ifdef HASH_TL
+ register hdr * result;
+ GET_HDR(h, result);
+ return(result);
+# else
+ return(HDR_INNER(h));
+# endif
+}
+
+/* Routines to dynamically allocate collector data structures that will */
+/* never be freed. */
+
+static ptr_t scratch_free_ptr = 0;
+
+ptr_t GC_scratch_end_ptr = 0;
+
+ptr_t GC_scratch_alloc(bytes)
+register word bytes;
+{
+ register ptr_t result = scratch_free_ptr;
+ scratch_free_ptr += bytes;
+ if (scratch_free_ptr <= GC_scratch_end_ptr) {
+ return(result);
+ }
+ {
+ word bytes_to_get = MINHINCR * HBLKSIZE;
+
+ if (bytes_to_get <= bytes) {
+ /* Undo the damage, and get memory directly */
+ scratch_free_ptr -= bytes;
+ return((ptr_t)GET_MEM(bytes));
+ }
+ result = (ptr_t)GET_MEM(bytes_to_get);
+ if (result == 0) {
+# ifdef PRINTSTATS
+ GC_printf0("Out of memory - trying to allocate less\n");
+# endif
+ scratch_free_ptr -= bytes;
+ return((ptr_t)GET_MEM(bytes));
+ }
+ scratch_free_ptr = result;
+ GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
+ return(GC_scratch_alloc(bytes));
+ }
+}
+
+static hdr * hdr_free_list = 0;
+
+/* Return an uninitialized header */
+static hdr * alloc_hdr()
+{
+ register hdr * result;
+
+ if (hdr_free_list == 0) {
+ result = (hdr *) GC_scratch_alloc((word)(sizeof(hdr)));
+ } else {
+ result = hdr_free_list;
+ hdr_free_list = (hdr *) (result -> hb_next);
+ }
+ return(result);
+}
+
+static void free_hdr(hhdr)
+hdr * hhdr;
+{
+ hhdr -> hb_next = (struct hblk *) hdr_free_list;
+ hdr_free_list = hhdr;
+}
+
+void GC_init_headers()
+{
+ register int i;
+
+ for (i = 0; i < TOP_SZ; i++) {
+ GC_top_index[i] = &GC_all_nils;
+ }
+}
+
+/* Make sure that there is a bottom level index block for address addr */
+/* Return FALSE on failure. */
+static bool get_index(addr)
+register word addr;
+{
+ register word hi =
+ (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ register bottom_index * r;
+ register bottom_index * p;
+ register bottom_index ** prev;
+# ifdef HASH_TL
+ register int i = TL_HASH(hi);
+ register bottom_index * old;
+
+ old = p = GC_top_index[i];
+ while(p != &GC_all_nils) {
+ if (p -> key == hi) return(TRUE);
+ p = p -> hash_link;
+ }
+ r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
+ if (r == 0) return(FALSE);
+ BZERO(r, sizeof (bottom_index));
+ r -> hash_link = old;
+ GC_top_index[i] = r;
+# else
+ if (GC_top_index[hi] != &GC_all_nils) return(TRUE);
+ r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
+ if (r == 0) return(FALSE);
+ GC_top_index[hi] = r;
+ BZERO(r, sizeof (bottom_index));
+# endif
+ r -> key = hi;
+ /* Add it to the list of bottom indices */
+ prev = &GC_all_bottom_indices;
+ while ((p = *prev) != 0 && p -> key < hi) prev = &(p -> asc_link);
+ r -> asc_link = p;
+ *prev = r;
+ return(TRUE);
+}
+
+/* Install a header for block h. */
+/* The header is uninitialized. */
+/* Returns FALSE on failure. */
+bool GC_install_header(h)
+register struct hblk * h;
+{
+ hdr * result;
+
+ if (!get_index((word) h)) return(FALSE);
+ result = alloc_hdr();
+ SET_HDR(h, result);
+ return(result != 0);
+}
+
+/* Set up forwarding counts for block h of size sz */
+bool GC_install_counts(h, sz)
+register struct hblk * h;
+register word sz; /* bytes */
+{
+ register struct hblk * hbp;
+ register int i;
+
+ for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
+ if (!get_index((word) hbp)) return(FALSE);
+ }
+ if (!get_index((word)h + sz - 1)) return(FALSE);
+ for (hbp = h + 1; (char *)hbp < (char *)h + sz; hbp += 1) {
+ i = HBLK_PTR_DIFF(hbp, h);
+ SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
+ }
+ return(TRUE);
+}
+
+/* Remove the header for block h */
+void GC_remove_header(h)
+register struct hblk * h;
+{
+ hdr ** ha;
+
+ GET_HDR_ADDR(h, ha);
+ free_hdr(*ha);
+ *ha = 0;
+}
+
+/* Remove forwarding counts for h */
+void GC_remove_counts(h, sz)
+register struct hblk * h;
+register word sz; /* bytes */
+{
+ register struct hblk * hbp;
+
+ for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
+ SET_HDR(hbp, 0);
+ }
+}
+
+/* Apply fn to all allocated blocks */
+/*VARARGS1*/
+void GC_apply_to_all_blocks(fn, client_data)
+void (*fn)(/* struct hblk *h, word client_data */);
+word client_data;
+{
+ register int j;
+ register bottom_index * index_p;
+
+ for (index_p = GC_all_bottom_indices; index_p != 0;
+ index_p = index_p -> asc_link) {
+ for (j = BOTTOM_SZ-1; j >= 0;) {
+ if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
+ if (index_p->index[j]->hb_map != GC_invalid_map) {
+ (*fn)(((struct hblk *)
+ (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
+ << LOG_HBLKSIZE)),
+ client_data);
+ }
+ j--;
+ } else if (index_p->index[j] == 0) {
+ j--;
+ } else {
+ j -= (int)(index_p->index[j]);
+ }
+ }
+ }
+}
+
+/* Get the next valid block whose address is at least h */
+/* Return 0 if there is none. */
+struct hblk * GC_next_block(h)
+struct hblk * h;
+{
+ register bottom_index * bi;
+ register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+
+ GET_BI(h, bi);
+ if (bi == &GC_all_nils) {
+ register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bi = GC_all_bottom_indices;
+ while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
+ j = 0;
+ }
+ while(bi != 0) {
+ while (j < BOTTOM_SZ) {
+ if (IS_FORWARDING_ADDR_OR_NIL(bi -> index[j])) {
+ j++;
+ } else {
+ if (bi->index[j]->hb_map != GC_invalid_map) {
+ return((struct hblk *)
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
+ } else {
+ j += divHBLKSZ(bi->index[j] -> hb_sz);
+ }
+ }
+ }
+ j = 0;
+ bi = bi -> asc_link;
+ }
+ return(0);
+}
--- /dev/null
+/* Conditionally execute a command based on machine and OS from config.h */
+# include "config.h"
+# include <stdio.h>
+# include <string.h>
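+
+/* Typical use, from a makefile rule (an illustrative sketch): */
+/* ./if_mach SPARC SUNOS4 $(CC) -c sparc_mach_dep.s */
+/* The command is run only when config.h defines MACH_TYPE as "SPARC" */
+/* (and OS_TYPE as "SUNOS4"); otherwise we exit silently with status 0. */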
+
+int main(argc, argv, envp)
+int argc;
+char ** argv;
+char ** envp;
+{
+ if (argc < 4) goto Usage;
+ if (strcmp(MACH_TYPE, argv[1]) != 0) return(0);
+ if (strcmp(OS_TYPE, "") != 0 && strcmp(argv[2], "") != 0
+ && strcmp(OS_TYPE, argv[2]) != 0) return(0);
+ execvp(argv[3], argv+3);
+
+Usage:
+ fprintf(stderr, "Usage: %s mach_type os_type command\n", argv[0]);
+ fprintf(stderr, "Currently mach_type = %s, os_type = %s\n",
+ MACH_TYPE, OS_TYPE);
+ return(1);
+}
+
--- /dev/null
+/* Conditionally execute a command if the file argv[1] doesn't exist */
+/* Except for execvp, we stick to ANSI C. */
+# include "config.h"
+# include <stdio.h>
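+
+/* Typical use, from a makefile rule (an illustrative sketch): */
+/* ./if_not_there foo.o $(MAKE) foo.o */
+/* The command is run only when the named file cannot be opened. */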
+
+int main(argc, argv, envp)
+int argc;
+char ** argv;
+char ** envp;
+{
+ FILE * f;
+ if (argc < 3) goto Usage;
+ if ((f = fopen(argv[1], "rb")) != 0
+ || (f = fopen(argv[1], "r")) != 0) {
+ fclose(f);
+ return(0);
+ }
+ execvp(argv[2], argv+2);
+
+Usage:
+ fprintf(stderr, "Usage: %s file_name command\n", argv[0]);
+ return(1);
+}
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+#ifndef GC_H
+
+# define GC_H
+
+# include <stddef.h>
+
+/* Define word and signed_word to be unsigned and signed types of the */
+/* same size as char * or void *. There seems to be no way to do this */
+/* even semi-portably. The following is probably no better/worse */
+/* than almost anything else. */
+/* The ANSI standard suggests that size_t and ptrdiff_t might be */
+/* better choices. But those appear to have incorrect definitions */
+/* on many systems. Notably "typedef int size_t" seems to be both */
+/* frequent and WRONG. */
+typedef unsigned long GC_word;
+typedef long GC_signed_word;
+
+/* Public read-only variables */
+
+extern GC_word GC_heapsize; /* Heap size in bytes */
+
+extern GC_word GC_gc_no;/* Counter incremented per collection. */
+ /* Includes empty GCs at startup. */
+
+extern int GC_incremental; /* Using incremental/generational collection. */
+
+
+/* Public R/W variables */
+
+extern int GC_quiet; /* Disable statistics output. Only matters if */
+ /* collector has been compiled with statistics */
+ /* enabled. This involves a performance cost, */
+ /* and is thus not the default. */
+
+extern int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
+ /* because it's not safe. */
+
+extern int GC_dont_expand;
+ /* Don't expand heap unless explicitly requested */
+ /* or forced to. */
+
+extern int GC_full_freq; /* Number of partial collections between */
+ /* full collections. Matters only if */
+ /* GC_incremental is set. */
+
+extern GC_word GC_non_gc_bytes;
+ /* Bytes not considered candidates for collection. */
+ /* Used only to control scheduling of collections. */
+
+extern GC_word GC_free_space_divisor;
+ /* We try to make sure that we allocate at */
+ /* least N/GC_free_space_divisor bytes between */
+ /* collections, where N is the heap size plus */
+ /* a rough estimate of the root set size. */
+ /* Initially, GC_free_space_divisor = 4. */
+ /* Increasing its value will use less space */
+ /* but more collection time. Decreasing it */
+ /* will appreciably decrease collection time */
+ /* at the expense of space. */
+ /* GC_free_space_divisor = 1 will effectively */
+ /* disable collections. */
+
+/* Public procedures */
+/*
+ * general purpose allocation routines, with roughly malloc calling conv.
+ * The atomic versions promise that no relevant pointers are contained
+ * in the object. The nonatomic versions guarantee that the new object
+ * is cleared. GC_malloc_stubborn promises that no changes to the object
+ * will occur after GC_end_stubborn_change has been called on the
+ * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
+ * that is scanned for pointers to collectable objects, but is not itself
+ * collectable. GC_malloc_uncollectable and GC_free called on the resulting
+ * object implicitly update GC_non_gc_bytes appropriately.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_malloc(size_t size_in_bytes);
+ extern void * GC_malloc_atomic(size_t size_in_bytes);
+ extern void * GC_malloc_uncollectable(size_t size_in_bytes);
+ extern void * GC_malloc_stubborn(size_t size_in_bytes);
+# else
+ extern char * GC_malloc(/* size_in_bytes */);
+ extern char * GC_malloc_atomic(/* size_in_bytes */);
+ extern char * GC_malloc_uncollectable(/* size_in_bytes */);
+ extern char * GC_malloc_stubborn(/* size_in_bytes */);
+# endif
+
+/* Explicitly deallocate an object. Dangerous if used incorrectly. */
+/* Requires a pointer to the base of an object. */
+/* If the argument is stubborn, it should not be changeable when freed. */
+/* An object should not be enabled for finalization when it is */
+/* explicitly deallocated. */
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void GC_free(void * object_addr);
+# else
+ extern void GC_free(/* object_addr */);
+# endif
+
+/*
+ * Stubborn objects may be changed only if the collector is explicitly informed.
+ * The collector is implicitly informed of a coming change when such
+ * an object is first allocated. The following routines inform the
+ * collector that an object will no longer be changed, or that it will
+ * once again be changed. Only non-NIL pointer stores into the object
+ * are considered to be changes. The argument to GC_end_stubborn_change
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
+ * GC_change_stubborn. (In the second case it may be an interior pointer
+ * within 512 bytes of the beginning of the object.)
+ * There is a performance penalty for allowing more than
+ * one stubborn object to be changed at once, but it is acceptable to
+ * do so. The same applies to dropping stubborn objects that are still
+ * changeable.
+ */
+void GC_change_stubborn(/* p */);
+void GC_end_stubborn_change(/* p */);
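+
+/* A typical lifetime, in outline (a sketch): */
+/* p = GC_malloc_stubborn(sz); -- changes implicitly enabled */
+/* ... initialize the object ... */
+/* GC_end_stubborn_change(p); -- now cheap for the collector */
+/* and, should the object need a later update: */
+/* GC_change_stubborn(p); p -> f = q; GC_end_stubborn_change(p); */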
+
+/* Return a pointer to the base (lowest address) of an object given */
+/* a pointer to a location within the object. */
+/* Return 0 if displaced_pointer doesn't point to within a valid */
+/* object. */
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_base(void * displaced_pointer);
+# else
+ char * GC_base(/* char * displaced_pointer */);
+# endif
+
+/* Given a pointer to the base of an object, return its size in bytes. */
+/* The returned size may be slightly larger than what was originally */
+/* requested. */
+# if defined(__STDC__) || defined(__cplusplus)
+ size_t GC_size(void * object_addr);
+# else
+ size_t GC_size(/* char * object_addr */);
+# endif
+
+/* For compatibility with C library. This is occasionally faster than */
+/* a malloc followed by a bcopy. But if you rely on that, either here */
+/* or with the standard C library, your code is broken. In my */
+/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
+/* The resulting object has the same kind as the original. */
+/* If the argument is stubborn, the result will have changes enabled. */
+/* It is an error to have changes enabled for the original object. */
+# if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
+# else
+ extern char * GC_realloc(/* old_object, new_size_in_bytes */);
+# endif
+
+
+/* Explicitly increase the heap size. */
+/* Returns 0 on failure, 1 on success. */
+extern int GC_expand_hp(/* number_of_4K_blocks */);
+
+/* Clear the set of root segments */
+extern void GC_clear_roots();
+
+/* Add a root segment */
+extern void GC_add_roots(/* low_address, high_address_plus_1 */);
+
+/* Add a displacement to the set of those considered valid by the */
+/* collector. GC_register_displacement(n) means that if p was returned */
+/* by GC_malloc, then (char *)p + n will be considered to be a valid */
+/* pointer to p. N must be small and less than the size of p. */
+/* (All pointers to the interior of objects from the stack are */
+/* considered valid in any case. This applies to heap objects and */
+/* static data.) */
+/* Preferably, this should be called before any other GC procedures. */
+/* Calling it later adds to the probability of excess memory */
+/* retention. */
+void GC_register_displacement(/* n */);
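+
+/* E.g. a client that stores a one-word header at the start of each */
+/* object and hands out pointers past it might call (a sketch): */
+/* GC_register_displacement(sizeof(GC_word)); */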
+
+/* Explicitly trigger a collection. */
+void GC_gcollect();
+
+/* Enable incremental/generational collection. */
+/* Not advisable unless dirty bits are */
+/* available or most heap objects are */
+/* pointer-free (atomic) or immutable. */
+/* Don't use in leak finding mode. */
+void GC_enable_incremental();
+
+/* Debugging (annotated) allocation. GC_gcollect will check */
+/* objects allocated in this way for overwrites, etc. */
+# if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_debug_malloc(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_uncollectable(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_stubborn(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void GC_debug_free(void * object_addr);
+ extern void * GC_debug_realloc(void * old_object,
+ size_t new_size_in_bytes,
+ char * descr_string, int descr_int);
+# else
+ extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
+ extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern char * GC_debug_malloc_uncollectable(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern char * GC_debug_malloc_stubborn(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern void GC_debug_free(/* object_addr */);
+ extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
+ descr_string, descr_int */);
+# endif
+void GC_debug_change_stubborn(/* p */);
+void GC_debug_end_stubborn_change(/* p */);
+# ifdef GC_DEBUG
+# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
+ __FILE__, __LINE__)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
+ __LINE__)
+# define GC_FREE(p) GC_debug_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
+ __LINE__)
+# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# else
+# define GC_MALLOC(sz) GC_malloc(sz)
+# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
+# define GC_REALLOC(old, sz) GC_realloc(old, sz)
+# define GC_FREE(p) GC_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
+# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# endif
+/* The following are included because they are often convenient, and */
+/* reduce the chance for a misspecified size argument. But calls may */
+/* expand to something syntactically incorrect if t is a complicated */
+/* type expression. */
+# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
+# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
+# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
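+
+/* Example (a sketch): */
+/* typedef struct cons { int car; struct cons * cdr; } cons; */
+/* cons * c = GC_NEW(cons); */
+/* This expands to a GC_malloc call, or to GC_debug_malloc with file */
+/* and line information when GC_DEBUG is defined. */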
+
+/* Finalization. Some of these primitives are grossly unsafe. */
+/* The idea is to make them both cheap, and sufficient to build */
+/* a safer layer, closer to PCedar finalization. */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
+/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
+/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
+# if defined(__STDC__) || defined(__cplusplus)
+ typedef void (*GC_finalization_proc)(void * obj, void * client_data);
+# else
+ typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
+# endif
+
+void GC_register_finalizer(/* void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd */);
+ /* When obj is no longer accessible, invoke */
+ /* (*fn)(obj, cd). If a and b are inaccessible, and */
+ /* a points to b (after disappearing links have been */
+ /* made to disappear), then only a will be */
+ /* finalized. (If this does not create any new */
+ /* pointers to b, then b will be finalized after the */
+ /* next collection.) Any finalizable object that */
+ /* is reachable from itself by following one or more */
+ /* pointers will not be finalized (or collected). */
+ /* Thus cycles involving finalizable objects should */
+ /* be avoided, or broken by disappearing links. */
+ /* fn is invoked with the allocation lock held. It may */
+ /* not allocate. (Any storage it might need */
+ /* should be preallocated and passed as part of cd.) */
+ /* fn should terminate as quickly as possible, and */
+ /* defer extended computation. */
+ /* All but the last finalizer registered for an object */
+ /* is ignored. */
+ /* Finalization may be removed by passing 0 as fn. */
+ /* The old finalizer and client data are stored in */
+ /* *ofn and *ocd. */
+ /* Fn is never invoked on an accessible object, */
+ /* provided hidden pointers are converted to real */
+ /* pointers only if the allocation lock is held, and */
+ /* such conversions are not performed by finalization */
+ /* routines. */
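+
+/* Example (a sketch; done and release_resource are illustrative */
+/* names, and the finalizer obeys the no-allocation rule above): */
+/* void done(void * obj, void * cd) { release_resource(cd); } */
+/* ... */
+/* GC_register_finalizer(p, done, resource, 0, 0); */
+/* Passing 0 for ofn and ocd discards the old finalizer and client */
+/* data instead of returning them. */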
+
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable */
+/* objects to be finalized in the correct order. Standard */
+/* use involves calling GC_register_disappearing_link(&p), */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining */
+/* finalization order. */
+int GC_register_disappearing_link(/* void ** link */);
+ /* Link should point to a field of a heap allocated */
+ /* object obj. *link will be cleared when obj is */
+ /* found to be inaccessible. This happens BEFORE any */
+ /* finalization code is invoked, and BEFORE any */
+ /* decisions about finalization order are made. */
+ /* This is useful in telling the finalizer that */
+ /* some pointers are not essential for proper */
+ /* finalization. This may avoid finalization cycles. */
+ /* Note that obj may be resurrected by another */
+ /* finalizer, and thus the clearing of *link may */
+ /* be visible to non-finalization code. */
+ /* There's an argument that an arbitrary action should */
+ /* be allowed here, instead of just clearing a pointer. */
+ /* But this causes problems if that action alters, or */
+ /* examines connectivity. */
+ /* Returns 1 if link was already registered, 0 */
+ /* otherwise. */
+ /* Only exists for backward compatibility. See below: */
+int GC_general_register_disappearing_link(/* void ** link, void * obj */);
+ /* A slight generalization of the above. *link is */
+ /* cleared when obj first becomes inaccessible. This */
+ /* can be used to implement weak pointers easily and */
+ /* safely. Typically link will point to a location */
+ /* holding a disguised pointer to obj. In this way */
+ /* soft pointers are broken before any object */
+ /* reachable from them is finalized. Each link */
+ /* may be registered only once, i.e. with one obj */
+ /* value. This was added after a long email discussion */
+ /* with John Ellis. */
+int GC_unregister_disappearing_link(/* void ** link */);
+ /* Returns 0 if link was not actually registered. */
+ /* Undoes a registration by either of the above two */
+ /* routines. */
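+
+/* Example (a sketch): if finalizable objects a and b point at each */
+/* other, registering one of the back pointers as a disappearing */
+/* link breaks the cycle: */
+/* GC_general_register_disappearing_link((void **)&(b -> partner), a); */
+/* When both become inaccessible, b -> partner is cleared before */
+/* finalization order is decided, so a is finalized before b. */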
+
+/* Auxiliary fns to make finalization work correctly with displaced */
+/* pointers introduced by the debugging allocators. */
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_make_closure(GC_finalization_proc fn, void * data);
+ void GC_debug_invoke_finalizer(void * obj, void * data);
+# else
+ char * GC_make_closure(/* GC_finalization_proc fn, char * data */);
+ void GC_debug_invoke_finalizer(/* void * obj, void * data */);
+# endif
+
+
+/* The following is intended to be used by a higher level */
+/* (e.g. cedar-like) finalization facility. It is expected */
+/* that finalization code will arrange for hidden pointers to */
+/* disappear. Otherwise objects can be accessed after they */
+/* have been collected. */
+# ifdef I_HIDE_POINTERS
+# if defined(__STDC__) || defined(__cplusplus)
+# define HIDE_POINTER(p) (~(size_t)(p))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
+# else
+# define HIDE_POINTER(p) (~(unsigned long)(p))
+# define REVEAL_POINTER(p) ((char *)(HIDE_POINTER(p)))
+# endif
+ /* Converting a hidden pointer to a real pointer requires verifying */
+ /* that the object still exists. This involves acquiring the */
+ /* allocator lock to avoid a race with the collector. */
+
+# if defined(__STDC__) || defined(__cplusplus)
+ typedef void * (*GC_fn_type)();
+ void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
+# else
+ typedef char * (*GC_fn_type)();
+ char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
+# endif
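+
+ /* Example (a sketch): a weak reference to obj might be kept in a */
+ /* heap object's field w -> hidden: */
+ /* w -> hidden = HIDE_POINTER(obj); */
+ /* GC_general_register_disappearing_link((void **)&(w -> hidden), obj); */
+ /* Later dereference it via GC_call_with_alloc_lock, with a function */
+ /* that checks the field is still nonzero before REVEAL_POINTER. */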
+# endif
+
+#endif
--- /dev/null
+/*
+ * Some simple primitives for allocation with explicit type information.
+ * Facilities for dynamic type inference may be added later.
+ * Should be used only for extremely performance critical applications,
+ * or if conservative collector leakage is otherwise a problem (unlikely).
+ * Note that this is implemented completely separately from the rest
+ * of the collector, and is not linked in unless referenced.
+ */
+/* Boehm, March 31, 1994 4:43 pm PST */
+
+#ifndef _GC_TYPED_H
+# define _GC_TYPED_H
+# ifndef _GC_H
+# include "gc.h"
+# endif
+
+typedef GC_word * GC_bitmap;
+ /* The least significant bit of the first word is one if */
+ /* the first word in the object may be a pointer. */
+
+# define GC_get_bit(bm, index) \
+ (((bm)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define GC_set_bit(bm, index) \
+ (bm)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+
+typedef GC_word GC_descr;
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern GC_descr GC_make_descriptor(GC_bitmap bm, size_t len);
+#else
+ extern GC_descr GC_make_descriptor(/* GC_bitmap bm, size_t len */);
+#endif
+ /* Return a type descriptor for the object whose layout */
+ /* is described by the argument. */
+ /* The least significant bit of the first word is one */
+ /* if the first word in the object may be a pointer. */
+ /* The second argument specifies the number of */
+ /* meaningful bits in the bitmap. The actual object */
+ /* may be larger (but not smaller). Any additional */
+ /* words in the object are assumed not to contain */
+ /* pointers. */
+ /* Returns (GC_descr)(-1) on failure (no memory). */
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_malloc_explicitly_typed(size_t size_in_bytes, GC_descr d);
+#else
+ extern char * GC_malloc_explicitly_typed(/* size_in_bytes, descriptor */);
+#endif
+ /* Allocate an object whose layout is described by d. */
+ /* The resulting object MAY NOT BE PASSED TO REALLOC. */
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_calloc_explicitly_typed(size_t nelements,
+ size_t element_size_in_bytes,
+ GC_descr d);
+#else
+ char * GC_calloc_explicitly_typed(/* nelements, size_in_bytes, descriptor */);
+ /* Allocate an array of nelements elements, each of the */
+ /* given size, and with the given descriptor. */
+ /* The element size must be a multiple of the byte */
+ /* alignment required for pointers. E.g. on a 32-bit */
+ /* machine with 16-bit aligned pointers, size_in_bytes */
+ /* must be a multiple of 2. */
+#endif
+
+#endif /* _GC_TYPED_H */
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:58 pm PDT */
+# include "gc_priv.h"
+# include <stdio.h>
+# include <setjmp.h>
+# if defined(OS2) || defined(CX_UX)
+# define _setjmp(b) setjmp(b)
+# define _longjmp(b,v) longjmp(b,v)
+# endif
+
+
+/* Routine to mark from registers that are preserved by the C compiler. */
+/* This must be ported to every new architecture. There is a generic */
+/* version at the end, that is likely, but not guaranteed, to work */
+/* on your architecture. Run the test_setjmp program to see whether */
+/* there is any chance it will work. */
+
+#ifdef AMIGA
+__asm GC_push_regs(
+ register __a2 word a2,
+ register __a3 word a3,
+ register __a4 word a4,
+ register __a5 word a5,
+ register __a6 word a6,
+ register __d2 const word d2,
+ register __d3 const word d3,
+ register __d4 const word d4,
+ register __d5 const word d5,
+ register __d6 const word d6,
+ register __d7 const word d7)
+#else
+ void GC_push_regs()
+#endif
+{
+# ifdef RT
+ register long TMP_SP; /* must be bound to r11 */
+# endif
+# ifdef VAX
+ /* VAX - generic code below does not work under 4.2 */
+ /* r1 through r5 are caller save, and therefore */
+ /* on the stack or dead. */
+ asm("pushl r11"); asm("calls $1,_GC_push_one");
+ asm("pushl r10"); asm("calls $1,_GC_push_one");
+ asm("pushl r9"); asm("calls $1,_GC_push_one");
+ asm("pushl r8"); asm("calls $1,_GC_push_one");
+ asm("pushl r7"); asm("calls $1,_GC_push_one");
+ asm("pushl r6"); asm("calls $1,_GC_push_one");
+# endif
+# if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
+ /* M68K SUNOS - could be replaced by generic code */
+ /* a0, a1 and d1 are caller save */
+ /* and therefore are on stack or dead. */
+
+ asm("subqw #0x4,sp"); /* allocate word on top of stack */
+
+ asm("movl a2,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a3,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a4,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a5,sp@"); asm("jbsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("movl d1,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d2,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d3,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d4,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d5,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d6,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d7,sp@"); asm("jbsr _GC_push_one");
+
+ asm("addqw #0x4,sp"); /* put stack back where it was */
+# endif
+
+# if defined(M68K) && defined(HP)
+ /* M68K HP - could be replaced by generic code */
+ /* a0, a1 and d1 are caller save. */
+
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# endif /* M68K HP */
+
+# ifdef AMIGA
+ /* AMIGA - could be replaced by generic code */
+ /* SAS/C optimizer mangles this so compile with "noopt" */
+ /* a0, a1, d0 and d1 are caller save */
+ GC_push_one(a2);
+ GC_push_one(a3);
+ GC_push_one(a4);
+ GC_push_one(a5);
+ GC_push_one(a6);
+ /* Skip stack pointer */
+ GC_push_one(d2);
+ GC_push_one(d3);
+ GC_push_one(d4);
+ GC_push_one(d5);
+ GC_push_one(d6);
+ GC_push_one(d7);
+# endif
+
+# if defined(I386) &&!defined(OS2) &&!defined(SUNOS5) &&!defined(MSWIN32)
+ /* I386 code, generic code does not appear to work */
+ /* It does appear to work under OS2, and asms don't */
+ asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+# endif
+
+# if defined(I386) && defined(MSWIN32)
+ /* I386 code, Microsoft variant */
+ __asm push eax
+ __asm call GC_push_one
+ __asm add esp,4
+ __asm push ecx
+ __asm call GC_push_one
+ __asm add esp,4
+ __asm push edx
+ __asm call GC_push_one
+ __asm add esp,4
+ __asm push esi
+ __asm call GC_push_one
+ __asm add esp,4
+ __asm push edi
+ __asm call GC_push_one
+ __asm add esp,4
+ __asm push ebx
+ __asm call GC_push_one
+ __asm add esp,4
+# endif
+
+# if defined(I386) && defined(SUNOS5)
+ /* I386 code, SVR4 variant, generic code does not appear to work */
+ asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
+# endif
+
+# ifdef NS32K
+ asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+# endif
+
+# ifdef SPARC
+ {
+ word GC_save_regs_in_stack();
+
+ /* generic code will not work */
+ (void)GC_save_regs_in_stack();
+ }
+# endif
+
+# ifdef RT
+ GC_push_one(TMP_SP); /* GC_push_one from r11 */
+
+ asm("cas r11, r6, r0"); GC_push_one(TMP_SP); /* r6 */
+ asm("cas r11, r7, r0"); GC_push_one(TMP_SP); /* through */
+ asm("cas r11, r8, r0"); GC_push_one(TMP_SP); /* r10 */
+ asm("cas r11, r9, r0"); GC_push_one(TMP_SP);
+ asm("cas r11, r10, r0"); GC_push_one(TMP_SP);
+
+ asm("cas r11, r12, r0"); GC_push_one(TMP_SP); /* r12 */
+ asm("cas r11, r13, r0"); GC_push_one(TMP_SP); /* through */
+ asm("cas r11, r14, r0"); GC_push_one(TMP_SP); /* r15 */
+ asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
+# endif
+
+# if defined(M68K) && defined(SYSV)
+ /* Once again similar to SUN and HP, though setjmp appears to work.
+ --Parag
+ */
+# ifdef __GNUC__
+ asm("subqw #0x4,%sp"); /* allocate word on top of stack */
+
+ asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d7,%sp@"); asm("jbsr GC_push_one");
+
+ asm("addqw #0x4,%sp"); /* put stack back where it was */
+# else /* !__GNUC__*/
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# endif /* !__GNUC__ */
+# endif /* M68K/SYSV */
+
+
+# if defined(HP_PA) || defined(M88K) || (defined(I386) && defined(OS2))
+ /* Generic code */
+ /* The idea is due to Parag Patel at HP. */
+ /* We're not sure whether he would like */
+ /* to be acknowledged for it or not. */
+ {
+ static jmp_buf regs;
+ register word * i = (word *) regs;
+ register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
+
+ /* Setjmp on Sun 3s doesn't clear all of the buffer. */
+ /* That tends to preserve garbage. Clear it. */
+ for (; (char *)i < lim; i++) {
+ *i = 0;
+ }
+ (void) _setjmp(regs);
+ GC_push_all_stack((ptr_t)regs, lim);
+ }
+# endif
+
+ /* other machines... */
+# if !(defined M68K) && !(defined VAX) && !(defined RT)
+# if !(defined SPARC) && !(defined I386) && !(defined NS32K)
+# if !defined(HP_PA) && !defined(M88K)
+ --> bad news <--
+# endif
+# endif
+# endif
+}
+
+/* On register window machines, we need a way to force registers into */
+/* the stack. Return sp. */
+# ifdef SPARC
+ asm(" .seg \"text\"");
+# ifdef SUNOS5
+ asm(" .globl GC_save_regs_in_stack");
+ asm("GC_save_regs_in_stack:");
+# else
+ asm(" .globl _GC_save_regs_in_stack");
+ asm("_GC_save_regs_in_stack:");
+# endif
+ asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
+ asm(" mov %sp,%o0");
+ asm(" retl");
+ asm(" nop");
+
+# ifdef LINT
+ word GC_save_regs_in_stack() { return(0 /* sp really */);}
+# endif
+# endif
+
+
+/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
+/* returns arg. Stack clearing is crucial on SPARC, so we supply */
+/* an assembly version that's more careful. Assumes limit is hotter */
+/* than sp, and limit is 8 byte aligned. */
+#if defined(ASM_CLEAR_CODE) && !defined(THREADS)
+#ifndef SPARC
+ --> fix it
+#endif
+# ifdef SUNOS4
+ asm(".globl _GC_clear_stack_inner");
+ asm("_GC_clear_stack_inner:");
+# else
+ asm(".globl GC_clear_stack_inner");
+ asm("GC_clear_stack_inner:");
+# endif
+ asm("mov %sp,%o2"); /* Save sp */
+ asm("add %sp,-8,%o3"); /* p = sp-8 */
+ asm("clr %g1"); /* [g0,g1] = 0 */
+ asm("add %o1,-0x60,%sp"); /* Move sp out of the way, */
+ /* so that traps still work. */
+ /* Includes some extra words */
+ /* so we can be sloppy below. */
+ asm("loop:");
+ asm("std %g0,[%o3]"); /* *(long long *)p = 0 */
+ asm("cmp %o3,%o1");
+ asm("bgu loop "); /* if (p > limit) goto loop */
+ asm("add %o3,-8,%o3"); /* p -= 8 (delay slot) */
+ asm("retl");
+ asm("mov %o2,%sp"); /* Restore sp., delay slot */
+ /* First argument = %o0 = return value */
+
+# ifdef LINT
+ /*ARGSUSED*/
+ ptr_t GC_clear_stack_inner(arg, limit)
+ ptr_t arg; word limit;
+ { return(arg); }
+# endif
+#endif
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:03 pm PDT */
+
+#include <stdio.h>
+#include "gc_priv.h"
+
+extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
+void GC_extend_size_map(); /* in misc.c. */
+
+/* Allocate lb bytes for an object of kind k. */
+/* Should not be used directly to allocate */
+/* objects such as STUBBORN objects that */
+/* require special handling on allocation. */
+/* First a version that assumes we already */
+/* hold lock: */
+ptr_t GC_generic_malloc_inner(lb, k)
+register word lb;
+register int k;
+{
+register word lw;
+register ptr_t op;
+register ptr_t *opp;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+ if (lw == 0) lw = 1;
+# endif
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+# ifdef MERGE_SIZES
+ if (GC_size_map[lb] == 0) {
+ if (!GC_is_initialized) GC_init_inner();
+ if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
+ return(GC_generic_malloc_inner(lb, k));
+ }
+# else
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ return(GC_generic_malloc_inner(lb, k));
+ }
+# endif
+ op = GC_allocobj(lw, k);
+ if (op == 0) goto out;
+ }
+ /* Here everything is in a consistent state. */
+ /* We assume the following assignment is */
+ /* atomic. If we get aborted */
+ /* after the assignment, we lose an object, */
+ /* but that's benign. */
+ /* Volatile declarations may need to be added */
+ /* to prevent the compiler from breaking things.*/
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ } else {
+ register struct hblk * h;
+ register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
+ + HDR_BYTES + HBLKSIZE-1);
+
+ if (!GC_is_initialized) GC_init_inner();
+ /* Do our share of marking work */
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little((int)n_blocks);
+ lw = ROUNDED_UP_WORDS(lb);
+ while ((h = GC_allochblk(lw, k, 0)) == 0
+ && GC_collect_or_expand(n_blocks));
+ if (h == 0) {
+ op = 0;
+ } else {
+ op = (ptr_t) (h -> hb_body);
+ GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
+ }
+ }
+ GC_words_allocd += lw;
+
+out:
+ return((ptr_t)op);
+}
+
+/* Allocate a composite object of size n bytes. The caller guarantees */
+/* that pointers past the first page are not relevant. Caller holds */
+/* allocation lock. */
+ptr_t GC_malloc_ignore_off_page_inner(lb)
+register size_t lb;
+{
+# ifdef ALL_INTERIOR_POINTERS
+ register struct hblk * h;
+ register word n_blocks;
+ register word lw;
+ register ptr_t op;
+
+ if (lb <= HBLKSIZE)
+ return(GC_generic_malloc_inner((word)lb, NORMAL));
+ n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
+ if (!GC_is_initialized) GC_init_inner();
+ /* Do our share of marking work */
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little((int)n_blocks);
+ lw = ROUNDED_UP_WORDS(lb);
+ while ((h = GC_allochblk(lw, NORMAL, IGNORE_OFF_PAGE)) == 0
+ && GC_collect_or_expand(n_blocks));
+    if (h == 0) {
+        op = 0;
+        lw = 0;		/* Don't count failed allocations.	*/
+    } else {
+        op = (ptr_t) (h -> hb_body);
+        GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
+    }
+ GC_words_allocd += lw;
+ return((ptr_t)op);
+# else
+ return(GC_generic_malloc_inner((word)lb, NORMAL));
+# endif
+}
+
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_malloc_ignore_off_page(size_t lb)
+# else
+ char * GC_malloc_ignore_off_page(lb)
+ register size_t lb;
+# endif
+{
+ register extern_ptr_t result;
+ DCL_LOCK_STATE;
+
+ GC_invoke_finalizers();
+ DISABLE_SIGNALS();
+ LOCK();
+ result = GC_malloc_ignore_off_page_inner(lb);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
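+
+/* Illustrative sketch (not part of the collector; compiled out).	*/
+/* GC_malloc_ignore_off_page is for large objects for which the	*/
+/* client promises that pointers past the first page are never the	*/
+/* only references.  The name make_big is hypothetical.		*/
+#if 0
+static extern_ptr_t big;	/* we keep a pointer to the start alive	*/
+
+static void make_big()
+{
+    big = GC_malloc_ignore_off_page(1024 * 1024);
+}
+#endif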
+
+ptr_t GC_generic_malloc(lb, k)
+register word lb;
+register int k;
+{
+ ptr_t result;
+ DCL_LOCK_STATE;
+
+ GC_invoke_finalizers();
+ DISABLE_SIGNALS();
+ LOCK();
+ result = GC_generic_malloc_inner(lb, k);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
+
+/* Analogous to the above, but assumes a small object size, and */
+/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
+ptr_t GC_generic_malloc_words_small(lw, k)
+register word lw;
+register int k;
+{
+register ptr_t op;
+register ptr_t *opp;
+DCL_LOCK_STATE;
+
+ GC_invoke_finalizers();
+ DISABLE_SIGNALS();
+ LOCK();
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ op = GC_clear_stack(GC_allocobj(lw, k));
+ if (op == 0) goto out;
+ }
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((ptr_t)op);
+}
+
+#if defined(THREADS) && !defined(SRC_M3)
+/* Return a list of 1 or more objects of the indicated size, linked */
+/* through the first word in the object. This has the advantage that */
+/* it acquires the allocation lock only once, and may greatly reduce */
+/* time wasted contending for the allocation lock. Typical usage would */
+/* be in a thread that requires many items of the same size. It would */
+/* keep its own free list in thread-local storage, and call */
+/* GC_malloc_many or friends to replenish it. (We do not round up */
+/* object sizes, since a call indicates the intention to consume many */
+/* objects of exactly this size.) */
+/* Note that the client should usually clear the link field. */
+ptr_t GC_generic_malloc_many(lb, k)
+register word lb;
+register int k;
+{
+ptr_t op;
+register ptr_t p;
+ptr_t *opp;
+word lw;
+register word my_words_allocd;
+DCL_LOCK_STATE;
+
+ if (!SMALL_OBJ(lb)) {
+        op = GC_generic_malloc(lb, k);
+        if (op != 0) obj_link(op) = 0;
+        return(op);
+ }
+ lw = ROUNDED_UP_WORDS(lb);
+ GC_invoke_finalizers();
+ DISABLE_SIGNALS();
+ LOCK();
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ op = GC_clear_stack(GC_allocobj(lw, k));
+ if (op == 0) goto out;
+ }
+ *opp = 0;
+ my_words_allocd = 0;
+ for (p = op; p != 0; p = obj_link(p)) {
+ my_words_allocd += lw;
+ if (my_words_allocd >= BODY_SZ) {
+ *opp = obj_link(p);
+ obj_link(p) = 0;
+ break;
+ }
+ }
+ GC_words_allocd += my_words_allocd;
+
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(op);
+
+}
+
+void * GC_malloc_many(size_t lb)
+{
+ return(GC_generic_malloc_many(lb, NORMAL));
+}
+
+/* Note that the "atomic" version of this would be unsafe, since the */
+/* links would not be seen by the collector. */
+# endif
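+
+/* Illustrative sketch (client side, not part of the collector;	*/
+/* compiled out): replenishing a thread-local free list with		*/
+/* GC_malloc_many.  Objects come back linked through their first	*/
+/* word; we pop one and clear the link field, as advised above.	*/
+/* The names my_free_list and my_alloc8 are hypothetical.		*/
+#if 0
+static void * my_free_list = 0;	/* really in thread-local storage */
+
+static void * my_alloc8()
+{
+    void * result = my_free_list;
+
+    if (result == 0) {
+	my_free_list = GC_malloc_many(8);
+	result = my_free_list;
+	if (result == 0) return(0);	/* allocation failed		*/
+    }
+    my_free_list = *(void **)result;	/* follow link in first word	*/
+    *(void **)result = 0;		/* clear the link field		*/
+    return(result);
+}
+#endif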
+
+#define GENERAL_MALLOC(lb,k) \
+ (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+/* We make the GC_clear_stack call a tail call, hoping to clear more	*/
+/* of the stack.							*/
+
+/* Allocate lb bytes of atomic (pointerfree) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_atomic(size_t lb)
+# else
+ extern_ptr_t GC_malloc_atomic(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t * opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_aobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GENERAL_MALLOC((word)lb, PTRFREE));
+ }
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return(GENERAL_MALLOC((word)lb, PTRFREE));
+ }
+}
+
+/* Allocate lb bytes of composite (pointerful) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc(size_t lb)
+# else
+ extern_ptr_t GC_malloc(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_objfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GENERAL_MALLOC((word)lb, NORMAL));
+ }
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return(GENERAL_MALLOC((word)lb, NORMAL));
+ }
+}
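+
+/* Illustrative sketch (compiled out): typical client calls.  The	*/
+/* atomic version is for data known to contain no pointers, e.g.	*/
+/* character strings; the collector then never scans its contents.	*/
+#if 0
+struct cons { struct cons * cdr; word car; };
+
+static void example_allocs()
+{
+    struct cons * c = (struct cons *)GC_malloc(sizeof(struct cons));
+    char * s = (char *)GC_malloc_atomic(100);	/* pointerfree bytes	*/
+
+    if (c != 0) { c -> cdr = 0; c -> car = 0; }
+    if (s != 0) s[0] = '\0';
+}
+#endif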
+
+/* Allocate lb bytes of pointerful, traced, but not collectable data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_uncollectable(size_t lb)
+# else
+ extern_ptr_t GC_malloc_uncollectable(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+# ifdef ADD_BYTE_AT_END
+ lb--; /* We don't need the extra byte, since this won't be */
+ /* collected anyway. */
+# endif
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_uobjfreelist[lw]);
+ FASTLOCK();
+ if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ GC_set_mark_bit(op);
+ GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ }
+ FASTUNLOCK();
+ op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ } else {
+ op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+    }
+    if (op == 0) return(0);
+ /* We don't need the lock here, since we have an undisguised */
+ /* pointer. We do need to hold the lock while we adjust */
+ /* mark bits. */
+ {
+ register struct hblk * h;
+
+ h = HBLKPTR(op);
+ lw = HDR(h) -> hb_sz;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_set_mark_bit(op);
+ GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((extern_ptr_t) op);
+ }
+}
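+
+/* Illustrative sketch (compiled out): uncollectable objects are	*/
+/* never reclaimed automatically (their mark bits stay set), so they	*/
+/* suit data referenced only from places the collector cannot see;	*/
+/* they must eventually be released with GC_free.			*/
+#if 0
+static word * make_hidden_table()
+{
+    word * tbl = (word *)GC_malloc_uncollectable(100 * sizeof(word));
+
+    /* Hand tbl to code whose memory the collector never scans;	*/
+    /* the caller must eventually call GC_free(tbl).			*/
+    return(tbl);
+}
+#endif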
+
+extern_ptr_t GC_generic_or_special_malloc(lb,knd)
+word lb;
+int knd;
+{
+ switch(knd) {
+# ifdef STUBBORN_ALLOC
+ case STUBBORN:
+ return(GC_malloc_stubborn((size_t)lb));
+# endif
+ case PTRFREE:
+ return(GC_malloc_atomic((size_t)lb));
+ case NORMAL:
+ return(GC_malloc((size_t)lb));
+ case UNCOLLECTABLE:
+ return(GC_malloc_uncollectable((size_t)lb));
+ default:
+ return(GC_generic_malloc(lb,knd));
+ }
+}
+
+
+/* Change the size of the block pointed to by p to contain at least */
+/* lb bytes. The object may be (and quite likely will be) moved. */
+/* The kind (e.g. atomic) is the same as that of the old. */
+/* Shrinking of large blocks is not implemented well. */
+# ifdef __STDC__
+ extern_ptr_t GC_realloc(extern_ptr_t p, size_t lb)
+# else
+ extern_ptr_t GC_realloc(p,lb)
+ extern_ptr_t p;
+ size_t lb;
+# endif
+{
+register struct hblk * h;
+register hdr * hhdr;
+register word sz; /* Current size in bytes */
+register word orig_sz; /* Original sz in bytes */
+int obj_kind;
+
+ if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ obj_kind = hhdr -> hb_obj_kind;
+ sz = WORDS_TO_BYTES(sz);
+ orig_sz = sz;
+
+ if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
+ /* Round it up to the next whole heap block */
+
+ sz = (sz+HDR_BYTES+HBLKSIZE-1)
+ & (~HBLKMASK);
+ sz -= HDR_BYTES;
+ hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+ if (obj_kind == UNCOLLECTABLE) GC_non_gc_bytes += (sz - orig_sz);
+ /* Extra area is already cleared by allochblk. */
+ }
+ if (ADD_SLOP(lb) <= sz) {
+ if (lb >= (sz >> 1)) {
+# ifdef STUBBORN_ALLOC
+ if (obj_kind == STUBBORN) GC_change_stubborn(p);
+# endif
+ if (orig_sz > lb) {
+ /* Clear unneeded part of object to avoid bogus pointer */
+ /* tracing. */
+ /* Safe for stubborn objects. */
+ BZERO(((ptr_t)p) + lb, orig_sz - lb);
+ }
+ return(p);
+ } else {
+ /* shrink */
+ extern_ptr_t result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ /* Could also return original object. But this */
+ /* gives the client warning of imminent disaster. */
+ BCOPY(p, result, lb);
+ GC_free(p);
+ return(result);
+ }
+ } else {
+ /* grow */
+ extern_ptr_t result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ BCOPY(p, result, sz);
+ GC_free(p);
+ return(result);
+ }
+}
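+
+/* Illustrative sketch (compiled out): GC_realloc follows the ANSI	*/
+/* realloc conventions; the object may move, so only the returned	*/
+/* pointer may be used afterwards.  The name grow_buffer is		*/
+/* hypothetical.							*/
+#if 0
+static char * grow_buffer(buf)
+char * buf;				/* from GC_malloc(100), say	*/
+{
+    return((char *)GC_realloc((extern_ptr_t)buf, 200));
+}
+#endif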
+
+/* Explicitly deallocate an object p. */
+# ifdef __STDC__
+ void GC_free(extern_ptr_t p)
+# else
+ void GC_free(p)
+ extern_ptr_t p;
+# endif
+{
+ register struct hblk *h;
+ register hdr *hhdr;
+ register signed_word sz;
+ register ptr_t * flh;
+ register int knd;
+ register struct obj_kind * ok;
+ DCL_LOCK_STATE;
+
+ if (p == 0) return;
+ /* Required by ANSI. It's not my fault ... */
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ knd = hhdr -> hb_obj_kind;
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[knd];
+ if (sz <= MAXOBJSZ) {
+# ifdef THREADS
+ DISABLE_SIGNALS();
+ LOCK();
+# endif
+ GC_mem_freed += sz;
+ /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
+ /* inconsistent. We claim this is benign. */
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ if (ok -> ok_init) {
+ BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
+ }
+ flh = &(ok -> ok_freelist[sz]);
+ obj_link(p) = *flh;
+ *flh = (ptr_t)p;
+# ifdef THREADS
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+ } else {
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_mem_freed += sz;
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ GC_freehblk(h);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ }
+}
+
--- /dev/null
+
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+
+# include <stdio.h>
+# include "gc_priv.h"
+# include "gc_mark.h"
+
+/* We put this here to minimize the risk of inlining. */
+/*VARARGS*/
+void GC_noop() {}
+
+mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0};
+word GC_n_mark_procs = 0;
+
+/* Initialize GC_obj_kinds properly and standard free lists properly. */
+/* This must be done statically since they may be accessed before */
+/* GC_init is called. */
+/* It's done here, since we need to deal with mark descriptors. */
+struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
+/* PTRFREE */ { &GC_aobjfreelist[0], &GC_areclaim_list[0],
+ 0 | DS_LENGTH, FALSE, FALSE },
+/* NORMAL */ { &GC_objfreelist[0], &GC_reclaim_list[0],
+# ifdef ADD_BYTE_AT_END
+ (word)(WORDS_TO_BYTES(-1)) | DS_LENGTH,
+# else
+ 0 | DS_LENGTH,
+# endif
+ TRUE /* add length to descr */, TRUE },
+/* UNCOLLECTABLE */
+ { &GC_uobjfreelist[0], &GC_ureclaim_list[0],
+ 0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
+# ifdef STUBBORN_ALLOC
+/*STUBBORN*/ { &GC_sobjfreelist[0], &GC_sreclaim_list[0],
+ 0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
+# endif
+};
+
+# ifdef STUBBORN_ALLOC
+ int GC_n_kinds = 4;
+# else
+ int GC_n_kinds = 3;
+# endif
+
+
+# define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
+ /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
+ /* multiple of HBLKSIZE. */
+
+/*
+ * Limits of stack for GC_mark routine.
+ * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.) still
+ * need to be marked from.
+ */
+
+word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
+ /* excludes ptrfree pages, etc. */
+
+mse * GC_mark_stack;
+
+word GC_mark_stack_size = 0;
+
+mse * GC_mark_stack_top;
+
+static struct hblk * scan_ptr;
+
+mark_state_t GC_mark_state = MS_NONE;
+
+bool GC_mark_stack_too_small = FALSE;
+
+bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
+ /* objects in the heap? */
+
+bool GC_collection_in_progress()
+{
+ return(GC_mark_state != MS_NONE);
+}
+
+/* clear all mark bits in the header */
+void GC_clear_hdr_marks(hhdr)
+register hdr * hhdr;
+{
+ BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
+}
+
+/*
+ * Clear all mark bits associated with block h.
+ */
+/*ARGSUSED*/
+static void clear_marks_for_block(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) return;
+	/* Mark bits for these are cleared only once the object is 	*/
+	/* explicitly deallocated.  This either frees the whole block,	*/
+	/* or the bit is cleared when the object is put on a free list.*/
+ GC_clear_hdr_marks(hhdr);
+}
+
+/* Slow but general routines for setting/clearing/asking about mark bits */
+void GC_set_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ set_mark_bit_from_hdr(hhdr, word_no);
+}
+
+void GC_clear_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ clear_mark_bit_from_hdr(hhdr, word_no);
+}
+
+bool GC_is_marked(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ return(mark_bit_from_hdr(hhdr, word_no));
+}
+
+
+/*
+ * Clear mark bits in all allocated heap blocks. This invalidates
+ * the marker invariant, and sets GC_mark_state to reflect this.
+ * (This implicitly starts marking to reestablish the invariant.)
+ */
+void GC_clear_marks()
+{
+ GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
+ GC_objects_are_marked = FALSE;
+ GC_mark_state = MS_INVALID;
+ scan_ptr = 0;
+# ifdef GATHERSTATS
+ /* Counters reflect currently marked objects: reset here */
+ GC_composite_in_use = 0;
+ GC_atomic_in_use = 0;
+# endif
+
+}
+
+/* Initiate full marking. */
+void GC_initiate_full()
+{
+# ifdef PRINTSTATS
+ GC_printf2("***>Full mark for collection %lu after %ld allocd bytes\n",
+ (unsigned long) GC_gc_no+1,
+ (long)WORDS_TO_BYTES(GC_words_allocd));
+# endif
+ GC_promote_black_lists();
+ GC_reclaim_or_delete_all();
+ GC_clear_marks();
+ GC_read_dirty();
+# ifdef STUBBORN_ALLOC
+ GC_read_changed();
+# endif
+# ifdef CHECKSUMS
+ {
+ extern void GC_check_dirty();
+
+ GC_check_dirty();
+ }
+# endif
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages = 0;
+# endif
+}
+
+/* Initiate partial marking. */
+/*ARGSUSED*/
+void GC_initiate_partial()
+{
+ if (GC_dirty_maintained) GC_read_dirty();
+# ifdef STUBBORN_ALLOC
+ GC_read_changed();
+# endif
+# ifdef CHECKSUMS
+ {
+ extern void GC_check_dirty();
+
+ if (GC_dirty_maintained) GC_check_dirty();
+ }
+# endif
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages = 0;
+# endif
+ if (GC_mark_state == MS_NONE) {
+ GC_mark_state = MS_PUSH_RESCUERS;
+ } else if (GC_mark_state != MS_INVALID) {
+ ABORT("unexpected state");
+ } /* else this is really a full collection, and mark */
+ /* bits are invalid. */
+ scan_ptr = 0;
+}
+
+
+static void alloc_mark_stack();
+
+/* Perform a small amount of marking. */
+/* We try to touch roughly a page of memory. */
+/* Return TRUE if we just finished a mark phase. */
+bool GC_mark_some()
+{
+ switch(GC_mark_state) {
+ case MS_NONE:
+ return(FALSE);
+
+ case MS_PUSH_RESCUERS:
+ if (GC_mark_stack_top
+ >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ scan_ptr = GC_push_next_marked_dirty(scan_ptr);
+ if (scan_ptr == 0) {
+# ifdef PRINTSTATS
+ GC_printf1("Marked from %lu dirty pages\n",
+ (unsigned long)GC_n_rescuing_pages);
+# endif
+ GC_push_roots(FALSE);
+ GC_objects_are_marked = TRUE;
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ }
+ return(FALSE);
+
+ case MS_PUSH_UNCOLLECTABLE:
+ if (GC_mark_stack_top
+ >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
+ if (scan_ptr == 0) {
+ GC_push_roots(TRUE);
+ GC_objects_are_marked = TRUE;
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ }
+ return(FALSE);
+
+ case MS_ROOTS_PUSHED:
+ if (GC_mark_stack_top >= GC_mark_stack) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ GC_mark_state = MS_NONE;
+ if (GC_mark_stack_too_small) {
+ alloc_mark_stack(2*GC_mark_stack_size);
+ }
+ return(TRUE);
+ }
+
+ case MS_INVALID:
+ case MS_PARTIALLY_INVALID:
+ if (!GC_objects_are_marked) {
+ GC_mark_state = MS_PUSH_UNCOLLECTABLE;
+ return(FALSE);
+ }
+ if (GC_mark_stack_top >= GC_mark_stack) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ }
+ if (scan_ptr == 0
+ && (GC_mark_state == MS_INVALID || GC_mark_stack_too_small)) {
+ alloc_mark_stack(2*GC_mark_stack_size);
+ GC_mark_state = MS_PARTIALLY_INVALID;
+ }
+ scan_ptr = GC_push_next_marked(scan_ptr);
+ if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
+ GC_push_roots(TRUE);
+ GC_objects_are_marked = TRUE;
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ return(FALSE);
+ default:
+ ABORT("GC_mark_some: bad state");
+ return(FALSE);
+ }
+}
+
+
+bool GC_mark_stack_empty()
+{
+ return(GC_mark_stack_top < GC_mark_stack);
+}
+
+#ifdef PROF_MARKER
+ word GC_prof_array[10];
+# define PROF(n) GC_prof_array[n]++
+#else
+# define PROF(n)
+#endif
+
+/* Given a pointer to someplace other than a small object page or the */
+/* first page of a large object, return a pointer either to the */
+/* start of the large object or NIL. */
+/* In the latter case, blacklist the address current.		*/
+/* Returns NIL without blacklisting if current points to a block */
+/* with IGNORE_OFF_PAGE set. */
+/*ARGSUSED*/
+word GC_find_start(current, hhdr)
+register word current;
+register hdr * hhdr;
+{
+# ifdef ALL_INTERIOR_POINTERS
+ if (hhdr != 0) {
+ register word orig = current;
+
+ current = (word)HBLKPTR(current) + HDR_BYTES;
+ do {
+ current = current - HBLKSIZE*(int)hhdr;
+ hhdr = HDR(current);
+ } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
+ /* current points to the start of the large object */
+ if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
+ if ((word *)orig - (word *)current
+ >= (ptrdiff_t)(hhdr->hb_sz)) {
+ /* Pointer past the end of the block */
+ GC_ADD_TO_BLACK_LIST_NORMAL(orig);
+ return(0);
+ }
+ return(current);
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ return(0);
+ }
+# else
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ return(0);
+# endif
+}
+
+mse * GC_signal_mark_stack_overflow(msp)
+mse * msp;
+{
+ GC_mark_state = MS_INVALID;
+# ifdef PRINTSTATS
+ GC_printf1("Mark stack overflow; current size = %lu entries\n",
+	       (unsigned long) GC_mark_stack_size);
+# endif
+ return(msp-INITIAL_MARK_STACK_SIZE/8);
+}
+
+
+/*
+ * Mark objects pointed to by the regions described by
+ * mark stack entries between GC_mark_stack and GC_mark_stack_top,
+ * inclusive. Assumes the upper limit of a mark stack entry
+ * is never 0. A mark stack entry never has size 0.
+ * We try to traverse on the order of a hblk of memory before we return.
+ * Caller is responsible for calling this until the mark stack is empty.
+ */
+void GC_mark_from_mark_stack()
+{
+ mse * GC_mark_stack_reg = GC_mark_stack;
+ mse * GC_mark_stack_top_reg = GC_mark_stack_top;
+ mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
+ int credit = HBLKSIZE; /* Remaining credit for marking work */
+ register word * current_p; /* Pointer to current candidate ptr. */
+ register word current; /* Candidate pointer. */
+ register word * limit; /* (Incl) limit of current candidate */
+ /* range */
+ register word descr;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
+
+ GC_objects_are_marked = TRUE;
+# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
+ while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
+# else
+ while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
+ >= 0) {
+# endif
+ current_p = GC_mark_stack_top_reg -> mse_start;
+ descr = GC_mark_stack_top_reg -> mse_descr;
+ retry:
+ if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
+ word tag = descr & DS_TAGS;
+
+ switch(tag) {
+ case DS_LENGTH:
+ /* Large length. */
+ /* Process part of the range to avoid pushing too much on the */
+ /* stack. */
+ GC_mark_stack_top_reg -> mse_start =
+ limit = current_p + SPLIT_RANGE_WORDS-1;
+ GC_mark_stack_top_reg -> mse_descr -=
+ WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
+ /* Make sure that pointers overlapping the two ranges are */
+ /* considered. */
+ limit += sizeof(word) - ALIGNMENT;
+ break;
+ case DS_BITMAP:
+ GC_mark_stack_top_reg--;
+ descr &= ~DS_TAGS;
+ credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
+ while (descr != 0) {
+ if ((signed_word)descr < 0) {
+ current = *current_p++;
+ descr <<= 1;
+ if ((ptr_t)current < least_ha) continue;
+ if ((ptr_t)current >= greatest_ha) continue;
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
+ } else {
+ descr <<= 1;
+ current_p++;
+ }
+ }
+ continue;
+ case DS_PROC:
+ GC_mark_stack_top_reg--;
+ credit -= PROC_BYTES;
+ GC_mark_stack_top_reg =
+ (*PROC(descr))
+ (current_p, GC_mark_stack_top_reg,
+ mark_stack_limit, ENV(descr));
+ continue;
+ case DS_PER_OBJECT:
+ descr = *(word *)((ptr_t)current_p + descr - tag);
+ goto retry;
+ }
+ } else {
+ GC_mark_stack_top_reg--;
+ limit = (word *)(((ptr_t)current_p) + (word)descr);
+ }
+ /* The simple case in which we're scanning a range. */
+ credit -= (ptr_t)limit - (ptr_t)current_p;
+ limit -= 1;
+ while (current_p <= limit) {
+ current = *current_p;
+ current_p = (word *)((char *)current_p + ALIGNMENT);
+ if ((ptr_t)current < least_ha) continue;
+ if ((ptr_t)current >= greatest_ha) continue;
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
+ }
+ }
+ GC_mark_stack_top = GC_mark_stack_top_reg;
+}
+
+/* Allocate or reallocate space for mark stack of size n entries. */
+/* May silently fail.						   */
+static void alloc_mark_stack(n)
+word n;
+{
+ mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));
+
+ GC_mark_stack_too_small = FALSE;
+ if (GC_mark_stack_size != 0) {
+ if (new_stack != 0) {
+ word displ = HBLKDISPL(GC_mark_stack);
+ word size = GC_mark_stack_size * sizeof(struct ms_entry);
+
+ /* Recycle old space */
+ if (displ == 0) {
+ GC_add_to_heap((struct hblk *)GC_mark_stack, size);
+ } else {
+ GC_add_to_heap((struct hblk *)
+ ((word)GC_mark_stack - displ + HBLKSIZE),
+ size - HBLKSIZE);
+ }
+ GC_mark_stack = new_stack;
+ GC_mark_stack_size = n;
+# ifdef PRINTSTATS
+ GC_printf1("Grew mark stack to %lu frames\n",
+ (unsigned long) GC_mark_stack_size);
+# endif
+ } else {
+# ifdef PRINTSTATS
+ GC_printf1("Failed to grow mark stack to %lu frames\n",
+ (unsigned long) n);
+# endif
+ }
+ } else {
+ if (new_stack == 0) {
+ GC_err_printf0("No space for mark stack\n");
+ EXIT();
+ }
+ GC_mark_stack = new_stack;
+ GC_mark_stack_size = n;
+ }
+ GC_mark_stack_top = GC_mark_stack-1;
+}
+
+void GC_mark_init()
+{
+ alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
+}
+
+/*
+ * Push all locations between b and t onto the mark stack.
+ * b is the first location to be checked. t is one past the last
+ * location to be checked.
+ * Should only be used if there is no possibility of mark stack
+ * overflow.
+ */
+void GC_push_all(bottom, top)
+ptr_t bottom;
+ptr_t top;
+{
+ register word length;
+
+ bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
+ if (top == 0 || bottom == top) return;
+ GC_mark_stack_top++;
+ if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
+ ABORT("unexpected mark stack overflow");
+ }
+ length = top - bottom;
+# if DS_TAGS > ALIGNMENT - 1
+ length += DS_TAGS;
+ length &= ~DS_TAGS;
+# endif
+ GC_mark_stack_top -> mse_start = (word *)bottom;
+ GC_mark_stack_top -> mse_descr = length;
+}
+
+/*
+ * Analogous to the above, but push only those pages that may have been
+ * dirtied. A block h is assumed dirty if dirty_fn(h) != 0.
+ * We use push_fn to actually push the block.
+ * Will not overflow mark stack if push_fn pushes a small fixed number
+ * of entries. (This is invoked only if push_fn pushes a single entry,
+ * or if it marks each object before pushing it, thus ensuring progress
+ * in the event of a stack overflow.)
+ */
+void GC_push_dirty(bottom, top, dirty_fn, push_fn)
+ptr_t bottom;
+ptr_t top;
+int (*dirty_fn)(/* struct hblk * h */);
+void (*push_fn)(/* ptr_t bottom, ptr_t top */);
+{
+ register struct hblk * h;
+
+ bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
+
+ if (top == 0 || bottom == top) return;
+ h = HBLKPTR(bottom + HBLKSIZE);
+ if (top <= (ptr_t) h) {
+ if ((*dirty_fn)(h-1)) {
+ (*push_fn)(bottom, top);
+ }
+ return;
+ }
+ if ((*dirty_fn)(h-1)) {
+ (*push_fn)(bottom, (ptr_t)h);
+ }
+ while ((ptr_t)(h+1) <= top) {
+ if ((*dirty_fn)(h)) {
+ if ((word)(GC_mark_stack_top - GC_mark_stack)
+ > 3 * GC_mark_stack_size / 4) {
+ /* Danger of mark stack overflow */
+ (*push_fn)((ptr_t)h, top);
+ return;
+ } else {
+ (*push_fn)((ptr_t)h, (ptr_t)(h+1));
+ }
+ }
+ h++;
+ }
+ if ((ptr_t)h != top) {
+ if ((*dirty_fn)(h)) {
+ (*push_fn)((ptr_t)h, top);
+ }
+ }
+ if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
+ ABORT("unexpected mark stack overflow");
+ }
+}
+
+# ifndef SMALL_CONFIG
+void GC_push_conditional(bottom, top, all)
+ptr_t bottom;
+ptr_t top;
+bool all;
+{
+ if (all) {
+ if (GC_dirty_maintained) {
+# ifdef PROC_VDB
+ /* Pages that were never dirtied cannot contain pointers */
+ GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
+# else
+ GC_push_all(bottom, top);
+# endif
+ } else {
+ GC_push_all(bottom, top);
+ }
+ } else {
+ GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
+ }
+}
+#endif
+
+/*
+ * Push a single value onto mark stack. Mark from the object pointed to by p.
+ * GC_push_one is normally called by GC_push_regs, and thus must be defined.
+ * P is considered valid even if it is an interior pointer.
+ * Previously marked objects are not pushed. Hence we make progress even
+ * if the mark stack overflows.
+ */
+# define GC_PUSH_ONE_STACK(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,TRUE); \
+ }
+
+/*
+ * As above, but with interior pointer recognition as is
+ * normal for heap pointers.
+ */
+# ifdef ALL_INTERIOR_POINTERS
+# define AIP TRUE
+# else
+# define AIP FALSE
+# endif
+# define GC_PUSH_ONE_HEAP(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,AIP); \
+ }
+
+# ifdef MSWIN32
+ void __cdecl GC_push_one(p)
+# else
+ void GC_push_one(p)
+# endif
+word p;
+{
+ GC_PUSH_ONE_STACK(p);
+}
+
+# ifdef __STDC__
+# define BASE(p) (word)GC_base((void *)(p))
+# else
+# define BASE(p) (word)GC_base((char *)(p))
+# endif
+
+/* As above, but the argument has already passed a preliminary test. */
+void GC_push_one_checked(p, interior_ptrs)
+register word p;
+register bool interior_ptrs;
+{
+ register word r;
+ register hdr * hhdr;
+ register int displ;
+
+ GET_HDR(p, hhdr);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ if (hhdr != 0 && interior_ptrs) {
+ r = BASE(p);
+ hhdr = HDR(r);
+ displ = BYTES_TO_WORDS(HBLKDISPL(r));
+ } else {
+ hhdr = 0;
+ }
+ } else {
+ register map_entry_type map_entry;
+
+ displ = HBLKDISPL(p);
+ map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
+ if (map_entry == OBJ_INVALID) {
+ if (interior_ptrs) {
+ r = BASE(p);
+ displ = BYTES_TO_WORDS(HBLKDISPL(r));
+ if (r == 0) hhdr = 0;
+ } else {
+ hhdr = 0;
+ }
+ } else {
+ displ = BYTES_TO_WORDS(displ);
+ displ -= map_entry;
+ r = (word)((word *)(HBLKPTR(p)) + displ);
+ }
+ }
+ /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
+ /* displ is the word index within the block. */
+ if (hhdr == 0) {
+ if (interior_ptrs) {
+ GC_add_to_black_list_stack(p);
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p);
+ }
+ } else {
+ if (!mark_bit_from_hdr(hhdr, displ)) {
+ set_mark_bit_from_hdr(hhdr, displ);
+ PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
+ &(GC_mark_stack[GC_mark_stack_size]));
+ }
+ }
+}
+
+/*
+ * A version of GC_push_all that treats all interior pointers as valid
+ */
+void GC_push_all_stack(bottom, top)
+ptr_t bottom;
+ptr_t top;
+{
+# ifdef ALL_INTERIOR_POINTERS
+ GC_push_all(bottom, top);
+# else
+ word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
+ register word *p;
+ register word q;
+ register word *lim;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ if (top == 0) return;
+	/* Check all pointers in range and push them if they appear	*/
+	/* to be valid.							*/
+ lim = t - 1 /* longword */;
+ for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
+ q = *p;
+ GC_PUSH_ONE_STACK(q);
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+# endif
+}
+
+#ifndef SMALL_CONFIG
+/* Push all objects reachable from marked objects in the given block */
+/* of size 1 objects. */
+void GC_push_marked1(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i++;
+ mark_word >>= 1;
+ }
+ p += WORDSZ;
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+}
+
+
+/* Push all objects reachable from marked objects in the given block */
+/* of size 2 objects. */
+void GC_push_marked2(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+1];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i += 2;
+ mark_word >>= 2;
+ }
+ p += WORDSZ;
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+}
+
+/* Push all objects reachable from marked objects in the given block */
+/* of size 4 objects. */
+/* There is a risk of mark stack overflow here. But we handle that. */
+/* And only unmarked objects get pushed, so it's not very likely. */
+void GC_push_marked4(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+1];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+2];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+3];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i += 4;
+ mark_word >>= 4;
+ }
+ p += WORDSZ;
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+}
+
+#endif /* SMALL_CONFIG */
+
+/* Push all objects reachable from marked objects in the given block */
+void GC_push_marked(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ register int sz = hhdr -> hb_sz;
+ register word * p;
+ register int word_no;
+ register word * lim;
+ register mse * GC_mark_stack_top_reg;
+ register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
+
+ /* Some quick shortcuts: */
+ if (hhdr -> hb_obj_kind == PTRFREE) return;
+ if (GC_block_empty(hhdr)/* nothing marked */) return;
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages++;
+# endif
+ GC_objects_are_marked = TRUE;
+ if (sz > MAXOBJSZ) {
+ lim = (word *)(h + 1);
+ } else {
+ lim = (word *)(h + 1) - sz;
+ }
+
+ switch(sz) {
+# ifndef SMALL_CONFIG
+ case 1:
+ GC_push_marked1(h, hhdr);
+ break;
+ case 2:
+ GC_push_marked2(h, hhdr);
+ break;
+ case 4:
+ GC_push_marked4(h, hhdr);
+ break;
+# endif
+ default:
+ GC_mark_stack_top_reg = GC_mark_stack_top;
+ for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
+ p += sz, word_no += sz) {
+ /* This needs manual optimization: */
+ if (mark_bit_from_hdr(hhdr, word_no)) {
+ /* Mark from fields inside the object */
+ PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
+# ifdef GATHERSTATS
+ /* Subtract this object from total, since it was */
+ /* added in twice. */
+ GC_composite_in_use -= sz;
+# endif
+ }
+ }
+ GC_mark_stack_top = GC_mark_stack_top_reg;
+ }
+}
+
+#ifndef SMALL_CONFIG
+/* Test whether any page in the given block is dirty */
+bool GC_block_was_dirty(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ register int sz = hhdr -> hb_sz;
+
+ if (sz < MAXOBJSZ) {
+ return(GC_page_was_dirty(h));
+ } else {
+ register ptr_t p = (ptr_t)h;
+ sz += HDR_WORDS;
+ sz = WORDS_TO_BYTES(sz);
+ while (p < (ptr_t)h + sz) {
+ if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
+ p += HBLKSIZE;
+ }
+ return(FALSE);
+ }
+}
+#endif /* SMALL_CONFIG */
+
+/* Apply GC_push_marked to the next block at or after h, and return */
+/* the address of the block following it.			     */
+struct hblk * GC_push_next_marked(h)
+struct hblk *h;
+{
+ register hdr * hhdr;
+
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
+}
+
+#ifndef SMALL_CONFIG
+/* Identical to above, but mark only from dirty pages */
+struct hblk * GC_push_next_marked_dirty(h)
+struct hblk *h;
+{
+ register hdr * hhdr = HDR(h);
+
+ if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
+ for (;;) {
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+# ifdef STUBBORN_ALLOC
+ if (hhdr -> hb_obj_kind == STUBBORN) {
+ if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
+ break;
+ }
+ } else {
+ if (GC_block_was_dirty(h, hhdr)) break;
+ }
+# else
+ if (GC_block_was_dirty(h, hhdr)) break;
+# endif
+ h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
+}
+#endif
+
+/* Similar to above, but for uncollectable pages. Needed since we */
+/* do not clear marks for such pages, even for full collections. */
+struct hblk * GC_push_next_marked_uncollectable(h)
+struct hblk *h;
+{
+ register hdr * hhdr = HDR(h);
+
+ for (;;) {
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
+ h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
+}
+
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:58 pm PDT */
+# include <stdio.h>
+# include "gc_priv.h"
+
+# ifdef PCR
+# define MAX_ROOT_SETS 1024
+# else
+# ifdef MSWIN32
+# define MAX_ROOT_SETS 512
+ /* Under NT, we add only written pages, which can result */
+ /* in many small root sets. */
+# else
+# define MAX_ROOT_SETS 64
+# endif
+# endif
+
+/* Data structure for list of root sets. */
+/* We keep a hash table, so that we can filter out duplicate additions. */
+/* Under Win32, we need to do a better job of filtering overlaps, so */
+/* we resort to sequential search, and pay the price. */
+struct roots {
+ ptr_t r_start;
+ ptr_t r_end;
+# ifndef MSWIN32
+ struct roots * r_next;
+# endif
+};
+
+static struct roots static_roots[MAX_ROOT_SETS];
+
+static int n_root_sets = 0;
+
+ /* static_roots[0..n_root_sets) contains the valid root sets. */
+
+#ifndef MSWIN32
+# define LOG_RT_SIZE 6
+# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
+
+ static struct roots * root_index[RT_SIZE];
+ /* Hash table header. Used only to check whether a range is */
+ /* already present. */
+
+static int rt_hash(addr)
+char * addr;
+{
+ word result = (word) addr;
+# if CPP_WORDSZ > 8*LOG_RT_SIZE
+ result ^= result >> 8*LOG_RT_SIZE;
+# endif
+# if CPP_WORDSZ > 4*LOG_RT_SIZE
+ result ^= result >> 4*LOG_RT_SIZE;
+# endif
+ result ^= result >> 2*LOG_RT_SIZE;
+ result ^= result >> LOG_RT_SIZE;
+ result &= (RT_SIZE-1);
+ return(result);
+}
+
+/* Is a range starting at b already in the table? If so return a */
+/* pointer to it, else NIL. */
+struct roots * GC_roots_present(b)
+char *b;
+{
+ register int h = rt_hash(b);
+ register struct roots *p = root_index[h];
+
+ while (p != 0) {
+ if (p -> r_start == (ptr_t)b) return(p);
+ p = p -> r_next;
+ }
+    return(0);
+}
+
+/* Add the given root structure to the index. */
+static void add_roots_to_index(p)
+struct roots *p;
+{
+ register int h = rt_hash(p -> r_start);
+
+ p -> r_next = root_index[h];
+ root_index[h] = p;
+}
+
+# else /* MSWIN32 */
+
+# define add_roots_to_index(p)
+
+# endif
+
+
+
+
+word GC_root_size = 0;
+
+void GC_add_roots(b, e)
+char * b; char * e;
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_add_roots_inner(b, e);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
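+
+/* Illustrative sketch (compiled out): a client registering an extra	*/
+/* data segment, e.g. memory obtained from a custom allocator, as a	*/
+/* root set.  The segment symbols are hypothetical.			*/
+#if 0
+extern char my_seg_start[], my_seg_end[];
+
+static void register_my_roots()
+{
+    GC_add_roots(my_seg_start, my_seg_end);	/* adds [start, end)	*/
+}
+#endif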
+
+
+/* Add [b,e) to the root set. Adding the same interval a second time */
+/* is a moderately fast noop, and hence benign. We do not handle */
+/* different but overlapping intervals efficiently. (We do handle */
+/* them correctly.) */
+void GC_add_roots_inner(b, e)
+char * b; char * e;
+{
+ struct roots * old;
+
+ /* We exclude GC data structures from root sets. It's usually safe */
+ /* to mark from those, but it is a waste of time. */
+ if ( (ptr_t)b < endGC_arrays && (ptr_t)e > beginGC_arrays) {
+ if ((ptr_t)e <= endGC_arrays) {
+ if ((ptr_t)b >= beginGC_arrays) return;
+ e = (char *)beginGC_arrays;
+ } else if ((ptr_t)b >= beginGC_arrays) {
+ b = (char *)endGC_arrays;
+ } else {
+ GC_add_roots_inner(b, (char *)beginGC_arrays);
+ GC_add_roots_inner((char *)endGC_arrays, e);
+ return;
+ }
+ }
+# ifdef MSWIN32
+ /* Spend the time to ensure that there are no overlapping */
+ /* or adjacent intervals. */
+ /* This could be done faster with e.g. a */
+ /* balanced tree. But the execution time here is */
+ /* virtually guaranteed to be dominated by the time it */
+ /* takes to scan the roots. */
+ {
+ register int i;
+
+ for (i = 0; i < n_root_sets; i++) {
+ old = static_roots + i;
+ if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
+ if ((ptr_t)b < old -> r_start) {
+ old -> r_start = (ptr_t)b;
+ }
+ if ((ptr_t)e > old -> r_end) {
+ old -> r_end = (ptr_t)e;
+ }
+ break;
+ }
+ }
+ if (i < n_root_sets) {
+ /* merge other overlapping intervals */
+ struct roots *other;
+
+ for (i++; i < n_root_sets; i++) {
+ other = static_roots + i;
+ b = (char *)(other -> r_start);
+ e = (char *)(other -> r_end);
+ if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
+ if ((ptr_t)b < old -> r_start) {
+ old -> r_start = (ptr_t)b;
+ }
+ if ((ptr_t)e > old -> r_end) {
+ old -> r_end = (ptr_t)e;
+ }
+ /* Delete this entry. */
+ other -> r_start = static_roots[n_root_sets-1].r_start;
+ other -> r_end = static_roots[n_root_sets-1].r_end;
+ n_root_sets--;
+ }
+ }
+ return;
+ }
+ }
+# else
+ old = GC_roots_present(b);
+ if (old != 0) {
+ if ((ptr_t)e <= old -> r_end) /* already there */ return;
+ /* else extend */
+ GC_root_size += (ptr_t)e - old -> r_end;
+ old -> r_end = (ptr_t)e;
+ return;
+ }
+# endif
+ if (n_root_sets == MAX_ROOT_SETS) {
+ ABORT("Too many root sets\n");
+ }
+ static_roots[n_root_sets].r_start = (ptr_t)b;
+ static_roots[n_root_sets].r_end = (ptr_t)e;
+# ifndef MSWIN32
+ static_roots[n_root_sets].r_next = 0;
+# endif
+ add_roots_to_index(static_roots + n_root_sets);
+ GC_root_size += (ptr_t)e - (ptr_t)b;
+ n_root_sets++;
+}
+
+void GC_clear_roots()
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ n_root_sets = 0;
+ GC_root_size = 0;
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+# ifndef THREADS
+ptr_t GC_approx_sp()
+{
+ word dummy;
+
+ return((ptr_t)(&dummy));
+}
+# endif
+
+/*
+ * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
+ * on groups of pointers) on every top level accessible pointer.
+ * If all is FALSE, arrange to push only possibly altered values.
+ */
+
+void GC_push_roots(all)
+bool all;
+{
+ register int i;
+
+ /*
+ * push registers - i.e., call GC_push_one(r) for each
+ * register contents r.
+ */
+    GC_push_regs();    /* usually defined in mach_dep.c */
+
+ /*
+ * Next push static data. This must happen early on, since it's
+ * not robust against mark stack overflow.
+ */
+ /* Reregister dynamic libraries, in case one got added. */
+# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(PCR)) \
+ && !defined(SRC_M3)
+ GC_register_dynamic_libraries();
+# endif
+ /* Mark everything in static data areas */
+ for (i = 0; i < n_root_sets; i++) {
+ GC_push_conditional(static_roots[i].r_start,
+ static_roots[i].r_end, all);
+ }
+
+ /*
+ * Now traverse stacks.
+ */
+# ifndef THREADS
+ /* Mark everything on the stack. */
+# ifdef STACK_GROWS_DOWN
+ GC_push_all_stack( GC_approx_sp(), GC_stackbottom );
+# else
+ GC_push_all_stack( GC_stackbottom, GC_approx_sp() );
+# endif
+# endif
+ if (GC_push_other_roots != 0) (*GC_push_other_roots)();
+ /* In the threads case, this also pushes thread stacks. */
+}
+
--- /dev/null
+# define call_push(x) move $4,x; jal GC_push_one
+
+ .text
+ # Mark from machine registers that are saved by C compiler
+ .globl GC_push_regs
+ .ent GC_push_regs
+GC_push_regs:
+ subu $sp,8 ## Need to save only return address
+ sw $31,4($sp)
+ .mask 0x80000000,-4
+ .frame $sp,8,$31
+ call_push($2)
+ call_push($3)
+ call_push($16)
+ call_push($17)
+ call_push($18)
+ call_push($19)
+ call_push($20)
+ call_push($21)
+ call_push($22)
+ call_push($23)
+ call_push($30)
+ lw $31,4($sp)
+ addu $sp,8
+ j $31
+ .end GC_push_regs
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:04 pm PDT */
+
+#define DEBUG /* Some run-time consistency checks */
+#undef DEBUG
+#define VERBOSE
+#undef VERBOSE
+
+#include <stdio.h>
+#include <signal.h>
+#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
+#include "gc_priv.h"
+
+# ifdef THREADS
+# ifdef PCR
+# include "il/PCR_IL.h"
+ PCR_Th_ML GC_allocate_ml;
+# else
+# ifdef SRC_M3
+	    /* Critical section counter is defined in the M3 runtime. 	*/
+	    /* That's all we use.					*/
+# else
+# ifdef SOLARIS_THREADS
+ mutex_t GC_allocate_ml; /* Implicitly initialized. */
+# else
+ --> declare allocator lock here
+# endif
+# endif
+# endif
+# endif
+
+GC_FAR struct _GC_arrays GC_arrays = { 0 };
+
+
+bool GC_debugging_started = FALSE;
+ /* defined here so we don't have to load debug_malloc.o */
+
+void (*GC_check_heap)() = (void (*)())0;
+
+ptr_t GC_stackbottom = 0;
+
+bool GC_dont_gc = 0;
+
+bool GC_quiet = 0;
+
+extern signed_word GC_mem_found;
+
+# ifdef MERGE_SIZES
+ /* Set things up so that GC_size_map[i] >= words(i), */
+ /* but not too much bigger */
+ /* and so that size_map contains relatively few distinct entries */
+ /* This is stolen from Russ Atkinson's Cedar quantization */
+  /* algorithm (but we precompute it).				*/
+
+
+ void GC_init_size_map()
+ {
+ register unsigned i;
+
+ /* Map size 0 to 1. This avoids problems at lower levels. */
+ GC_size_map[0] = 1;
+ /* One word objects don't have to be 2 word aligned. */
+ for (i = 1; i < sizeof(word); i++) {
+ GC_size_map[i] = 1;
+ }
+ GC_size_map[sizeof(word)] = ROUNDED_UP_WORDS(sizeof(word));
+ for (i = sizeof(word) + 1; i <= 8 * sizeof(word); i++) {
+# ifdef ALIGN_DOUBLE
+ GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
+# else
+ GC_size_map[i] = ROUNDED_UP_WORDS(i);
+# endif
+ }
+ for (i = 8*sizeof(word) + 1; i <= 16 * sizeof(word); i++) {
+ GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
+ }
+ /* We leave the rest of the array to be filled in on demand. */
+ }
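+
+  /* Worked example (assuming 4 byte words and no ALIGN_DOUBLE):	*/
+  /* sizes 0..4 map to 1 word; 5..32 round up exactly, so e.g.		*/
+  /* GC_size_map[13] = 4; 33..64 round up to an even word count,	*/
+  /* so GC_size_map[33] = 10 rather than 9.  This keeps the number	*/
+  /* of distinct free lists small.					*/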
+
+  /* Fill in additional entries in GC_size_map, including the ith one. */
+ /* We assume the ith entry is currently 0. */
+ /* Note that a filled in section of the array ending at n always */
+ /* has length at least n/4. */
+ void GC_extend_size_map(i)
+ word i;
+ {
+ word orig_word_sz = ROUNDED_UP_WORDS(i);
+ word word_sz = orig_word_sz;
+ register word byte_sz = WORDS_TO_BYTES(word_sz);
+ /* The size we try to preserve. */
+  				/* Close to i, unless this would	*/
+ /* introduce too many distinct sizes. */
+ word smaller_than_i = byte_sz - (byte_sz >> 3);
+ word much_smaller_than_i = byte_sz - (byte_sz >> 2);
+ register word low_limit; /* The lowest indexed entry we */
+ /* initialize. */
+ register word j;
+
+ if (GC_size_map[smaller_than_i] == 0) {
+ low_limit = much_smaller_than_i;
+ while (GC_size_map[low_limit] != 0) low_limit++;
+ } else {
+ low_limit = smaller_than_i + 1;
+ while (GC_size_map[low_limit] != 0) low_limit++;
+ word_sz = ROUNDED_UP_WORDS(low_limit);
+ word_sz += word_sz >> 3;
+ if (word_sz < orig_word_sz) word_sz = orig_word_sz;
+ }
+# ifdef ALIGN_DOUBLE
+ word_sz += 1;
+ word_sz &= ~1;
+# endif
+ if (word_sz > MAXOBJSZ) {
+ word_sz = MAXOBJSZ;
+ }
+ byte_sz = WORDS_TO_BYTES(word_sz);
+# ifdef ADD_BYTE_AT_END
+ /* We need one extra byte; don't fill in GC_size_map[byte_sz] */
+ byte_sz--;
+# endif
+
+ for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = word_sz;
+ }
+# endif
+
+
+/*
+ * The following is a gross hack to deal with a problem that can occur
+ * on machines that are sloppy about stack frame sizes, notably SPARC.
+ * Bogus pointers may be written to the stack and not cleared for
+ * a LONG time, because they always fall into holes in stack frames
+ * that are not written. We partially address this by clearing
+ * sections of the stack whenever we get control.
+ */
+word GC_stack_last_cleared = 0;	/* GC_gc_no when we last did this */
+# define CLEAR_SIZE 213
+# define DEGRADE_RATE 50
+
+word GC_min_sp; /* Coolest stack pointer value from which we've */
+ /* already cleared the stack. */
+
+# ifdef STACK_GROWS_DOWN
+# define COOLER_THAN >
+# define HOTTER_THAN <
+# define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
+ else {(x) = (word)ONES;}
+# define MAKE_HOTTER(x,y) (x) -= (y)
+# else
+# define COOLER_THAN <
+# define HOTTER_THAN >
+# define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
+# define MAKE_HOTTER(x,y) (x) += (y)
+# endif
+
+word GC_high_water;
+ /* "hottest" stack pointer value we have seen */
+ /* recently. Degrades over time. */
+
+word GC_stack_upper_bound()
+{
+ word dummy;
+
+ return((word)(&dummy));
+}
+
+word GC_words_allocd_at_reset;
+
+#if defined(ASM_CLEAR_CODE) && !defined(THREADS)
+ extern ptr_t GC_clear_stack_inner();
+#endif
+
+#if !defined(ASM_CLEAR_CODE) && !defined(THREADS)
+/* Clear the stack up to about limit. Return arg. */
+/*ARGSUSED*/
+ptr_t GC_clear_stack_inner(arg, limit)
+ptr_t arg;
+word limit;
+{
+ word dummy[CLEAR_SIZE];
+
+ BZERO(dummy, CLEAR_SIZE*sizeof(word));
+ if ((word)(dummy) COOLER_THAN limit) {
+ (void) GC_clear_stack_inner(arg, limit);
+ }
+ /* Make sure the recursive call is not a tail call, and the bzero */
+ /* call is not recognized as dead code. */
+ GC_noop(dummy);
+ return(arg);
+}
+#endif
+
+
+/* Clear some of the inaccessible part of the stack. Returns its */
+/* argument, so it can be used in a tail call position, hence clearing */
+/* another frame. */
+ptr_t GC_clear_stack(arg)
+ptr_t arg;
+{
+ register word sp = GC_stack_upper_bound();
+ register word limit;
+# ifdef THREADS
+	word dummy[CLEAR_SIZE];
+# endif
+
+# define SLOP 400
+ /* Extra bytes we clear every time. This clears our own */
+ /* activation record, and should cause more frequent */
+ /* clearing near the cold end of the stack, a good thing. */
+# define GC_SLOP 4000
+	/* We make GC_high_water this much hotter than we really saw	*/
+	/* it, to cover for GC noise etc. above our current frame.	*/
+# define CLEAR_THRESHOLD 100000
+ /* We restart the clearing process after this many bytes of */
+ /* allocation. Otherwise very heavily recursive programs */
+ /* with sparse stacks may result in heaps that grow almost */
+ /* without bounds. As the heap gets larger, collection */
+ /* frequency decreases, thus clearing frequency would decrease, */
+ /* thus more junk remains accessible, thus the heap gets */
+ /* larger ... */
+# ifdef THREADS
+ BZERO(dummy, CLEAR_SIZE*sizeof(word));
+# else
+ if (GC_gc_no > GC_stack_last_cleared) {
+ /* Start things over, so we clear the entire stack again */
+ if (GC_stack_last_cleared == 0) GC_high_water = (word) GC_stackbottom;
+ GC_min_sp = GC_high_water;
+ GC_stack_last_cleared = GC_gc_no;
+ GC_words_allocd_at_reset = GC_words_allocd;
+ }
+ /* Adjust GC_high_water */
+ MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
+ if (sp HOTTER_THAN GC_high_water) {
+ GC_high_water = sp;
+ }
+ MAKE_HOTTER(GC_high_water, GC_SLOP);
+ limit = GC_min_sp;
+ MAKE_HOTTER(limit, SLOP);
+ if (sp COOLER_THAN limit) {
+ limit &= ~0xf; /* Make it sufficiently aligned for assembly */
+ /* implementations of GC_clear_stack_inner. */
+ GC_min_sp = sp;
+ return(GC_clear_stack_inner(arg, limit));
+ } else if (WORDS_TO_BYTES(GC_words_allocd - GC_words_allocd_at_reset)
+ > CLEAR_THRESHOLD) {
+ /* Restart clearing process, but limit how much clearing we do. */
+ GC_min_sp = sp;
+ MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
+ if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
+ GC_words_allocd_at_reset = GC_words_allocd;
+ }
+# endif
+ return(arg);
+}
+
+
+/* Return a pointer to the base address of p, given a pointer to an	*/
+/* address within an object.  Return 0 otherwise.			*/
+# ifdef __STDC__
+ extern_ptr_t GC_base(extern_ptr_t p)
+# else
+ extern_ptr_t GC_base(p)
+ extern_ptr_t p;
+# endif
+{
+ register word r;
+ register struct hblk *h;
+ register hdr *candidate_hdr;
+
+ r = (word)p;
+ h = HBLKPTR(r);
+ candidate_hdr = HDR(r);
+ if (candidate_hdr == 0) return(0);
+ /* If it's a pointer to the middle of a large object, move it */
+ /* to the beginning. */
+ while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
+ h = h - (int)candidate_hdr;
+ r = (word)h + HDR_BYTES;
+ candidate_hdr = HDR(h);
+ }
+ if (candidate_hdr -> hb_map == GC_invalid_map) return(0);
+ /* Make sure r points to the beginning of the object */
+ r &= ~(WORDS_TO_BYTES(1) - 1);
+ {
+ register int offset =
+ (word *)r - (word *)(HBLKPTR(r)) - HDR_WORDS;
+ register signed_word sz = candidate_hdr -> hb_sz;
+ register int correction;
+
+ correction = offset % sz;
+ r -= (WORDS_TO_BYTES(correction));
+ if (((word *)r + sz) > (word *)(h + 1)
+ && sz <= BYTES_TO_WORDS(HBLKSIZE) - HDR_WORDS) {
+ return(0);
+ }
+ }
+ return((extern_ptr_t)r);
+}
+
+/* Return the size of an object, given a pointer to its base. */
+/* (For small objects this also happens to work from interior pointers, */
+/* but that shouldn't be relied upon.) */
+# ifdef __STDC__
+ size_t GC_size(extern_ptr_t p)
+# else
+ size_t GC_size(p)
+ extern_ptr_t p;
+# endif
+{
+ register int sz;
+ register hdr * hhdr = HDR(p);
+
+ sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ if (sz < 0) {
+ return(-sz);
+ } else {
+ return(sz);
+ }
+}
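+
+/* Illustrative sketch (compiled out): recovering an object's extent	*/
+/* from an interior pointer by combining GC_base and GC_size.  The	*/
+/* name show_extent is hypothetical.					*/
+#if 0
+static void show_extent(q)
+extern_ptr_t q;		/* may point into the middle of an object	*/
+{
+    extern_ptr_t base = GC_base(q);
+
+    if (base != 0) {
+	size_t len = GC_size(base);
+	/* The object containing q occupies [base, base+len).	*/
+    }
+}
+#endif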
+
+size_t GC_get_heap_size()
+{
+ return ((size_t) GC_heapsize);
+}
+
+bool GC_is_initialized = FALSE;
+
+void GC_init()
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_init_inner();
+ UNLOCK();
+ ENABLE_SIGNALS();
+
+}
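+
+/* Illustrative sketch (compiled out): explicit initialization is	*/
+/* optional, since the allocation routines call GC_init_inner on	*/
+/* first use, but calling GC_init early keeps the work out of the	*/
+/* first allocation and runs the sanity checks below right away.	*/
+#if 0
+int main()
+{
+    GC_init();
+    /* ... allocate with GC_malloc and friends ... */
+    return(0);
+}
+#endif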
+
+#ifdef MSWIN32
+ extern void GC_init_win32();
+#endif
+
+void GC_init_inner()
+{
+ word dummy;
+
+ if (GC_is_initialized) return;
+ GC_is_initialized = TRUE;
+# ifdef MSWIN32
+ GC_init_win32();
+# endif
+# ifdef SOLARIS_THREADS
+ /* We need dirty bits in order to find live stack sections. */
+ GC_dirty_init();
+# endif
+# if !defined(THREADS) || defined(SOLARIS_THREADS)
+ if (GC_stackbottom == 0) {
+ GC_stackbottom = GC_get_stack_base();
+ }
+# endif
+ if (sizeof (ptr_t) != sizeof(word)) {
+ ABORT("sizeof (ptr_t) != sizeof(word)\n");
+ }
+ if (sizeof (signed_word) != sizeof(word)) {
+ ABORT("sizeof (signed_word) != sizeof(word)\n");
+ }
+ if (sizeof (struct hblk) != HBLKSIZE) {
+ ABORT("sizeof (struct hblk) != HBLKSIZE\n");
+ }
+# ifndef THREADS
+# if defined(STACK_GROWS_UP) && defined(STACK_GROWS_DOWN)
+ ABORT(
+ "Only one of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
+# endif
+# if !defined(STACK_GROWS_UP) && !defined(STACK_GROWS_DOWN)
+ ABORT(
+ "One of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
+# endif
+# ifdef STACK_GROWS_DOWN
+ if ((word)(&dummy) > (word)GC_stackbottom) {
+ GC_err_printf0(
+ "STACK_GROWS_DOWN is defd, but stack appears to grow up\n");
+ GC_err_printf2("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
+ (unsigned long) (&dummy),
+ (unsigned long) GC_stackbottom);
+ ABORT("stack direction 3\n");
+ }
+# else
+ if ((word)(&dummy) < (word)GC_stackbottom) {
+ GC_err_printf0(
+ "STACK_GROWS_UP is defd, but stack appears to grow down\n");
+ GC_err_printf2("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
+ (unsigned long) (&dummy),
+ (unsigned long) GC_stackbottom);
+ ABORT("stack direction 4");
+ }
+# endif
+# endif
+# if !defined(_AUX_SOURCE) || defined(__GNUC__)
+ if ((word)(-1) < (word)0) {
+ GC_err_printf0("The type word should be an unsigned integer type\n");
+ GC_err_printf0("It appears to be signed\n");
+ ABORT("word");
+ }
+# endif
+ if ((signed_word)(-1) >= (signed_word)0) {
+ GC_err_printf0(
+ "The type signed_word should be a signed integer type\n");
+ GC_err_printf0("It appears to be unsigned\n");
+ ABORT("signed_word");
+ }
+
+ GC_init_headers();
+ /* Add initial guess of root sets */
+ GC_register_data_segments();
+ GC_bl_init();
+ GC_mark_init();
+ if (!GC_expand_hp_inner((word)MINHINCR)) {
+ GC_err_printf0("Can't start up: not enough memory\n");
+ EXIT();
+ }
+ /* Preallocate large object map. It's otherwise inconvenient to */
+ /* deal with failure. */
+ if (!GC_add_map_entry((word)0)) {
+ GC_err_printf0("Can't start up: not enough memory\n");
+ EXIT();
+ }
+ GC_register_displacement_inner(0L);
+# ifdef MERGE_SIZES
+ GC_init_size_map();
+# endif
+# ifdef PCR
+ PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever);
+ PCR_IL_Unlock();
+ GC_pcr_install();
+# endif
+ /* Get black list set up */
+ GC_gcollect_inner();
+# ifdef STUBBORN_ALLOC
+ GC_stubborn_init();
+# endif
+ /* Convince lint that some things are used */
+# ifdef LINT
+ {
+ extern char * GC_copyright[];
+ extern GC_read();
+
+ GC_noop(GC_copyright, GC_find_header, GC_print_block_list,
+ GC_push_one, GC_call_with_alloc_lock, GC_read,
+ GC_print_hblkfreelist, GC_dont_expand);
+ }
+# endif
+}
+
+void GC_enable_incremental()
+{
+ DCL_LOCK_STATE;
+
+# ifndef FIND_LEAK
+ DISABLE_SIGNALS();
+ LOCK();
+ if (GC_incremental) goto out;
+# ifndef SOLARIS_THREADS
+ GC_dirty_init();
+# endif
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ if (GC_dont_gc) {
+ /* Can't easily do it. */
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return;
+ }
+ if (GC_words_allocd > 0) {
+ /* There may be unmarked reachable objects */
+ GC_gcollect_inner();
+ } /* else we're OK in assuming everything's */
+ /* clean since nothing can point to an */
+ /* unmarked object. */
+ GC_read_dirty();
+ GC_incremental = TRUE;
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+}
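+
+/* Illustrative sketch, not compiled by default: incremental mode is	*/
+/* typically requested once, early, before much has been allocated:	*/
+#ifdef GC_EXAMPLE_CODE
+  void GC_example_go_incremental()
+  {
+      GC_init();			/* idempotent */
+      GC_enable_incremental();	/* no-op when built with -DFIND_LEAK */
+  }
+#endif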
+
+#if defined(OS2) || defined(MSWIN32)
+ FILE * GC_stdout = NULL;
+ FILE * GC_stderr = NULL;
+#endif
+
+#ifdef MSWIN32
+ void GC_set_files()
+ {
+ if (GC_stdout == NULL) {
+ GC_stdout = fopen("gc.log", "wt");
+ }
+ if (GC_stderr == NULL) {
+ GC_stderr = GC_stdout;
+ }
+ }
+#endif
+
+#ifdef OS2
+ void GC_set_files()
+ {
+ if (GC_stdout == NULL) {
+ GC_stdout = stdout;
+ }
+ if (GC_stderr == NULL) {
+ GC_stderr = stderr;
+ }
+ }
+#endif
+
+/* A version of printf that is unlikely to call malloc, and is thus safer */
+/* to call from the collector in case malloc has been bound to GC_malloc. */
+/* Assumes that no more than 1023 characters are written at once. */
+/* Assumes that all arguments have been converted to something of the */
+/* same size as long, and that the format conversions expect something */
+/* of that size. */
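+/* Callers normally use the GC_printfN macros from gc_priv.h, which	*/
+/* pad the argument list with zero longs; each argument is cast, e.g.:	*/
+/*	GC_printf1("Heap size = %lu bytes\n", (unsigned long)GC_heapsize); */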
+void GC_printf(format, a, b, c, d, e, f)
+char * format;
+long a, b, c, d, e, f;
+{
+ char buf[1025];
+
+ if (GC_quiet) return;
+ buf[1024] = 0x15;
+ (void) sprintf(buf, format, a, b, c, d, e, f);
+ if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
+# if defined(OS2) || defined(MSWIN32)
+ GC_set_files();
+ /* We hope this doesn't allocate */
+ if (fwrite(buf, 1, strlen(buf), GC_stdout) != strlen(buf))
+ ABORT("write to stdout failed");
+ fflush(GC_stdout);
+# else
+ if (write(1, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
+# endif
+}
+
+void GC_err_printf(format, a, b, c, d, e, f)
+char * format;
+long a, b, c, d, e, f;
+{
+ char buf[1025];
+
+ buf[1024] = 0x15;
+ (void) sprintf(buf, format, a, b, c, d, e, f);
+ if (buf[1024] != 0x15) ABORT("GC_err_printf clobbered stack");
+# if defined(OS2) || defined(MSWIN32)
+ GC_set_files();
+ /* We hope this doesn't allocate */
+ if (fwrite(buf, 1, strlen(buf), GC_stderr) != strlen(buf))
+ ABORT("write to stderr failed");
+ fflush(GC_stderr);
+# else
+ if (write(2, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
+# endif
+}
+
+void GC_err_puts(s)
+char *s;
+{
+# if defined(OS2) || defined(MSWIN32)
+ GC_set_files();
+ /* We hope this doesn't allocate */
+ if (fwrite(s, 1, strlen(s), GC_stderr) != strlen(s))
+ ABORT("write to stderr failed");
+ fflush(GC_stderr);
+# else
+ if (write(2, s, strlen(s)) < 0) ABORT("write to stderr failed");
+# endif
+}
+
+#ifndef PCR
+void GC_abort(msg)
+char * msg;
+{
+ GC_err_printf1("%s\n", msg);
+ (void) abort();
+}
+#endif
+
+# ifdef SRC_M3
+void GC_enable()
+{
+ GC_dont_gc--;
+}
+
+void GC_disable()
+{
+ GC_dont_gc++;
+}
+# endif
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * This file contains the functions:
+ * ptr_t GC_build_flXXX(h, old_fl)
+ * void GC_new_hblk(n)
+ */
+/* Boehm, May 19, 1994 2:09 pm PDT */
+
+
+# include <stdio.h>
+# include "gc_priv.h"
+
+#ifndef SMALL_CONFIG
+/*
+ * Build a free list for size 1 objects inside hblk h.  Set the last link to
+ * be ofl.  Return a pointer to the first free list entry.
+ * Each object's link points to the previous (lower-addressed) object, so
+ * the highest-addressed object becomes the new list head.  The loop is
+ * unrolled to build four links per iteration.
+ */
+ptr_t GC_build_fl1(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = (word)(p);
+ p[2] = (word)(p+1);
+ p[3] = (word)(p+2);
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-1);
+ p[1] = (word)(p);
+ p[2] = (word)(p+1);
+ p[3] = (word)(p+2);
+ };
+ return((ptr_t)(p-1));
+}
+
+/* The same for size 2 cleared objects */
+ptr_t GC_build_fl_clear2(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = (word)p;
+ p[3] = 0;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-2);
+ p[1] = 0;
+ p[2] = (word)p;
+ p[3] = 0;
+ };
+ return((ptr_t)(p-2));
+}
+
+/* The same for size 3 cleared objects */
+ptr_t GC_build_fl_clear3(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1) - 2;
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = 0;
+ p += 3;
+ for (; p < lim; p += 3) {
+ p[0] = (word)(p-3);
+ p[1] = 0;
+ p[2] = 0;
+ };
+ return((ptr_t)(p-3));
+}
+
+/* The same for size 4 cleared objects */
+ptr_t GC_build_fl_clear4(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-4);
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+ };
+ return((ptr_t)(p-4));
+}
+
+/* The same for size 2 uncleared objects */
+ptr_t GC_build_fl2(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[2] = (word)p;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-2);
+ p[2] = (word)p;
+ };
+ return((ptr_t)(p-2));
+}
+
+/* The same for size 4 uncleared objects */
+ptr_t GC_build_fl4(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[4] = (word)p;
+ p += 8;
+ for (; p < lim; p += 8) {
+ p[0] = (word)(p-4);
+ p[4] = (word)p;
+ };
+ return((ptr_t)(p-4));
+}
+
+#endif /* !SMALL_CONFIG */
+
+/*
+ * Allocate a new heapblock for small objects of size n.
+ * Add all of the heapblock's objects to the free list for objects
+ * of that size. Will fail to do anything if we are out of memory.
+ */
+void GC_new_hblk(sz, kind)
+register word sz;
+int kind;
+{
+ register word *p,
+ *prev;
+ word *last_object; /* points to last object in new hblk */
+ register struct hblk *h; /* the new heap block */
+ register bool clear = GC_obj_kinds[kind].ok_init;
+
+# ifdef PRINTSTATS
+ if ((sizeof (struct hblk)) > HBLKSIZE) {
+ ABORT("HBLK SZ inconsistency");
+ }
+# endif
+
+ /* Allocate a new heap block */
+ h = GC_allochblk(sz, kind, 0);
+ if (h == 0) return;
+
+    /* Handle small object sizes more efficiently.  For larger objects */
+ /* the difference is less significant. */
+# ifndef SMALL_CONFIG
+ switch (sz) {
+ case 1: GC_obj_kinds[kind].ok_freelist[1] =
+ GC_build_fl1(h, GC_obj_kinds[kind].ok_freelist[1]);
+ return;
+ case 2: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[2] =
+ GC_build_fl_clear2(h, GC_obj_kinds[kind].ok_freelist[2]);
+ } else {
+ GC_obj_kinds[kind].ok_freelist[2] =
+ GC_build_fl2(h, GC_obj_kinds[kind].ok_freelist[2]);
+ }
+ return;
+ case 3: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[3] =
+ GC_build_fl_clear3(h, GC_obj_kinds[kind].ok_freelist[3]);
+ return;
+ } else {
+ /* It's messy to do better than the default here. */
+ break;
+ }
+ case 4: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[4] =
+ GC_build_fl_clear4(h, GC_obj_kinds[kind].ok_freelist[4]);
+ } else {
+ GC_obj_kinds[kind].ok_freelist[4] =
+ GC_build_fl4(h, GC_obj_kinds[kind].ok_freelist[4]);
+ }
+ return;
+ default:
+ break;
+ }
+# endif /* !SMALL_CONFIG */
+
+ /* Clear the page if necessary. */
+ if (clear) BZERO(h, HBLKSIZE);
+
+ /* Add objects to free list */
+ p = &(h -> hb_body[sz]); /* second object in *h */
+ prev = &(h -> hb_body[0]); /* One object behind p */
+ last_object = (word *)((char *)h + HBLKSIZE);
+ last_object -= sz;
+ /* Last place for last object to start */
+
+ /* make a list of all objects in *h with head as last object */
+ while (p <= last_object) {
+ /* current object's link points to last object */
+ obj_link(p) = (ptr_t)prev;
+ prev = p;
+ p += sz;
+ }
+ p -= sz; /* p now points to last object */
+
+ /*
+ * put p (which is now head of list of objects in *h) as first
+ * pointer in the appropriate free list for this size.
+ */
+ obj_link(h -> hb_body) = GC_obj_kinds[kind].ok_freelist[sz];
+ GC_obj_kinds[kind].ok_freelist[sz] = ((ptr_t)p);
+}
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:59 pm PDT */
+
+/* Routines for maintaining maps describing heap block
+ * layouts for various object sizes. Allows fast pointer validity checks
+ * and fast location of object start locations on machines (such as SPARC)
+ * with slow division.
+ */
+
+# include "gc_priv.h"
+
+char * GC_invalid_map = 0;
+
+/* Invalidate the object map associated with a block. Free blocks */
+/* are identified by invalid maps. */
+void GC_invalidate_map(hhdr)
+hdr *hhdr;
+{
+ register int displ;
+
+ if (GC_invalid_map == 0) {
+ GC_invalid_map = GC_scratch_alloc(MAP_SIZE);
+ if (GC_invalid_map == 0) {
+ GC_err_printf0(
+ "Cant initialize GC_invalid_map: insufficient memory\n");
+ EXIT();
+ }
+ for (displ = 0; displ < HBLKSIZE; displ++) {
+ MAP_ENTRY(GC_invalid_map, displ) = OBJ_INVALID;
+ }
+ }
+ hhdr -> hb_map = GC_invalid_map;
+}
+
+/* Consider pointers that are offset bytes displaced from the beginning */
+/* of an object to be valid. */
+void GC_register_displacement(offset)
+word offset;
+{
+# ifndef ALL_INTERIOR_POINTERS
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_register_displacement_inner(offset);
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+}
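+
+/* Illustrative sketch, not compiled by default: a client whose only	*/
+/* retained pointers reference one word past the object start (e.g.	*/
+/* past a private length header) registers that displacement once:	*/
+#ifdef GC_EXAMPLE_CODE
+  void GC_example_register_displacement()
+  {
+      GC_register_displacement((word)sizeof(word));
+  }
+#endif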
+
+void GC_register_displacement_inner(offset)
+word offset;
+{
+# ifndef ALL_INTERIOR_POINTERS
+ register unsigned i;
+
+ if (offset > MAX_OFFSET) {
+ ABORT("Bad argument to GC_register_displacement");
+ }
+ if (!GC_valid_offsets[offset]) {
+ GC_valid_offsets[offset] = TRUE;
+ GC_modws_valid_offsets[offset % sizeof(word)] = TRUE;
+ for (i = 0; i <= MAXOBJSZ; i++) {
+ if (GC_obj_map[i] != 0) {
+ if (i == 0) {
+ GC_obj_map[i][offset + HDR_BYTES] = (char)BYTES_TO_WORDS(offset);
+ } else {
+ register unsigned j;
+ register unsigned lb = WORDS_TO_BYTES(i);
+
+ if (offset < lb) {
+ for (j = offset + HDR_BYTES; j < HBLKSIZE; j += lb) {
+ GC_obj_map[i][j] = (char)BYTES_TO_WORDS(offset);
+ }
+ }
+ }
+ }
+ }
+ }
+# endif
+}
+
+
+/* Add a heap block map for objects of size sz to obj_map. */
+/* Return FALSE on failure. */
+bool GC_add_map_entry(sz)
+word sz;
+{
+ register unsigned obj_start;
+ register unsigned displ;
+ register char * new_map;
+
+ if (sz > MAXOBJSZ) sz = 0;
+ if (GC_obj_map[sz] != 0) {
+ return(TRUE);
+ }
+ new_map = GC_scratch_alloc(MAP_SIZE);
+ if (new_map == 0) return(FALSE);
+# ifdef PRINTSTATS
+ GC_printf1("Adding block map for size %lu\n", (unsigned long)sz);
+# endif
+ for (displ = 0; displ < HBLKSIZE; displ++) {
+ MAP_ENTRY(new_map,displ) = OBJ_INVALID;
+ }
+ if (sz == 0) {
+ for(displ = 0; displ <= MAX_OFFSET; displ++) {
+ if (OFFSET_VALID(displ)) {
+ MAP_ENTRY(new_map,displ+HDR_BYTES) = BYTES_TO_WORDS(displ);
+ }
+ }
+ } else {
+ for (obj_start = HDR_BYTES;
+ obj_start + WORDS_TO_BYTES(sz) <= HBLKSIZE;
+ obj_start += WORDS_TO_BYTES(sz)) {
+ for (displ = 0; displ < WORDS_TO_BYTES(sz); displ++) {
+ if (OFFSET_VALID(displ)) {
+ MAP_ENTRY(new_map, obj_start + displ) =
+ BYTES_TO_WORDS(displ);
+ }
+ }
+ }
+ }
+ GC_obj_map[sz] = new_map;
+ return(TRUE);
+}
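+
+/* Worked example (illustrative; assumes 4-byte words, HDR_BYTES == 4,	*/
+/* and only displacement 0 registered): for sz == 2 words, objects	*/
+/* start at byte offsets 4, 12, 20, ... within the block.  Then	*/
+/* MAP_ENTRY(map, 12) == 0, i.e. a pointer there is 0 words past an	*/
+/* object start, while MAP_ENTRY(map, 16) == OBJ_INVALID, since	*/
+/* displacement 4 was never registered.				*/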
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:10 pm PDT */
+# if !defined(OS2) && !defined(PCR) && !defined(AMIGA)
+# include <sys/types.h>
+# endif
+# include "gc_priv.h"
+# include <stdio.h>
+# include <signal.h>
+
+/* Blatantly OS dependent routines, except for those that are related */
+/* to dynamic loading. */
+
+#ifdef FREEBSD
+# include <machine/trap.h>
+#endif
+
+#ifdef AMIGA
+# include <proto/exec.h>
+# include <proto/dos.h>
+# include <dos/dosextens.h>
+# include <workbench/startup.h>
+#endif
+
+#ifdef MSWIN32
+# define WIN32_LEAN_AND_MEAN
+# define NOSERVICE
+# include <windows.h>
+#endif
+
+#ifdef IRIX5
+# include <sys/uio.h>
+#endif
+
+#ifdef PCR
+# include "il/PCR_IL.h"
+# include "th/PCR_ThCtl.h"
+# include "mm/PCR_MM.h"
+#endif
+
+# ifdef OS2
+
+# include <stddef.h>
+
+# ifndef __IBMC__ /* e.g. EMX */
+
+struct exe_hdr {
+ unsigned short magic_number;
+ unsigned short padding[29];
+ long new_exe_offset;
+};
+
+#define E_MAGIC(x) (x).magic_number
+#define EMAGIC 0x5A4D
+#define E_LFANEW(x) (x).new_exe_offset
+
+struct e32_exe {
+ unsigned char magic_number[2];
+ unsigned char byte_order;
+ unsigned char word_order;
+ unsigned long exe_format_level;
+ unsigned short cpu;
+ unsigned short os;
+ unsigned long padding1[13];
+ unsigned long object_table_offset;
+ unsigned long object_count;
+ unsigned long padding2[31];
+};
+
+#define E32_MAGIC1(x) (x).magic_number[0]
+#define E32MAGIC1 'L'
+#define E32_MAGIC2(x) (x).magic_number[1]
+#define E32MAGIC2 'X'
+#define E32_BORDER(x) (x).byte_order
+#define E32LEBO 0
+#define E32_WORDER(x) (x).word_order
+#define E32LEWO 0
+#define E32_CPU(x) (x).cpu
+#define E32CPU286 1
+#define E32_OBJTAB(x) (x).object_table_offset
+#define E32_OBJCNT(x) (x).object_count
+
+struct o32_obj {
+ unsigned long size;
+ unsigned long base;
+ unsigned long flags;
+ unsigned long pagemap;
+ unsigned long mapsize;
+ unsigned long reserved;
+};
+
+#define O32_FLAGS(x) (x).flags
+#define OBJREAD 0x0001L
+#define OBJWRITE 0x0002L
+#define OBJINVALID 0x0080L
+#define O32_SIZE(x) (x).size
+#define O32_BASE(x) (x).base
+
+# else /* IBM's compiler */
+
+# define INCL_DOSEXCEPTIONS
+# define INCL_DOSPROCESS
+# define INCL_DOSERRORS
+# define INCL_DOSMODULEMGR
+# define INCL_DOSMEMMGR
+# include <os2.h>
+
+/* A kludge to get around what appears to be a header file bug */
+# ifndef WORD
+# define WORD unsigned short
+# endif
+# ifndef DWORD
+# define DWORD unsigned long
+# endif
+
+# define EXE386 1
+# include <newexe.h>
+# include <exe386.h>
+
+# endif /* __IBMC__ */
+
+/* Disable and enable signals during nontrivial allocations */
+
+void GC_disable_signals(void)
+{
+ ULONG nest;
+
+ DosEnterMustComplete(&nest);
+ if (nest != 1) ABORT("nested GC_disable_signals");
+}
+
+void GC_enable_signals(void)
+{
+ ULONG nest;
+
+ DosExitMustComplete(&nest);
+ if (nest != 0) ABORT("GC_enable_signals");
+}
+
+
+# else
+
+# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32)
+
+# ifdef sigmask
+ /* Use the traditional BSD interface */
+# define SIGSET_T int
+# define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
+# define SIG_FILL(set) (set) = 0x7fffffff
+ /* Setting the leading bit appears to provoke a bug in some */
+ /* longjmp implementations. Most systems appear not to have */
+ /* a signal 32. */
+# define SIGSETMASK(old, new) (old) = sigsetmask(new)
+# else
+ /* Use POSIX/SYSV interface */
+# define SIGSET_T sigset_t
+# define SIG_DEL(set, signal) sigdelset(&(set), (signal))
+# define SIG_FILL(set) sigfillset(&set)
+# define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
+# endif
+
+static bool mask_initialized = FALSE;
+
+static SIGSET_T new_mask;
+
+static SIGSET_T old_mask;
+
+static SIGSET_T dummy;
+
+#if defined(PRINTSTATS) && !defined(THREADS)
+# define CHECK_SIGNALS
+ int GC_sig_disabled = 0;
+#endif
+
+void GC_disable_signals()
+{
+ if (!mask_initialized) {
+ SIG_FILL(new_mask);
+
+ SIG_DEL(new_mask, SIGSEGV);
+ SIG_DEL(new_mask, SIGILL);
+ SIG_DEL(new_mask, SIGQUIT);
+# ifdef SIGBUS
+ SIG_DEL(new_mask, SIGBUS);
+# endif
+# ifdef SIGIOT
+ SIG_DEL(new_mask, SIGIOT);
+# endif
+# ifdef SIGEMT
+ SIG_DEL(new_mask, SIGEMT);
+# endif
+# ifdef SIGTRAP
+ SIG_DEL(new_mask, SIGTRAP);
+# endif
+ mask_initialized = TRUE;
+ }
+# ifdef CHECK_SIGNALS
+ if (GC_sig_disabled != 0) ABORT("Nested disables");
+ GC_sig_disabled++;
+# endif
+ SIGSETMASK(old_mask,new_mask);
+}
+
+void GC_enable_signals()
+{
+# ifdef CHECK_SIGNALS
+ if (GC_sig_disabled != 1) ABORT("Unmatched enable");
+ GC_sig_disabled--;
+# endif
+ SIGSETMASK(dummy,old_mask);
+}
+
+# endif /* !PCR */
+
+# endif /*!OS/2 */
+
+/*
+ * Find the base of the stack.
+ * Used only in single-threaded environment.
+ * With threads, GC_mark_roots needs to know how to do this.
+ * Called with allocator lock held.
+ */
+# ifdef MSWIN32
+
+/* Get the page size. */
+word GC_page_size = 0;
+
+word GC_get_page_size()
+{
+ SYSTEM_INFO sysinfo;
+
+ if (GC_page_size == 0) {
+ GetSystemInfo(&sysinfo);
+ GC_page_size = sysinfo.dwPageSize;
+ }
+ return(GC_page_size);
+}
+
+# define is_writable(prot) ((prot) == PAGE_READWRITE \
+ || (prot) == PAGE_WRITECOPY \
+ || (prot) == PAGE_EXECUTE_READWRITE \
+ || (prot) == PAGE_EXECUTE_WRITECOPY)
+/* Return the number of bytes that are writable starting at p. */
+/* The pointer p is assumed to be page aligned. */
+/* If base is not 0, *base becomes the beginning of the */
+/* allocation region containing p. */
+word GC_get_writable_length(ptr_t p, ptr_t *base)
+{
+ MEMORY_BASIC_INFORMATION buf;
+ word result;
+ word protect;
+
+ result = VirtualQuery(p, &buf, sizeof(buf));
+ if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
+ if (base != 0) *base = (ptr_t)(buf.AllocationBase);
+ protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
+ if (!is_writable(protect)) {
+ return(0);
+ }
+ if (buf.State != MEM_COMMIT) return(0);
+ return(buf.RegionSize);
+}
+
+ptr_t GC_get_stack_base()
+{
+ int dummy;
+ ptr_t sp = (ptr_t)(&dummy);
+ ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_get_page_size() - 1));
+ word size = GC_get_writable_length(trunc_sp, 0);
+
+ return(trunc_sp + size);
+}
+
+
+# else
+
+# ifdef OS2
+
+ptr_t GC_get_stack_base()
+{
+ PTIB ptib;
+ PPIB ppib;
+
+ if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
+ GC_err_printf0("DosGetInfoBlocks failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ return((ptr_t)(ptib -> tib_pstacklimit));
+}
+
+# else
+
+# ifdef AMIGA
+
+ptr_t GC_get_stack_base()
+{
+ extern struct WBStartup *_WBenchMsg;
+ extern long __base;
+ extern long __stack;
+ struct Task *task;
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ long size;
+
+ if ((task = FindTask(0)) == 0) {
+ GC_err_puts("Cannot find own task structure\n");
+ ABORT("task missing");
+ }
+ proc = (struct Process *)task;
+ cli = BADDR(proc->pr_CLI);
+
+ if (_WBenchMsg != 0 || cli == 0) {
+ size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
+ } else {
+ size = cli->cli_DefaultStack * 4;
+ }
+ return (ptr_t)(__base + GC_max(size, __stack));
+}
+
+# else
+
+# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
+# define NEED_FIND_LIMIT
+# endif
+
+# if defined(SUNOS4) && defined(DYNAMIC_LOADING)
+# define NEED_FIND_LIMIT
+# endif
+
+# ifdef NEED_FIND_LIMIT
+ /* Some tools to implement HEURISTIC2 */
+# define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
+# include <setjmp.h>
+ /* static */ jmp_buf GC_jmp_buf;
+
+ /*ARGSUSED*/
+ void GC_fault_handler(sig)
+ int sig;
+ {
+ longjmp(GC_jmp_buf, 1);
+ }
+
+# ifdef __STDC__
+ typedef void (*handler)(int);
+# else
+ typedef void (*handler)();
+# endif
+
+  /* Return the first nonaddressable location > p (up) or	*/
+  /* the smallest location q s.t. [q,p] is addressable (!up).	*/
+ ptr_t GC_find_limit(p, up)
+ ptr_t p;
+ bool up;
+ {
+ static VOLATILE ptr_t result;
+ /* Needs to be static, since otherwise it may not be */
+ /* preserved across the longjmp. Can safely be */
+ /* static since it's only called once, with the */
+ /* allocation lock held. */
+
+ static handler old_segv_handler, old_bus_handler;
+ /* See above for static declaration. */
+
+ old_segv_handler = signal(SIGSEGV, GC_fault_handler);
+# ifdef SIGBUS
+ old_bus_handler = signal(SIGBUS, GC_fault_handler);
+# endif
+ if (setjmp(GC_jmp_buf) == 0) {
+ result = (ptr_t)(((word)(p))
+ & ~(MIN_PAGE_SIZE-1));
+ for (;;) {
+ if (up) {
+ result += MIN_PAGE_SIZE;
+ } else {
+ result -= MIN_PAGE_SIZE;
+ }
+ GC_noop(*result);
+ }
+ }
+ (void) signal(SIGSEGV, old_segv_handler);
+# ifdef SIGBUS
+ (void) signal(SIGBUS, old_bus_handler);
+# endif
+ if (!up) {
+ result += MIN_PAGE_SIZE;
+ }
+ return(result);
+ }
+# endif
+
+
+ptr_t GC_get_stack_base()
+{
+ word dummy;
+ ptr_t result;
+
+# define STACKBOTTOM_ALIGNMENT_M1 0xffffff
+
+# ifdef STACKBOTTOM
+ return(STACKBOTTOM);
+# else
+# ifdef HEURISTIC1
+# ifdef STACK_GROWS_DOWN
+ result = (ptr_t)((((word)(&dummy))
+ + STACKBOTTOM_ALIGNMENT_M1)
+ & ~STACKBOTTOM_ALIGNMENT_M1);
+# else
+ result = (ptr_t)(((word)(&dummy))
+ & ~STACKBOTTOM_ALIGNMENT_M1);
+# endif
+# endif /* HEURISTIC1 */
+# ifdef HEURISTIC2
+# ifdef STACK_GROWS_DOWN
+ result = GC_find_limit((ptr_t)(&dummy), TRUE);
+# else
+ result = GC_find_limit((ptr_t)(&dummy), FALSE);
+# endif
+# endif /* HEURISTIC2 */
+ return(result);
+# endif /* STACKBOTTOM */
+}
+
+# endif /* ! AMIGA */
+# endif /* ! OS2 */
+# endif /* ! MSWIN32 */
+
+/*
+ * Register static data segment(s) as roots.
+ * If more data segments are added later then they need to be registered
+ * at that point (as we do with SunOS dynamic loading),
+ * or GC_mark_roots needs to check for them (as we do with PCR).
+ * Called with allocator lock held.
+ */
+
+# ifdef OS2
+
+void GC_register_data_segments()
+{
+ PTIB ptib;
+ PPIB ppib;
+ HMODULE module_handle;
+# define PBUFSIZ 512
+ UCHAR path[PBUFSIZ];
+ FILE * myexefile;
+ struct exe_hdr hdrdos; /* MSDOS header. */
+ struct e32_exe hdr386; /* Real header for my executable */
+    struct o32_obj seg;	/* Current segment */
+ int nsegs;
+
+
+ if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
+ GC_err_printf0("DosGetInfoBlocks failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ module_handle = ppib -> pib_hmte;
+ if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
+ GC_err_printf0("DosQueryModuleName failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ myexefile = fopen(path, "rb");
+ if (myexefile == 0) {
+ GC_err_puts("Couldn't open executable ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Failed to open executable\n");
+ }
+ if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
+ GC_err_puts("Couldn't read MSDOS header from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read MSDOS header");
+ }
+ if (E_MAGIC(hdrdos) != EMAGIC) {
+ GC_err_puts("Executable has wrong DOS magic number: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad DOS magic number");
+ }
+ if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
+ GC_err_puts("Seek to new header failed in ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad DOS magic number");
+ }
+ if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
+ GC_err_puts("Couldn't read MSDOS header from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read OS/2 header");
+ }
+ if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
+ GC_err_puts("Executable has wrong OS/2 magic number:");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad OS/2 magic number");
+ }
+ if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
+ GC_err_puts("Executable %s has wrong byte order: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad byte order");
+ }
+ if ( E32_CPU(hdr386) == E32CPU286) {
+ GC_err_puts("GC can't handle 80286 executables: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ EXIT();
+ }
+ if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
+ SEEK_SET) != 0) {
+ GC_err_puts("Seek to object table failed: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Seek to object table failed");
+ }
+ for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
+ int flags;
+ if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
+ GC_err_puts("Couldn't read obj table entry from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read obj table entry");
+ }
+ flags = O32_FLAGS(seg);
+ if (!(flags & OBJWRITE)) continue;
+ if (!(flags & OBJREAD)) continue;
+ if (flags & OBJINVALID) {
+ GC_err_printf0("Object with invalid pages?\n");
+ continue;
+ }
+ GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg));
+ }
+}
+
+# else
+
+# ifdef MSWIN32
+  /* Unfortunately, we have to handle win32s very differently from NT, */
+  /* since VirtualQuery has very different semantics.  In particular,  */
+  /* under win32s a VirtualQuery call on an unmapped page returns an   */
+  /* invalid result.  Under NT, GC_register_data_segments is a no-op   */
+  /* and all real work is done by GC_register_dynamic_libraries.       */
+  /* Under win32s, we cannot find the data segments associated with    */
+  /* dll's.  We register the main data segment here.                   */
+ bool GC_win32s = FALSE; /* We're running under win32s. */
+
+ void GC_init_win32()
+ {
+ if (GetVersion() & 0x80000000) GC_win32s = TRUE;
+ }
+
+ /* Return the smallest address a such that VirtualQuery */
+ /* returns correct results for all addresses between a and start. */
+ /* Assumes VirtualQuery returns correct information for start. */
+ ptr_t GC_least_described_address(ptr_t start)
+ {
+ MEMORY_BASIC_INFORMATION buf;
+ SYSTEM_INFO sysinfo;
+ DWORD result;
+ LPVOID limit;
+ ptr_t p;
+ LPVOID q;
+
+ GetSystemInfo(&sysinfo);
+ limit = sysinfo.lpMinimumApplicationAddress;
+ p = (ptr_t)((word)start & ~(GC_get_page_size() - 1));
+ for (;;) {
+ q = (LPVOID)(p - GC_get_page_size());
+ if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
+ result = VirtualQuery(q, &buf, sizeof(buf));
+ if (result != sizeof(buf)) break;
+ p = (ptr_t)(buf.AllocationBase);
+ }
+ return(p);
+ }
+
+ /* Is p the start of either the malloc heap, or of one of our */
+ /* heap sections? */
+ bool GC_is_heap_base (ptr_t p)
+ {
+ static ptr_t malloc_heap_pointer = 0;
+ register unsigned i;
+ register DWORD result;
+
+    if (malloc_heap_pointer == 0) {
+ MEMORY_BASIC_INFORMATION buf;
+ result = VirtualQuery(malloc(1), &buf, sizeof(buf));
+ if (result != sizeof(buf)) {
+ ABORT("Weird VirtualQuery result");
+ }
+ malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
+ }
+ if (p == malloc_heap_pointer) return(TRUE);
+ for (i = 0; i < GC_n_heap_bases; i++) {
+ if (GC_heap_bases[i] == p) return(TRUE);
+ }
+ return(FALSE);
+ }
+
+ void GC_register_root_section(ptr_t static_root)
+ {
+ MEMORY_BASIC_INFORMATION buf;
+ SYSTEM_INFO sysinfo;
+ DWORD result;
+ DWORD protect;
+ LPVOID p;
+ char * base;
+ char * limit, * new_limit;
+
+ if (!GC_win32s) return;
+ p = base = limit = GC_least_described_address(static_root);
+ GetSystemInfo(&sysinfo);
+ while (p < sysinfo.lpMaximumApplicationAddress) {
+ result = VirtualQuery(p, &buf, sizeof(buf));
+ if (result != sizeof(buf) || GC_is_heap_base(buf.AllocationBase)) break;
+ new_limit = (char *)p + buf.RegionSize;
+ protect = buf.Protect;
+ if (buf.State == MEM_COMMIT
+ && is_writable(protect)) {
+ if ((char *)p == limit) {
+ limit = new_limit;
+ } else {
+ if (base != limit) GC_add_roots_inner(base, limit);
+ base = p;
+ limit = new_limit;
+ }
+ }
+ if (p > (LPVOID)new_limit /* overflow */) break;
+ p = (LPVOID)new_limit;
+ }
+ if (base != limit) GC_add_roots_inner(base, limit);
+ }
+
+ void GC_register_data_segments()
+ {
+ static char dummy;
+
+ GC_register_root_section((ptr_t)(&dummy));
+ }
+# else
+# ifdef AMIGA
+
+ void GC_register_data_segments()
+ {
+ extern struct WBStartup *_WBenchMsg;
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ BPTR myseglist;
+ ULONG *data;
+
+ if ( _WBenchMsg != 0 ) {
+ if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
+ GC_err_puts("No seglist from workbench\n");
+ return;
+ }
+ } else {
+ if ((proc = (struct Process *)FindTask(0)) == 0) {
+ GC_err_puts("Cannot find process structure\n");
+ return;
+ }
+ if ((cli = BADDR(proc->pr_CLI)) == 0) {
+ GC_err_puts("No CLI\n");
+ return;
+ }
+ if ((myseglist = cli->cli_Module) == 0) {
+ GC_err_puts("No seglist from CLI\n");
+ return;
+ }
+ }
+
+ for (data = (ULONG *)BADDR(myseglist); data != 0;
+ data = (ULONG *)BADDR(data[0])) {
+ GC_add_roots_inner((char *)&data[1], ((char *)&data[1]) + data[-1]);
+ }
+ }
+
+
+# else
+
+void GC_register_data_segments()
+{
+# ifndef NEXT
+ extern int end;
+# endif
+
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT)
+ GC_add_roots_inner(DATASTART, (char *)(&end));
+# endif
+# if !defined(PCR) && defined(NEXT)
+ GC_add_roots_inner(DATASTART, (char *) get_end());
+# endif
+ /* Dynamic libraries are added at every collection, since they may */
+ /* change. */
+}
+
+# endif /* ! AMIGA */
+# endif /* ! MSWIN32 */
+# endif /* ! OS2 */
+
+/*
+ * Auxiliary routines for obtaining memory from OS.
+ */
+
+# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32)
+
+extern caddr_t sbrk();
+# ifdef __STDC__
+# define SBRK_ARG_T size_t
+# else
+# define SBRK_ARG_T int
+# endif
+
+# ifdef RS6000
+/* The compiler seems to generate speculative reads one past the end of */
+/* an allocated object. Hence we need to make sure that the page */
+/* following the last heap page is also mapped. */
+ptr_t GC_unix_get_mem(bytes)
+word bytes;
+{
+ caddr_t cur_brk = sbrk(0);
+ caddr_t result;
+ SBRK_ARG_T lsbs = (word)cur_brk & (HBLKSIZE-1);
+ static caddr_t my_brk_val = 0;
+
+ if (lsbs != 0) {
+ if(sbrk(HBLKSIZE - lsbs) == (caddr_t)(-1)) return(0);
+ }
+ if (cur_brk == my_brk_val) {
+ /* Use the extra block we allocated last time. */
+ result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
+ if (result == (caddr_t)(-1)) return(0);
+ result -= HBLKSIZE;
+ } else {
+ result = (ptr_t)sbrk(HBLKSIZE + (SBRK_ARG_T)bytes);
+ if (result == (caddr_t)(-1)) return(0);
+ }
+ my_brk_val = result + bytes + HBLKSIZE; /* Always HBLKSIZE aligned */
+ return((ptr_t)result);
+}
+
+#else
+ptr_t GC_unix_get_mem(bytes)
+word bytes;
+{
+ caddr_t cur_brk = sbrk(0);
+ caddr_t result;
+ SBRK_ARG_T lsbs = (word)cur_brk & (HBLKSIZE-1);
+
+ if (lsbs != 0) {
+ if(sbrk(HBLKSIZE - lsbs) == (caddr_t)(-1)) return(0);
+ }
+ result = sbrk((SBRK_ARG_T)bytes);
+ if (result == (caddr_t)(-1)) return(0);
+ return((ptr_t)result);
+}
+#endif
+
+# endif
+
+# ifdef __OS2__
+
+void * os2_alloc(size_t bytes)
+{
+ void * result;
+
+ if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
+ PAG_WRITE | PAG_COMMIT)
+ != NO_ERROR) {
+ return(0);
+ }
+ if (result == 0) return(os2_alloc(bytes));
+ return(result);
+}
+
+# endif /* OS2 */
+
+
+# ifdef MSWIN32
+word GC_n_heap_bases = 0;
+
+ptr_t GC_win32_get_mem(bytes)
+word bytes;
+{
+ ptr_t result;
+
+ if (GC_win32s) {
+ /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
+ /* There are also unconfirmed rumors of other */
+ /* problems, so we dodge the issue. */
+ result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
+ result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
+ } else {
+ result = (ptr_t) VirtualAlloc(NULL, bytes,
+ MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+ }
+ if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
+ /* If I read the documentation correctly, this can */
+ /* only happen if HBLKSIZE > 64k or not a power of 2. */
+ if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
+ GC_heap_bases[GC_n_heap_bases++] = result;
+ return(result);
+}
+
+# endif
+
+/* Routine for pushing any additional roots. In THREADS */
+/* environment, this is also responsible for marking from */
+/* thread stacks. In the SRC_M3 case, it also handles */
+/* global variables. */
+#ifndef THREADS
+void (*GC_push_other_roots)() = 0;
+#else /* THREADS */
+
+# ifdef PCR
+PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
+{
+ struct PCR_ThCtl_TInfoRep info;
+ PCR_ERes result;
+
+ info.ti_stkLow = info.ti_stkHi = 0;
+ result = PCR_ThCtl_GetInfo(t, &info);
+ GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
+ return(result);
+}
+
+/* Push the contents of an old object. We treat this as stack */
+/* data only because that makes it robust against mark stack */
+/* overflow. */
+PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
+{
+ GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
+ return(PCR_ERes_okay);
+}
+
+
+void GC_default_push_other_roots()
+{
+ /* Traverse data allocated by previous memory managers. */
+ {
+ extern struct PCR_MM_ProcsRep * GC_old_allocator;
+
+ if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
+ GC_push_old_obj, 0)
+ != PCR_ERes_okay) {
+ ABORT("Old object enumeration failed");
+ }
+ }
+ /* Traverse all thread stacks. */
+ if (PCR_ERes_IsErr(
+ PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
+ || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
+ ABORT("Thread stack marking failed\n");
+ }
+}
+
+# endif /* PCR */
+
+# ifdef SRC_M3
+
+# ifdef ALL_INTERIOR_POINTERS
+ --> misconfigured
+# endif
+
+
+extern void ThreadF__ProcessStacks();
+
+void GC_push_thread_stack(start, stop)
+word start, stop;
+{
+ GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
+}
+
+/* Push routine with M3 specific calling convention. */
+GC_m3_push_root(dummy1, p, dummy2, dummy3)
+word *p;
+ptr_t dummy1, dummy2;
+int dummy3;
+{
+ word q = *p;
+
+ if ((ptr_t)(q) >= GC_least_plausible_heap_addr
+ && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
+ GC_push_one_checked(q,FALSE);
+ }
+}
+
+/* M3 set equivalent to RTHeap.TracedRefTypes */
+typedef struct { int elts[1]; } RefTypeSet;
+RefTypeSet GC_TracedRefTypes = {{0x1}};
+
+/* From finalize.c */
+extern void GC_push_finalizer_structures();
+
+/* From stubborn.c: */
+# ifdef STUBBORN_ALLOC
+ extern extern_ptr_t * GC_changing_list_start;
+# endif
+
+
+void GC_default_push_other_roots()
+{
+ /* Use the M3 provided routine for finding static roots. */
+ /* This is a bit dubious, since it presumes no C roots. */
+ /* We handle the collector roots explicitly. */
+ {
+# ifdef STUBBORN_ALLOC
+ GC_push_one(GC_changing_list_start);
+# endif
+ GC_push_finalizer_structures();
+ RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
+ }
+ if (GC_words_allocd > 0) {
+ ThreadF__ProcessStacks(GC_push_thread_stack);
+ }
+ /* Otherwise this isn't absolutely necessary, and we have */
+ /* startup ordering problems. */
+}
+
+# endif /* SRC_M3 */
+
+# ifdef SOLARIS_THREADS
+
+void GC_default_push_other_roots()
+{
+ GC_push_all_stacks();
+}
+
+# endif /* SOLARIS_THREADS */
+
+void (*GC_push_other_roots)() = GC_default_push_other_roots;
+
+#endif
+
+/*
+ * Routines for accessing dirty bits on virtual pages.
+ * We plan to eventually implement four strategies for doing so:
+ * DEFAULT_VDB:	A simple dummy implementation that treats every page
+ *		as possibly dirty.  This makes incremental collection
+ *		useless, but the implementation is still correct.
+ * PCR_VDB:	Use PCR's virtual dirty bit facility.
+ * PROC_VDB:	Use the /proc facility for reading dirty bits.  Only
+ *		works under some SVR4 variants.  Even then, it may be
+ *		too slow to be entirely satisfactory.  Requires reading
+ *		dirty bits for the entire address space.  Implementations
+ *		tend to assume that the client is a (slow) debugger.
+ * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
+ *		dirtied pages.  The implementation (and implementability)
+ *		is highly system dependent.  This usually fails when system
+ *		calls write to a protected page.  We prevent the read system
+ *		call from doing so.  It is the client's responsibility to
+ *		make sure that other system calls are similarly protected
+ *		or write only to the stack.
+ */
+
+bool GC_dirty_maintained;
+
+# ifdef DEFAULT_VDB
+
+/* All of the following assume the allocation lock is held, and */
+/* signals are disabled. */
+
+/* The client asserts that unallocated pages in the heap are never */
+/* written. */
+
+/* Initialize virtual dirty bit implementation. */
+void GC_dirty_init()
+{
+}
+
+/* Retrieve system dirty bits for heap to a local buffer. */
+/* Restore the system's notion of which pages are dirty. */
+void GC_read_dirty()
+{}
+
+/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
+/* If the actual page size is different, this returns TRUE if any */
+/* of the pages overlapping h are dirty. This routine may err on the */
+/* side of labelling pages as dirty (and this implementation does). */
+/*ARGSUSED*/
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ return(TRUE);
+}
+
+/*
+ * The following two routines are typically less crucial. They matter
+ * most with large dynamic libraries, or if we can't accurately identify
+ * stacks, e.g. under Solaris 2.X. Otherwise the following default
+ * versions are adequate.
+ */
+
+/* Could any valid GC heap pointer ever have been written to this page? */
+/*ARGSUSED*/
+bool GC_page_was_ever_dirty(h)
+struct hblk *h;
+{
+ return(TRUE);
+}
+
+/* Reset the n pages starting at h to "was never dirty" status. */
+void GC_is_fresh(h, n)
+struct hblk *h;
+word n;
+{
+}
+
+/* A call hints that h is about to be written. */
+/* May speed up some dirty bit implementations. */
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+}
+
+# endif /* DEFAULT_VDB */
+
+
+# ifdef MPROTECT_VDB
+
+/*
+ * See DEFAULT_VDB for interface descriptions.
+ */
+
+/*
+ * This implementation maintains dirty bits itself by catching write
+ * faults and keeping track of them. We assume nobody else catches
+ * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
+ * except as a result of a read system call. This means clients must
+ * either ensure that system calls do not touch the heap, or must
+ * provide their own wrappers analogous to the one for read.
+ * We assume the page size is a multiple of HBLKSIZE.
+ * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
+ * tried to use portable code where easily possible. It is known
+ * not to work under a number of other systems.
+ */
+
+# include <sys/mman.h>
+# include <signal.h>
+# include <sys/syscall.h>
+
+VOLATILE page_hash_table GC_dirty_pages;
+ /* Pages dirtied since last GC_read_dirty. */
+
+word GC_page_size;
+
+bool GC_just_outside_heap(addr)
+word addr;
+{
+ register int i;
+ register word start;
+ register word end;
+ word mask = GC_page_size-1;
+
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ start = (word) GC_heap_sects[i].hs_start;
+ end = start + (word)GC_heap_sects[i].hs_bytes;
+        if ((addr < start && addr >= (start & ~mask))
+            || (addr >= end && addr < ((end + mask) & ~mask))) {
+ return(TRUE);
+ }
+ }
+ return(FALSE);
+}
+
+#if defined(SUNOS4) || defined(FREEBSD)
+ typedef void (* SIG_PF)();
+#endif
+
+#if defined(ALPHA) /* OSF1 */
+ typedef void (* SIG_PF)(int);
+#endif
+#if defined(IRIX5) || defined(ALPHA) /* OSF1 */
+ typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
+#endif
+
+SIG_PF GC_old_bus_handler;
+SIG_PF GC_old_segv_handler;
+
+/*ARGSUSED*/
+# if defined (SUNOS4) || defined(FREEBSD)
+ void GC_write_fault_handler(sig, code, scp, addr)
+ int sig, code;
+ struct sigcontext *scp;
+ char * addr;
+# ifdef SUNOS4
+# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
+# define CODE_OK (FC_CODE(code) == FC_PROT \
+ || (FC_CODE(code) == FC_OBJERR \
+ && FC_ERRNO(code) == FC_PROT))
+# endif
+# ifdef FREEBSD
+# define SIG_OK (sig == SIGBUS)
+# define CODE_OK (code == BUS_PAGE_FAULT)
+# endif
+# endif
+# if defined(IRIX5) || defined(ALPHA) /* OSF1 */
+# include <errno.h>
+ void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
+# define SIG_OK (sig == SIGSEGV)
+# ifdef ALPHA
+# define CODE_OK (code == 2 /* experimentally determined */)
+# endif
+# ifdef IRIX5
+# define CODE_OK (code == EACCES)
+# endif
+# endif
+{
+ register int i;
+# ifdef IRIX5
+ char * addr = (char *) (scp -> sc_badvaddr);
+# endif
+# ifdef ALPHA
+ char * addr = (char *) (scp -> sc_traparg_a0);
+# endif
+
+ if (SIG_OK && CODE_OK) {
+ register struct hblk * h =
+ (struct hblk *)((word)addr & ~(GC_page_size-1));
+
+ if (HDR(addr) == 0 && !GC_just_outside_heap((word)addr)) {
+ SIG_PF old_handler;
+
+ if (sig == SIGSEGV) {
+ old_handler = GC_old_segv_handler;
+ } else {
+ old_handler = GC_old_bus_handler;
+ }
+ if (old_handler == SIG_DFL) {
+ ABORT("Unexpected bus error or segmentation fault");
+ } else {
+# if defined (SUNOS4) || defined(FREEBSD)
+ (*old_handler) (sig, code, scp, addr);
+# else
+ (*(REAL_SIG_PF)old_handler) (sig, code, scp);
+# endif
+ return;
+ }
+ }
+ for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
+ register int index = PHT_HASH(h+i);
+
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ if (mprotect((caddr_t)h, (int)GC_page_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in handler");
+ }
+# if defined(IRIX5) || defined(ALPHA)
+ /* IRIX resets the signal handler each time. */
+ signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
+# endif
+ /* The write may not take place before dirty bits are read. */
+ /* But then we'll fault again ... */
+ return;
+ }
+
+ ABORT("Unexpected bus error or segmentation fault");
+}
+
+void GC_write_hint(h)
+struct hblk *h;
+{
+ register struct hblk * h_trunc =
+ (struct hblk *)((word)h & ~(GC_page_size-1));
+ register int i;
+ register bool found_clean = FALSE;
+
+ for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
+ register int index = PHT_HASH(h_trunc+i);
+
+ if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
+ found_clean = TRUE;
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ }
+ if (found_clean) {
+ if (mprotect((caddr_t)h_trunc, (int)GC_page_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in GC_write_hint");
+ }
+ }
+}
+
+void GC_dirty_init()
+{
+ GC_dirty_maintained = TRUE;
+ GC_page_size = getpagesize();
+ if (GC_page_size % HBLKSIZE != 0) {
+ GC_err_printf0("Page size not multiple of HBLKSIZE\n");
+ ABORT("Page size not multiple of HBLKSIZE");
+ }
+# if defined(SUNOS4) || defined(FREEBSD)
+ GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
+ if (GC_old_bus_handler == SIG_IGN) {
+ GC_err_printf0("Previously ignored bus error!?");
+        GC_old_bus_handler = SIG_DFL;
+ }
+ if (GC_old_bus_handler != SIG_DFL) {
+# ifdef PRINTSTATS
+ GC_err_printf0("Replaced other SIGBUS handler\n");
+# endif
+ }
+# endif
+# if defined(IRIX5) || defined(ALPHA) || defined(SUNOS4)
+ GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
+ if (GC_old_segv_handler == SIG_IGN) {
+ GC_err_printf0("Previously ignored segmentation violation!?");
+        GC_old_segv_handler = SIG_DFL;
+ }
+ if (GC_old_segv_handler != SIG_DFL) {
+# ifdef PRINTSTATS
+ GC_err_printf0("Replaced other SIGSEGV handler\n");
+# endif
+ }
+# endif
+}
+
+
+
+void GC_protect_heap()
+{
+ word ps = GC_page_size;
+ word pmask = (ps-1);
+ ptr_t start;
+ word offset;
+ word len;
+ int i;
+
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ offset = (word)(GC_heap_sects[i].hs_start) & pmask;
+ start = GC_heap_sects[i].hs_start - offset;
+ len = GC_heap_sects[i].hs_bytes + offset;
+ len += ps-1; len &= ~pmask;
+ if (mprotect((caddr_t)start, (int)len, PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed");
+ }
+ }
+}
+
+# ifdef THREADS
+--> The following is broken. We can lose dirty bits. We would need
+--> the signal handler to cooperate, as in PCR.
+# endif
+
+void GC_read_dirty()
+{
+ BCOPY(GC_dirty_pages, GC_grungy_pages,
+ (sizeof GC_dirty_pages));
+ BZERO(GC_dirty_pages, (sizeof GC_dirty_pages));
+ GC_protect_heap();
+}
+
+bool GC_page_was_dirty(h)
+struct hblk * h;
+{
+ register word index = PHT_HASH(h);
+
+ return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
+}
+
+/*
+ * If this code needed to be thread-safe, the following would need to
+ * acquire and release the allocation lock. This is tricky, since e.g.
+ * the cord package issues a read while it already holds the allocation lock.
+ */
+
+# ifdef THREADS
+ --> fix this
+# endif
+void GC_begin_syscall()
+{
+}
+
+void GC_end_syscall()
+{
+}
+
+void GC_unprotect_range(addr, len)
+ptr_t addr;
+word len;
+{
+ struct hblk * start_block;
+ struct hblk * end_block;
+ register struct hblk *h;
+ ptr_t obj_start;
+
+ if (!GC_incremental) return;
+ obj_start = GC_base(addr);
+ if (obj_start == 0) return;
+ if (GC_base(addr + len - 1) != obj_start) {
+ ABORT("GC_unprotect_range(range bigger than object)");
+ }
+ start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
+ end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
+ end_block += GC_page_size/HBLKSIZE - 1;
+ for (h = start_block; h <= end_block; h++) {
+ register word index = PHT_HASH(h);
+
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ if (mprotect((caddr_t)start_block,
+ (int)((ptr_t)end_block - (ptr_t)start_block)
+ + HBLKSIZE,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in GC_unprotect_range");
+ }
+}
+
+/* Replacement for UNIX system call. */
+/* Other calls that write to the heap */
+/* should be handled similarly. */
+# ifndef LINT
+ int read(fd, buf, nbyte)
+# else
+ int GC_read(fd, buf, nbyte)
+# endif
+int fd;
+char *buf;
+int nbyte;
+{
+ int result;
+
+ GC_begin_syscall();
+ GC_unprotect_range(buf, (word)nbyte);
+# ifdef IRIX5
+ /* Indirect system call exists, but is undocumented, and */
+ /* always seems to return EINVAL. There seems to be no */
+ /* general way to wrap system calls, since the system call */
+ /* convention appears to require an immediate argument for */
+ /* the system call number, and building the required code */
+ /* in the data segment also seems dangerous. We can fake it */
+ /* for read; anything else is up to the client. */
+ {
+ struct iovec iov;
+
+ iov.iov_base = buf;
+ iov.iov_len = nbyte;
+ result = readv(fd, &iov, 1);
+ }
+# else
+ result = syscall(SYS_read, fd, buf, nbyte);
+# endif
+ GC_end_syscall();
+ return(result);
+}
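+
+/* Illustrative sketch, not compiled by default: other system calls	*/
+/* that write to the heap would be wrapped the same way.  The wrapper	*/
+/* name below is hypothetical; recv is assumed to have the usual BSD	*/
+/* signature:								*/
+#ifdef GC_EXAMPLE_CODE
+  extern int recv();
+
+  int GC_example_recv(fd, buf, len, flags)
+  int fd;
+  char *buf;
+  int len;
+  int flags;
+  {
+      int result;
+
+      GC_begin_syscall();
+      GC_unprotect_range(buf, (word)len);
+      result = recv(fd, buf, len, flags);
+      GC_end_syscall();
+      return(result);
+  }
+#endif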
+
+/*ARGSUSED*/
+bool GC_page_was_ever_dirty(h)
+struct hblk *h;
+{
+ return(TRUE);
+}
+
+/* Reset the n pages starting at h to "was never dirty" status. */
+/*ARGSUSED*/
+void GC_is_fresh(h, n)
+struct hblk *h;
+word n;
+{
+}
+
+# endif /* MPROTECT_VDB */
+
+# ifdef PROC_VDB
+
+/*
+ * See DEFAULT_VDB for interface descriptions.
+ */
+
+/*
+ * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
+ * from which we can read page modified bits. This facility is far from
+ * optimal (e.g. we would like to get the info for only some of the
+ * address space), but it avoids intercepting system calls.
+ */
+
+#include <sys/types.h>
+#include <sys/signal.h>
+#include <sys/fault.h>
+#include <sys/syscall.h>
+#include <sys/procfs.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define BUFSZ 20000
+char *GC_proc_buf;
+
+page_hash_table GC_written_pages = { 0 }; /* Pages ever dirtied */
+
+#ifdef SOLARIS_THREADS
+/* We don't have exact sp values for threads. So we count on */
+/* occasionally declaring stack pages to be fresh. Thus we */
+/* need a real implementation of GC_is_fresh. We can't clear */
+/* entries in GC_written_pages, since that would declare all */
+/* pages with the given hash address to be fresh. */
+# define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
+ struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
+ /* Collisions are dropped. */
+
+# define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
+# define ADD_FRESH_PAGE(h) \
+ GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
+# define PAGE_IS_FRESH(h) \
+ (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
+#endif
+
+/* Add all pages in pht2 to pht1 */
+void GC_or_pages(pht1, pht2)
+page_hash_table pht1, pht2;
+{
+ register int i;
+
+ for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
+}
+
+int GC_proc_fd;
+
+void GC_dirty_init()
+{
+ int fd;
+ char buf[30];
+
+ GC_dirty_maintained = TRUE;
+ if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
+ register int i;
+
+ for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
+# ifdef PRINTSTATS
+ GC_printf1("Allocated words:%lu:all pages may have been written\n",
+ (unsigned long)
+ (GC_words_allocd + GC_words_allocd_before_gc));
+# endif
+ }
+ sprintf(buf, "/proc/%d", getpid());
+ fd = open(buf, O_RDONLY);
+ if (fd < 0) {
+ ABORT("/proc open failed");
+ }
+ GC_proc_fd = ioctl(fd, PIOCOPENPD, 0);
+ if (GC_proc_fd < 0) {
+ ABORT("/proc ioctl failed");
+ }
+ GC_proc_buf = GC_scratch_alloc(BUFSZ);
+# ifdef SOLARIS_THREADS
+ GC_fresh_pages = (struct hblk **)
+ GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
+ if (GC_fresh_pages == 0) {
+ GC_err_printf0("No space for fresh pages\n");
+ EXIT();
+ }
+ BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
+# endif
+}
+
+/* Ignore write hints. They don't help us here. */
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+}
+
+void GC_read_dirty()
+{
+ unsigned long ps, np;
+ int nmaps;
+ ptr_t vaddr;
+ struct prasmap * map;
+ char * bufp;
+ ptr_t current_addr, limit;
+ int i;
+
+ BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
+
+ bufp = GC_proc_buf;
+ if (read(GC_proc_fd, bufp, BUFSZ) <= 0) {
+ ABORT("/proc read failed: BUFSZ too small?\n");
+ }
+ /* Copy dirty bits into GC_grungy_pages */
+ nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
+ /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
+ nmaps, PG_REFERENCED, PG_MODIFIED); */
+ bufp = bufp + sizeof(struct prpageheader);
+ for (i = 0; i < nmaps; i++) {
+ map = (struct prasmap *)bufp;
+ vaddr = (ptr_t)(map -> pr_vaddr);
+ ps = map -> pr_pagesize;
+ np = map -> pr_npage;
+ /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
+ limit = vaddr + ps * np;
+ bufp += sizeof (struct prasmap);
+ for (current_addr = vaddr;
+ current_addr < limit; current_addr += ps){
+ if ((*bufp++) & PG_MODIFIED) {
+ register struct hblk * h = (struct hblk *) current_addr;
+
+ while ((ptr_t)h < current_addr + ps) {
+ register word index = PHT_HASH(h);
+
+ set_pht_entry_from_index(GC_grungy_pages, index);
+# ifdef SOLARIS_THREADS
+ {
+ register int slot = FRESH_PAGE_SLOT(h);
+
+ if (GC_fresh_pages[slot] == h) {
+ GC_fresh_pages[slot] = 0;
+ }
+ }
+# endif
+ h++;
+ }
+ }
+ }
+ bufp += sizeof(long) - 1;
+ bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
+ }
+ /* Update GC_written_pages. */
+ GC_or_pages(GC_written_pages, GC_grungy_pages);
+# ifdef SOLARIS_THREADS
+ /* Make sure that old stacks are considered completely clean */
+ /* unless written again. */
+ GC_old_stacks_are_fresh();
+# endif
+}
+
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ register word index = PHT_HASH(h);
+ register bool result;
+
+ result = get_pht_entry_from_index(GC_grungy_pages, index);
+# ifdef SOLARIS_THREADS
+ if (result && PAGE_IS_FRESH(h)) result = FALSE;
+ /* This happens only if page was declared fresh since */
+ /* the read_dirty call, e.g. because it's in an unused */
+ /* thread stack. It's OK to treat it as clean, in */
+ /* that case. And it's consistent with */
+ /* GC_page_was_ever_dirty. */
+# endif
+ return(result);
+}
+
+bool GC_page_was_ever_dirty(h)
+struct hblk *h;
+{
+ register word index = PHT_HASH(h);
+ register bool result;
+
+ result = get_pht_entry_from_index(GC_written_pages, index);
+# ifdef SOLARIS_THREADS
+ if (result && PAGE_IS_FRESH(h)) result = FALSE;
+# endif
+ return(result);
+}
+
+void GC_is_fresh(h, n)
+struct hblk *h;
+word n;
+{
+#   ifdef SOLARIS_THREADS
+      register word i;
+
+      if (GC_fresh_pages != 0) {
+        for (i = 0; i < n; i++) {
+          ADD_FRESH_PAGE(h + i);
+ }
+ }
+# endif
+}
+
+# endif /* PROC_VDB */
+
+
+# ifdef PCR_VDB
+
+# include "vd/PCR_VD.h"
+
+# define NPAGES (32*1024) /* 128 MB */
+
+PCR_VD_DB GC_grungy_bits[NPAGES];
+
+ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
+ /* HBLKSIZE aligned. */
+
+void GC_dirty_init()
+{
+ GC_dirty_maintained = TRUE;
+ /* For the time being, we assume the heap generally grows up */
+ GC_vd_base = GC_heap_sects[0].hs_start;
+ if (GC_vd_base == 0) {
+ ABORT("Bad initial heap segment");
+ }
+ if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
+ != PCR_ERes_okay) {
+ ABORT("dirty bit initialization failed");
+ }
+}
+
+void GC_read_dirty()
+{
+ /* lazily enable dirty bits on newly added heap sects */
+ {
+ static int onhs = 0;
+ int nhs = GC_n_heap_sects;
+ for( ; onhs < nhs; onhs++ ) {
+ PCR_VD_WriteProtectEnable(
+ GC_heap_sects[onhs].hs_start,
+ GC_heap_sects[onhs].hs_bytes );
+ }
+ }
+
+
+ if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
+ != PCR_ERes_okay) {
+ ABORT("dirty bit read failed");
+ }
+}
+
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
+ return(TRUE);
+ }
+ return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
+}
+
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+ PCR_VD_WriteProtectDisable(h, HBLKSIZE);
+ PCR_VD_WriteProtectEnable(h, HBLKSIZE);
+}
+
+# endif /* PCR_VDB */
+
+
+
+
--- /dev/null
+solaris_threads.c
+pcr_interface.c
+real_malloc.c
+mips_mach_dep.s
+rs6000_mach_dep.s
+alpha_mach_dep.s
+sparc_mach_dep.s
+PCR-Makefile
+setjmp_t.c
+SMakefile.amiga
+SCoptions.amiga
+README.amiga
+callprocs
+gc.man
+pc_excludes
+barrett_diagram
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 1:59 pm PDT */
+# include "gc_priv.h"
+
+# ifdef PCR
+/*
+ * Note that POSIX PCR requires an ANSI C compiler. Hence we are allowed
+ * to make the same assumption here.
+ * We wrap all of the allocator functions to avoid questions of
+ * compatibility between the prototyped and nonprototyped versions of the
+ * functions.
+ */
+# include "mm/PCR_MM.h"
+
+# define MY_MAGIC 17L
+
+void * GC_AllocProc(size_t size, PCR_Bool ptrFree, PCR_Bool clear )
+{
+ if (ptrFree) {
+ void * result = (void *)GC_malloc_atomic(size);
+ if (clear && result != 0) BZERO(result, size);
+ return(result);
+ } else {
+ return((void *)GC_malloc(size));
+ }
+}
+
+# define GC_ReallocProc GC_realloc
+
+# define GC_FreeProc GC_free
+
+typedef struct {
+ PCR_ERes (*ed_proc)(void *p, size_t size, PCR_Any data);
+ bool ed_pointerfree;
+ PCR_ERes ed_fail_code;
+ PCR_Any ed_client_data;
+} enumerate_data;
+
+void GC_enumerate_block(h, ed)
+register struct hblk *h;
+enumerate_data * ed;
+{
+ register hdr * hhdr;
+ register int sz;
+ word *p;
+ word * lim;
+
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+    if ((sz >= 0 && ed -> ed_pointerfree)
+        || (sz <= 0 && !(ed -> ed_pointerfree))) return;
+ if (sz < 0) sz = -sz;
+ lim = (word *)(h+1) - sz;
+ p = (word *)h;
+ do {
+ if (PCR_ERes_IsErr(ed -> ed_fail_code)) return;
+ ed -> ed_fail_code =
+ (*(ed -> ed_proc))(p, WORDS_TO_BYTES(sz), ed -> ed_client_data);
+ p+= sz;
+ } while (p <= lim);
+}
+
+struct PCR_MM_ProcsRep * GC_old_allocator = 0;
+
+PCR_ERes GC_EnumerateProc(
+ PCR_Bool ptrFree,
+ PCR_ERes (*proc)(void *p, size_t size, PCR_Any data),
+ PCR_Any data
+)
+{
+ enumerate_data ed;
+
+ ed.ed_proc = proc;
+ ed.ed_pointerfree = ptrFree;
+ ed.ed_fail_code = PCR_ERes_okay;
+ ed.ed_client_data = data;
+ GC_apply_to_all_blocks(GC_enumerate_block, &ed);
+ if (ed.ed_fail_code != PCR_ERes_okay) {
+ return(ed.ed_fail_code);
+ } else {
+ /* Also enumerate objects allocated by my predecessors */
+ return((*(GC_old_allocator->mmp_enumerate))(ptrFree, proc, data));
+ }
+}
+
+void GC_DummyFreeProc(void *p) {}
+
+void GC_DummyShutdownProc(void) {}
+
+struct PCR_MM_ProcsRep GC_Rep = {
+ MY_MAGIC,
+ GC_AllocProc,
+ GC_ReallocProc,
+ GC_DummyFreeProc, /* mmp_free */
+ GC_FreeProc, /* mmp_unsafeFree */
+ GC_EnumerateProc,
+ GC_DummyShutdownProc /* mmp_shutdown */
+};
+
+void GC_pcr_install()
+{
+ PCR_MM_Install(&GC_Rep, &GC_old_allocator);
+}
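+
+/* Usage sketch (hedged; the exact PCR startup sequencing is an      */
+/* assumption, not specified in this file): a PCR client makes the   */
+/* collector the process allocator early, before other allocation:   */
+/*                                                                   */
+/*      GC_pcr_install();   -- installs GC_Rep and saves the         */
+/*                             previous allocator in                 */
+/*                             GC_old_allocator, so its objects can  */
+/*                             still be enumerated                   */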
+# endif
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:04 pm PDT */
+
+
+# ifdef PCR
+/*
+ * This definition should go in its own file that includes no other
+ * header files. Otherwise, we risk not getting the underlying system
+ * malloc.
+ */
+# define PCR_NO_RENAME
+# include <stdlib.h>
+
+# ifdef __STDC__
+ char * real_malloc(size_t size)
+# else
+ char * real_malloc()
+ int size;
+# endif
+{
+ return((char *)malloc(size));
+}
+#endif /* PCR */
+
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:00 pm PDT */
+
+#include <stdio.h>
+#include "gc_priv.h"
+
+signed_word GC_mem_found = 0;
+			/* Number of longwords of memory reclaimed     */
+
+# ifdef FIND_LEAK
+static report_leak(p, sz)
+ptr_t p;
+word sz;
+{
+ if (HDR(p) -> hb_obj_kind == PTRFREE) {
+ GC_err_printf0("Leaked atomic object at ");
+ } else {
+ GC_err_printf0("Leaked composite object at ");
+ }
+ if (GC_debugging_started && GC_has_debug_info(p)) {
+ GC_print_obj(p);
+ } else {
+ GC_err_printf2("0x%lx (appr. size = %ld)\n",
+ (unsigned long)p,
+ (unsigned long)WORDS_TO_BYTES(sz));
+ }
+}
+
+# define FOUND_FREE(hblk, word_no) \
+ if (abort_if_found) { \
+ report_leak((long)hblk + WORDS_TO_BYTES(word_no), \
+ HDR(hblk) -> hb_sz); \
+ }
+# else
+# define FOUND_FREE(hblk, word_no)
+# endif
+
+/*
+ * reclaim phase
+ *
+ */
+
+
+/*
+ * Test whether a block is completely empty, i.e. contains no marked
+ * objects. This does not require the block to be in physical
+ * memory.
+ */
+
+bool GC_block_empty(hhdr)
+register hdr * hhdr;
+{
+ register word *p = (word *)(&(hhdr -> hb_marks[0]));
+ register word * plim =
+ (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
+ while (p < plim) {
+ if (*p++) return(FALSE);
+ }
+ return(TRUE);
+}
+
+# ifdef GATHERSTATS
+# define INCR_WORDS(sz) n_words_found += (sz)
+# else
+# define INCR_WORDS(sz)
+# endif
+/*
+ * Restore unmarked small objects in h of size sz to the object
+ * free list. Returns the new list.
+ * Clears unmarked objects.
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+register hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+register word sz;
+{
+ register int word_no;
+ register word *p, *q, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( mark_bit_from_hdr(hhdr, word_no) ) {
+ p += sz;
+ } else {
+ FOUND_FREE(hbp, word_no);
+ INCR_WORDS(sz);
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
+ /* Clear object, advance p to next object in the process */
+ q = p + sz;
+ p++; /* Skip link field */
+ while (p < q) {
+ *p++ = 0;
+ }
+ }
+ word_no += sz;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+}
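+
+/* Free-list representation used above (a sketch): reclaimed objects */
+/* are threaded through their first word via obj_link, and the rest  */
+/* of each object is zeroed.  After reclaiming two sz-word objects   */
+/* at p1 and then p2, the list looks like:                           */
+/*                                                                   */
+/*      list -> p2 : [p1, 0, ..., 0]                                 */
+/*              p1 : [old list head, 0, ..., 0]                      */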
+
+#ifndef SMALL_CONFIG
+
+/*
+ * A special case for 2 word composite objects (e.g. cons cells):
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+ register int i;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ p[start_displ+1] = 0; \
+ INCR_WORDS(2); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ for (i = 0; i < WORDSZ; i += 8) {
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ p += 8;
+ mark_word >>= 8;
+ }
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
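+
+/* The loop above examines one mark word at a time; each iteration of */
+/* the inner loop consumes 8 mark bits, i.e. four 2-word objects, by  */
+/* shifting mark_word right by 8.  Only bits at even offsets          */
+/* correspond to object starts, and the DO_OBJ expansions test        */
+/* exactly those.                                                     */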
+
+/*
+ * Another special case for 4 word composite objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ p[start_displ+1] = 0; \
+ p[start_displ+2] = 0; \
+ p[start_displ+3] = 0; \
+ INCR_WORDS(4); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(4);
+ DO_OBJ(8);
+ DO_OBJ(12);
+ DO_OBJ(16);
+ DO_OBJ(20);
+ DO_OBJ(24);
+ DO_OBJ(28);
+# if CPP_WORDSZ == 64
+ DO_OBJ(32);
+ DO_OBJ(36);
+ DO_OBJ(40);
+ DO_OBJ(44);
+ DO_OBJ(48);
+ DO_OBJ(52);
+ DO_OBJ(56);
+ DO_OBJ(60);
+# endif
+ p += WORDSZ;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+#endif /* !SMALL_CONFIG */
+
+/* The same thing, but don't clear objects: */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+register hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+register word sz;
+{
+ register int word_no;
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, word_no) ) {
+ FOUND_FREE(hbp, word_no);
+ INCR_WORDS(sz);
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
+ }
+ p += sz;
+ word_no += sz;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+}
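+
+/* GC_reclaim_uninit differs from GC_reclaim_clear only in that       */
+/* reclaimed objects are not zeroed; it is selected (see              */
+/* GC_reclaim_small_nonempty_block) for kinds with ok_init clear,     */
+/* typically pointer-free objects whose contents need not be cleaned. */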
+
+#ifndef SMALL_CONFIG
+/*
+ * Another special case for 2 word atomic objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+ register int i;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(2); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ for (i = 0; i < WORDSZ; i += 8) {
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ p += 8;
+ mark_word >>= 8;
+ }
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/*
+ * Another special case for 4 word atomic objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(4); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(4);
+ DO_OBJ(8);
+ DO_OBJ(12);
+ DO_OBJ(16);
+ DO_OBJ(20);
+ DO_OBJ(24);
+ DO_OBJ(28);
+# if CPP_WORDSZ == 64
+ DO_OBJ(32);
+ DO_OBJ(36);
+ DO_OBJ(40);
+ DO_OBJ(44);
+ DO_OBJ(48);
+ DO_OBJ(52);
+ DO_OBJ(56);
+ DO_OBJ(60);
+# endif
+ p += WORDSZ;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/* Finally the one word case, which never requires any clearing: */
+/*ARGSUSED*/
+ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+ register int i;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(1); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ for (i = 0; i < WORDSZ; i += 4) {
+ DO_OBJ(0);
+ DO_OBJ(1);
+ DO_OBJ(2);
+ DO_OBJ(3);
+ p += 4;
+ mark_word >>= 4;
+ }
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+#endif /* !SMALL_CONFIG */
+
+/*
+ * Restore unmarked small objects in the block pointed to by hbp
+ * to the appropriate object free list.
+ * If entirely empty blocks are to be completely deallocated, then
+ * caller should perform that check.
+ */
+void GC_reclaim_small_nonempty_block(hbp, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+int abort_if_found; /* Abort if a reclaimable object is found */
+{
+ hdr * hhdr;
+ register word sz; /* size of objects in current block */
+ register struct obj_kind * ok;
+ register ptr_t * flh;
+
+ hhdr = HDR(hbp);
+ sz = hhdr -> hb_sz;
+ hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+ flh = &(ok -> ok_freelist[sz]);
+ GC_write_hint(hbp);
+
+ if (ok -> ok_init) {
+ switch(sz) {
+# ifndef SMALL_CONFIG
+ case 1:
+ *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 2:
+ *flh = GC_reclaim_clear2(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 4:
+ *flh = GC_reclaim_clear4(hbp, hhdr, *flh, abort_if_found);
+ break;
+# endif
+ default:
+ *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh, abort_if_found);
+ break;
+ }
+ } else {
+ switch(sz) {
+# ifndef SMALL_CONFIG
+ case 1:
+ *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 2:
+ *flh = GC_reclaim_uninit2(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 4:
+ *flh = GC_reclaim_uninit4(hbp, hhdr, *flh, abort_if_found);
+ break;
+# endif
+ default:
+ *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh, abort_if_found);
+ break;
+ }
+ }
+}
+
+/*
+ * Restore an unmarked large object or an entirely empty block of small objects
+ * to the heap block free list.
+ * Otherwise enqueue the block for later processing
+ * by GC_reclaim_small_nonempty_block.
+ * If abort_if_found is TRUE, then process any block immediately.
+ */
+void GC_reclaim_block(hbp, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+word abort_if_found; /* Abort if a reclaimable object is found */
+{
+ register hdr * hhdr;
+ register word sz; /* size of objects in current block */
+ register struct obj_kind * ok;
+ struct hblk ** rlh;
+
+ hhdr = HDR(hbp);
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+
+ if( sz > MAXOBJSZ ) { /* 1 big object */
+ if( !mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
+ FOUND_FREE(hbp, HDR_WORDS);
+# ifdef GATHERSTATS
+ GC_mem_found += sz;
+# endif
+ GC_freehblk(hbp);
+ }
+ } else {
+ bool empty = GC_block_empty(hhdr);
+ if (abort_if_found) {
+ GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
+ } else if (empty) {
+# ifdef GATHERSTATS
+ GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
+# endif
+ GC_freehblk(hbp);
+ } else {
+ /* group of smaller objects, enqueue the real work */
+ rlh = &(ok -> ok_reclaim_list[sz]);
+ hhdr -> hb_next = *rlh;
+ *rlh = hbp;
+ }
+ }
+}
+
+/* Routines to gather and print heap block info,       */
+/* intended for debugging.  Otherwise they should be   */
+/* called with the allocation lock held.               */
+static word number_of_blocks;
+static word total_bytes;
+
+/* Number of set bits in a word. Not performance critical. */
+static int set_bits(n)
+word n;
+{
+ register word m = n;
+ register int result = 0;
+
+ while (m > 0) {
+ if (m & 1) result++;
+ m >>= 1;
+ }
+ return(result);
+}
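+
+/* An equivalent formulation (not used here, noted for reference):   */
+/* repeatedly clearing the lowest set bit counts only the set bits,  */
+/* iterating once per 1-bit rather than once per bit position:       */
+/*                                                                   */
+/*      while (m != 0) { m &= m - 1; result++; }                     */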
+
+/* Return the number of set mark bits in the given header */
+int GC_n_set_marks(hhdr)
+hdr * hhdr;
+{
+ register int result = 0;
+ register int i;
+
+ for (i = 0; i < MARK_BITS_SZ; i++) {
+ result += set_bits(hhdr -> hb_marks[i]);
+ }
+ return(result);
+}
+
+/*ARGSUSED*/
+void GC_print_block_descr(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+    register word bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
+
+ GC_printf3("(%lu:%lu,%lu)", (unsigned long)(hhdr -> hb_obj_kind),
+ (unsigned long)bytes,
+ (unsigned long)(GC_n_set_marks(hhdr)));
+ bytes += HDR_BYTES + HBLKSIZE-1;
+ bytes &= ~(HBLKSIZE-1);
+ total_bytes += bytes;
+ number_of_blocks++;
+}
+
+void GC_print_block_list()
+{
+ GC_printf0("(kind(0=ptrfree,1=normal,2=unc.,3=stubborn):size_in_bytes, #_marks_set)\n");
+ number_of_blocks = 0;
+ total_bytes = 0;
+ GC_apply_to_all_blocks(GC_print_block_descr, (word)0);
+ GC_printf2("\nblocks = %lu, bytes = %lu\n",
+ (unsigned long)number_of_blocks,
+ (unsigned long)total_bytes);
+}
+
+/*
+ * Do the same thing on the entire heap, after first clearing small object
+ * free lists (if we are not just looking for leaks).
+ */
+void GC_start_reclaim(abort_if_found)
+int abort_if_found;		/* Abort if a reclaimable object is found */
+{
+ int kind;
+
+ /* Clear reclaim- and free-lists */
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ register ptr_t *fop;
+ register ptr_t *lim;
+ register struct hblk ** hbpp;
+ register struct hblk ** hlim;
+
+ if (!abort_if_found) {
+ lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
+ for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
+ *fop = 0;
+ }
+ } /* otherwise free list objects are marked, */
+	  /* and it's safe to leave them			*/
+ hlim = &(GC_obj_kinds[kind].ok_reclaim_list[MAXOBJSZ+1]);
+ for( hbpp = GC_obj_kinds[kind].ok_reclaim_list;
+ hbpp < hlim; hbpp++ ) {
+ *hbpp = 0;
+ }
+ }
+
+# ifdef PRINTBLOCKS
+ GC_printf0("GC_reclaim: current block sizes:\n");
+ GC_print_block_list();
+# endif
+
+ /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
+ /* or enqueue the block for later processing. */
+ GC_apply_to_all_blocks(GC_reclaim_block, (word)abort_if_found);
+
+}
+
+/*
+ * Sweep blocks of the indicated object size and kind until either the
+ * appropriate free list is nonempty, or there are no more blocks to
+ * sweep.
+ */
+void GC_continue_reclaim(sz, kind)
+word sz; /* words */
+int kind;
+{
+ register hdr * hhdr;
+ register struct hblk * hbp;
+ register struct obj_kind * ok = &(GC_obj_kinds[kind]);
+ struct hblk ** rlh = &(ok -> ok_reclaim_list[sz]);
+ ptr_t *flh = &(ok -> ok_freelist[sz]);
+
+
+ while ((hbp = *rlh) != 0) {
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ if (*flh != 0) break;
+ }
+}
+
+/*
+ * Reclaim all blocks that have been recently reclaimed.
+ * Clear lists of blocks waiting to be reclaimed.
+ * Must be done before clearing mark bits with the world running,
+ * since otherwise a subsequent reclamation of a block would see
+ * the wrong mark bits.
+ * SHOULD PROBABLY BE INCREMENTAL
+ */
+void GC_reclaim_or_delete_all()
+{
+ register word sz;
+ register int kind;
+ register hdr * hhdr;
+ register struct hblk * hbp;
+ register struct obj_kind * ok;
+ struct hblk ** rlh;
+# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE done_time;
+
+ GET_TIME(start_time);
+# endif
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ ok = &(GC_obj_kinds[kind]);
+ for (sz = 1; sz <= MAXOBJSZ; sz++) {
+ rlh = &(ok -> ok_reclaim_list[sz]);
+ while ((hbp = *rlh) != 0) {
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ if (hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
+ /* It's likely we'll need it this time, too */
+ /* It's been touched recently, so this */
+ /* shouldn't trigger paging. */
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ }
+ }
+ }
+ }
+# ifdef PRINTTIMES
+ GET_TIME(done_time);
+ GC_printf1("Disposing of reclaim lists took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
+# endif
+}
--- /dev/null
+ .csect
+ .set r0,0
+ .set r1,1
+ .set r2,2
+ .set r3,3
+ .set r4,4
+ .set r5,5
+ .set r6,6
+ .set r7,7
+ .set r8,8
+ .set r9,9
+ .set r10,10
+ .set r11,11
+ .set r12,12
+ .set r13,13
+ .set r14,14
+ .set r15,15
+ .set r16,16
+ .set r17,17
+ .set r18,18
+ .set r19,19
+ .set r20,20
+ .set r21,21
+ .set r22,22
+ .set r23,23
+ .set r24,24
+ .set r25,25
+ .set r26,26
+ .set r27,27
+ .set r28,28
+ .set r29,29
+ .set r30,30
+ .set r31,31
+
+ # Mark from machine registers that are saved by C compiler
+ .globl .GC_push_regs
+.GC_push_regs:
+ .extern .GC_push_one
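+	# Call pattern (a sketch; conventions assumed from the AIX ABI,
+	# not stated in this file): each callee-save register is copied
+	# to r3, the first argument register, and .GC_push_one is called
+	# on it.  The cror after each bl appears to serve as a no-op
+	# filler in the external-call sequence.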
+ stu r1,-64(r1) # reserve stack frame
+ mflr r0 # save link register
+ st r0,0x48(r1)
+ oril r3,r2,0x0 # mark from r2
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r13,0x0 # mark from r13-r31
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r14,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r15,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r16,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r17,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r18,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r19,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r20,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r21,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r22,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r23,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r24,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r25,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r26,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r27,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r28,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r29,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r30,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ oril r3,r31,0x0
+ bl .GC_push_one
+ cror 15,15,15
+ l r0,0x48(r1)
+ mtlr r0
+ ai r1,r1,64
+ br
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:01 pm PDT */
+
+/* Check whether setjmp actually saves registers in jmp_buf. */
+/* If it doesn't, the generic mark_regs code won't work. */
+/* Compilers vary as to whether they will put x in a */
+/* (callee-save) register without -O. The code is */
+/* contrived such that any decent compiler should put x in */
+/* a callee-save register with -O.  Thus it is		*/
+/* recommended that this be run optimized. (If the machine */
+/* has no callee-save registers, then the generic code is */
+/* safe, but this will not be noticed by this piece of */
+/* code.) */
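+/* A build-and-run sketch (the output name is illustrative; compile */
+/* in the distribution directory so "config.h" is found, and, per   */
+/* the note above, compile optimized):                              */
+/*                                                                  */
+/*      cc -O setjmp_t.c -o setjmp_test                             */
+/*      ./setjmp_test                                               */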
+#include <stdio.h>
+#include <string.h>
+#include <setjmp.h>
+#include "config.h"
+
+#ifdef __hpux
+/* X/OPEN PG3 defines "void* sbrk();" and this clashes with the definition */
+/* in gc_private.h, so we set the clock backwards with _CLASSIC_XOPEN_TYPES. */
+/* This is for HP-UX 8.0. */
+/* sbrk() is not used in this file, of course. W. Underwood, 15 Jun 1992 */
+#define _CLASSIC_XOPEN_TYPES
+#include <unistd.h>
+int
+getpagesize()
+{
+ return sysconf(_SC_PAGE_SIZE);
+}
+#endif
+
+#if defined(SUNOS5)
+#define _CLASSIC_XOPEN_TYPES
+#include <unistd.h>
+int
+getpagesize()
+{
+ return sysconf(_SC_PAGESIZE);
+}
+#endif
+
+#ifdef _AUX_SOURCE
+#include <sys/mmu.h>
+int
+getpagesize()
+{
+ return PAGESIZE;
+}
+#endif
+
+#ifdef AMIGA
+int
+getpagesize()
+{
+ return(4096);
+}
+#endif
+
+#ifdef __OS2__
+#define INCL_DOSFILEMGR
+#define INCL_DOSMISC
+#define INCL_DOSERRORS
+#include <os2.h>
+
+int
+getpagesize()
+{
+ ULONG result[1];
+
+ if (DosQuerySysInfo(QSV_PAGE_SIZE, QSV_PAGE_SIZE,
+ (void *)result, sizeof(ULONG)) != NO_ERROR) {
+ fprintf(stderr, "DosQuerySysInfo failed\n");
+ result[0] = 4096;
+ }
+ return((int)(result[0]));
+}
+#endif
+
+struct {char a_a; char * a_b;} a;
+
+int * nested_sp()
+{
+ int dummy;
+
+ return(&dummy);
+}
+
+main()
+{
+ int dummy;
+ long ps = getpagesize();
+ jmp_buf b;
+ register int x = strlen("a"); /* 1, slightly disguised */
+ static int y = 0;
+
+ if (nested_sp() < &dummy) {
+ printf("Stack appears to grow down, which is the default.\n");
+	printf("A good guess for STACKBOTTOM on this machine is 0x%lX.\n",
+ ((long)(&dummy) + ps) & ~(ps-1));
+ } else {
+ printf("Stack appears to grow up.\n");
+ printf("Define STACK_GROWS_UP in gc_private.h\n");
+	printf("A good guess for STACKBOTTOM on this machine is 0x%lX.\n",
+ ((long)(&dummy) + ps) & ~(ps-1));
+ }
+ printf("Note that this may vary between machines of ostensibly\n");
+ printf("the same architecture (e.g. Sun 3/50s and 3/80s).\n");
+    printf("A good guess for ALIGNMENT on this machine is %lu.\n",
+ (unsigned long)(&(a.a_b))-(unsigned long)(&a));
+
+ /* Encourage the compiler to keep x in a callee-save register */
+ x = 2*x-1;
+ printf("");
+ x = 2*x-1;
+ setjmp(b);
+ if (y == 1) {
+ if (x == 2) {
+	    printf("Generic mark_regs code probably won't work\n");
+# if defined(SPARC) || defined(RS6000) || defined(VAX) || defined(MIPS) || defined(M68K) || defined(I386) || defined(NS32K) || defined(RT)
+ printf("Assembly code supplied\n");
+# else
+ printf("Need assembly code\n");
+# endif
+ } else if (x == 1) {
+ printf("Generic mark_regs code may work\n");
+ } else {
+ printf("Very strange setjmp implementation\n");
+ }
+ }
+ y++;
+ x = 2;
+ if (y == 1) longjmp(b,1);
+ return(0);
+}
+
+int g(x)
+int x;
+{
+ return(x);
+}
--- /dev/null
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Support code for Solaris threads. Provides functionality we wish Sun
+ * had provided. Relies on some information we probably shouldn't rely on.
+ */
+/* Boehm, May 19, 1994 2:05 pm PDT */
+
+# if defined(SOLARIS_THREADS)
+
+# include "gc_priv.h"
+# include <thread.h>
+# include <synch.h>
+# include <sys/types.h>
+# include <sys/mman.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+# define _CLASSIC_XOPEN_TYPES
+# include <unistd.h>
+
+#undef thr_join
+#undef thr_create
+#undef thr_suspend
+#undef thr_continue
+
+mutex_t GC_thr_lock; /* Acquired before allocation lock */
+cond_t GC_prom_join_cv;		/* Broadcast when any thread terminates	*/
+cond_t GC_create_cv; /* Signalled when a new undetached */
+ /* thread starts. */
+
+bool GC_thr_initialized = FALSE;
+
+size_t GC_min_stack_sz;
+
+size_t GC_page_sz;
+
+# define N_FREE_LISTS 25
+ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
+ /* GC_stack_free_lists[i] is free list for stacks of */
+ /* size GC_min_stack_sz*2**i. */
+ /* Free lists are linked through first word. */
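+		/* Example (illustrative numbers, not fixed by this	*/
+		/* file): if GC_min_stack_sz is 8K, list 0 holds 8K	*/
+		/* stacks, list 1 16K, list 2 32K, and a request for	*/
+		/* 20K rounds up to index 2 (32K).			*/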
+
+/* Return a stack of size at least *stack_size. *stack_size is */
+/* replaced by the actual stack size. */
+/* Caller holds GC_thr_lock. */
+ptr_t GC_stack_alloc(size_t * stack_size)
+{
+ register size_t requested_sz = *stack_size;
+ register size_t search_sz = GC_min_stack_sz;
+ register int index = 0; /* = log2(search_sz/GC_min_stack_sz) */
+ register ptr_t result;
+
+ while (search_sz < requested_sz) {
+ search_sz *= 2;
+ index++;
+ }
+ if ((result = GC_stack_free_lists[index]) == 0
+ && (result = GC_stack_free_lists[index+1]) != 0) {
+ /* Try next size up. */
+ search_sz *= 2; index++;
+ }
+ if (result != 0) {
+ GC_stack_free_lists[index] = *(ptr_t *)result;
+ } else {
+ result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
+ result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1));
+ /* Protect hottest page to detect overflow. */
+ mprotect(result, GC_page_sz, PROT_NONE);
+ GC_is_fresh((struct hblk *)result, divHBLKSZ(search_sz));
+ result += GC_page_sz;
+ }
+ *stack_size = search_sz;
+ return(result);
+}
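+
+/* Note on the allocation path above: search_sz + 2 pages are        */
+/* obtained so the result can be page aligned; the lowest page is    */
+/* then made inaccessible (PROT_NONE) so that overflow of a          */
+/* downward-growing stack traps instead of silently corrupting       */
+/* adjacent memory, and the usable stack begins one page higher.     */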
+
+/* Caller holds GC_thr_lock. */
+void GC_stack_free(ptr_t stack, size_t size)
+{
+ register int index = 0;
+ register size_t search_sz = GC_min_stack_sz;
+
+ while (search_sz < size) {
+ search_sz *= 2;
+ index++;
+ }
+ if (search_sz != size) ABORT("Bad stack size");
+ *(ptr_t *)stack = GC_stack_free_lists[index];
+ GC_stack_free_lists[index] = stack;
+}
+
+void GC_my_stack_limits();
+
+/* Notify virtual dirty bit implementation that known empty parts of */
+/* stacks do not contain useful data. */
+void GC_old_stacks_are_fresh()
+{
+ register int i;
+ register ptr_t p;
+ register size_t sz;
+ register struct hblk * h;
+ int dummy;
+
+ if (!GC_thr_initialized) GC_thr_init();
+ for (i = 0, sz= GC_min_stack_sz; i < N_FREE_LISTS;
+ i++, sz *= 2) {
+ for (p = GC_stack_free_lists[i]; p != 0; p = *(ptr_t *)p) {
+ h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
+ if ((ptr_t)h == p) {
+ GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
+ } else {
+ GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
+ BZERO(p, (ptr_t)h - p);
+ }
+ }
+ }
+ GC_my_stack_limits();
+}
+
+/* The set of all known threads. We intercept thread creation and */
+/* joins. We never actually create detached threads. We allocate all */
+/* new thread stacks ourselves. These allow us to maintain this */
+/* data structure. */
+/* Protected by GC_thr_lock. */
+/* Some of this should be declared volatile, but that's inconsistent	*/
+/* with some library routine declarations. In particular, the */
+/* definition of cond_t doesn't mention volatile! */
+typedef struct GC_Thread_Rep {
+ struct GC_Thread_Rep * next;
+ thread_t id;
+ word flags;
+# define FINISHED 1 /* Thread has exited. */
+# define DETACHED 2 /* Thread is intended to be detached. */
+# define CLIENT_OWNS_STACK 4
+ /* Stack was supplied by client. */
+# define SUSPENDED 8 /* Currently suspended. */
+ ptr_t stack;
+ size_t stack_size;
+ cond_t join_cv;
+ void * status;
+} * GC_thread;
+
+# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
+volatile GC_thread GC_threads[THREAD_TABLE_SZ];
+
+/* Add a thread to GC_threads. We assume it wasn't already there. */
+/* Caller holds GC_thr_lock if there is > 1 thread. */
+/* Initial caller may hold allocation lock. */
+GC_thread GC_new_thread(thread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ GC_thread result;
+ static struct GC_Thread_Rep first_thread;
+ static bool first_thread_used = FALSE;
+
+ if (!first_thread_used) {
+ result = &first_thread;
+ first_thread_used = TRUE;
+	/* Don't acquire allocation lock, since we may already hold it. */
+ } else {
+ result = GC_NEW(struct GC_Thread_Rep);
+ }
+ if (result == 0) return(0);
+ result -> id = id;
+ result -> next = GC_threads[hv];
+ GC_threads[hv] = result;
+ /* result -> finished = 0; */
+ (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
+ return(result);
+}
+
+/* Delete a thread from GC_threads. We assume it is there. */
+/* (The code intentionally traps if it wasn't.) */
+/* Caller holds GC_thr_lock. */
+void GC_delete_thread(thread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+ register GC_thread prev = 0;
+
+ while (p -> id != id) {
+ prev = p;
+ p = p -> next;
+ }
+ if (prev == 0) {
+ GC_threads[hv] = p -> next;
+ } else {
+ prev -> next = p -> next;
+ }
+}
+
+/* Return the GC_thread corresponding to a given thread_t.	*/
+/* Returns 0 if it's not there. */
+/* Caller holds GC_thr_lock. */
+GC_thread GC_lookup_thread(thread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+
+ while (p != 0 && p -> id != id) p = p -> next;
+ return(p);
+}
+
+/* Notify dirty bit implementation of unused parts of my stack. */
+void GC_my_stack_limits()
+{
+ int dummy;
+ register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
+ register GC_thread me = GC_lookup_thread(thr_self());
+ register size_t stack_size = me -> stack_size;
+ register ptr_t stack;
+
+ if (stack_size == 0) {
+ /* original thread */
+ struct rlimit rl;
+
+ if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
+ /* Empirically, what should be the stack page with lowest */
+ /* address is actually inaccessible. */
+ stack_size = ((word)rl.rlim_cur & ~(HBLKSIZE-1)) - GC_page_sz;
+ stack = GC_stackbottom - stack_size + GC_page_sz;
+ } else {
+ stack = me -> stack;
+ }
+ if (stack > hottest || stack + stack_size < hottest) {
+ ABORT("sp out of bounds");
+ }
+ GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
+}
+
+
+/* Caller holds allocation lock. */
+void GC_stop_world()
+{
+ thread_t my_thread = thr_self();
+ register int i;
+ register GC_thread p;
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread && !(p -> flags & SUSPENDED)) {
+ if (thr_suspend(p -> id) < 0) ABORT("thr_suspend failed");
+ }
+ }
+ }
+}
+
+/* Caller holds allocation lock. */
+void GC_start_world()
+{
+ thread_t my_thread = thr_self();
+ register int i;
+ register GC_thread p;
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread && !(p -> flags & SUSPENDED)) {
+ if (thr_continue(p -> id) < 0) ABORT("thr_continue failed");
+ }
+ }
+ }
+}
+
+
+void GC_push_all_stacks()
+{
+ /* We assume the world is stopped. */
+ register int i;
+ register GC_thread p;
+ word dummy;
+ register ptr_t sp = (ptr_t) (&dummy);
+ register ptr_t bottom, top;
+ struct rlimit rl;
+
+# define PUSH(bottom,top) \
+ if (GC_dirty_maintained) { \
+ GC_push_dirty((bottom), (top), GC_page_was_ever_dirty, \
+ GC_push_all_stack); \
+ } else { \
+ GC_push_all((bottom), (top)); \
+ }
+ if (!GC_thr_initialized) GC_thr_init();
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> stack_size != 0) {
+ bottom = p -> stack;
+ top = p -> stack + p -> stack_size;
+ } else {
+ /* The original stack. */
+ if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
+ bottom = GC_stackbottom - rl.rlim_cur + GC_page_sz;
+ top = GC_stackbottom;
+ }
+ if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
+ PUSH(bottom, top);
+ }
+ }
+}
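+
+/* Rationale for the sp adjustment above: if the collector's own      */
+/* stack pointer lies within a registered stack, only [sp, top) is    */
+/* in use, so scanning starts at sp; the dead region below sp could   */
+/* otherwise retain stale pointers.                                   */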
+
+/* The only thread that ever really performs a thr_join. */
+void * GC_thr_daemon(void * dummy)
+{
+ void *status;
+ thread_t departed;
+ register GC_thread t;
+ register int i;
+ register int result;
+
+ for(;;) {
+ start:
+ result = thr_join((thread_t)0, &departed, &status);
+ mutex_lock(&GC_thr_lock);
+ if (result != 0) {
+ /* No more threads; wait for create. */
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (t = GC_threads[i]; t != 0; t = t -> next) {
+ if (!(t -> flags & (DETACHED | FINISHED))) {
+ mutex_unlock(&GC_thr_lock);
+ goto start; /* Thread started just before we */
+ /* acquired the lock. */
+ }
+ }
+ }
+ cond_wait(&GC_create_cv, &GC_thr_lock);
+ mutex_unlock(&GC_thr_lock);
+ goto start;
+ }
+ t = GC_lookup_thread(departed);
+ if (!(t -> flags & CLIENT_OWNS_STACK)) {
+ GC_stack_free(t -> stack, t -> stack_size);
+ }
+ if (t -> flags & DETACHED) {
+ GC_delete_thread(departed);
+ } else {
+ t -> status = status;
+ t -> flags |= FINISHED;
+ cond_signal(&(t -> join_cv));
+ cond_broadcast(&GC_prom_join_cv);
+ }
+ mutex_unlock(&GC_thr_lock);
+ }
+}
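+
+/* Design note: all real thr_join calls are funneled through this    */
+/* daemon so the collector always learns of thread termination and   */
+/* can reclaim the stacks it allocated.  Client joins (GC_thr_join)  */
+/* instead wait on per-thread condition variables, or on             */
+/* GC_prom_join_cv for "join anyone", which the daemon signals or    */
+/* broadcasts after recording the departed thread's status.          */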
+
+GC_thr_init()
+{
+ GC_thread t;
+ /* This gets called from the first thread creation, so */
+ /* mutual exclusion is not an issue. */
+ GC_thr_initialized = TRUE;
+ GC_min_stack_sz = ((thr_min_stack() + HBLKSIZE-1) & ~(HBLKSIZE - 1));
+ GC_page_sz = sysconf(_SC_PAGESIZE);
+ mutex_init(&GC_thr_lock, USYNC_THREAD, 0);
+ cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
+ cond_init(&GC_create_cv, USYNC_THREAD, 0);
+ /* Add the initial thread, so we can stop it. */
+ t = GC_new_thread(thr_self());
+ t -> stack_size = 0;
+ t -> flags = DETACHED;
+ if (thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
+ 0 /* arg */, THR_DETACHED | THR_DAEMON,
+ 0 /* thread_id */) != 0) {
+    	ABORT("Can't fork daemon");
+ }
+
+}
+
+/* We acquire the allocation lock to prevent races with */
+/* stopping/starting world. */
+int GC_thr_suspend(thread_t target_thread)
+{
+ GC_thread t;
+ int result;
+
+ mutex_lock(&GC_thr_lock);
+ LOCK();
+ result = thr_suspend(target_thread);
+ if (result == 0) {
+ t = GC_lookup_thread(target_thread);
+ if (t == 0) ABORT("thread unknown to GC");
+ t -> flags |= SUSPENDED;
+ }
+ UNLOCK();
+ mutex_unlock(&GC_thr_lock);
+ return(result);
+}
+
+int GC_thr_continue(thread_t target_thread)
+{
+ GC_thread t;
+ int result;
+
+ mutex_lock(&GC_thr_lock);
+ LOCK();
+ result = thr_continue(target_thread);
+ if (result == 0) {
+ t = GC_lookup_thread(target_thread);
+ if (t == 0) ABORT("thread unknown to GC");
+ t -> flags &= ~SUSPENDED;
+ }
+ UNLOCK();
+ mutex_unlock(&GC_thr_lock);
+ return(result);
+}
+
+int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
+{
+ register GC_thread t;
+ int result = 0;
+
+ mutex_lock(&GC_thr_lock);
+ if (wait_for == 0) {
+ register int i;
+ register bool thread_exists;
+
+ for (;;) {
+ thread_exists = FALSE;
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (t = GC_threads[i]; t != 0; t = t -> next) {
+ if (!(t -> flags & DETACHED)) {
+ if (t -> flags & FINISHED) {
+ goto found;
+ }
+ thread_exists = TRUE;
+ }
+ }
+ }
+ if (!thread_exists) {
+ result = ESRCH;
+ goto out;
+ }
+ cond_wait(&GC_prom_join_cv, &GC_thr_lock);
+ }
+ } else {
+ t = GC_lookup_thread(wait_for);
+ if (t == 0 || t -> flags & DETACHED) {
+ result = ESRCH;
+ goto out;
+ }
+ if (wait_for == thr_self()) {
+ result = EDEADLK;
+ goto out;
+ }
+ while (!(t -> flags & FINISHED)) {
+ cond_wait(&(t -> join_cv), &GC_thr_lock);
+ }
+
+ }
+ found:
+ if (status) *status = t -> status;
+ if (departed) *departed = t -> id;
+ cond_destroy(&(t -> join_cv));
+ GC_delete_thread(t -> id);
+ out:
+ mutex_unlock(&GC_thr_lock);
+ return(result);
+}
+
+
+int
+GC_thr_create(void *stack_base, size_t stack_size,
+ void *(*start_routine)(void *), void *arg, long flags,
+ thread_t *new_thread)
+{
+ int result;
+ GC_thread t;
+ thread_t my_new_thread;
+ word my_flags = 0;
+ void * stack = stack_base;
+
+ if (!GC_thr_initialized) GC_thr_init();
+ mutex_lock(&GC_thr_lock);
+ if (stack == 0) {
+ if (stack_size == 0) stack_size = GC_min_stack_sz;
+ stack = (void *)GC_stack_alloc(&stack_size);
+ if (stack == 0) {
+ mutex_unlock(&GC_thr_lock);
+ return(ENOMEM);
+ }
+ } else {
+ my_flags |= CLIENT_OWNS_STACK;
+ }
+ if (flags & THR_DETACHED) my_flags |= DETACHED;
+ if (flags & THR_SUSPENDED) my_flags |= SUSPENDED;
+ result = thr_create(stack, stack_size, start_routine,
+ arg, flags & ~THR_DETACHED, &my_new_thread);
+ if (result == 0) {
+ t = GC_new_thread(my_new_thread);
+ t -> flags = my_flags;
+ if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
+ t -> stack = stack;
+ t -> stack_size = stack_size;
+ if (new_thread != 0) *new_thread = my_new_thread;
+ cond_signal(&GC_create_cv);
+ } else if (!(my_flags & CLIENT_OWNS_STACK)) {
+ GC_stack_free(stack, stack_size);
+ }
+ mutex_unlock(&GC_thr_lock);
+ return(result);
+}
+
+# else
+
+#ifndef LINT
+ int GC_no_sunOS_threads;
+#endif
+
+# endif /* SOLARIS_THREADS */
--- /dev/null
+! SPARCompiler 3.0 and later apparently no longer handles
+! asm outside functions. So we need a separate .s file
+! This is only set up for SunOS 5, not SunOS 4.
+! Assumes this is called before the stack contents are
+! examined.
+
+ .seg "text"
+ .globl GC_save_regs_in_stack
+ .globl GC_push_regs
+GC_save_regs_in_stack:
+GC_push_regs:
+ ta 0x3 ! ST_FLUSH_WINDOWS
+ mov %sp,%o0
+ retl
+ nop
+
+ .globl GC_clear_stack_inner
+GC_clear_stack_inner:
+ mov %sp,%o2 ! Save sp
+ add %sp,-8,%o3 ! p = sp-8
+ clr %g1 ! [g0,g1] = 0
+ add %o1,-0x60,%sp ! Move sp out of the way,
+ ! so that traps still work.
+ ! Includes some extra words
+ ! so we can be sloppy below.
+loop:
+ std %g0,[%o3] ! *(long long *)p = 0
+ cmp %o3,%o1
+ bgu loop ! if (p > limit) goto loop
+ add %o3,-8,%o3 ! p -= 8 (delay slot)
+ retl
+ mov %o2,%sp ! Restore sp., delay slot
+
+
+
+
+
+
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:11 pm PDT */
+
+
+#include "gc_priv.h"
+
+# ifdef STUBBORN_ALLOC
+/* Stubborn object (hard to change, nearly immutable) allocation. */
+
+extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
+
+#define GENERAL_MALLOC(lb,k) \
+ (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+
+/* Data structure representing immutable objects that */
+/* are still being initialized. */
+/* This is a bit baroque in order to avoid acquiring */
+/* the lock twice for a typical allocation. */
+
+extern_ptr_t * GC_changing_list_start;
+
+# ifdef THREADS
+ VOLATILE extern_ptr_t * VOLATILE GC_changing_list_current;
+# else
+ extern_ptr_t * GC_changing_list_current;
+# endif
+ /* Points at last added element. Also (ab)used for */
+ /* synchronization. Updates and reads are assumed atomic. */
+
+extern_ptr_t * GC_changing_list_limit;
+ /* Points at the last word of the buffer, which is always 0 */
+ /* All entries in (GC_changing_list_current, */
+ /* GC_changing_list_limit] are 0 */
+
+
+void GC_stubborn_init()
+{
+# define INIT_SIZE 10
+
+ GC_changing_list_start = (extern_ptr_t *)
+ GC_generic_malloc_inner(
+ (word)(INIT_SIZE * sizeof(extern_ptr_t)),
+ PTRFREE);
+    if (GC_changing_list_start == 0) {
+        GC_err_printf0("Insufficient space to start up\n");
+        ABORT("GC_stubborn_init: out of space");
+    }
+    BZERO(GC_changing_list_start,
+    	  INIT_SIZE * sizeof(extern_ptr_t));
+ GC_changing_list_current = GC_changing_list_start;
+ GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
+ * GC_changing_list_limit = 0;
+}
+
+/* Compact and possibly grow the changing list.  The old copy is	*/
+/* left alone. Lock must be held. */
+/* When called GC_changing_list_current == GC_changing_list_limit */
+/* which is one past the current element. */
+/* When we finish GC_changing_list_current again points one past last */
+/* element. */
+/* Invariant while this is running: GC_changing_list_current */
+/* points at a word containing 0. */
+/* Returns FALSE on failure. */
+bool GC_compact_changing_list()
+{
+ register extern_ptr_t *p, *q;
+ register word count = 0;
+ word old_size = GC_changing_list_limit-GC_changing_list_start+1;
+ register word new_size = old_size;
+ extern_ptr_t * new_list;
+
+ for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
+ if (*p != 0) count++;
+ }
+ if (2 * count > old_size) new_size = 2 * count;
+ new_list = (extern_ptr_t *)
+ GC_generic_malloc_inner(
+ new_size * sizeof(extern_ptr_t), PTRFREE);
+ /* PTRFREE is a lie. But we don't want the collector to */
+ /* consider these. We do want the list itself to be */
+ /* collectable. */
+ if (new_list == 0) return(FALSE);
+ BZERO(new_list, new_size * sizeof(extern_ptr_t));
+ q = new_list;
+ for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
+ if (*p != 0) *q++ = *p;
+ }
+ GC_changing_list_start = new_list;
+ GC_changing_list_limit = new_list + new_size - 1;
+ GC_changing_list_current = q;
+ return(TRUE);
+}
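+
+/* Growth policy above: the new list keeps the old capacity unless   */
+/* live entries fill more than half of it, in which case capacity    */
+/* becomes twice the live count; the old (PTRFREE, hence             */
+/* collectable) copy is simply abandoned to the collector.           */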
+
+/* Add p to changing list. Clear p on failure. */
+# define ADD_CHANGING(p) \
+ { \
+ register struct hblk * h = HBLKPTR(p); \
+ register word index = PHT_HASH(h); \
+ \
+ set_pht_entry_from_index(GC_changed_pages, index); \
+ } \
+ if (*GC_changing_list_current != 0 \
+ && ++GC_changing_list_current == GC_changing_list_limit) { \
+ if (!GC_compact_changing_list()) (p) = 0; \
+ } \
+ *GC_changing_list_current = p;
+
+void GC_change_stubborn(p)
+extern_ptr_t p;
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ ADD_CHANGING(p);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+void GC_end_stubborn_change(p)
+extern_ptr_t p;
+{
+# ifdef THREADS
+ register VOLATILE extern_ptr_t * my_current = GC_changing_list_current;
+# else
+ register extern_ptr_t * my_current = GC_changing_list_current;
+# endif
+ register bool tried_quick;
+ DCL_LOCK_STATE;
+
+ if (*my_current == p) {
+ /* Hopefully the normal case. */
+ /* Compaction could not have been running when we started. */
+ *my_current = 0;
+# ifdef THREADS
+ if (my_current == GC_changing_list_current) {
+ /* Compaction can't have run in the interim. */
+ /* We got away with the quick and dirty approach. */
+ return;
+ }
+ tried_quick = TRUE;
+# else
+ return;
+# endif
+ } else {
+ tried_quick = FALSE;
+ }
+ DISABLE_SIGNALS();
+ LOCK();
+ my_current = GC_changing_list_current;
+ for (; my_current >= GC_changing_list_start; my_current--) {
+ if (*my_current == p) {
+ *my_current = 0;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return;
+ }
+ }
+ if (!tried_quick) {
+ GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
+ (unsigned long)p);
+ ABORT("Bad arg to GC_end_stubborn_change");
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+/* Allocate lb bytes of composite (pointerful) data */
+/* No pointer fields may be changed after a call to */
+/* GC_end_stubborn_change(p) where p is the value */
+/* returned by GC_malloc_stubborn. */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_stubborn(size_t lb)
+# else
+ extern_ptr_t GC_malloc_stubborn(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+ptr_t result;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_sobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ result = GC_generic_malloc((word)lb, STUBBORN);
+ goto record;
+ }
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ result = (extern_ptr_t) op;
+ ADD_CHANGING(result);
+ FASTUNLOCK();
+ return((extern_ptr_t)result);
+ } else {
+ result = (extern_ptr_t)
+ GC_generic_malloc((word)lb, STUBBORN);
+ }
+record:
+ DISABLE_SIGNALS();
+ LOCK();
+ ADD_CHANGING(result);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((extern_ptr_t)GC_clear_stack(result));
+}
+
+
+/* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
+/* Report pages on which stubborn objects were changed. */
+void GC_read_changed()
+{
+ register extern_ptr_t * p = GC_changing_list_start;
+ register extern_ptr_t q;
+ register struct hblk * h;
+ register word index;
+
+ if (p == 0) /* initializing */ return;
+ BCOPY(GC_changed_pages, GC_prev_changed_pages,
+ (sizeof GC_changed_pages));
+ BZERO(GC_changed_pages, (sizeof GC_changed_pages));
+ for (; p <= GC_changing_list_current; p++) {
+ if ((q = *p) != 0) {
+ h = HBLKPTR(q);
+ index = PHT_HASH(h);
+ set_pht_entry_from_index(GC_changed_pages, index);
+ }
+ }
+}
+
+bool GC_page_was_changed(h)
+struct hblk * h;
+{
+ register word index = PHT_HASH(h);
+
+ return(get_pht_entry_from_index(GC_prev_changed_pages, index));
+}
+
+/* Remove unreachable entries from changed list. Should only be */
+/* called with mark bits consistent and lock held. */
+void GC_clean_changing_list()
+{
+ register extern_ptr_t * p = GC_changing_list_start;
+ register extern_ptr_t q;
+ register ptr_t r;
+ register unsigned long count = 0;
+ register unsigned long dropped_count = 0;
+
+ if (p == 0) /* initializing */ return;
+ for (; p <= GC_changing_list_current; p++) {
+ if ((q = *p) != 0) {
+ count++;
+ r = (ptr_t)GC_base(q);
+ if (r == 0 || !GC_is_marked(r)) {
+ *p = 0;
+ dropped_count++;
+ }
+ }
+ }
+# ifdef PRINTSTATS
+ if (count > 0) {
+ GC_printf2("%lu entries in changing list: reclaimed %lu\n",
+ (unsigned long)count, (unsigned long)dropped_count);
+ }
+# endif
+}
+
+#else /* !STUBBORN_ALLOC */
+
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_stubborn(size_t lb)
+# else
+ extern_ptr_t GC_malloc_stubborn(lb)
+ size_t lb;
+# endif
+{
+ return(GC_malloc(lb));
+}
+
+/*ARGSUSED*/
+void GC_end_stubborn_change(p)
+extern_ptr_t p;
+{
+}
+
+/*ARGSUSED*/
+void GC_change_stubborn(p)
+extern_ptr_t p;
+{
+}
+
+
+#endif
--- /dev/null
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+/* Boehm, May 6, 1994 3:32 pm PDT */
+/* An incomplete test for the garbage collector. */
+/* Some more obscure entry points are not tested at all. */
+
+# include <stdlib.h>
+# include <stdio.h>
+# include "gc.h"
+# include "gc_typed.h"
+# include "gc_priv.h" /* For output and some statistics */
+# include "config.h"
+
+# ifdef MSWIN32
+# include <windows.h>
+# endif
+
+# ifdef PCR
+# include "th/PCR_ThCrSec.h"
+# include "th/PCR_Th.h"
+# endif
+
+# ifdef SOLARIS_THREADS
+# include <thread.h>
+# include <synch.h>
+# endif
+
+# if defined(PCR) || defined(SOLARIS_THREADS)
+# define THREADS
+# endif
+
+# ifdef AMIGA
+ long __stack = 200000;
+# endif
+
+# define FAIL (void)abort()
+
+/* AT_END may be defined to exercise the interior pointer test	*/
+/* if the collector is configured with ALL_INTERIOR_POINTERS. */
+/* As it stands, this test should succeed with either */
+/* configuration. In the FIND_LEAK configuration, it should */
+/* find lots of leaks, since we free almost nothing. */
+
+struct SEXPR {
+ struct SEXPR * sexpr_car;
+ struct SEXPR * sexpr_cdr;
+};
+
+# ifdef __STDC__
+ typedef void * void_star;
+# else
+ typedef char * void_star;
+# endif
+
+typedef struct SEXPR * sexpr;
+
+extern sexpr cons();
+
+# define nil ((sexpr) 0)
+# define car(x) ((x) -> sexpr_car)
+# define cdr(x) ((x) -> sexpr_cdr)
+# define is_nil(x) ((x) == nil)
+
+
+int extra_count = 0; /* Amount of space wasted in cons node */
+
+/* Silly implementation of Lisp cons. Intentionally wastes lots of space */
+/* to test collector. */
+sexpr cons (x, y)
+sexpr x;
+sexpr y;
+{
+ register sexpr r;
+ register int *p;
+    register int my_extra = extra_count;
+
+ r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
+ if (r == 0) {
+ (void)GC_printf0("Out of memory\n");
+ exit(1);
+ }
+ for (p = (int *)r;
+ ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
+ if (*p) {
+ (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
+ (unsigned long)p);
+ FAIL;
+ }
+ *p = 13;
+ }
+# ifdef AT_END
+ r = (sexpr)((char *)r + (my_extra & ~7));
+# endif
+ r -> sexpr_car = x;
+ r -> sexpr_cdr = y;
+ my_extra++;
+ if ( my_extra >= 5000 ) {
+ extra_count = 0;
+ } else {
+ extra_count = my_extra;
+ }
+ GC_END_STUBBORN_CHANGE((char *)r);
+ return(r);
+}
+
+sexpr small_cons (x, y)
+sexpr x;
+sexpr y;
+{
+ register sexpr r;
+
+ r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
+ if (r == 0) {
+ (void)GC_printf0("Out of memory\n");
+ exit(1);
+ }
+ r -> sexpr_car = x;
+ r -> sexpr_cdr = y;
+ return(r);
+}
+
+sexpr small_cons_uncollectable (x, y)
+sexpr x;
+sexpr y;
+{
+ register sexpr r;
+
+ r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
+ if (r == 0) {
+ (void)GC_printf0("Out of memory\n");
+ exit(1);
+ }
+ r -> sexpr_car = x;
+ r -> sexpr_cdr = (sexpr) (~(unsigned long)y);
+ return(r);
+}
+
+/* Return reverse(x) concatenated with y */
+sexpr reverse1(x, y)
+sexpr x, y;
+{
+ if (is_nil(x)) {
+ return(y);
+ } else {
+ return( reverse1(cdr(x), cons(car(x), y)) );
+ }
+}
+
+sexpr reverse(x)
+sexpr x;
+{
+ return( reverse1(x, nil) );
+}
+
+sexpr ints(low, up)
+int low, up;
+{
+ if (low > up) {
+ return(nil);
+ } else {
+ return(small_cons(small_cons((sexpr)low, (sexpr)0), ints(low+1, up)));
+ }
+}
+
+/* To check uncollectable allocation we build lists with disguised cdr	*/
+/* pointers, and make sure they don't go away. */
+sexpr uncollectable_ints(low, up)
+int low, up;
+{
+ if (low > up) {
+ return(nil);
+ } else {
+ return(small_cons_uncollectable(small_cons((sexpr)low, (sexpr)0),
+ uncollectable_ints(low+1, up)));
+ }
+}
+
+void check_ints(list, low, up)
+sexpr list;
+int low, up;
+{
+ if ((int)(car(car(list))) != low) {
+ (void)GC_printf0(
+ "List reversal produced incorrect list - collector is broken\n");
+ exit(1);
+ }
+ if (low == up) {
+ if (cdr(list) != nil) {
+ (void)GC_printf0("List too long - collector is broken\n");
+ exit(1);
+ }
+ } else {
+ check_ints(cdr(list), low+1, up);
+ }
+}
+
+# define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))
+
+void check_uncollectable_ints(list, low, up)
+sexpr list;
+int low, up;
+{
+ if ((int)(car(car(list))) != low) {
+ (void)GC_printf0(
+ "Uncollectable list corrupted - collector is broken\n");
+ exit(1);
+ }
+ if (low == up) {
+ if (UNCOLLECTABLE_CDR(list) != nil) {
+	    (void)GC_printf0("Uncollectable list too long - collector is broken\n");
+ exit(1);
+ }
+ } else {
+ check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
+ }
+}
+
+/* Not used, but useful for debugging: */
+void print_int_list(x)
+sexpr x;
+{
+ if (is_nil(x)) {
+ (void)GC_printf0("NIL\n");
+ } else {
+ (void)GC_printf1("(%ld)", (long)(car(car(x))));
+ if (!is_nil(cdr(x))) {
+ (void)GC_printf0(", ");
+ (void)print_int_list(cdr(x));
+ } else {
+ (void)GC_printf0("\n");
+ }
+ }
+}
+
+/* Try to force a to be strangely aligned */
+struct {
+ char dummy;
+ sexpr aa;
+} A;
+#define a A.aa
+
+/*
+ * Repeatedly reverse lists built out of very different sized cons cells.
+ * Check that we didn't lose anything.
+ */
+void reverse_test()
+{
+ int i;
+ sexpr b;
+ sexpr c;
+ sexpr d;
+ sexpr e;
+# if defined(MSWIN32)
+ /* Win32S only allows 128K stacks */
+# define BIG 1000
+# else
+# define BIG 4500
+# endif
+
+ a = ints(1, 49);
+ b = ints(1, 50);
+ c = ints(1, BIG);
+ d = uncollectable_ints(1, 100);
+ e = uncollectable_ints(1, 1);
+ /* Superficially test interior pointer recognition on stack */
+ c = (sexpr)((char *)c + sizeof(char *));
+ d = (sexpr)((char *)d + sizeof(char *));
+# ifdef __STDC__
+ GC_FREE((void *)e);
+# else
+ GC_FREE((char *)e);
+# endif
+ for (i = 0; i < 50; i++) {
+ b = reverse(reverse(b));
+ }
+ check_ints(b,1,50);
+ for (i = 0; i < 60; i++) {
+ /* This maintains the invariant that a always points to a list of */
+ /* 49 integers. Thus this is thread safe without locks. */
+ a = reverse(reverse(a));
+# if !defined(AT_END) && !defined(THREADS)
+ /* This is not thread safe, since realloc explicitly deallocates */
+ if (i & 1) {
+ a = (sexpr)GC_REALLOC((void_star)a, 500);
+ } else {
+ a = (sexpr)GC_REALLOC((void_star)a, 8200);
+ }
+# endif
+ }
+ check_ints(a,1,49);
+ check_ints(b,1,50);
+ c = (sexpr)((char *)c - sizeof(char *));
+ d = (sexpr)((char *)d - sizeof(char *));
+ check_ints(c,1,BIG);
+ check_uncollectable_ints(d, 1, 100);
+ a = b = c = 0;
+}
+
+/*
+ * The rest of this builds balanced binary trees, checks that they don't
+ * disappear, and tests finalization.
+ */
+typedef struct treenode {
+ int level;
+ struct treenode * lchild;
+ struct treenode * rchild;
+} tn;
+
+int finalizable_count = 0;
+int finalized_count = 0;
+int dropped_something = 0;
+
+# ifdef __STDC__
+ void finalizer(void * obj, void * client_data)
+# else
+ void finalizer(obj, client_data)
+ char * obj;
+ char * client_data;
+# endif
+{
+ tn * t = (tn *)obj;
+
+# ifdef PCR
+ PCR_ThCrSec_EnterSys();
+# endif
+# ifdef SOLARIS_THREADS
+ static mutex_t incr_lock;
+ mutex_lock(&incr_lock);
+# endif
+ if ((int)client_data != t -> level) {
+ (void)GC_printf0("Wrong finalization data - collector is broken\n");
+ FAIL;
+ }
+ finalized_count++;
+# ifdef PCR
+ PCR_ThCrSec_ExitSys();
+# endif
+# ifdef SOLARIS_THREADS
+ mutex_unlock(&incr_lock);
+# endif
+}
+
+size_t counter = 0;
+
+# define MAX_FINALIZED 8000
+GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
+int live_indicators_count = 0;
+
+tn * mktree(n)
+int n;
+{
+ tn * result = (tn *)GC_MALLOC(sizeof(tn));
+
+ if (n == 0) return(0);
+ if (result == 0) {
+ (void)GC_printf0("Out of memory\n");
+ exit(1);
+ }
+ result -> level = n;
+ result -> lchild = mktree(n-1);
+ result -> rchild = mktree(n-1);
+ if (counter++ % 17 == 0 && n >= 2) {
+ tn * tmp = result -> lchild -> rchild;
+
+ result -> lchild -> rchild = result -> rchild -> lchild;
+ result -> rchild -> lchild = tmp;
+ }
+ if (counter++ % 119 == 0) {
+ int my_index;
+
+ {
+# ifdef PCR
+ PCR_ThCrSec_EnterSys();
+# endif
+# ifdef SOLARIS_THREADS
+ static mutex_t incr_lock;
+ mutex_lock(&incr_lock);
+# endif
+ /* Losing a count here causes erroneous report of failure. */
+ finalizable_count++;
+ my_index = live_indicators_count++;
+# ifdef PCR
+ PCR_ThCrSec_ExitSys();
+# endif
+# ifdef SOLARIS_THREADS
+ mutex_unlock(&incr_lock);
+# endif
+ }
+
+ GC_REGISTER_FINALIZER((void_star)result, finalizer, (void_star)n,
+ (GC_finalization_proc *)0, (void_star *)0);
+ live_indicators[my_index] = 13;
+ if (GC_general_register_disappearing_link(
+ (void_star *)(&(live_indicators[my_index])),
+ (void_star)result) != 0) {
+ GC_printf0("GC_general_register_disappearing_link failed\n");
+ FAIL;
+ }
+ if (GC_unregister_disappearing_link(
+ (void_star *)
+ (&(live_indicators[my_index]))) == 0) {
+ GC_printf0("GC_unregister_disappearing_link failed\n");
+ FAIL;
+ }
+ if (GC_general_register_disappearing_link(
+ (void_star *)(&(live_indicators[my_index])),
+ (void_star)result) != 0) {
+ GC_printf0("GC_general_register_disappearing_link failed 2\n");
+ FAIL;
+ }
+ }
+ return(result);
+}
+
+void chktree(t,n)
+tn *t;
+int n;
+{
+ if (n == 0 && t != 0) {
+ (void)GC_printf0("Clobbered a leaf - collector is broken\n");
+ FAIL;
+ }
+ if (n == 0) return;
+ if (t -> level != n) {
+ (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
+ (unsigned long)n);
+ FAIL;
+ }
+ if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
+ chktree(t -> lchild, n-1);
+ if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
+ chktree(t -> rchild, n-1);
+}
+
+# ifdef SOLARIS_THREADS
+thread_key_t fl_key;
+
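+/* Return an 8 byte allocation from a thread-local free list kept in */
+/* thread-specific data. The list is refilled with GC_malloc_many(8), */
+/* which returns a batch of 8 byte objects chained through GC_NEXT. */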
+void * alloc8bytes()
+{
+ void ** my_free_list_ptr;
+ void * my_free_list;
+
+ if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
+ (void)GC_printf0("thr_getspecific failed\n");
+ FAIL;
+ }
+ if (my_free_list_ptr == 0) {
+ my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
+ if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
+ (void)GC_printf0("thr_setspecific failed\n");
+ FAIL;
+ }
+ }
+ my_free_list = *my_free_list_ptr;
+ if (my_free_list == 0) {
+ my_free_list = GC_malloc_many(8);
+ if (my_free_list == 0) {
+ (void)GC_printf0("alloc8bytes out of memory\n");
+ FAIL;
+ }
+ }
+ *my_free_list_ptr = GC_NEXT(my_free_list);
+ GC_NEXT(my_free_list) = 0;
+ return(my_free_list);
+}
+
+#else
+# define alloc8bytes() GC_MALLOC_ATOMIC(8)
+#endif
+
+void alloc_small(n)
+int n;
+{
+ register int i;
+
+ for (i = 0; i < n; i += 8) {
+ if (alloc8bytes() == 0) {
+ (void)GC_printf0("Out of memory\n");
+ FAIL;
+ }
+ }
+}
+
+void tree_test()
+{
+ tn * root;
+ register int i;
+
+ root = mktree(16);
+ alloc_small(5000000);
+ chktree(root, 16);
+ if (finalized_count && ! dropped_something) {
+ (void)GC_printf0("Premature finalization - collector is broken\n");
+ FAIL;
+ }
+ dropped_something = 1;
+ root = mktree(16);
+ chktree(root, 16);
+ for (i = 16; i >= 0; i--) {
+ root = mktree(i);
+ chktree(root, i);
+ }
+ alloc_small(5000000);
+}
+
+unsigned n_tests = 0;
+
+/* A very simple test of explicitly typed allocation */
+void typed_test()
+{
+ GC_word * old, * new;
+ GC_word bm3 = 0x3;
+ GC_word bm2 = 0x2;
+ GC_word bm_large = 0xf7ff7fff;
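+    /* Bit i of a descriptor bitmap marks word i as a possible */
+    /* pointer: bm3 == 0x3 marks words 0 and 1, bm2 == 0x2 marks only */
+    /* word 1, and bm_large covers a 32 word object, leaving words 15 */
+    /* and 27 unmarked. */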
+ GC_descr d1 = GC_make_descriptor(&bm3, 2);
+ GC_descr d2 = GC_make_descriptor(&bm2, 2);
+# ifndef LINT
+ GC_descr dummy = GC_make_descriptor(&bm_large, 32);
+# endif
+ GC_descr d3 = GC_make_descriptor(&bm_large, 32);
+ register int i;
+
+ old = 0;
+ for (i = 0; i < 4000; i++) {
+ new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
+ new[0] = 17;
+ new[1] = (GC_word)old;
+ old = new;
+ new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
+ new[0] = 17;
+ new[1] = (GC_word)old;
+ old = new;
+ new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
+ new[0] = 17;
+ new[1] = (GC_word)old;
+ old = new;
+ new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
+ d1);
+ new[0] = 17;
+ new[1] = (GC_word)old;
+ old = new;
+ if (i & 0xff) {
+ new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
+ d2);
+ } else {
+ new = (GC_word *) GC_calloc_explicitly_typed(1001,
+ 3 * sizeof(GC_word),
+ d2);
+ }
+ new[0] = 17;
+ new[1] = (GC_word)old;
+ old = new;
+ }
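+    /* The loop above links 5 objects per iteration, so the chain now */
+    /* holds 4000 * 5 == 20000 nodes; walk it and verify that the */
+    /* first word of each node survived. */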
+ for (i = 0; i < 20000; i++) {
+ if (new[0] != 17) {
+ (void)GC_printf1("typed alloc failed at %lu\n",
+ (unsigned long)i);
+ FAIL;
+ }
+ new[0] = 0;
+ old = new;
+ new = (GC_word *)(old[1]);
+ }
+}
+
+void run_one_test()
+{
+ DCL_LOCK_STATE;
+
+# ifndef GC_DEBUG
+ if (GC_size(GC_MALLOC(7)) != 8
+ || GC_size(GC_MALLOC(15)) != 16) {
+ (void)GC_printf0("GC_size produced unexpected results\n");
+ FAIL;
+ }
+# endif
+ reverse_test();
+# ifdef PRINTSTATS
+ GC_printf0("-------------Finished reverse_test\n");
+# endif
+ typed_test();
+# ifdef PRINTSTATS
+ GC_printf0("-------------Finished typed_test\n");
+# endif
+ tree_test();
+ LOCK();
+ n_tests++;
+ UNLOCK();
+
+}
+
+void check_heap_stats()
+{
+ unsigned long max_heap_sz;
+ register int i;
+ int still_live;
+
+ if (sizeof(char *) > 4) {
+ max_heap_sz = 13000000;
+ } else {
+ max_heap_sz = 10000000;
+ }
+# ifdef GC_DEBUG
+ max_heap_sz *= 2;
+# ifdef SPARC
+ max_heap_sz *= 2;
+# endif
+# endif
+ /* Garbage collect repeatedly so that all inaccessible objects */
+ /* can be finalized. */
+ for (i = 0; i < 16; i++) {
+ GC_gcollect();
+ }
+ (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
+ (void)GC_printf2("Finalized %lu/%lu objects - ",
+ (unsigned long)finalized_count,
+ (unsigned long)finalizable_count);
+ if (finalized_count > finalizable_count
+ || finalized_count < finalizable_count/2) {
+ (void)GC_printf0("finalization is probably broken\n");
+ FAIL;
+ } else {
+ (void)GC_printf0("finalization is probably ok\n");
+ }
+ still_live = 0;
+ for (i = 0; i < MAX_FINALIZED; i++) {
+ if (live_indicators[i] != 0) {
+ still_live++;
+ }
+ }
+ if (still_live != finalizable_count - finalized_count) {
+ (void)GC_printf1
+ ("%lu disappearing links remain - disappearing links are broken\n",
+ (unsigned long) still_live);
+ FAIL;
+ }
+ (void)GC_printf1("Total number of bytes allocated is %lu\n",
+ (unsigned long)
+ WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
+ (void)GC_printf1("Final heap size is %lu bytes\n",
+ (unsigned long)GC_get_heap_size());
+ if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
+ < 33500000*n_tests) {
+ (void)GC_printf0("Incorrect execution - missed some allocations\n");
+ FAIL;
+ }
+ if (GC_get_heap_size() > max_heap_sz*n_tests) {
+ (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
+ FAIL;
+ }
+ (void)GC_printf0("Collector appears to work\n");
+}
+
+#if !defined(PCR) && !defined(SOLARIS_THREADS) || defined(LINT)
+#ifdef MSWIN32
+ int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
+#else
+ int main()
+#endif
+{
+ n_tests = 0;
+# if defined(MPROTECT_VDB) || defined(PROC_VDB)
+ GC_enable_incremental();
+ (void) GC_printf0("Switched to incremental mode\n");
+# if defined(MPROTECT_VDB)
+ (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
+# else
+ (void)GC_printf0("Reading dirty bits from /proc\n");
+# endif
+# endif
+ run_one_test();
+ check_heap_stats();
+ (void)fflush(stdout);
+# ifdef LINT
+ /* Entry points we should be testing, but aren't. */
+ /* Some can be tested by defining GC_DEBUG at the top of this file */
+ /* This is a bit SunOS4 specific. */
+ GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
+ GC_register_disappearing_link,
+ GC_print_obj, GC_debug_change_stubborn,
+ GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
+ GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
+ GC_init, GC_make_closure, GC_debug_invoke_finalizer,
+ GC_page_was_ever_dirty, GC_is_fresh,
+ GC_malloc_ignore_off_page);
+# endif
+ return(0);
+}
+# endif
+
+#ifdef PCR
+test()
+{
+ PCR_Th_T * th1;
+ PCR_Th_T * th2;
+ int code;
+
+ n_tests = 0;
+ GC_enable_incremental();
+ th1 = PCR_Th_Fork(run_one_test, 0);
+ th2 = PCR_Th_Fork(run_one_test, 0);
+ run_one_test();
+ if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
+ != PCR_ERes_okay || code != 0) {
+ (void)GC_printf0("Thread 1 failed\n");
+ }
+ if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
+ != PCR_ERes_okay || code != 0) {
+ (void)GC_printf0("Thread 2 failed\n");
+ }
+ check_heap_stats();
+ (void)fflush(stdout);
+ return(0);
+}
+#endif
+
+#ifdef SOLARIS_THREADS
+void * thr_run_one_test(void * arg)
+{
+ run_one_test();
+ return(0);
+}
+main()
+{
+ thread_t th1;
+ thread_t th2;
+ int code;
+
+ n_tests = 0;
+ GC_enable_incremental();
+ if ((code = thr_keycreate(&fl_key, GC_free)) != 0) {
+ (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
+ FAIL;
+ }
+ if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
+ (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
+ FAIL;
+ }
+ if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
+ (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
+ FAIL;
+ }
+ run_one_test();
+ if ((code = thr_join(th1, 0, 0)) != 0) {
+ (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
+ FAIL;
+ }
+ if ((code = thr_join(th2, 0, 0)) != 0) {
+ (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
+ FAIL;
+ }
+ check_heap_stats();
+ (void)fflush(stdout);
+ return(0);
+}
+#endif
--- /dev/null
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+/* Boehm, May 19, 1994 2:06 pm PDT */
+
+
+/*
+ * Some simple primitives for allocation with explicit type information.
+ * Simple objects are allocated such that they contain a GC_descr at the
+ * end (in the last allocated word). This descriptor may be a procedure
+ * which then examines an extended descriptor passed as its environment.
+ *
+ * Arrays are treated as simple objects if they have sufficiently simple
+ * structure. Otherwise they are allocated from an array kind that supplies
+ * a special mark procedure. These arrays contain a pointer to a
+ * complex_descriptor as their last word.
+ * This is done because the environment field is too small, and the collector
+ * must trace the complex_descriptor.
+ *
+ * Note that descriptors inside objects may appear cleared, if we encounter a
+ * false reference to an object on a free list. In the GC_descr case, this
+ * is OK, since a 0 descriptor corresponds to examining no fields.
+ * In the complex_descriptor case, we explicitly check for that case.
+ *
+ * MAJOR PARTS OF THIS CODE HAVE NOT BEEN TESTED AT ALL and are not testable,
+ * since they are not accessible through the current interface.
+ */
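+/*
+ * As a purely illustrative client-side sketch (the exact interface is
+ * declared in gc_typed.h, and typed_test in test.c exercises it the
+ * same way; the names below are hypothetical):
+ *
+ *   struct pair { GC_word key; struct pair * next; };
+ *   GC_word bm = 0x2;   (bit 1 set: only the word holding next
+ *                        may contain a pointer)
+ *   GC_descr d = GC_make_descriptor(&bm, 2);
+ *   struct pair * p = (struct pair *)
+ *       GC_malloc_explicitly_typed(sizeof(struct pair), d);
+ *
+ * The collector then traces only the words the bitmap marks.
+ */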
+
+#include "gc_priv.h"
+#include "gc_mark.h"
+#include "gc_typed.h"
+
+# ifdef ADD_BYTE_AT_END
+# define EXTRA_BYTES (sizeof(word) - 1)
+# else
+# define EXTRA_BYTES (sizeof(word))
+# endif
+
+bool GC_explicit_typing_initialized = FALSE;
+
+int GC_explicit_kind; /* Object kind for objects with indirect */
+ /* (possibly extended) descriptors. */
+
+int GC_array_kind; /* Object kind for objects with complex */
+ /* descriptors and GC_array_mark_proc. */
+
+/* Extended descriptors. GC_typed_mark_proc understands these. */
+/* These are used for simple objects that are larger than what */
+/* can be described by a BITMAP_BITS sized bitmap. */
+typedef struct {
+ word ed_bitmap; /* lsb corresponds to first word. */
+ bool ed_continued; /* next entry is continuation. */
+} ext_descr;
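+/* For example, on a 32 bit machine a 40 bit bitmap occupies two */
+/* consecutive entries: the first holds bits 0-31 with ed_continued */
+/* TRUE, the second holds bits 32-39 (irrelevant high bits cleared) */
+/* with ed_continued FALSE. See GC_add_ext_descriptor below. */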
+
+/* Array descriptors. GC_array_mark_proc understands these. */
+/* We may eventually need to add provisions for headers and */
+/* trailers. Hence we provide for tree structured descriptors, */
+/* though we don't really use them currently. */
+typedef union ComplexDescriptor {
+ struct LeafDescriptor { /* Describes simple array */
+ word ld_tag;
+# define LEAF_TAG 1
+ word ld_size; /* bytes per element */
+ /* multiple of ALIGNMENT */
+ word ld_nelements; /* Number of elements. */
+ GC_descr ld_descriptor; /* A simple length, bitmap, */
+ /* or procedure descriptor. */
+ } ld;
+ struct ComplexArrayDescriptor {
+ word ad_tag;
+# define ARRAY_TAG 2
+ word ad_nelements;
+ union ComplexDescriptor * ad_element_descr;
+ } ad;
+ struct SequenceDescriptor {
+ word sd_tag;
+# define SEQUENCE_TAG 3
+ union ComplexDescriptor * sd_first;
+ union ComplexDescriptor * sd_second;
+ } sd;
+} complex_descriptor;
+#define TAG ld.ld_tag
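+/* For instance, an array of 3 structures, each 4 words long with one */
+/* pointer word, would normally be described by a single leaf */
+/* (ld_size = 4 words, ld_nelements = 3, ld_descriptor = a bitmap */
+/* descriptor); a SequenceDescriptor glues two descriptors together */
+/* when GC_make_array_descriptor splits off an odd element. */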
+
+ext_descr * GC_ext_descriptors; /* Points to array of extended */
+ /* descriptors. */
+
+word GC_ed_size = 0; /* Current size of above array. */
+# define ED_INITIAL_SIZE 100
+
+word GC_avail_descr = 0; /* Next available slot. */
+
+int GC_typed_mark_proc_index; /* Indices of my mark */
+int GC_array_mark_proc_index; /* procedures. */
+
+/* Add a multiword bitmap to GC_ext_descriptors arrays. Return */
+/* starting index. */
+/* Returns -1 on failure. */
+/* Caller does not hold allocation lock. */
+signed_word GC_add_ext_descriptor(bm, nbits)
+GC_bitmap bm;
+word nbits;
+{
+ register size_t nwords = divWORDSZ(nbits + WORDSZ-1);
+ register signed_word result;
+ register word i;
+ register word last_part;
+ register int extra_bits;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ while (GC_avail_descr + nwords >= GC_ed_size) {
+ ext_descr * new;
+ size_t new_size;
+ word ed_size = GC_ed_size;
+
+ UNLOCK();
+ ENABLE_SIGNALS();
+ if (ed_size == 0) {
+ new_size = ED_INITIAL_SIZE;
+ } else {
+ new_size = 2 * ed_size;
+ if (new_size > MAX_ENV) return(-1);
+ }
+ new = (ext_descr *) GC_malloc_atomic(new_size * sizeof(ext_descr));
+ if (new == 0) return(-1);
+ DISABLE_SIGNALS();
+ LOCK();
+ if (ed_size == GC_ed_size) {
+ if (GC_avail_descr != 0) {
+ BCOPY(GC_ext_descriptors, new,
+ GC_avail_descr * sizeof(ext_descr));
+ }
+ GC_ed_size = new_size;
+ GC_ext_descriptors = new;
+ } /* else another thread already resized it in the meantime */
+ }
+ result = GC_avail_descr;
+ for (i = 0; i < nwords-1; i++) {
+ GC_ext_descriptors[result + i].ed_bitmap = bm[i];
+ GC_ext_descriptors[result + i].ed_continued = TRUE;
+ }
+ last_part = bm[i];
+ /* Clear irrelevant bits. */
+ extra_bits = nwords * WORDSZ - nbits;
+ last_part <<= extra_bits;
+ last_part >>= extra_bits;
+ GC_ext_descriptors[result + i].ed_bitmap = last_part;
+ GC_ext_descriptors[result + i].ed_continued = FALSE;
+ GC_avail_descr += nwords;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
+/* Table of bitmap descriptors for n-word-long, all-pointer objects. */
+GC_descr GC_bm_table[WORDSZ/2];
+
+/* Return a descriptor for the concatenation of 2 nwords long objects, */
+/* each of which is described by descriptor. */
+/* The result is known to be short enough to fit into a bitmap */
+/* descriptor. */
+/* Descriptor is a DS_LENGTH or DS_BITMAP descriptor. */
+GC_descr GC_double_descr(descriptor, nwords)
+register GC_descr descriptor;
+register word nwords;
+{
+ if ((descriptor & DS_TAGS) == DS_LENGTH) {
+ descriptor = GC_bm_table[BYTES_TO_WORDS((word)descriptor)];
+ }
+ descriptor |= (descriptor & ~DS_TAGS) >> nwords;
+ return(descriptor);
+}
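+/* For example, doubling a 2 word object whose bitmap marks only */
+/* word 0 yields a 4 word descriptor marking words 0 and 2: the */
+/* (bit-reversed) bitmap is shifted right by nwords and or-ed in. */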
+
+complex_descriptor * GC_make_sequence_descriptor();
+
+/* Build a descriptor for an array with nelements elements, */
+/* each of which can be described by a simple descriptor. */
+/* We try to optimize some common cases. */
+/* If the result is COMPLEX, then a complex_descriptor* is returned */
+/* in *complex_d. */
+/* If the result is LEAF, then we build a LeafDescriptor in */
+/* the structure pointed to by leaf. */
+/* The tag in the leaf structure is not set. */
+/* If the result is SIMPLE, then a GC_descr */
+/* is returned in *simple_d. */
+/* If the result is NO_MEM, then */
+/* we failed to allocate the descriptor. */
+/* The implementation knows that DS_LENGTH is 0. */
+/* *leaf, *complex_d, and *simple_d may be used as temporaries */
+/* during the construction. */
+# define COMPLEX 2
+# define LEAF 1
+# define SIMPLE 0
+# define NO_MEM (-1)
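+/* E.g. an array whose elements are single all-pointer words collapses */
+/* to SIMPLE, since one length descriptor covers the whole array; a */
+/* large array of partly pointer-free elements typically yields a */
+/* LEAF; COMPLEX arises when pieces cannot be merged, e.g. for the */
+/* odd leftover element handled below. */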
+int GC_make_array_descriptor(nelements, size, descriptor,
+ simple_d, complex_d, leaf)
+word size;
+word nelements;
+GC_descr descriptor;
+GC_descr *simple_d;
+complex_descriptor **complex_d;
+struct LeafDescriptor * leaf;
+{
+# define OPT_THRESHOLD 50
+ /* For larger arrays, we try to combine descriptors of adjacent */
+ /* elements to speed up marking, and to reduce the amount */
+ /* of space needed on the mark stack. */
+ if ((descriptor & DS_TAGS) == DS_LENGTH) {
+ if ((word)descriptor == size) {
+ *simple_d = nelements * descriptor;
+ return(SIMPLE);
+ } else if ((word)descriptor == 0) {
+ *simple_d = (GC_descr)0;
+ return(SIMPLE);
+ }
+ }
+ if (nelements <= OPT_THRESHOLD) {
+ if (nelements <= 1) {
+ if (nelements == 1) {
+ *simple_d = descriptor;
+ return(SIMPLE);
+ } else {
+ *simple_d = (GC_descr)0;
+ return(SIMPLE);
+ }
+ }
+ } else if (size <= BITMAP_BITS/2
+ && (descriptor & DS_TAGS) != DS_PROC
+ && (size & (sizeof(word)-1)) == 0) {
+ int result =
+ GC_make_array_descriptor(nelements/2, 2*size,
+ GC_double_descr(descriptor,
+ BYTES_TO_WORDS(size)),
+ simple_d, complex_d, leaf);
+ if ((nelements & 1) == 0) {
+ return(result);
+ } else {
+ struct LeafDescriptor * one_element =
+ (struct LeafDescriptor *)
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
+
+ if (result == NO_MEM || one_element == 0) return(NO_MEM);
+ one_element -> ld_tag = LEAF_TAG;
+ one_element -> ld_size = size;
+ one_element -> ld_nelements = 1;
+ one_element -> ld_descriptor = descriptor;
+ switch(result) {
+ case SIMPLE:
+ {
+ struct LeafDescriptor * beginning =
+ (struct LeafDescriptor *)
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
+ if (beginning == 0) return(NO_MEM);
+ beginning -> ld_tag = LEAF_TAG;
+ beginning -> ld_size = size;
+ beginning -> ld_nelements = 1;
+ beginning -> ld_descriptor = *simple_d;
+ *complex_d = GC_make_sequence_descriptor(
+ (complex_descriptor *)beginning,
+ (complex_descriptor *)one_element);
+ break;
+ }
+ case LEAF:
+ {
+ struct LeafDescriptor * beginning =
+ (struct LeafDescriptor *)
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
+ if (beginning == 0) return(NO_MEM);
+ beginning -> ld_tag = LEAF_TAG;
+ beginning -> ld_size = leaf -> ld_size;
+ beginning -> ld_nelements = leaf -> ld_nelements;
+ beginning -> ld_descriptor = leaf -> ld_descriptor;
+ *complex_d = GC_make_sequence_descriptor(
+ (complex_descriptor *)beginning,
+ (complex_descriptor *)one_element);
+ break;
+ }
+ case COMPLEX:
+ *complex_d = GC_make_sequence_descriptor(
+ *complex_d,
+ (complex_descriptor *)one_element);
+ break;
+ }
+ return(COMPLEX);
+ }
+ }
+ {
+ leaf -> ld_size = size;
+ leaf -> ld_nelements = nelements;
+ leaf -> ld_descriptor = descriptor;
+ return(LEAF);
+ }
+}
+
+complex_descriptor * GC_make_sequence_descriptor(first, second)
+complex_descriptor * first;
+complex_descriptor * second;
+{
+ struct SequenceDescriptor * result =
+ (struct SequenceDescriptor *)
+ GC_malloc(sizeof(struct SequenceDescriptor));
+ /* Can't result in overly conservative marking, since tags are */
+ /* very small integers. Probably faster than maintaining type */
+ /* info. */
+ if (result != 0) {
+ result -> sd_tag = SEQUENCE_TAG;
+ result -> sd_first = first;
+ result -> sd_second = second;
+ }
+ return((complex_descriptor *)result);
+}
+
+#ifdef UNDEFINED
+complex_descriptor * GC_make_complex_array_descriptor(nelements, descr)
+word nelements;
+complex_descriptor * descr;
+{
+ struct ComplexArrayDescriptor * result =
+ (struct ComplexArrayDescriptor *)
+ GC_malloc(sizeof(struct ComplexArrayDescriptor));
+
+ if (result != 0) {
+ result -> ad_tag = ARRAY_TAG;
+ result -> ad_nelements = nelements;
+ result -> ad_element_descr = descr;
+ }
+ return((complex_descriptor *)result);
+}
+#endif
+
+ptr_t * GC_eobjfreelist;
+
+ptr_t * GC_arobjfreelist;
+
+struct hblk ** GC_ereclaim_list;
+
+struct hblk ** GC_arreclaim_list;
+
+mse * GC_typed_mark_proc();
+
+mse * GC_array_mark_proc();
+
+GC_descr GC_generic_array_descr;
+
+/* Caller does not hold allocation lock. */
+void GC_init_explicit_typing()
+{
+ register int i;
+ DCL_LOCK_STATE;
+
+
+# ifdef PRINTSTATS
+ if (sizeof(struct LeafDescriptor) % sizeof(word) != 0)
+ ABORT("Bad leaf descriptor size");
+# endif
+ DISABLE_SIGNALS();
+ LOCK();
+ if (GC_explicit_typing_initialized) {
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return;
+ }
+ GC_explicit_typing_initialized = TRUE;
+ /* Set up object kind with simple indirect descriptor. */
+ GC_eobjfreelist = (ptr_t *)
+ GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(ptr_t), PTRFREE);
+ if (GC_eobjfreelist == 0) ABORT("Couldn't allocate GC_eobjfreelist");
+ BZERO(GC_eobjfreelist, (MAXOBJSZ+1)*sizeof(ptr_t));
+ GC_ereclaim_list = (struct hblk **)
+ GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(struct hblk *), PTRFREE);
+ if (GC_ereclaim_list == 0)
+ ABORT("Couldn't allocate GC_ereclaim_list");
+ BZERO(GC_ereclaim_list, (MAXOBJSZ+1)*sizeof(struct hblk *));
+ GC_explicit_kind = GC_n_kinds++;
+ GC_obj_kinds[GC_explicit_kind].ok_freelist = GC_eobjfreelist;
+ GC_obj_kinds[GC_explicit_kind].ok_reclaim_list = GC_ereclaim_list;
+ GC_obj_kinds[GC_explicit_kind].ok_descriptor =
+ (((word)WORDS_TO_BYTES(-1)) | DS_PER_OBJECT);
+ GC_obj_kinds[GC_explicit_kind].ok_relocate_descr = TRUE;
+ GC_obj_kinds[GC_explicit_kind].ok_init = TRUE;
+ /* Descriptors are in the last word of the object. */
+ GC_typed_mark_proc_index = GC_n_mark_procs;
+ GC_mark_procs[GC_typed_mark_proc_index] = GC_typed_mark_proc;
+ GC_n_mark_procs++;
+ /* Moving this up breaks DEC AXP compiler. */
+ /* Set up object kind with array descriptor. */
+ GC_arobjfreelist = (ptr_t *)
+ GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(ptr_t), PTRFREE);
+ if (GC_arobjfreelist == 0) ABORT("Couldn't allocate GC_arobjfreelist");
+ BZERO(GC_arobjfreelist, (MAXOBJSZ+1)*sizeof(ptr_t));
+ GC_arreclaim_list = (struct hblk **)
+ GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(struct hblk *), PTRFREE);
+ if (GC_arreclaim_list == 0) ABORT("Couldn't allocate GC_arreclaim_list");
+ BZERO(GC_arreclaim_list, (MAXOBJSZ+1)*sizeof(struct hblk *));
+ if (GC_n_mark_procs >= MAX_MARK_PROCS)
+ ABORT("No slot for array mark proc");
+ GC_array_mark_proc_index = GC_n_mark_procs++;
+ if (GC_n_kinds >= MAXOBJKINDS)
+ ABORT("No kind available for array objects");
+ GC_array_kind = GC_n_kinds++;
+ GC_obj_kinds[GC_array_kind].ok_freelist = GC_arobjfreelist;
+ GC_obj_kinds[GC_array_kind].ok_reclaim_list = GC_arreclaim_list;
+ GC_obj_kinds[GC_array_kind].ok_descriptor =
+ MAKE_PROC(GC_array_mark_proc_index, 0);
+ GC_obj_kinds[GC_array_kind].ok_relocate_descr = FALSE;
+ GC_obj_kinds[GC_array_kind].ok_init = TRUE;
+ /* Descriptors are in the last word of the object. */
+ GC_mark_procs[GC_array_mark_proc_index] = GC_array_mark_proc;
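+    /* GC_bm_table[i] describes an i word object consisting entirely */
+    /* of pointers. In-line bitmap descriptors are stored */
+    /* bit-reversed (bit WORDSZ-1-j marks word j), so the top i bits */
+    /* are set. */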
+ for (i = 0; i < WORDSZ/2; i++) {
+ GC_descr d = (((word)(-1)) >> (WORDSZ - i)) << (WORDSZ - i);
+ d |= DS_BITMAP;
+ GC_bm_table[i] = d;
+ }
+ GC_generic_array_descr = MAKE_PROC(GC_array_mark_proc_index, 0);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+mse * GC_typed_mark_proc(addr, mark_stack_ptr, mark_stack_limit, env)
+register word * addr;
+register mse * mark_stack_ptr;
+mse * mark_stack_limit;
+word env;
+{
+ register word bm = GC_ext_descriptors[env].ed_bitmap;
+ register word * current_p = addr;
+ register word current;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+
+ for (; bm != 0; bm >>= 1, current_p++) {
+ if (bm & 1) {
+ current = *current_p;
+ if ((ptr_t)current >= least_ha && (ptr_t)current <= greatest_ha) {
+ PUSH_CONTENTS(current, mark_stack_ptr, mark_stack_limit);
+ }
+ }
+ }
+ if (GC_ext_descriptors[env].ed_continued) {
+ /* Push an entry with the rest of the descriptor back onto the */
+ /* stack. Thus we never do too much work at once. Note that */
+ /* we also can't overflow the mark stack unless we actually */
+ /* mark something. */
+ mark_stack_ptr++;
+ if (mark_stack_ptr >= mark_stack_limit) {
+ mark_stack_ptr = GC_signal_mark_stack_overflow(mark_stack_ptr);
+ }
+ mark_stack_ptr -> mse_start = addr + WORDSZ;
+ mark_stack_ptr -> mse_descr =
+ MAKE_PROC(GC_typed_mark_proc_index, env+1);
+ }
+ return(mark_stack_ptr);
+}
+
+/* Return the size of the object described by d. It would be faster to */
+/* store this directly, or to compute it as part of */
+/* GC_push_complex_descriptor, but hopefully it doesn't matter. */
+word GC_descr_obj_size(d)
+register complex_descriptor *d;
+{
+ switch(d -> TAG) {
+ case LEAF_TAG:
+ return(d -> ld.ld_nelements * d -> ld.ld_size);
+ case ARRAY_TAG:
+ return(d -> ad.ad_nelements
+ * GC_descr_obj_size(d -> ad.ad_element_descr));
+ case SEQUENCE_TAG:
+ return(GC_descr_obj_size(d -> sd.sd_first)
+ + GC_descr_obj_size(d -> sd.sd_second));
+ default:
+ ABORT("Bad complex descriptor");
+ /*NOTREACHED*/
+ }
+}
+
+/* Push descriptors for the object at addr with complex descriptor d */
+/* onto the mark stack. Return 0 if the mark stack overflowed. */
+mse * GC_push_complex_descriptor(addr, d, msp, msl)
+word * addr;
+register complex_descriptor *d;
+register mse * msp;
+mse * msl;
+{
+ register ptr_t current = (ptr_t) addr;
+ register word nelements;
+ register word sz;
+ register word i;
+
+ switch(d -> TAG) {
+ case LEAF_TAG:
+ {
+ register GC_descr descr = d -> ld.ld_descriptor;
+
+ nelements = d -> ld.ld_nelements;
+ if (msl - msp <= (ptrdiff_t)nelements) return(0);
+ sz = d -> ld.ld_size;
+ for (i = 0; i < nelements; i++) {
+ msp++;
+ msp -> mse_start = (word *)current;
+ msp -> mse_descr = descr;
+ current += sz;
+ }
+ return(msp);
+ }
+ case ARRAY_TAG:
+ {
+ register complex_descriptor *descr = d -> ad.ad_element_descr;
+
+ nelements = d -> ad.ad_nelements;
+ sz = GC_descr_obj_size(descr);
+ for (i = 0; i < nelements; i++) {
+ msp = GC_push_complex_descriptor((word *)current, descr,
+ msp, msl);
+ if (msp == 0) return(0);
+ current += sz;
+ }
+ return(msp);
+ }
+ case SEQUENCE_TAG:
+ {
+ sz = GC_descr_obj_size(d -> sd.sd_first);
+ msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_first,
+ msp, msl);
+ if (msp == 0) return(0);
+ current += sz;
+ msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_second,
+ msp, msl);
+ return(msp);
+ }
+ default:
+ ABORT("Bad complex descriptor");
+ /*NOTREACHED*/
+ }
+}
+
+/*ARGSUSED*/
+mse * GC_array_mark_proc(addr, mark_stack_ptr, mark_stack_limit, env)
+register word * addr;
+register mse * mark_stack_ptr;
+mse * mark_stack_limit;
+word env;
+{
+ register hdr * hhdr = HDR(addr);
+ register word sz = hhdr -> hb_sz;
+ register complex_descriptor * descr = (complex_descriptor *)(addr[sz-1]);
+ mse * orig_mark_stack_ptr = mark_stack_ptr;
+ mse * new_mark_stack_ptr;
+
+ if (descr == 0) {
+ /* Found a reference to a free list entry. Ignore it. */
+ return(orig_mark_stack_ptr);
+ }
+ /* In use counts were already updated when array descriptor was */
+ /* pushed. Here we only replace it by subobject descriptors, so */
+ /* no update is necessary. */
+ new_mark_stack_ptr = GC_push_complex_descriptor(addr, descr,
+ mark_stack_ptr,
+ mark_stack_limit-1);
+ if (new_mark_stack_ptr == 0) {
+ /* Doesn't fit. Conservatively push the whole array as a unit */
+ /* and request a mark stack expansion. */
+ /* This cannot cause a mark stack overflow, since it replaces */
+ /* the original array entry. */
+ GC_mark_stack_too_small = TRUE;
+ new_mark_stack_ptr = orig_mark_stack_ptr + 1;
+ new_mark_stack_ptr -> mse_start = addr;
+ new_mark_stack_ptr -> mse_descr = WORDS_TO_BYTES(sz) | DS_LENGTH;
+ } else {
+ /* Push descriptor itself */
+ new_mark_stack_ptr++;
+ new_mark_stack_ptr -> mse_start = addr + sz - 1;
+ new_mark_stack_ptr -> mse_descr = sizeof(word) | DS_LENGTH;
+ }
+ return(new_mark_stack_ptr);
+}
+
+#if defined(__STDC__) || defined(__cplusplus)
+ GC_descr GC_make_descriptor(GC_bitmap bm, size_t len)
+#else
+ GC_descr GC_make_descriptor(bm, len)
+ GC_bitmap bm;
+ size_t len;
+#endif
+{
+ register signed_word last_set_bit = len - 1;
+ register word result;
+ register int i;
+# define HIGH_BIT (((word)1) << (WORDSZ - 1))
+
+ if (!GC_explicit_typing_initialized) GC_init_explicit_typing();
+ while (last_set_bit >= 0 && !GC_get_bit(bm, last_set_bit)) last_set_bit --;
+ if (last_set_bit < 0) return(0 /* no pointers */);
+# if ALIGNMENT == CPP_WORDSZ/8
+ {
+ register bool all_bits_set = TRUE;
+ for (i = 0; i < last_set_bit; i++) {
+ if (!GC_get_bit(bm, i)) {
+ all_bits_set = FALSE;
+ break;
+ }
+ }
+ if (all_bits_set) {
+ /* An initial section contains all pointers. Use length descriptor. */
+ return(WORDS_TO_BYTES(last_set_bit+1) | DS_LENGTH);
+ }
+ }
+# endif
+ if (last_set_bit < BITMAP_BITS) {
+ /* Hopefully the common case. */
+ /* Build bitmap descriptor (with bits reversed) */
+ result = HIGH_BIT;
+ for (i = last_set_bit - 1; i >= 0; i--) {
+ result >>= 1;
+ if (GC_get_bit(bm, i)) result |= HIGH_BIT;
+ }
+ result |= DS_BITMAP;
+ return(result);
+ } else {
+ signed_word index;
+
+ index = GC_add_ext_descriptor(bm, (word)last_set_bit+1);
+ if (index == -1) return(WORDS_TO_BYTES(last_set_bit+1) | DS_LENGTH);
+ /* Out of memory: use conservative */
+ /* approximation. */
+ result = MAKE_PROC(GC_typed_mark_proc_index, (word)index);
+ return(result);
+ }
+}
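+/* Example: on a 32 bit machine, bm == 0x5 with len == 3 yields a */
+/* DS_BITMAP descriptor with bits 31 and 29 set, i.e. words 0 and 2 */
+/* may hold pointers (word j is marked by bit WORDSZ-1-j). */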
+
+ptr_t GC_clear_stack();
+
+#define GENERAL_MALLOC(lb,k) \
+ (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+
+#if defined(__STDC__) || defined(__cplusplus)
+ extern void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
+#else
+ extern char * GC_malloc_explicitly_typed(lb, d)
+ size_t lb;
+ GC_descr d;
+#endif
+{
+register ptr_t op;
+register ptr_t * opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ lb += EXTRA_BYTES;
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_eobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb]; /* May have been uninitialized. */
+# endif
+ } else {
+ *opp = obj_link(op);
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ }
+ } else {
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
+ lw = BYTES_TO_WORDS(GC_size(op));
+ }
+ ((word *)op)[lw - 1] = d;
+ return((extern_ptr_t) op);
+}
+
+#if defined(__STDC__) || defined(__cplusplus)
+ void * GC_calloc_explicitly_typed(size_t n,
+ size_t lb,
+ GC_descr d)
+#else
+ char * GC_calloc_explicitly_typed(n, lb, d)
+ size_t n;
+ size_t lb;
+ GC_descr d;
+#endif
+{
+register ptr_t op;
+register ptr_t * opp;
+register word lw;
+GC_descr simple_descr;
+complex_descriptor *complex_descr;
+register int descr_type;
+struct LeafDescriptor leaf;
+DCL_LOCK_STATE;
+
+ descr_type = GC_make_array_descriptor((word)n, (word)lb, d,
+ &simple_descr, &complex_descr, &leaf);
+ switch(descr_type) {
+ case NO_MEM: return(0);
+ case SIMPLE: return(GC_malloc_explicitly_typed(n*lb, simple_descr));
+ case LEAF:
+ lb *= n;
+ lb += sizeof(struct LeafDescriptor) + EXTRA_BYTES;
+ break;
+ case COMPLEX:
+ lb *= n;
+ lb += EXTRA_BYTES;
+ break;
+ }
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_arobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_array_kind);
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb]; /* May have been uninitialized. */
+# endif
+ } else {
+ *opp = obj_link(op);
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ }
+ } else {
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_array_kind);
+ lw = BYTES_TO_WORDS(GC_size(op));
+ }
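+    /* For LEAF the LeafDescriptor is stored in the object itself, */
+    /* just before the final descriptor word; for COMPLEX only a */
+    /* pointer to a separately allocated complex_descriptor goes in */
+    /* the last word. */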
+ if (descr_type == LEAF) {
+ /* Set up the descriptor inside the object itself. */
+ VOLATILE struct LeafDescriptor * lp =
+ (struct LeafDescriptor *)
+ ((word *)op
+ + lw - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));
+
+ lp -> ld_tag = LEAF_TAG;
+ lp -> ld_size = leaf.ld_size;
+ lp -> ld_nelements = leaf.ld_nelements;
+ lp -> ld_descriptor = leaf.ld_descriptor;
+ ((VOLATILE word *)op)[lw - 1] = (word)lp;
+ } else {
+ extern unsigned GC_finalization_failures;
+ unsigned ff = GC_finalization_failures;
+
+ ((word *)op)[lw - 1] = (word)complex_descr;
+ /* Make sure the descriptor is cleared once there is any danger */
+ /* it may have been collected. */
+ (void)
+ GC_general_register_disappearing_link((extern_ptr_t *)
+ ((word *)op+lw-1),
+ (extern_ptr_t) op);
+ if (ff != GC_finalization_failures) {
+ /* We may have failed to register op due to lack of memory. */
+ /* We were out of memory very recently, so we can safely */
+ /* punt. */
+ ((word *)op)[lw - 1] = 0;
+ return(0);
+ }
+ }
+ return((extern_ptr_t) op);
+}