11/22/94 pcb StripAddress the temporary memory handle for 24-bit mode.
11/30/94 pcb Tracking all memory usage so we can deallocate it all at once.
- 02/10/96 pcb Added routine to perform a final collection when\runloading shared library.
+ 02/10/96 pcb Added routine to perform a final collection when
+unloading shared library.
by Patrick C. Beard.
*/
}
theTemporaryMemory = NULL;
-# if !defined(SHARED_LIBRARY_BUILD)
+# if !defined(SILENT) && !defined(SHARED_LIBRARY_BUILD)
fprintf(stdout, "[total memory used: %ld bytes.]\n",
totalMemoryUsed);
fprintf(stdout, "[total collections: %ld.]\n", GC_gc_no);
/* Boehm, November 17, 1995 12:05 pm PST */
#ifdef __MWERKS__
-#if defined(__powerc)
-#include <MacHeadersPPC>
-#else
-#include <MacHeaders68K>
+
+// for CodeWarrior Pro with Metrowerks Standard Library (MSL).
+// #define MSL_USE_PRECOMPILED_HEADERS 0
+#include <ansi_prefix.mac.h>
+#ifndef __STDC__
+#define __STDC__ 0
#endif
+
#endif
// these are defined again in gc_priv.h.
#undef FALSE
#define ALL_INTERIOR_POINTERS // follows interior pointers.
-//#define SILENT // want collection messages.
+//#define SILENT // want collection messages.
//#define DONT_ADD_BYTE_AT_END // no padding.
-//#define SMALL_CONFIG // whether to a smaller heap.
-#define NO_SIGNALS // signals aren't real on the Macintosh.
+//#define SMALL_CONFIG // whether to use a smaller heap.
+#define NO_SIGNALS // signals aren't real on the Macintosh.
#define USE_TEMPORARY_MEMORY // use Macintosh temporary memory.
// CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DSILENT
// since some ports use malloc or calloc to obtain system memory.
// (Probably works for UNIX, and win32.)
// -DNO_DEBUG removes GC_dump and the debugging routines it calls.
-// Reduces code size slightly at the expense of debuggability.
\ No newline at end of file
+// Reduces code size slightly at the expense of debuggability.
/* Boehm, November 17, 1995 12:10 pm PST */
#ifdef __MWERKS__
-#if defined(__powerc)
-#include <MacHeadersPPC>
-#else
-#include <MacHeaders68K>
-#endif
+
+// for CodeWarrior Pro with Metrowerks Standard Library (MSL).
+// #define MSL_USE_PRECOMPILED_HEADERS 0
+#include <ansi_prefix.mac.h>
+#ifndef __STDC__
+#define __STDC__ 0
#endif
+#endif /* __MWERKS__ */
+
// these are defined again in gc_priv.h.
#undef TRUE
#undef FALSE
# and runs some tests of collector and cords. Does not add cords or
# c++ interface to gc.a
# cord/de - builds dumb editor based on cords.
-CC=cc
-CXX=CC
-AS=as
+ABI_FLAG=
+CC=cc $(ABI_FLAG)
+CXX=CC $(ABI_FLAG)
+AS=as $(ABI_FLAG)
# The above doesn't work with gas, which doesn't run cpp.
# Define AS as `gcc -c -x assembler-with-cpp' instead.
# Under Irix 6, you will have to specify the ABI for as if you specify
# it for the C compiler.
-CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DATOMIC_UNCOLLECTABLE -DNO_EXECUTE_PERMISSION -DSILENT
+CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION -DSILENT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
# in a separate postpass, and hence their memory won't be reclaimed.
# Not recommended unless you are implementing a language that specifies
# these semantics.
+# -DFINALIZE_ON_DEMAND causes finalizers to be run only in response
+# to explicit GC_invoke_finalizers() calls.
# -DATOMIC_UNCOLLECTABLE includes code for GC_malloc_atomic_uncollectable.
# This is useful if either the vendor malloc implementation is poor,
# or if REDIRECT_MALLOC is used.
# Works for Solaris and Irix.
# -DMMAP_STACKS (for Solaris threads) Use mmap from /dev/zero rather than
# GC_scratch_alloc() to get stack memory.
+# -DPRINT_BLACK_LIST Whenever a black list entry is added, i.e. whenever
+# the garbage collector detects a value that looks almost, but not quite,
+# like a pointer, print both the address containing the value, and the
+# value of the near-bogus-pointer. Can be used to identify regions of
+# memory that are likely to contribute misidentified pointers.
+# -DOLD_BLOCK_ALLOC Use the old, possibly faster, large block
+# allocation strategy. The new strategy tries harder to minimize
+# fragmentation, sometimes at the expense of spending more time in the
+# large block allocator and/or collecting more frequently.
+#
srcdir = .
VPATH = $(srcdir)
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o irix_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o irix_threads.o linux_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c irix_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c irix_threads.c linux_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC cord/SCOPTIONS.amiga cord/SMakefile.amiga
# Alpha/OSF shared library version of the collector
libalphagc.so: $(OBJS)
ld -shared -o libalphagc.so $(OBJS) dyn_load.o -lc
+ ln libalphagc.so libgc.so
# IRIX shared library version of the collector
libirixgc.so: $(OBJS) dyn_load.o
- ld -shared -o libirixgc.so $(OBJS) dyn_load.o -lc
+ ld -shared $(ABI_FLAG) -o libirixgc.so $(OBJS) dyn_load.o -lc
+ ln libirixgc.so libgc.so
mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s $(srcdir)/mips_ultrix_mach_dep.s $(srcdir)/rs6000_mach_dep.s $(UTILS)
rm -f mach_dep.o
# not time-critical anyway.
# Set SPECIALCFLAGS to -q nodirect_code on Encore.
-all: gc.a gctest
+all: gc.a gctest$(EXE_SUFFIX)
pcr: PCR-Makefile gc_private.h gc_hdrs.h gc.h config.h mach_dep.o $(SRCS)
make -f PCR-Makefile depend
-$(RM) test_cpp test_cpp$(EXE_SUFFIX)
./if_mach HP_PA "" $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a -ldld
./if_not_there test_cpp$(EXE_SUFFIX) $(CXXLD) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a
+ $(RM) test_cpp
c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
-$(RM) on_sparc_sunos5
$(AR) ru gc.a gc_cpp.o
$(RANLIB) gc.a
- ./test_cpp 1
+ ./test_cpp$(EXE_SUFFIX) 1
echo > c++
dyn_load_sunos53.o: dyn_load.c
$(CC) $(CFLAGS) -DSUNOS53_SHARED_LIB -c $(srcdir)/dyn_load.c -o $@
mach_dep.o: $(srcdir)/mach_dep.c
-# $(srcdir)/mips_mach_dep.s $(srcdir)/rs6000_mach_dep.s if_mach if_not_there
-$(RM) mach_dep.o
$(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
if_mach$(EXE_SUFFIX): $(srcdir)/if_mach.c $(srcdir)/config.h
$(CC) $(CFLAGS) -o if_mach $(srcdir)/if_mach.c
+ -$(RM) if_mach
threadlibs$(EXE_SUFFIX): $(srcdir)/threadlibs.c $(srcdir)/config.h Makefile
$(CC) $(CFLAGS) -o threadlibs $(srcdir)/threadlibs.c
+ -$(RM) threadlibs
if_not_there$(EXE_SUFFIX): $(srcdir)/if_not_there.c
$(CC) $(CFLAGS) -o if_not_there $(srcdir)/if_not_there.c
+ -$(RM) if_not_there
clean:
- -$(RM) gc.a *.o gctest gctest_dyn_link test_cpp \
+ -$(RM) gc.a *.o
+ -$(RM) *.o
+ -$(RM) gctest gctest_dyn_link test_cpp \
setjmp_test mon.out gmon.out a.out core if_not_there if_mach \
$(CORD_OBJS) cordtest cord/cordtest de cord/de
-$(RM) gctest$(EXE_SUFFIX) gctest_dyn_link$(EXE_SUFFIX) test_cpp$(EXE_SUFFIX) \
gctest$(EXE_SUFFIX): test.o gc.a
-$(RM) gctest$(EXE_SUFFIX)
$(CC) $(CFLAGS) -o gctest test.o gc.a
+ $(RM) gctest
# If an optimized setjmp_test generates a segmentation fault,
# odds are your compiler is broken. Gctest may still work.
if_mach$(EXE_SUFFIX) if_not_there$(EXE_SUFFIX)
-$(RM) setjmp_test$(EXE_SUFFIX)
$(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
+ $(RM) setjmp_test
test: KandRtest cord/cordtest$(EXE_SUFFIX)
./cord/cordtest$(EXE_SUFFIX)
./setjmp_test$(EXE_SUFFIX)
./gctest$(EXE_SUFFIX)
-
+++ /dev/null
-gc.mak
\ No newline at end of file
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
-Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.12 of a conservative garbage collector for C and C++.
+This is version 4.13alpha2 of a conservative garbage collector for C and C++.
+
+You might find a more recent version of this at
+
+http://reality.sgi.com/boehm/gc.html
HISTORY -
seen by the garbage collector. Thus objects pointed to only from such a
region may be prematurely deallocated. It is thus suggested that the
standard "malloc" be used only for memory regions, such as I/O buffers, that
-are guaranteed not to contain pointers. Pointers in C language automatic,
-static, or register variables, are correctly recognized. (Note that
-GC_malloc_uncollectable has semantics similar to standard malloc,
-but allocates objects that are traced by the collector.)
+are guaranteed not to contain pointers to garbage collectable memory.
+Pointers in C language automatic, static, or register variables,
+are correctly recognized. (Note that GC_malloc_uncollectable has semantics
+similar to standard malloc, but allocates objects that are traced by the
+collector.)
The collector does not always know how to find pointers in data
areas that are associated with dynamic libraries. This is easy to
remedy IF you know how to find those data areas on your operating
system (see GC_add_roots). Code for doing this under SunOS, IRIX 5.X and 6.X,
-HP/UX, Alpha OSF/1 and win32 is included and used by default. (See
-README.win32 for win32 details.) On other systems pointers from dynamic library
-data areas may not be considered by the collector.
+HP/UX, Alpha OSF/1, Linux, and win32 is included and used by default. (See
+README.win32 for win32 details.) On other systems pointers from dynamic
+library data areas may not be considered by the collector.
Note that the garbage collector does not need to be informed of shared
read-only data. However if the shared library mechanism can introduce
On the other hand, it seems to be needed often enough that it's worth
adding as a standard facility.
+Since 4.12:
+ - Fixed a crucial bug in the Watcom port. There was a redundant decl
+ of GC_push_one in gc_priv.h.
+ - Added FINALIZE_ON_DEMAND.
+ - Fixed some pre-ANSI cc problems in test.c.
+ - Removed getpagesize() use for Solaris. It seems to be missing in one
+ or two versions.
+ - Fixed bool handling for SPARCCompiler version 4.2.
+ - Fixed some files in include that had gotten unlinked from the main
+ copy.
+ - Some RS/6000 fixes (missing casts). Thanks to Toralf Foerster.
+ - Fixed several problems in GC_debug_realloc, affecting mostly the
+ FIND_LEAK case.
+ - GC_exclude_static_roots contained a buggy unsigned comparison to
+ terminate a loop. (Thanks to Wilson Ho.)
+ - CORD_str failed if the substring occurred at the last possible position.
+ (Only affects cord users.)
+ - Fixed Linux code to deal with RedHat 5.0 and integrated Peter Bigot's
+ os_dep.c code for dealing with various Linux versions.
+ - Added workaround for Irix pthreads sigaction bug and possible signal
+ misdirection problems.
+Since alpha1:
+ - Changed RS6000 STACKBOTTOM.
+ - Integrated Patrick Beard's Mac changes.
+ - Alpha1 didn't compile on Irix m.n, m < 6.
+ - Replaced Makefile.dj with a new one from Gary Leavens.
+ - Added Andrew Stitcher's changes to support SCO OpenServer.
+ - Added PRINT_BLACK_LIST, to allow debugging of high densities of false
+ pointers.
+ - Added code to debug allocator to keep track of return address
+ in GC_malloc caller, thus giving a bit more context.
+ - Changed default behavior of large block allocator to more
+ aggressively avoid fragmentation. This is likely to slow down the
+ collector when it succeeds at reducing space cost.
+ - Integrated Fergus Henderson's CYGWIN32 changes. They are untested,
+ but needed for newer versions.
+ - USE_MMAP had some serious bugs. This caused the collector to fail
+ consistently on Solaris with -DSMALL_CONFIG.
+ - Added Linux threads support, thanks largely to Fergus Henderson.
+
To do:
+ - I have a backlog of unintegrated contributed platform-specific changes.
- Very large root set sizes (> 16 MB or so) could cause the collector
to abort with an unexpected mark stack overflow. (Thanks again to
Peter Chubb.) NOT YET FIXED. Workaround is to increase the initial
off DYNAMIC_LOADING in the collector as a workaround. It may also
be possible to conditionally intercept mmap and use GC_exclude_static_roots.
The real fix is to walk rld data structures, which looks possible.
- - SGI pthreads and incremental collection don't mix yet.
- Integrate MIT and DEC pthreads ports.
+Patrick Beard's Notes for building GC v4.12 with CodeWarrior Pro 2:
+----------------------------------------------------------------------------
+The current build environment for the collector is CodeWarrior Pro 2.
+Projects for CodeWarrior Pro 2 (and for quite a few older versions)
+are distributed in the file Mac_projects.sit.hqx. The project file
+:Mac_projects:gc.prj builds static library versions of the collector.
+:Mac_projects:gctest.prj builds the GC test suite.
+
+Configuring the collector is still done by editing the files
+:Mac_files:MacOS_config.h and :Mac_files:MacOS_Test_config.h.
+
Lars Farm's suggestions on building the collector:
----------------------------------------------------------------------------
Garbage Collection on MacOS - a manual 'MakeFile'
#include <ansi_prefix.mac.h>
#undef NDEBUG
-#define ALL_INTERIOR_POINTERS /* for GC_priv.h
+#define ALL_INTERIOR_POINTERS /* for GC_priv.h */
---- ( cut here ) ----
3) Test that the C++ interface 'gc_cpp.cc/h' works with 'test_cpp.cc'.
Dynamic libraries are supported on an ELF system. A static executable
should be linked with the gcc option "-Wl,-defsym,_DYNAMIC=0".
+
+The collector appears to work with Linux threads. We have seen
+intermittent hangs in sem_wait. So far we have been unable to reproduce
+these unless the process was being debugged or traced. Thus it's
+possible that the only real issue is that the debugger loses
+signals on rare occasions.
+
+The garbage collector uses SIGPWR and SIGXCPU if it is used with
+Linux threads. These should not be touched by the client program.
+
+To use threads, you need to abide by the following requirements:
+
+1) You need to use LinuxThreads (which are included in libc6).
+
+ The collector relies on some implementation details of the LinuxThreads
+ package. It is unlikely that this code will work on other
+ pthread implementations (in particular it will *not* work with
+ MIT pthreads).
+
+2) You must compile the collector with -DLINUX_THREADS and -D_REENTRANT
+ specified in the Makefile.
+
+3) Every file that makes thread calls should define LINUX_THREADS and
+ _REENTRANT and then include gc.h. Gc.h redefines some of the
+ pthread primitives as macros which also provide the collector with
+ information it requires.
+
+4) Currently dlopen() is probably not safe. The collector must traverse
+ the list of libraries maintained by the runtime loader. That can
+ probably be in an inconsistent state when a thread calling the loader is
+ stopped for GC. (It's possible that this is fixable in the
+ same way it is handled for SOLARIS_THREADS, with GC_dlopen.)
Sproc threads are not supported in this version, though there may exist other
ports.
-Pthreads are somewhat supported without incremental collection. This
-requires that:
+Pthreads support is provided. This requires that:
1) You compile the collector with -DIRIX_THREADS specified in the Makefile.
include gc.h. Gc.h redefines some of the pthread primitives as macros which
also provide the collector with information it requires.
-4) For the time being, you should not use dlopen.
+4) pthread_cond_wait and pthread_cond_timed_wait should be prepared for
+premature wakeups. (I believe the pthreads and related standards require this
+anyway. Irix pthreads often terminate a wait if a signal arrives.
+The garbage collector uses signals to stop threads.)
+
+5) It is expensive to stop a thread waiting in IO at the time the request is
+initiated. Applications with many such threads may not exhibit acceptable
+performance with the collector. (Increasing the heap size may help.)
+
first thread. (This avoids a deadlock arising from calling GC_thr_init
with the allocation lock held.)
+It appears that there is a problem in using gc_cpp.h in conjunction with
+Solaris threads and Sun's C++ runtime. Apparently the overloaded new operator
+is invoked by some iostream initialization code before threads are correctly
+initialized. As a result, a call to thr_self() in garbage collector
+initialization segfaults. Currently the only known workaround is to not
+invoke the garbage collector from a user defined global operator new, or to
+have it invoke the garbage-collector's allocators only after main has started.
+(Note that the latter requires a moderately expensive test in operator
+delete.)
+
Hans-J. Boehm
(The above contains my personal opinions, which are probably not shared
by anyone else.)
For GNU-win32, use the regular makefile, possibly after uncommenting
the line "include Makefile.DLLs". The latter should be necessary only
-if you want to package the collector as a DLL.
+if you want to package the collector as a DLL. The GNU-win32 port is
+believed to work only for b18, not b19, probably due to linker changes
+in b19. This is probably fixable with a different definition of
+DATASTART and DATAEND in config.h.
For Borland tools, use BCC_MAKEFILE. Note that
Borland's compiler defaults to 1 byte alignment in structures (-a1),
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
/* Return FALSE on failure. */
-static bool setup_header(hhdr, sz, kind, flags)
+static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz; /* object size in words */
int kind;
return(TRUE);
}
+#ifdef EXACT_FIRST
+# define LAST_TRIP 2
+#else
+# define LAST_TRIP 1
+#endif
+
/*
* Allocate (and return pointer to) a heap block
* for objects of size sz words.
register hdr * phdr; /* Header corr. to prevhbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
- bool first_time = TRUE;
+ int trip_count = 0;
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
hhdr = HDR(hbp);
- if( prevhbp == GC_savhbp && !first_time) {
- return(0);
+ if( prevhbp == GC_savhbp) {
+ if (trip_count == LAST_TRIP) return(0);
+ ++trip_count;
}
- first_time = FALSE;
-
if( hbp == 0 ) continue;
size_avail = hhdr->hb_sz;
+# ifdef EXACT_FIRST
+ if (trip_count <= 1 && size_avail != size_needed) continue;
+# endif
if (size_avail < size_needed) continue;
+# ifdef PRESERVE_LAST
+ if (size_avail != size_needed
+ && !GC_incremental
+ && GC_in_last_heap_sect(hbp) && GC_should_collect()) {
+ continue;
+ }
+# endif
/* If the next heap block is obviously better, go on. */
/* This prevents us from disassembling a single large block */
/* to get tiny blocks. */
if (GC_savhbp == hbp) GC_savhbp = prevhbp;
hbp = prevhbp;
hhdr = phdr;
- if (hbp == GC_savhbp) first_time = TRUE;
+ if (hbp == GC_savhbp) --trip_count;
}
# endif
}
extern signed_word GC_mem_found; /* Number of reclaimed longwords */
/* after garbage collection */
-bool GC_dont_expand = 0;
+GC_bool GC_dont_expand = 0;
word GC_free_space_divisor = 4;
-extern bool GC_collection_in_progress();
+extern GC_bool GC_collection_in_progress();
int GC_never_stop_func GC_PROTO((void)) { return(0); }
}
/* Have we allocated enough to amortize a collection? */
-bool GC_should_collect()
+GC_bool GC_should_collect()
{
return(GC_adj_words_allocd() >= min_words_allocd());
}
* Stop the world garbage collection. Assumes lock held, signals disabled.
* If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
*/
-bool GC_try_to_collect_inner(stop_func)
+GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
if (GC_collection_in_progress()) {
* Otherwise we may fail and return FALSE if this takes too long.
* Increment GC_gc_no if we succeed.
*/
-bool GC_stopped_mark(stop_func)
+GC_bool GC_stopped_mark(stop_func)
GC_stop_func stop_func;
{
register int i;
int result;
DCL_LOCK_STATE;
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
ENTER_GC();
EXIT_GC();
UNLOCK();
ENABLE_SIGNALS();
- if(result) GC_invoke_finalizers();
+ if(result) GC_INVOKE_FINALIZERS();
return(result);
}
}
}
+#ifdef PRESERVE_LAST
+GC_bool GC_in_last_heap_sect(p)
+ptr_t p;
+{
+ struct HeapSect * last_heap_sect = &(GC_heap_sects[GC_n_heap_sects-1]);
+ ptr_t start = last_heap_sect -> hs_start;
+ ptr_t end;
+
+ if (p < start) return FALSE;
+ end = start + last_heap_sect -> hs_bytes;
+ if (p >= end) return FALSE;
+ return TRUE;
+}
+#endif
+
# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
* Tiny values of n are rounded up.
* Returns FALSE on failure.
*/
-bool GC_expand_hp_inner(n)
+GC_bool GC_expand_hp_inner(n)
word n;
{
word bytes;
/* How many consecutive GC/expansion failures? */
/* Reset by GC_allochblk. */
-bool GC_collect_or_expand(needed_blocks, ignore_off_page)
+GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
-bool ignore_off_page;
+GC_bool ignore_off_page;
{
if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
- # $Id: alpha_mach_dep.s,v 1.2 1993/01/18 22:54:51 dosser Exp $
+ # $Id: alpha_mach_dep.s,v 1.1 1999/04/07 14:56:06 tromey Exp $
# define call_push(x) \
lda $16, 0(x); /* copy x to first argument register */ \
void GC_clear_bl();
+void GC_default_print_heap_obj_proc(p)
+ptr_t p;
+{
+ ptr_t base = GC_base(p);
+
+ GC_err_printf2("start: 0x%lx, appr. length: %ld", base, GC_size(base));
+}
+
+void (*GC_print_heap_obj)(/* char * s, ptr_t p */) =
+ GC_default_print_heap_obj_proc;
+
+void GC_print_source_ptr(ptr_t p)
+{
+ ptr_t base = GC_base(p);
+ if (0 == base) {
+ GC_err_printf0("in root set");
+ } else {
+ GC_err_printf0("in object at ");
+ (*GC_print_heap_obj)(base);
+ }
+}
+
void GC_bl_init()
{
# ifndef ALL_INTERIOR_POINTERS
/* P is not a valid pointer reference, but it falls inside */
/* the plausible heap bounds. */
/* Add it to the normal incomplete black list if appropriate. */
-void GC_add_to_black_list_normal(p)
+#ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_normal(p, source)
+ ptr_t source;
+#else
+ void GC_add_to_black_list_normal(p)
+#endif
word p;
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
register int index = PHT_HASH(p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
-# ifdef PRINTBLACKLIST
+# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
- GC_printf1("Black listing (normal) 0x%lx\n",
- (unsigned long) p);
+ GC_err_printf2(
+ "Black listing (normal) 0x%lx referenced from 0x%lx ",
+ (unsigned long) p, (unsigned long) source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
}
# endif
set_pht_entry_from_index(GC_incomplete_normal_bl, index);
# endif
/* And the same for false pointers from the stack. */
-void GC_add_to_black_list_stack(p)
+#ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_stack(p, source)
+ ptr_t source;
+#else
+ void GC_add_to_black_list_stack(p)
+#endif
word p;
{
register int index = PHT_HASH(p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
-# ifdef PRINTBLACKLIST
+# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
- GC_printf1("Black listing (stack) 0x%lx\n",
- (unsigned long)p);
+ GC_err_printf2(
+ "Black listing (stack) 0x%lx referenced from 0x%lx ",
+ (unsigned long)p, (unsigned long)source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
}
# endif
set_pht_entry_from_index(GC_incomplete_stack_bl, index);
# define OFFSET 0x10000
typedef struct {
- bool new_valid;
+ GC_bool new_valid;
word old_sum;
word new_sum;
struct hblk * block; /* Block to which this refers + OFFSET */
# ifdef STUBBORN_ALLOC
/* Check whether a stubborn object from the given block appears on */
/* the appropriate free list. */
-bool GC_on_free_list(h)
+GC_bool GC_on_free_list(h)
struct hblk *h;
{
register hdr * hhdr = HDR(h);
# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
/* The above test may need refinement */
# define I386
-# define SCO
+# if defined(_SCO_ELF)
+# define SCO_ELF
+# else
+# define SCO
+# endif
# define mach_type_known
# endif
# if defined(_AUX_SOURCE)
# endif
# if defined(__DJGPP__)
# define I386
-# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# ifndef DJGPP
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# endif
# define mach_type_known
# endif
# if defined(__CYGWIN32__)
# endif
# define PROC_VDB
# define HEURISTIC1
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+ /* getpagesize() appeared to be missing from at least one */
+ /* Solaris 5.4 installation. Weird. */
# endif
# ifdef SUNOS4
# define OS_TYPE "SUNOS4"
+((word)&etext & 0xfff))
# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
# endif
+# ifdef SCO_ELF
+# define OS_TYPE "SCO_ELF"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t) 0x08048000)
+# define DYNAMIC_LOADING
+# define ELF_CLASS ELFCLASS32
+# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define STACKBOTTOM ((ptr_t)0xc0000000)
+ /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
+ /* Probably needs to be more flexible, but I don't yet */
+ /* fully understand how flexible. */
# define MPROTECT_VDB
# ifdef __ELF__
# define DYNAMIC_LOADING
-# endif
-# ifdef __ELF__
-# define DYNAMIC_LOADING
# ifdef UNDEFINED /* includes ro data */
extern int _etext;
# define DATASTART ((ptr_t)((((word) (&_etext)) + 0xfff) & ~0xfff))
# endif
- extern char **__environ;
-# define DATASTART ((ptr_t)(&__environ))
+# include <linux/version.h>
+# include <features.h>
+# if LINUX_VERSION_CODE >= 0x20000 && defined(__GLIBC__) && __GLIBC__ >= 2
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# else
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
/* hideous kludge: __environ is the first */
/* word in crt0.o, and delimits the start */
/* of the data segment, no matter which */
/* would include .rodata, which may */
/* contain large read-only data tables */
/* that we'd rather not scan. */
+# endif
extern int _end;
# define DATAEND (&_end)
# else
# endif
# endif
# ifdef CYGWIN32
-# define OS_TYPE "CYGWIN32"
- extern int _bss_start__;
-# define DATASTART ((ptr_t)&_bss_start__)
- extern int _data_end__;
-# define DATAEND ((ptr_t)&_data_end__)
+ extern int _data_start__;
+ extern int _data_end__;
+ extern int _bss_start__;
+ extern int _bss_end__;
+ /* For binutils 2.9.1, we have */
+ /* DATASTART = _data_start__ */
+ /* DATAEND = _bss_end__ */
+ /* whereas for some earlier versions it was */
+ /* DATASTART = _bss_start__ */
+ /* DATAEND = _data_end__ */
+ /* To get it right for both, we take the */
+ /* minimum/maximum of the two. */
+# define MAX(x,y) ((x) > (y) ? (x) : (y))
+# define MIN(x,y) ((x) < (y) ? (x) : (y))
+# define DATASTART ((ptr_t) MIN(_data_start__, _bss_start__))
+# define DATAEND ((ptr_t) MAX(_data_end__, _bss_end__))
# undef STACK_GRAN
# define STACK_GRAN 0x10000
# define HEURISTIC1
extern int _fdata;
# define DATASTART ((ptr_t)(&_fdata))
# ifdef USE_MMAP
-# define HEAP_START (ptr_t)0x40000000
+# define HEAP_START (ptr_t)0x30000000
# else
# define HEAP_START DATASTART
# endif
# endif
# ifdef IRIX5
# define OS_TYPE "IRIX5"
-# ifndef IRIX_THREADS
-# define MPROTECT_VDB
-# endif
+# define MPROTECT_VDB
# ifdef _MIPS_SZPTR
# define CPP_WORDSZ _MIPS_SZPTR
# define ALIGNMENT (_MIPS_SZPTR/8)
# define ALIGNMENT 4
# define DATASTART ((ptr_t)0x20000000)
extern int errno;
-# define STACKBOTTOM ((ptr_t)((ulong)&errno + 2*sizeof(int)))
+# define STACKBOTTOM ((ptr_t)((ulong)&errno))
# define DYNAMIC_LOADING
/* For really old versions of AIX, this may have to be removed. */
# endif
# define DYNAMIC_LOADING
# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
+ /* They misspelled the Posix macro? */
# endif
# ifdef ALPHA
# endif
# if defined(SVR4) && !defined(GETPAGESIZE)
-# include <unistd.h>
- int
- GC_getpagesize()
- {
- return sysconf(_SC_PAGESIZE);
- }
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
# endif
+
# ifndef GETPAGESIZE
+# if defined(SUNOS5) || defined(IRIX5)
+# include <unistd.h>
+# endif
# define GETPAGESIZE() getpagesize()
# endif
# if defined(IRIX_THREADS) && !defined(IRIX5)
--> inconsistent configuration
# endif
+# if defined(LINUX_THREADS) && !defined(LINUX)
+--> inconsistent configuration
+# endif
# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
--> inconsistent configuration
# endif
-# if defined(PCR) || defined(SRC_M3) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(PCR) || defined(SRC_M3) || \
+ defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
+ defined(IRIX_THREADS) || defined(LINUX_THREADS)
# define THREADS
# endif
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-Please send bug reports to Hans-J. Boehm (boehm@parc.xerox.com).
+Please send bug reports to Hans-J. Boehm (boehm@sgi.com).
This is a string packages that uses a tree-based representation.
-See gc.h for a description of the functions provided. Ec.h describes
+See cord.h for a description of the functions provided. Ec.h describes
"extensible cords", which are essentially output streams that write
to a cord. These allow for efficient construction of cords without
requiring a bound on the size of a cord.
x_buf |= CORD_pos_fetch(xpos);
CORD_next(xpos);
}
- for (match_pos = start; match_pos < xlen - slen; match_pos++) {
+ for (match_pos = start; ; match_pos++) {
if ((x_buf & mask) == s_buf) {
if (slen == start_len ||
CORD_ncmp(x, match_pos + start_len,
return(match_pos);
}
}
+ if ( match_pos == xlen - slen ) {
+ return(CORD_NOT_FOUND);
+ }
x_buf <<= 8;
x_buf |= CORD_pos_fetch(xpos);
CORD_next(xpos);
}
- return(CORD_NOT_FOUND);
}
void CORD_ec_flush_buf(CORD_ec x)
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+GC_API int GC_invoke_finalizers GC_PROTO((void));
+ /* Run finalizers for all objects that are ready to */
+ /* be finalized. Return the number of finalizers */
+ /* that were run. Normally this is also called */
+ /* implicitly during some allocations. If */
+ /* FINALIZE_ON_DEMAND is defined, it must be called */
+ /* explicitly. */
+
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
# endif
#endif
-#ifdef __WATCOMC__
- /* Ivan Demakov: Programs compiled by Watcom C with -5r option
- * crash without this declaration
- * HB: Could this go into gc_priv.h?
- */
- void GC_noop(void*, ...);
-#endif
-
#ifdef __cplusplus
} /* end of extern "C" */
#endif
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
typedef struct {
char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
-# ifdef SAVE_CALL_CHAIN
+# ifdef NEED_CALLINFO
struct callinfo oh_ci[NFRAMES];
# endif
word oh_sz; /* Original malloc arg. */
#ifdef SAVE_CALL_CHAIN
-# define ADD_CALL_CHAIN(base) GC_save_callers(((oh *)(base)) -> oh_ci)
+# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#else
-# define ADD_CALL_CHAIN(base)
+# ifdef GC_ADD_CALLER
+# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
+# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
+# else
+# define ADD_CALL_CHAIN(base, ra)
# define PRINT_CALL_CHAIN(base)
+# endif
#endif
/* Check whether object with base pointer p has debugging info */
/* p is assumed to point to a legitimate object in our part */
/* of the heap. */
-bool GC_has_debug_info(p)
+GC_bool GC_has_debug_info(p)
ptr_t p;
{
register oh * ohdr = (oh *)p;
(unsigned long)(ohdr -> oh_sz));
PRINT_CALL_CHAIN(ohdr);
}
+
+void GC_debug_print_heap_obj_proc(p)
+ptr_t p;
+{
+ if (GC_has_debug_info(p)) {
+ GC_print_obj(p);
+ } else {
+ GC_default_print_heap_obj_proc(p);
+ }
+}
+
void GC_print_smashed_obj(p, clobbered_addr)
ptr_t p, clobbered_addr;
{
void GC_start_debugging()
{
GC_check_heap = GC_check_heap_proc;
+ GC_print_heap_obj = GC_debug_print_heap_obj_proc;
GC_debugging_started = TRUE;
GC_register_displacement((word)sizeof(oh));
}
GC_register_displacement((word)sizeof(oh) + offset);
}
+# ifdef GC_ADD_CALLER
+# define EXTRA_ARGS word ra, char * s, int i
+# define OPT_RA ra,
+# else
+# define EXTRA_ARGS char * s, int i
+# define OPT_RA
+# endif
+
# ifdef __STDC__
- GC_PTR GC_debug_malloc(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc(lb, s, i)
size_t lb;
char * s;
int i;
+# ifdef GC_ADD_CALLER
+ --> GC_ADD_CALLER not implemented for K&R C
+# endif
# endif
{
GC_PTR result = GC_malloc(lb + DEBUG_BYTES);
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#ifdef STUBBORN_ALLOC
# ifdef __STDC__
- GC_PTR GC_debug_malloc_stubborn(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_stubborn(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_stubborn(lb, s, i)
size_t lb;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#endif /* STUBBORN_ALLOC */
# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_atomic(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_atomic(lb, s, i)
size_t lb;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
# ifdef __STDC__
- GC_PTR GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_uncollectable(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_uncollectable(lb, s, i)
size_t lb;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
+#ifdef ATOMIC_UNCOLLECTABLE
+# ifdef __STDC__
+ GC_PTR GC_debug_malloc_atomic_uncollectable(size_t lb, EXTRA_ARGS)
+# else
+ GC_PTR GC_debug_malloc_atomic_uncollectable(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ GC_PTR result = GC_malloc_atomic_uncollectable(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf1(
+ "GC_debug_malloc_atomic_uncollectable(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
+ }
+ ADD_CALL_CHAIN(result, ra);
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+#endif /* ATOMIC_UNCOLLECTABLE */
# ifdef __STDC__
void GC_debug_free(GC_PTR p)
# else
{
register hdr * hhdr = HDR(p);
- bool uncollectable = FALSE;
+ GC_bool uncollectable = FALSE;
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
uncollectable = TRUE;
}
# ifdef __STDC__
- GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, char *s, int i)
+ GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_realloc(p, lb, s, i)
GC_PTR p;
{
register GC_PTR base = GC_base(p);
register ptr_t clobbered;
- register GC_PTR result = GC_debug_malloc(lb, s, i);
+ register GC_PTR result;
register size_t copy_sz = lb;
register size_t old_sz;
register hdr * hhdr;
- if (p == 0) return(GC_debug_malloc(lb, s, i));
+ if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
if (base == 0) {
GC_err_printf1(
- "Attempt to free invalid pointer %lx\n", (unsigned long)p);
+ "Attempt to reallocate invalid pointer %lx\n", (unsigned long)p);
ABORT("realloc(invalid pointer)");
}
if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
switch (hhdr -> hb_obj_kind) {
# ifdef STUBBORN_ALLOC
case STUBBORN:
- result = GC_debug_malloc_stubborn(lb, s, i);
+ result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
break;
# endif
case NORMAL:
- result = GC_debug_malloc(lb, s, i);
+ result = GC_debug_malloc(lb, OPT_RA s, i);
break;
case PTRFREE:
- result = GC_debug_malloc_atomic(lb, s, i);
+ result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
break;
case UNCOLLECTABLE:
- result = GC_debug_malloc_uncollectable(lb, s, i);
+ result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
break;
+# ifdef ATOMIC_UNCOLLECTABLE
+ case AUNCOLLECTABLE:
+ result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
+ break;
+# endif
default:
GC_err_printf0("GC_debug_realloc: encountered bad kind\n");
ABORT("bad kind");
if (old_sz < copy_sz) copy_sz = old_sz;
if (result == 0) return(0);
BCOPY(p, result, copy_sz);
+ GC_debug_free(p);
return(result);
}
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
!defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
!defined(HP_PA) && (!defined(LINUX) && !defined(__ELF__)) && \
- !defined(RS6000)
+ !defined(RS6000) && !defined(SCO_ELF)
--> We only know how to find data segments of dynamic libraries for the
--> above. Additional SVR4 variants might not be too
--> hard to add.
# endif /* !USE_PROC ... */
# endif /* SUNOS */
-#if defined(LINUX) && defined(__ELF__)
+#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)
/* Dynamic loading code for Linux running ELF. Somewhat tested on
* Linux/x86, untested but hopefully should work on Linux/Alpha.
/* that could possibly have been written to. */
DWORD GC_allocation_granularity;
- extern bool GC_is_heap_base (ptr_t p);
+ extern GC_bool GC_is_heap_base (ptr_t p);
# ifdef WIN32_THREADS
extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif
}
- extern bool GC_win32s;
+ extern GC_bool GC_win32s;
void GC_register_dynamic_libraries()
{
GC_enqueue_all_finalizers();
UNLOCK();
ENABLE_SIGNALS();
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
}
/* Invoke finalizers for all objects that are ready to be finalized. */
/* Should be called without allocation lock. */
-void GC_invoke_finalizers()
+int GC_invoke_finalizers()
{
register struct finalizable_object * curr_fo;
+ register int count = 0;
DCL_LOCK_STATE;
while (GC_finalize_now != 0) {
(*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
curr_fo -> fo_client_data);
curr_fo -> fo_client_data = 0;
+ ++count;
# ifdef UNDEFINED
/* This is probably a bad idea. It throws off accounting if */
/* nearly all objects are finalizable. O.w. it shouldn't */
GC_free((GC_PTR)curr_fo);
# endif
}
+ return count;
}
# ifdef __STDC__
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+GC_API int GC_invoke_finalizers GC_PROTO((void));
+ /* Run finalizers for all objects that are ready to */
+ /* be finalized. Return the number of finalizers */
+ /* that were run. Normally this is also called */
+ /* implicitly during some allocations. If */
+ /* FINALIZE_ON_DEMAND is defined, it must be called */
+ /* explicitly. */
+
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
# endif
#endif
-#ifdef __WATCOMC__
- /* Ivan Demakov: Programs compiled by Watcom C with -5r option
- * crash without this declaration
- * HB: Could this go into gc_priv.h?
- */
- void GC_noop(void*, ...);
-#endif
-
#ifdef __cplusplus
} /* end of extern "C" */
#endif
/*
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
#define GC_ALLOC_H
#define __ALLOC_H // Prevent inclusion of the default version. Ugly.
+#define __SGI_STL_ALLOC_H
+#define __SGI_STL_INTERNAL_ALLOC_H
#ifndef __ALLOC
# define __ALLOC alloc
static void * allocate(size_t n) { return GC_malloc(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic(n); }
- static void deallocate(void *p, size_t n) { }
- static void ptr_free_deallocate(void *p, size_t n) { }
+ static void deallocate(void *, size_t) { }
+ static void ptr_free_deallocate(void *, size_t) { }
};
typedef gc_alloc_template < 0 > gc_alloc;
static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic_uncollectable(n); }
- static void deallocate(void *p, size_t n) { GC_free(p); }
- static void ptr_free_deallocate(void *p, size_t n) { GC_free(p); }
+ static void deallocate(void *p, size_t) { GC_free(p); }
+ static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
};
typedef alloc_template < 0 > alloc;
__GC_SPECIALIZE(float, single_client_alloc)
__GC_SPECIALIZE(double, single_client_alloc)
+#ifdef __STL_USE_STD_ALLOCATORS
+
+???copy stuff from stl_alloc.h or remove it to a different file ???
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
} \
}
-/* Push the contenst of current onto the mark stack if it is a valid */
+#ifdef PRINT_BLACK_LIST
+# define GC_FIND_START(current, hhdr, source) \
+ GC_find_start(current, hhdr, source)
+#else
+# define GC_FIND_START(current, hhdr, source) \
+ GC_find_start(current, hhdr)
+#endif
+
+/* Push the contents of current onto the mark stack if it is a valid */
/* ptr to a currently unmarked object. Mark it. */
-# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit) \
+/* If we assumed a standard-conforming compiler, we could probably */
+/* generate the exit_label transparently. */
+# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label) \
{ \
register int displ; /* Displacement in block; first bytes, then words */ \
register hdr * hhdr; \
\
GET_HDR(current,hhdr); \
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
- current = GC_find_start(current, hhdr); \
- if (current == 0) continue; \
+ current = GC_FIND_START(current, hhdr, (word)source); \
+ if (current == 0) goto exit_label; \
hhdr = HDR(current); \
} \
displ = HBLKDISPL(current); \
map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
if (map_entry == OBJ_INVALID) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current); continue; \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); goto exit_label; \
} \
displ = BYTES_TO_WORDS(displ); \
displ -= map_entry; \
\
if (mark_word & mark_bit) { \
/* Mark bit is already set */ \
- continue; \
+ goto exit_label; \
} \
*mark_word_addr = mark_word | mark_bit; \
} \
PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
mark_stack_top, mark_stack_limit) \
+ exit_label: ; \
}
} \
}
-extern bool GC_mark_stack_too_small;
+extern GC_bool GC_mark_stack_too_small;
/* We need a larger mark stack. May be */
/* set by client supplied mark routines.*/
# include "gc_hdrs.h"
# endif
-# if !defined(bool) && !defined(__cplusplus)
- typedef int bool;
- /* This is problematic with C++ implementations that do not define bool. */
- /* By now they should. */
-# else
-# if defined(_SGI_SOURCE) && !defined(_BOOL)
- typedef int bool;
-# endif
-# if defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x410
- typedef int bool;
-# endif
-# if defined(__cplusplus) && defined(_MSC_VER) && _MSC_VER <= 1020
- /* Visual C++ 4.2 does not have bool type. */
- typedef int bool;
-# endif
-# endif
+typedef int GC_bool;
# define TRUE 1
# define FALSE 0
#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
/* whether the objects are atomic or composite, and */
/* whether or not the block was found to be empty */
- /* duing the reclaim phase. Typically generates */
+ /* during the reclaim phase. Typically generates */
/* about one screenful per garbage collection. */
#undef PRINTBLOCKS
-#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
- /* cause the allocator to avoid allocating certain */
- /* blocks in order to avoid introducing "false */
- /* hits". */
-#undef PRINTBLACKLIST
-
#ifdef SILENT
# ifdef PRINTSTATS
# undef PRINTSTATS
# define GATHERSTATS
#endif
+#ifdef FINALIZE_ON_DEMAND
+# define GC_INVOKE_FINALIZERS()
+#else
+# define GC_INVOKE_FINALIZERS() (void)GC_invoke_finalizers()
+#endif
+
#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
/* free lists are actually maintained. This applies */
/* only to the top level routines in misc.c, not to */
/* May save significant amounts of space for obj_map */
/* entries. */
+#ifndef OLD_BLOCK_ALLOC
+ /* Macros controlling large block allocation strategy. */
+# define EXACT_FIRST /* Make a complete pass through the large object */
+ /* free list before splitting a block */
+# define PRESERVE_LAST /* Do not divide last allocated heap segment */
+ /* unless we would otherwise need to expand the */
+ /* heap. */
+#endif
+
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
# ifndef LARGE_CONFIG
# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
# define MAXHINCR 512 /* Maximum heap increment, in blocks */
# else
# define MINHINCR 64
/* */
/*********************************/
+#ifdef SAVE_CALL_CHAIN
+
/*
* Number of frames and arguments to save in objects allocated by
* debugging allocator.
/* alignment reasons. */
# define NARGS 2 /* Mumber of arguments to save for each call. */
-
-#ifdef SAVE_CALL_CHAIN
- struct callinfo {
- word ci_pc;
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
- };
+# define NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+#else
+
+# ifdef GC_ADD_CALLER
+# define NFRAMES 1
+# define NARGS 0
+# define NEED_CALLINFO
+# endif
+
+#endif
+
+#ifdef NEED_CALLINFO
+ struct callinfo {
+ word ci_pc;
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
+ };
#endif
/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
/* space is assumed to be cleared. */
+/* In the case of USE_MMAP, the argument must also be a */
+/* physical page size. */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
# endif
+# ifdef LINUX_THREADS
+# include <pthread.h>
+# ifdef __i386__
+ inline static GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ /* Note: the "xchg" instruction does not need a "lock" prefix */
+ __asm__ __volatile__("xchgl %0, %1"
+ : "=r"(oldval), "=m"(*(addr))
+ : "0"(1), "m"(*(addr)));
+ return oldval;
+ }
+# else
+ --> Need implementation of GC_test_and_set()
+# endif
+# define GC_clear(addr) (*(addr) = 0)
+
+ extern volatile unsigned int GC_allocate_lock;
+ /* This is not a mutex because mutexes that obey the (optional) */
+ /* POSIX scheduling rules are subject to convoys in high contention */
+ /* applications. This is basically a spin lock. */
+ extern pthread_t GC_lock_holder;
+ extern void GC_lock(void);
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
+# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define NO_THREAD (pthread_t)(-1)
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
+# ifdef UNDEFINED
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# else
+# define LOCK() \
+ { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
+# define UNLOCK() \
+ GC_clear(&GC_allocate_lock)
+# endif
+ extern GC_bool GC_collecting;
+# define ENTER_GC() \
+ { \
+ GC_collecting = 1; \
+ }
+# define EXIT_GC() GC_collecting = 0;
+# endif /* LINUX_THREADS */
# ifdef IRIX_THREADS
# include <pthread.h>
# include <mutex.h>
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
-# define __test_and_set(l,v) test_and_set(l,v)
+# define GC_test_and_set(addr, v) test_and_set(addr,v)
+# else
+# define GC_test_and_set(addr, v) __test_and_set(addr,v)
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
-# define LOCK() { if (__test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
# define UNLOCK() __lock_release(&GC_allocate_lock)
# else
# define UNLOCK() GC_allocate_lock = 0
# endif
# endif
- extern bool GC_collecting;
+ extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
-# endif
+# endif /* IRIX_THREADS */
# ifdef WIN32_THREADS
# include <windows.h>
GC_API CRITICAL_SECTION GC_allocate_ml;
# else
# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
|| defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
- || defined(NO_SIGNALS) || defined(IRIX_THREADS)
+ || defined(NO_SIGNALS) || defined(IRIX_THREADS) \
+ || defined(LINUX_THREADS)
/* Also useful for debugging. */
/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
# define DISABLE_SIGNALS()
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
/* swept. */
word ok_descriptor; /* Descriptor template for objects in this */
/* block. */
- bool ok_relocate_descr;
+ GC_bool ok_relocate_descr;
/* Add object size in bytes to descriptor */
/* template to obtain descriptor. Otherwise */
/* template is used as is. */
- bool ok_init; /* Clear objects before putting them on the free list. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
/* Predefined kinds: */
# define PTRFREE 0
/* header structure associated with */
/* block. */
-extern bool GC_is_initialized; /* GC_init() has been run. */
+extern GC_bool GC_is_initialized; /* GC_init() has been run. */
-extern bool GC_objects_are_marked; /* There are marked objects in */
+extern GC_bool GC_objects_are_marked; /* There are marked objects in */
/* the heap. */
-extern int GC_incremental; /* Using incremental/generational collection. */
+extern GC_bool GC_incremental; /* Using incremental/generational collection. */
-extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+extern GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
/* either for incremental collection, */
/* or to limit the root set. */
extern word GC_root_size; /* Total size of registered root sections */
-extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
/* object are used. */
-/* Mark bit perations */
+/* Mark bit operations */
/*
* Retrieve, set, clear the mark bit corresponding
/* Important internal collector routines */
+ptr_t GC_approx_sp();
+
+GC_bool GC_should_collect();
+#ifdef PRESERVE_LAST
+ GC_bool GC_in_last_heap_sect(/* ptr_t */);
+ /* In last added heap section? If so, avoid breaking up. */
+#endif
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
/* Return after about one pages worth of */
/* work. */
-bool GC_mark_stack_empty();
-bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_stack_empty();
+GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
/* subintervals of [b,t) onto */
/* mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+ void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
void GC_push_regs(); /* Push register contents onto mark stack. */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
-void GC_push_one(/*p*/); /* If p points to an object, mark it */
- /* and push contents on the mark stack */
-/* Ivan Demakov: Watcom C error'ed without this */
-# if defined(MSWIN32) && defined(__WATCOMC__)
+# if defined(MSWIN32)
void __cdecl GC_push_one();
# else
void GC_push_one(/*p*/); /* If p points to an object, mark it */
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
/* Ditto, but mark only from uncollectable pages. */
-bool GC_stopped_mark(); /* Stop world and mark from all roots */
+GC_bool GC_stopped_mark(); /* Stop world and mark from all roots */
/* and rescuers. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_set_hdr_marks(/* hhdr */); /* Set the mark bits in a header */
void GC_add_roots_inner();
-bool GC_is_static_root(/* ptr_t p */);
+GC_bool GC_is_static_root(/* ptr_t p */);
/* Is the address p in one of the registered static */
/* root sections? */
void GC_register_dynamic_libraries();
/* Black listing: */
void GC_bl_init();
# ifndef ALL_INTERIOR_POINTERS
- void GC_add_to_black_list_normal(/* bits */);
+ void GC_add_to_black_list_normal(/* bits, maybe source */);
/* Register bits as a possible future false */
/* reference from the heap or static data */
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits)
+# endif
# else
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits)
+# endif
# endif
-void GC_add_to_black_list_stack(/* bits */);
+void GC_add_to_black_list_stack(/* bits, maybe source */);
struct hblk * GC_is_black_listed(/* h, len */);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
/* with the block. This identifies */
/* the block as invalid to the mark */
/* routines. */
-bool GC_add_map_entry(/*sz*/);
+GC_bool GC_add_map_entry(/*sz*/);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
/* Misc GC: */
void GC_init_inner();
-bool GC_expand_hp_inner();
+GC_bool GC_expand_hp_inner();
void GC_start_reclaim(/*abort_if_found*/);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
-bool GC_reclaim_all(/* GC_stop_func f*/);
+GC_bool GC_reclaim_all(/* GC_stop_func f*/);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
-bool GC_never_stop_func(); /* Returns FALSE. */
-bool GC_try_to_collect_inner(/* GC_stop_func f */);
+GC_bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+GC_bool GC_never_stop_func(); /* Returns FALSE. */
+GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
/* Collection is aborted if f returns */
(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(); /* Finish collection. Mark bits are */
/* consistent and lock is still held. */
-bool GC_collect_or_expand(/* needed_blocks */);
+GC_bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* blocks available. Should be called */
/* head. */
void GC_init_headers();
-bool GC_install_header(/*h*/);
+GC_bool GC_install_header(/*h*/);
/* Install a header for block h. */
/* Return FALSE on failure. */
-bool GC_install_counts(/*h, sz*/);
+GC_bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
/* Unreachable finalizable objects are enqueued */
/* for processing by GC_invoke_finalizers. */
/* Invoked with lock. */
-void GC_invoke_finalizers(); /* Run eligible finalizers. */
- /* Invoked without lock. */
void GC_add_to_heap(/*p, bytes*/);
/* Add a HBLKSIZE aligned chunk to the heap. */
/* Check that all objects in the heap with */
/* debugging info are intact. Print */
/* descriptions of any that are not. */
+extern void (*GC_print_heap_obj)(/* ptr_t p */);
+ /* If possible print s followed by a more */
+ /* detailed description of the object */
+ /* referred to by p. */
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
-bool GC_page_was_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_dirty(/* struct hblk * h */);
/* Read retrieved dirty bits. */
-bool GC_page_was_ever_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_ever_dirty(/* struct hblk * h */);
/* Could the page contain valid heap pointers? */
void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
/* Assert the region currently contains no */
void GC_dirty_init();
/* Slow/general mark bit manipulation: */
-bool GC_is_marked();
+GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();
/* Stubborn objects: */
void GC_read_changed(); /* Analogous to GC_read_dirty */
-bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+GC_bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
void GC_clean_changing_list(); /* Collect obsolete changing list entries */
void GC_stubborn_init();
void GC_dump();
/* Make arguments appear live to compiler */
-GC_API void GC_noop();
+# ifdef __WATCOMC__
+ void GC_noop(void*, ...);
+# else
+ GC_API void GC_noop();
+# endif
+
void GC_noop1(/* word arg */);
/* Logging and diagnostic output: */
if (bytes_to_get <= bytes) {
/* Undo the damage, and get memory directly */
- ptr_t result = (ptr_t)GET_MEM(bytes);
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+ bytes_to_get &= ~(GC_page_size - 1);
+# endif
+ result = (ptr_t)GET_MEM(bytes_to_get);
scratch_free_ptr -= bytes;
GC_scratch_last_end_ptr = result + bytes;
return(result);
GC_printf0("Out of memory - trying to allocate less\n");
# endif
scratch_free_ptr -= bytes;
- return((ptr_t)GET_MEM(bytes));
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+          bytes_to_get &= ~(GC_page_size - 1);
+# endif
+ return((ptr_t)GET_MEM(bytes_to_get));
}
scratch_free_ptr = result;
GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
-static bool get_index(addr)
+static GC_bool get_index(addr)
register word addr;
{
register word hi =
/* Install a header for block h. */
/* The header is uninitialized. */
/* Returns FALSE on failure. */
-bool GC_install_header(h)
+GC_bool GC_install_header(h)
register struct hblk * h;
{
hdr * result;
}
/* Set up forwarding counts for block h of size sz */
-bool GC_install_counts(h, sz)
+GC_bool GC_install_counts(h, sz)
register struct hblk * h;
register word sz; /* bytes */
{
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+GC_API int GC_invoke_finalizers GC_PROTO((void));
+ /* Run finalizers for all objects that are ready to */
+ /* be finalized. Return the number of finalizers */
+ /* that were run. Normally this is also called */
+ /* implicitly during some allocations. If */
+ /* FINALIZE_ON_DEMAND is defined, it must be called */
+ /* explicitly. */
+
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
# endif
#endif
-#ifdef __WATCOMC__
- /* Ivan Demakov: Programs compiled by Watcom C with -5r option
- * crash without this declaration
- * HB: Could this go into gc_priv.h?
- */
- void GC_noop(void*, ...);
-#endif
-
#ifdef __cplusplus
} /* end of extern "C" */
#endif
/*
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
#define GC_ALLOC_H
#define __ALLOC_H // Prevent inclusion of the default version. Ugly.
+#define __SGI_STL_ALLOC_H
+#define __SGI_STL_INTERNAL_ALLOC_H
#ifndef __ALLOC
# define __ALLOC alloc
static void * allocate(size_t n) { return GC_malloc(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic(n); }
- static void deallocate(void *p, size_t n) { }
- static void ptr_free_deallocate(void *p, size_t n) { }
+ static void deallocate(void *, size_t) { }
+ static void ptr_free_deallocate(void *, size_t) { }
};
typedef gc_alloc_template < 0 > gc_alloc;
static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic_uncollectable(n); }
- static void deallocate(void *p, size_t n) { GC_free(p); }
- static void ptr_free_deallocate(void *p, size_t n) { GC_free(p); }
+ static void deallocate(void *p, size_t) { GC_free(p); }
+ static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
};
typedef alloc_template < 0 > alloc;
__GC_SPECIALIZE(float, single_client_alloc)
__GC_SPECIALIZE(double, single_client_alloc)
+#ifdef __STL_USE_STD_ALLOCATORS
+
+???copy stuff from stl_alloc.h or remove it to a different file ???
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, October 3, 1995 6:39 pm PDT */
#ifndef CONFIG_H
# endif
# if defined(mips) || defined(__mips)
# define MIPS
-# if defined(ultrix) || defined(__ultrix)
+# if defined(ultrix) || defined(__ultrix) || defined(__NetBSD__)
# define ULTRIX
# else
# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__)
-# define IRIX5
+# define IRIX5 /* or IRIX 6.X */
# else
# define RISCOS /* or IRIX 4.X */
# endif
# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
/* The above test may need refinement */
# define I386
-# define SCO
+# if defined(_SCO_ELF)
+# define SCO_ELF
+# else
+# define SCO
+# endif
# define mach_type_known
# endif
# if defined(_AUX_SOURCE)
# define LINUX
# define mach_type_known
# endif
-# if defined(__alpha)
+# if defined(linux) && defined(powerpc)
+# define POWERPC
+# define LINUX
+# define mach_type_known
+# endif
+# if defined(__alpha) || defined(__alpha__)
# define ALPHA
+# if defined(linux) || defined(__linux__)
+# define LINUX
+# else
+# define OSF1 /* a.k.a Digital Unix */
+# endif
# define mach_type_known
# endif
# if defined(_AMIGA)
/* DGUX defined */
# define mach_type_known
# endif
-# if defined(_MSDOS) && (_M_IX86 == 300) || (_M_IX86 == 400)
+# if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300)
# define I386
# define MSWIN32 /* or Win32s */
# define mach_type_known
# endif
-# if defined(GO32)
+# if defined(__DJGPP__)
# define I386
-# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# ifndef DJGPP
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# endif
+# define mach_type_known
+# endif
+# if defined(__CYGWIN32__)
+# define I386
+# define CYGWIN32
# define mach_type_known
# endif
# if defined(__BORLANDC__)
# define MSWIN32
# define mach_type_known
# endif
+# if defined(_UTS) && !defined(mach_type_known)
+# define S370
+# define UTS4
+# define mach_type_known
+# endif
+/* Ivan Demakov */
+# if defined(__WATCOMC__) && defined(__386__)
+# define I386
+# if !defined(OS2) && !defined(MSWIN32) && !defined(DOS4GW)
+# if defined(__OS2__)
+# define OS2
+# else
+# if defined(__WINDOWS_386__) || defined(__NT__)
+# define MSWIN32
+# else
+# define DOS4GW
+# endif
+# endif
+# endif
+# define mach_type_known
+# endif
/* Feel free to add more clauses here */
/* SPARC ==> SPARC under SunOS */
/* (SUNOS4, SUNOS5, */
/* DRSNX variants) */
- /* ALPHA ==> DEC Alpha OSF/1 */
+ /* ALPHA ==> DEC Alpha */
+ /* (OSF1 and LINUX variants) */
/* M88K ==> Motorola 88XX0 */
/* (CX_UX and DGUX) */
+ /* S370 ==> 370-like machine */
+ /* running Amdahl UTS4 */
/*
*
* DATASTART is the beginning of the data segment.
* On UNIX systems, the collector will scan the area between DATASTART
- * and &end for root pointers.
+ * and DATAEND for root pointers.
+ *
+ * DATAEND, if not &end.
+ *
+ * ALIGN_DOUBLE of GC_malloc should return blocks aligned to twice
+ * the pointer size.
*
* STACKBOTTOM is the cool end of the stack, which is usually the
* highest address in the stack.
# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
# define STACKBOTTOM ((ptr_t) 0xffeffffc)
/* empirically determined. seems to work. */
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
# endif
# ifdef SYSV
# define OS_TYPE "SYSV"
/* that the stack direction is incorrect. Two */
/* bytes down from 0x0 should be safe enough. */
/* --Parag */
+# include <sys/mmu.h>
+# define GETPAGESIZE() PAGESIZE /* Is this still right? */
# endif
# ifdef AMIGA
# define OS_TYPE "AMIGA"
/* STACKBOTTOM and DATASTART handled specially */
/* in os_dep.c */
# define DATAEND /* not needed */
+# define GETPAGESIZE() 4096
# endif
# ifdef MACOS
# ifndef __LOWMEM__
/* see os_dep.c for details of global data segments. */
# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
# define DATAEND /* not needed */
+# define GETPAGESIZE() 4096
# endif
# ifdef NEXT
# define OS_TYPE "NEXT"
# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
# define DATAEND /* not needed */
# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define STACKBOTTOM ((ptr_t)0x80000000)
+# define DATASTART GC_data_start
+ extern int _end;
+# define DATAEND (&_end)
+# endif
# endif
# ifdef VAX
# ifdef SPARC
# define MACH_TYPE "SPARC"
# define ALIGNMENT 4 /* Required by hardware */
+# define ALIGN_DOUBLE
extern int etext;
# ifdef SUNOS5
# define OS_TYPE "SUNOS5"
extern char * GC_SysVGetDataStart();
# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext)
# define DATAEND (&_end)
+# ifndef USE_MMAP
+# define USE_MMAP
+# endif
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else
+# define HEAP_START DATAEND
+# endif
# define PROC_VDB
# define HEURISTIC1
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+ /* getpagesize() appeared to be missing from at least one */
+ /* Solaris 5.4 installation. Weird. */
# endif
# ifdef SUNOS4
# define OS_TYPE "SUNOS4"
# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
/* except Borland. The -a4 option fixes */
/* Borland. */
+ /* Ivan Demakov: For Watcom the option is -zp4. */
+# ifndef SMALL_CONFIG
+# define ALIGN_DOUBLE /* Not strictly necessary, but may give speed */
+ /* improvement on Pentiums. */
+# endif
# ifdef SEQUENT
# define OS_TYPE "SEQUENT"
extern int etext;
extern char * GC_SysVGetDataStart();
# define DATASTART GC_SysVGetDataStart(0x1000, &etext)
# define STACKBOTTOM ((ptr_t)(&_start))
-# define PROC_VDB
+/** At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
+/*# define PROC_VDB*/
+# define DYNAMIC_LOADING
+# ifndef USE_MMAP
+# define USE_MMAP
+# endif
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else
+# define HEAP_START DATAEND
+# endif
# endif
# ifdef SCO
# define OS_TYPE "SCO"
+((word)&etext & 0xfff))
# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
# endif
+# ifdef SCO_ELF
+# define OS_TYPE "SCO_ELF"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t) 0x08048000)
+# define DYNAMIC_LOADING
+# define ELF_CLASS ELFCLASS32
+# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
- extern int etext;
-# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
# define STACKBOTTOM ((ptr_t)0xc0000000)
+ /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
+ /* Probably needs to be more flexible, but I don't yet */
+ /* fully understand how flexible. */
# define MPROTECT_VDB
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# ifdef UNDEFINED /* includes ro data */
+ extern int _etext;
+# define DATASTART ((ptr_t)((((word) (&_etext)) + 0xfff) & ~0xfff))
+# endif
+# include <linux/version.h>
+# include <features.h>
+# if LINUX_VERSION_CODE >= 0x20000 && defined(__GLIBC__) && __GLIBC__ >= 2
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# else
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end;
+# define DATAEND (&_end)
+# else
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# endif
+# endif
+# ifdef CYGWIN32
+ extern int _data_start__;
+ extern int _data_end__;
+ extern int _bss_start__;
+ extern int _bss_end__;
+ /* For binutils 2.9.1, we have */
+ /* DATASTART = _data_start__ */
+ /* DATAEND = _bss_end__ */
+ /* whereas for some earlier versions it was */
+ /* DATASTART = _bss_start__ */
+ /* DATAEND = _data_end__ */
+ /* To get it right for both, we take the */
+                      /* minimum/maximum of the two.               */
+# define MAX(x,y) ((x) > (y) ? (x) : (y))
+# define MIN(x,y) ((x) < (y) ? (x) : (y))
+# define DATASTART ((ptr_t) MIN(_data_start__, _bss_start__))
+# define DATAEND ((ptr_t) MAX(_data_end__, _bss_end__))
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000
+# define HEURISTIC1
# endif
# ifdef OS2
# define OS_TYPE "OS2"
# define OS_TYPE "MSWIN32"
/* STACKBOTTOM and DATASTART are handled specially in */
/* os_dep.c. */
-# define MPROTECT_VDB
+# ifndef __WATCOMC__
+# define MPROTECT_VDB
+# endif
# define DATAEND /* not needed */
# endif
# ifdef DJGPP
# define OS_TYPE "DJGPP"
+# include "stubinfo.h"
extern int etext;
-# define DATASTART ((ptr_t)(&etext))
-# define STACKBOTTOM ((ptr_t)0x00080000)
+ extern int _stklen;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ff) & ~0x1ff))
+# define STACKBOTTOM ((ptr_t)((word) _stubinfo + _stubinfo->size \
+ + _stklen))
+ /* This may not be right. */
# endif
# ifdef FREEBSD
# define OS_TYPE "FREEBSD"
# define STACKBOTTOM ((ptr_t)0xc0000000)
# define DATAEND /* not needed */
# endif
+# ifdef DOS4GW
+# define OS_TYPE "DOS4GW"
+ /* Get_DATASTART, Get_DATAEND, Get_STACKBOTTOM
+ * Defined in gc-watcom.asm
+ */
+ extern char* Get_DATASTART (void);
+ extern char* Get_DATAEND (void);
+ extern char* Get_STACKBOTTOM (void);
+# pragma aux Get_DATASTART "*" value [eax];
+# pragma aux Get_DATAEND "*" value [eax];
+# pragma aux Get_STACKBOTTOM "*" value [eax];
+# define DATASTART ((ptr_t) Get_DATASTART())
+# define STACKBOTTOM ((ptr_t) Get_STACKBOTTOM())
+# define DATAEND ((ptr_t) Get_DATAEND())
+# endif
# endif
# ifdef NS32K
# ifdef MIPS
# define MACH_TYPE "MIPS"
-# define ALIGNMENT 4 /* Required by hardware */
-# define DATASTART 0x10000000
+# ifndef IRIX5
+# define DATASTART (ptr_t)0x10000000
/* Could probably be slightly higher since */
- /* startup code allocates lots of junk */
+ /* startup code allocates lots of stuff. */
+# else
+ extern int _fdata;
+# define DATASTART ((ptr_t)(&_fdata))
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x30000000
+# else
+# define HEAP_START DATASTART
+# endif
+ /* Lowest plausible heap address. */
+ /* In the MMAP case, we map there. */
+ /* In either case it is used to identify */
+ /* heap sections so they're not */
+ /* considered as roots. */
+# endif /* IRIX5 */
# define HEURISTIC2
+/* # define STACKBOTTOM ((ptr_t)0x7fff8000) sometimes also works. */
# ifdef ULTRIX
# define OS_TYPE "ULTRIX"
+# define ALIGNMENT 4
# endif
# ifdef RISCOS
# define OS_TYPE "RISCOS"
+# define ALIGNMENT 4 /* Required by hardware */
# endif
# ifdef IRIX5
# define OS_TYPE "IRIX5"
-# define MPROTECT_VDB
- /* The above is dubious. Mprotect and signals do work, */
- /* and dirty bits are implemented under IRIX5. But, */
- /* at least under IRIX5.2, mprotect seems to be so */
- /* slow relative to the hardware that incremental */
- /* collection is likely to be rarely useful. */
+# define MPROTECT_VDB
+# ifdef _MIPS_SZPTR
+# define CPP_WORDSZ _MIPS_SZPTR
+# define ALIGNMENT (_MIPS_SZPTR/8)
+# if CPP_WORDSZ != 64
+# define ALIGN_DOUBLE
+# endif
+# else
+# define ALIGNMENT 4
+# define ALIGN_DOUBLE
+# endif
# define DYNAMIC_LOADING
# endif
# endif
# define MACH_TYPE "RS6000"
# define ALIGNMENT 4
# define DATASTART ((ptr_t)0x20000000)
-# define STACKBOTTOM ((ptr_t)0x2ff80000)
+ extern int errno;
+# define STACKBOTTOM ((ptr_t)((ulong)&errno))
+# define DYNAMIC_LOADING
+ /* For really old versions of AIX, this may have to be removed. */
# endif
# ifdef HP_PA
# define MACH_TYPE "HP_PA"
# define ALIGNMENT 4
+# define ALIGN_DOUBLE
extern int __data_start;
# define DATASTART ((ptr_t)(&__data_start))
-# define HEURISTIC2
+# if 0
+ /* The following appears to work for 7xx systems running HP/UX */
+ /* 9.xx Furthermore, it might result in much faster */
+ /* collections than HEURISTIC2, which may involve scanning */
+ /* segments that directly precede the stack. It is not the */
+ /* default, since it may not work on older machine/OS */
+ /* combinations. (Thanks to Raymond X.T. Nijssen for uncovering */
+ /* this.) */
+# define STACKBOTTOM ((ptr_t) 0x7b033000) /* from /etc/conf/h/param.h */
+# else
+# define HEURISTIC2
+# endif
# define STACK_GROWS_UP
+# define DYNAMIC_LOADING
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
+ /* They misspelled the Posix macro? */
# endif
# ifdef ALPHA
# define MACH_TYPE "ALPHA"
# define ALIGNMENT 8
-# define DATASTART ((ptr_t) 0x140000000)
-# define HEURISTIC2
+# ifdef OSF1
+# define OS_TYPE "OSF1"
+# define DATASTART ((ptr_t) 0x140000000)
+# define HEURISTIC2
/* Normally HEURISTIC2 is too conervative, since */
/* the text segment immediately follows the stack. */
/* Hence we give an upper pound. */
- extern __start;
-# define HEURISTIC2_LIMIT ((ptr_t)((word)(&__start) & ~(getpagesize()-1)))
-# define CPP_WORDSZ 64
-# define MPROTECT_VDB
-# define DYNAMIC_LOADING
+ extern __start;
+# define HEURISTIC2_LIMIT ((ptr_t)((word)(&__start) & ~(getpagesize()-1)))
+# define CPP_WORDSZ 64
+# define MPROTECT_VDB
+# define DYNAMIC_LOADING
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define CPP_WORDSZ 64
+# define STACKBOTTOM ((ptr_t) 0x120000000)
+# ifdef __ELF__
+ extern int __data_start;
+# define DATASTART &__data_start
+# define DYNAMIC_LOADING
+# else
+# define DATASTART ((ptr_t) 0x140000000)
+# endif
+ extern int _end;
+# define DATAEND (&_end)
+ /* As of 1.3.90, I couldn't find a way to retrieve the correct */
+ /* fault address from a signal handler. */
+ /* Hence MPROTECT_VDB is broken. */
+# endif
# endif
# ifdef M88K
# define MACH_TYPE "M88K"
# define ALIGNMENT 4
+# define ALIGN_DOUBLE
+ extern int etext;
# ifdef CX_UX
+# define OS_TYPE "CX_UX"
# define DATASTART ((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000)
# endif
# ifdef DGUX
+# define OS_TYPE "DGUX"
extern char * GC_SysVGetDataStart();
# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)
# endif
# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
# endif
+# ifdef S370
+# define MACH_TYPE "S370"
+# define OS_TYPE "UTS4"
+# define ALIGNMENT 4 /* Required by hardware */
+ extern int etext;
+ extern int _etext;
+ extern int _end;
+ extern char * GC_SysVGetDataStart();
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext)
+# define DATAEND (&_end)
+# define HEURISTIC2
+# endif
+
# ifndef STACK_GROWS_UP
# define STACK_GROWS_DOWN
# endif
# define DATAEND (&end)
# endif
-# if defined(SUNOS5) || defined(DRSNX)
+# if defined(SVR4) && !defined(GETPAGESIZE)
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+# endif
+
+# ifndef GETPAGESIZE
+# if defined(SUNOS5) || defined(IRIX5)
+# include <unistd.h>
+# endif
+# define GETPAGESIZE() getpagesize()
+# endif
+
+# if defined(SUNOS5) || defined(DRSNX) || defined(UTS4)
/* OS has SVR4 generic features. Probably others also qualify. */
# define SVR4
# endif
# define DEFAULT_VDB
# endif
+# if defined(IRIX_THREADS) && !defined(IRIX5)
+--> inconsistent configuration
+# endif
+# if defined(LINUX_THREADS) && !defined(LINUX)
+--> inconsistent configuration
+# endif
+# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
+--> inconsistent configuration
+# endif
+# if defined(PCR) || defined(SRC_M3) || \
+ defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
+ defined(IRIX_THREADS) || defined(LINUX_THREADS)
+# define THREADS
+# endif
+
# if defined(SPARC)
# define SAVE_CALL_CHAIN
+# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
+ /* include assembly code to do it well. */
# endif
# endif
# include "gc_hdrs.h"
# endif
-# if !defined(bool) && !defined(__cplusplus)
- typedef int bool;
- /* This is problematic with C++ implementations that do not define bool. */
- /* By now they should. */
-# else
-# if defined(_SGI_SOURCE) && !defined(_BOOL)
- typedef int bool;
-# endif
-# if defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x410
- typedef int bool;
-# endif
-# if defined(__cplusplus) && defined(_MSC_VER) && _MSC_VER <= 1020
- /* Visual C++ 4.2 does not have bool type. */
- typedef int bool;
-# endif
-# endif
+typedef int GC_bool;
# define TRUE 1
# define FALSE 0
#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
/* whether the objects are atomic or composite, and */
/* whether or not the block was found to be empty */
- /* duing the reclaim phase. Typically generates */
+ /* during the reclaim phase. Typically generates */
/* about one screenful per garbage collection. */
#undef PRINTBLOCKS
-#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
- /* cause the allocator to avoid allocating certain */
- /* blocks in order to avoid introducing "false */
- /* hits". */
-#undef PRINTBLACKLIST
-
#ifdef SILENT
# ifdef PRINTSTATS
# undef PRINTSTATS
# define GATHERSTATS
#endif
+#ifdef FINALIZE_ON_DEMAND
+# define GC_INVOKE_FINALIZERS()
+#else
+# define GC_INVOKE_FINALIZERS() (void)GC_invoke_finalizers()
+#endif
+
#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
/* free lists are actually maintained. This applies */
/* only to the top level routines in misc.c, not to */
/* May save significant amounts of space for obj_map */
/* entries. */
+#ifndef OLD_BLOCK_ALLOC
+ /* Macros controlling large block allocation strategy. */
+# define EXACT_FIRST /* Make a complete pass through the large object */
+ /* free list before splitting a block */
+# define PRESERVE_LAST /* Do not divide last allocated heap segment */
+ /* unless we would otherwise need to expand the */
+ /* heap. */
+#endif
+
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
# ifndef LARGE_CONFIG
# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
# define MAXHINCR 512 /* Maximum heap increment, in blocks */
# else
# define MINHINCR 64
/* */
/*********************************/
+#ifdef SAVE_CALL_CHAIN
+
/*
* Number of frames and arguments to save in objects allocated by
* debugging allocator.
/* alignment reasons. */
# define NARGS 2 /* Mumber of arguments to save for each call. */
-
-#ifdef SAVE_CALL_CHAIN
- struct callinfo {
- word ci_pc;
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
- };
+# define NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+#else
+
+# ifdef GC_ADD_CALLER
+# define NFRAMES 1
+# define NARGS 0
+# define NEED_CALLINFO
+# endif
+
+#endif
+
+#ifdef NEED_CALLINFO
+ struct callinfo {
+ word ci_pc;
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
+ };
#endif
/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
/* space is assumed to be cleared. */
+/* In the case of USE_MMAP, the argument must also be a            */
+/* physical page size. */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
# endif
+# ifdef LINUX_THREADS
+# include <pthread.h>
+# ifdef __i386__
+ inline static GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ /* Note: the "xchg" instruction does not need a "lock" prefix */
+ __asm__ __volatile__("xchgl %0, %1"
+ : "=r"(oldval), "=m"(*(addr))
+ : "0"(1), "m"(*(addr)));
+ return oldval;
+ }
+# else
+ -- > Need implementation of GC_test_and_set()
+# endif
+# define GC_clear(addr) (*(addr) = 0)
+
+ extern volatile unsigned int GC_allocate_lock;
+ /* This is not a mutex because mutexes that obey the (optional) */
+ /* POSIX scheduling rules are subject to convoys in high contention */
+ /* applications. This is basically a spin lock. */
+ extern pthread_t GC_lock_holder;
+ extern void GC_lock(void);
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
+# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define NO_THREAD (pthread_t)(-1)
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
+# ifdef UNDEFINED
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# else
+# define LOCK() \
+ { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
+# define UNLOCK() \
+ GC_clear(&GC_allocate_lock)
+# endif
+ extern GC_bool GC_collecting;
+# define ENTER_GC() \
+ { \
+ GC_collecting = 1; \
+ }
+# define EXIT_GC() GC_collecting = 0;
+# endif /* LINUX_THREADS */
# ifdef IRIX_THREADS
# include <pthread.h>
# include <mutex.h>
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
-# define __test_and_set(l,v) test_and_set(l,v)
+# define GC_test_and_set(addr, v) test_and_set(addr,v)
+# else
+# define GC_test_and_set(addr, v) __test_and_set(addr,v)
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
-# define LOCK() { if (__test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
# define UNLOCK() __lock_release(&GC_allocate_lock)
# else
# define UNLOCK() GC_allocate_lock = 0
# endif
# endif
- extern bool GC_collecting;
+ extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
-# endif
+# endif /* IRIX_THREADS */
# ifdef WIN32_THREADS
# include <windows.h>
GC_API CRITICAL_SECTION GC_allocate_ml;
# else
# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
|| defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
- || defined(NO_SIGNALS) || defined(IRIX_THREADS)
+ || defined(NO_SIGNALS) || defined(IRIX_THREADS) \
+ || defined(LINUX_THREADS)
/* Also useful for debugging. */
/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
# define DISABLE_SIGNALS()
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
/* swept. */
word ok_descriptor; /* Descriptor template for objects in this */
/* block. */
- bool ok_relocate_descr;
+ GC_bool ok_relocate_descr;
/* Add object size in bytes to descriptor */
/* template to obtain descriptor. Otherwise */
/* template is used as is. */
- bool ok_init; /* Clear objects before putting them on the free list. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
/* Predefined kinds: */
# define PTRFREE 0
/* header structure associated with */
/* block. */
-extern bool GC_is_initialized; /* GC_init() has been run. */
+extern GC_bool GC_is_initialized; /* GC_init() has been run. */
-extern bool GC_objects_are_marked; /* There are marked objects in */
+extern GC_bool GC_objects_are_marked; /* There are marked objects in */
/* the heap. */
-extern int GC_incremental; /* Using incremental/generational collection. */
+extern GC_bool GC_incremental; /* Using incremental/generational collection. */
-extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+extern GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
/* either for incremental collection, */
/* or to limit the root set. */
extern word GC_root_size; /* Total size of registered root sections */
-extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
/* object are used. */
-/* Mark bit perations */
+/* Mark bit operations */
/*
* Retrieve, set, clear the mark bit corresponding
/* Important internal collector routines */
+ptr_t GC_approx_sp();
+
+GC_bool GC_should_collect();
+#ifdef PRESERVE_LAST
+ GC_bool GC_in_last_heap_sect(/* ptr_t */);
+ /* In last added heap section? If so, avoid breaking up. */
+#endif
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
/* Return after about one pages worth of */
/* work. */
-bool GC_mark_stack_empty();
-bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_stack_empty();
+GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
/* subintervals of [b,t) onto */
/* mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+ void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
void GC_push_regs(); /* Push register contents onto mark stack. */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
-void GC_push_one(/*p*/); /* If p points to an object, mark it */
- /* and push contents on the mark stack */
-/* Ivan Demakov: Watcom C error'ed without this */
-# if defined(MSWIN32) && defined(__WATCOMC__)
+# if defined(MSWIN32)
void __cdecl GC_push_one();
# else
void GC_push_one(/*p*/); /* If p points to an object, mark it */
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
/* Ditto, but mark only from uncollectable pages. */
-bool GC_stopped_mark(); /* Stop world and mark from all roots */
+GC_bool GC_stopped_mark(); /* Stop world and mark from all roots */
/* and rescuers. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_set_hdr_marks(/* hhdr */); /* Set the mark bits in a header */
void GC_add_roots_inner();
-bool GC_is_static_root(/* ptr_t p */);
+GC_bool GC_is_static_root(/* ptr_t p */);
/* Is the address p in one of the registered static */
/* root sections? */
void GC_register_dynamic_libraries();
/* Black listing: */
void GC_bl_init();
# ifndef ALL_INTERIOR_POINTERS
- void GC_add_to_black_list_normal(/* bits */);
+ void GC_add_to_black_list_normal(/* bits, maybe source */);
/* Register bits as a possible future false */
/* reference from the heap or static data */
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits)
+# endif
# else
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits)
+# endif
# endif
-void GC_add_to_black_list_stack(/* bits */);
+void GC_add_to_black_list_stack(/* bits, maybe source */);
struct hblk * GC_is_black_listed(/* h, len */);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
/* with the block. This identifies */
/* the block as invalid to the mark */
/* routines. */
-bool GC_add_map_entry(/*sz*/);
+GC_bool GC_add_map_entry(/*sz*/);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
/* Misc GC: */
void GC_init_inner();
-bool GC_expand_hp_inner();
+GC_bool GC_expand_hp_inner();
void GC_start_reclaim(/*abort_if_found*/);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
-bool GC_reclaim_all(/* GC_stop_func f*/);
+GC_bool GC_reclaim_all(/* GC_stop_func f*/);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
-bool GC_never_stop_func(); /* Returns FALSE. */
-bool GC_try_to_collect_inner(/* GC_stop_func f */);
+GC_bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+GC_bool GC_never_stop_func(); /* Returns FALSE. */
+GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
/* Collection is aborted if f returns */
(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(); /* Finish collection. Mark bits are */
/* consistent and lock is still held. */
-bool GC_collect_or_expand(/* needed_blocks */);
+GC_bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* blocks available. Should be called */
/* head. */
void GC_init_headers();
-bool GC_install_header(/*h*/);
+GC_bool GC_install_header(/*h*/);
/* Install a header for block h. */
/* Return FALSE on failure. */
-bool GC_install_counts(/*h, sz*/);
+GC_bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
/* Unreachable finalizable objects are enqueued */
/* for processing by GC_invoke_finalizers. */
/* Invoked with lock. */
-void GC_invoke_finalizers(); /* Run eligible finalizers. */
- /* Invoked without lock. */
void GC_add_to_heap(/*p, bytes*/);
/* Add a HBLKSIZE aligned chunk to the heap. */
/* Check that all objects in the heap with */
/* debugging info are intact. Print */
/* descriptions of any that are not. */
+extern void (*GC_print_heap_obj)(/* ptr_t p */);
+ /* If possible print s followed by a more */
+ /* detailed description of the object */
+ /* referred to by p. */
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
-bool GC_page_was_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_dirty(/* struct hblk * h */);
/* Read retrieved dirty bits. */
-bool GC_page_was_ever_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_ever_dirty(/* struct hblk * h */);
/* Could the page contain valid heap pointers? */
void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
/* Assert the region currently contains no */
void GC_dirty_init();
/* Slow/general mark bit manipulation: */
-bool GC_is_marked();
+GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();
/* Stubborn objects: */
void GC_read_changed(); /* Analogous to GC_read_dirty */
-bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+GC_bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
void GC_clean_changing_list(); /* Collect obsolete changing list entries */
void GC_stubborn_init();
void GC_dump();
/* Make arguments appear live to compiler */
-GC_API void GC_noop();
+# ifdef __WATCOMC__
+ void GC_noop(void*, ...);
+# else
+ GC_API void GC_noop();
+# endif
+
void GC_noop1(/* word arg */);
/* Logging and diagnostic output: */
* Support code for Irix (>=6.2) Pthreads. This relies on properties
* not guaranteed by the Pthread standard. It may or may not be portable
* to other implementations.
+ *
+ * Note that there is a lot of code duplication between linux_threads.c
+ * and irix_threads.c; any changes made here may need to be reflected
+ * there too.
*/
# if defined(IRIX_THREADS)
/* guaranteed to be dead, but we may */
/* not yet have registered the join.) */
pthread_t id;
+ word stop;
+# define NOT_STOPPED 0
+# define PLEASE_STOP 1
+# define STOPPED 2
word flags;
# define FINISHED 1 /* Thread has exited. */
# define DETACHED 2 /* Thread is intended to be detached. */
# define SIG_SUSPEND (SIGRTMIN + 6)
pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
-volatile unsigned GC_n_stopped = 0;
/* Number of threads stopped so far */
pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;
int i;
if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
- pthread_mutex_lock(&GC_suspend_lock);
me = GC_lookup_thread(pthread_self());
/* The lookup here is safe, since I'm doing this on behalf */
/* of a thread which holds the allocation lock in order */
/* to stop the world. Thus concurrent modification of the */
/* data structure is impossible. */
+ if (PLEASE_STOP != me -> stop) {
+ /* Misdirected signal. */
+ pthread_mutex_unlock(&GC_suspend_lock);
+ return;
+ }
+ pthread_mutex_lock(&GC_suspend_lock);
me -> stack_ptr = (ptr_t)(&dummy);
- GC_n_stopped++;
+ me -> stop = STOPPED;
pthread_cond_signal(&GC_suspend_ack_cv);
pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
pthread_mutex_unlock(&GC_suspend_lock);
}
-bool GC_thr_initialized = FALSE;
+GC_bool GC_thr_initialized = FALSE;
size_t GC_min_stack_sz;
int hv = ((word)id) % THREAD_TABLE_SZ;
GC_thread result;
static struct GC_Thread_Rep first_thread;
- static bool first_thread_used = FALSE;
+ static GC_bool first_thread_used = FALSE;
if (!first_thread_used) {
result = &first_thread;
result -> id = id;
result -> next = GC_threads[hv];
GC_threads[hv] = result;
- /* result -> flags = 0; */
+ /* result -> flags = 0; */
+ /* result -> stop = 0; */
return(result);
}
pthread_t my_thread = pthread_self();
register int i;
register GC_thread p;
- register int n_live_threads = 0;
register int result;
+ struct timespec timeout;
- GC_n_stopped = 0;
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != 0; p = p -> next) {
if (p -> id != my_thread) {
- if (p -> flags & FINISHED) continue;
- n_live_threads++;
+ if (p -> flags & FINISHED) {
+ p -> stop = STOPPED;
+ continue;
+ }
+ p -> stop = PLEASE_STOP;
result = pthread_kill(p -> id, SIG_SUSPEND);
/* GC_printf1("Sent signal to 0x%x\n", p -> id); */
switch(result) {
case ESRCH:
/* Not really there anymore. Possible? */
- n_live_threads--;
+ p -> stop = STOPPED;
break;
case 0:
break;
default:
- ABORT("thr_kill failed");
+ ABORT("pthread_kill failed");
}
}
}
}
pthread_mutex_lock(&GC_suspend_lock);
- while(GC_n_stopped < n_live_threads) {
- /* GC_printf3("\nwaiting:%d %d %d\n", GC_gc_no,
- GC_n_stopped, n_live_threads); */
- pthread_cond_wait(&GC_suspend_ack_cv, &GC_suspend_lock);
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ while (p -> id != my_thread && p -> stop != STOPPED) {
+ clock_gettime(CLOCK_REALTIME, &timeout);
+ timeout.tv_nsec += 50000000; /* 50 msecs */
+ if (timeout.tv_nsec >= 1000000000) {
+ timeout.tv_nsec -= 1000000000;
+ ++timeout.tv_sec;
+ }
+ result = pthread_cond_timedwait(&GC_suspend_ack_cv,
+ &GC_suspend_lock,
+ &timeout);
+ if (result == ETIMEDOUT) {
+ /* Signal was lost or misdirected. Try again. */
+ /* Duplicate signals should be benign. */
+ result = pthread_kill(p -> id, SIG_SUSPEND);
+ }
+ }
+ }
}
pthread_mutex_unlock(&GC_suspend_lock);
/* GC_printf1("World stopped 0x%x\n", pthread_self()); */
/* Caller holds allocation lock. */
void GC_start_world()
{
+ GC_thread p;
+ unsigned i;
+
/* GC_printf0("World starting\n"); */
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ p -> stop = NOT_STOPPED;
+ }
+ }
pthread_mutex_lock(&GC_suspend_lock);
/* All other threads are at pthread_cond_wait in signal handler. */
/* Otherwise we couldn't have acquired the lock. */
}
# endif
-extern ptr_t GC_approx_sp();
-
/* We hold allocation lock. We assume the world is stopped. */
void GC_push_all_stacks()
{
void GC_thr_init()
{
GC_thread t;
+ struct sigaction act;
GC_thr_initialized = TRUE;
GC_min_stack_sz = HBLKSIZE;
GC_page_sz = sysconf(_SC_PAGESIZE);
- if (sigset(SIG_SUSPEND, GC_suspend_handler) != SIG_DFL)
+ (void) sigaction(SIG_SUSPEND, 0, &act);
+ if (act.sa_handler != SIG_DFL)
ABORT("Previously installed SIG_SUSPEND handler");
+ /* Install handler. */
+ act.sa_handler = GC_suspend_handler;
+ act.sa_flags = SA_RESTART;
+ (void) sigemptyset(&act.sa_mask);
+ if (0 != sigaction(SIG_SUSPEND, &act, 0))
+ ABORT("Failed to install SIG_SUSPEND handler");
/* Add the initial thread, so we can stop it. */
t = GC_new_thread(pthread_self());
t -> stack_size = 0;
return(result);
}
-bool GC_collecting = 0; /* A hint that we're in the collector and */
+GC_bool GC_collecting = 0; /* A hint that we're in the collector and */
/* holding the allocation lock for an */
/* extended period. */
unsigned long GC_allocate_lock = 0;
+#define SLEEP_THRESHOLD 3
+
void GC_lock()
{
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
unsigned my_spin_max;
static unsigned last_spins = 0;
unsigned my_last_spins;
- unsigned junk;
+ volatile unsigned junk;
# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
int i;
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
return;
}
+ junk = 0;
my_spin_max = spin_max;
my_last_spins = last_spins;
for (i = 0; i < my_spin_max; i++) {
PAUSE;
continue;
}
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
/*
* got it!
* Spinning worked. Thus we're probably not being scheduled
/* We are probably being scheduled against the other process. Sleep. */
spin_max = low_spin_max;
yield:
- for (;;) {
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ for (i = 0;; ++i) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
return;
}
- sched_yield();
+ if (i < SLEEP_THRESHOLD) {
+ sched_yield();
+ } else {
+ struct timespec ts;
+
+ if (i > 26) i = 26;
+ /* Don't wait for more than about 60msecs, even */
+ /* under extreme contention. */
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1 << i;
+ nanosleep(&ts, 0);
+ }
}
}
--- /dev/null
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Support code for LinuxThreads, the clone()-based kernel
+ * thread package for Linux which is included in libc6.
+ *
+ * This code relies on implementation details of LinuxThreads,
+ * (i.e. properties not guaranteed by the Pthread standard):
+ *
+ * - the function GC_linux_thread_top_of_stack(void)
+ * relies on the way LinuxThreads lays out thread stacks
+ * in the address space.
+ *
+ * Note that there is a lot of code duplication between linux_threads.c
+ * and irix_threads.c; any changes made here may need to be reflected
+ * there too.
+ */
+
+# if defined(LINUX_THREADS)
+
+# include "gc_priv.h"
+# include <pthread.h>
+# include <time.h>
+# include <errno.h>
+# include <unistd.h>
+# include <sys/mman.h>
+# include <sys/time.h>
+# include <semaphore.h>
+
+#undef pthread_create
+#undef pthread_sigmask
+#undef pthread_join
+
+void GC_thr_init();
+
+#if 0
+void GC_print_sig_mask()
+{
+ sigset_t blocked;
+ int i;
+
+ if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
+ ABORT("pthread_sigmask");
+ GC_printf0("Blocked: ");
+ for (i = 1; i <= MAXSIG; i++) {
+ if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
+ }
+ GC_printf0("\n");
+}
+#endif
+
+/* We use the allocation lock to protect thread-related data structures. */
+
+/* The set of all known threads. We intercept thread creation and */
+/* joins. We never actually create detached threads. We allocate all */
+/* new thread stacks ourselves. These allow us to maintain this */
+/* data structure. */
+/* Protected by GC_thr_lock. */
+/* Some of this should be declared volatile, but that's inconsistent */
+/* with some library routine declarations. */
+typedef struct GC_Thread_Rep {
+ struct GC_Thread_Rep * next; /* More recently allocated threads */
+ /* with a given pthread id come */
+ /* first. (All but the first are */
+ /* guaranteed to be dead, but we may */
+ /* not yet have registered the join.) */
+ pthread_t id;
+ word flags;
+# define FINISHED 1 /* Thread has exited. */
+# define DETACHED 2 /* Thread is intended to be detached. */
+# define MAIN_THREAD 4 /* True for the original thread only. */
+
+ ptr_t stack_end;
+ ptr_t stack_ptr; /* Valid only when stopped. */
+ int signal;
+ void * status; /* The value returned from the thread. */
+ /* Used only to avoid premature */
+ /* reclamation of any data it might */
+ /* reference. */
+} * GC_thread;
+
+GC_thread GC_lookup_thread(pthread_t id);
+
+/*
+ * The only way to suspend threads given the pthread interface is to send
+ * signals. We can't use SIGSTOP directly, because we need to get the
+ * thread to save its stack pointer in the GC thread table before
+ * suspending. So we have to reserve a signal of our own for this.
+ * This means we have to intercept client calls to change the signal mask.
+ * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
+ * so we need to reuse something else. I chose SIGPWR.
+ * (Perhaps SIGUNUSED would be a better choice.)
+ */
+#define SIG_SUSPEND SIGPWR
+
+#define SIG_RESTART SIGXCPU
+
+sem_t GC_suspend_ack_sem;
+
+/*
+GC_linux_thread_top_of_stack() relies on implementation details of
+LinuxThreads, namely that thread stacks are allocated on 2M boundaries
+and grow to no more than 2M.
+To make sure that we're using LinuxThreads and not some other thread
+package, we generate a dummy reference to `__pthread_initial_thread_bos',
+which is a symbol defined in LinuxThreads, but (hopefully) not in other
+thread packages.
+*/
+extern char * __pthread_initial_thread_bos;
+char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
+
+#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
+
+static inline ptr_t GC_linux_thread_top_of_stack(void)
+{
+ char *sp = GC_approx_sp();
+ ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
+#if DEBUG_THREADS
+ GC_printf1("SP = %lx\n", (unsigned long)sp);
+ GC_printf1("TOS = %lx\n", (unsigned long)tos);
+#endif
+ return tos;
+}
+
+void GC_suspend_handler(int sig)
+{
+ int dummy;
+ pthread_t my_thread = pthread_self();
+ GC_thread me;
+ sigset_t all_sigs;
+ sigset_t old_sigs;
+ int i;
+ sigset_t mask;
+
+ if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
+
+#if DEBUG_THREADS
+ GC_printf1("Suspending 0x%x\n", my_thread);
+#endif
+
+ me = GC_lookup_thread(my_thread);
+ /* The lookup here is safe, since I'm doing this on behalf */
+ /* of a thread which holds the allocation lock in order */
+ /* to stop the world. Thus concurrent modification of the */
+ /* data structure is impossible. */
+ me -> stack_ptr = (ptr_t)(&dummy);
+ me -> stack_end = GC_linux_thread_top_of_stack();
+
+ /* Tell the thread that wants to stop the world that this */
+ /* thread has been stopped. Note that sem_post() is */
+ /* the only async-signal-safe primitive in LinuxThreads. */
+ sem_post(&GC_suspend_ack_sem);
+
+ /* Wait until that thread tells us to restart by sending */
+ /* this thread a SIG_RESTART signal. */
+ /* SIG_RESTART should be masked at this point. Thus there */
+ /* is no race. */
+ if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
+ if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
+ do {
+ me->signal = 0;
+ sigsuspend(&mask); /* Wait for signal */
+ } while (me->signal != SIG_RESTART);
+
+#if DEBUG_THREADS
+ GC_printf1("Continuing 0x%x\n", my_thread);
+#endif
+}
+
+void GC_restart_handler(int sig)
+{
+ GC_thread me;
+
+ if (sig != SIG_RESTART) ABORT("Bad signal in suspend_handler");
+
+ /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
+ /* The lookup here is safe, since I'm doing this on behalf */
+ /* of a thread which holds the allocation lock in order */
+ /* to stop the world. Thus concurrent modification of the */
+ /* data structure is impossible. */
+ me = GC_lookup_thread(pthread_self());
+ me->signal = SIG_RESTART;
+
+ /*
+ ** Note: even if we didn't do anything useful here,
+ ** it would still be necessary to have a signal handler,
+ ** rather than ignoring the signals, otherwise
+ ** the signals will not be delivered at all, and
+ ** will thus not interrupt the sigsuspend() above.
+ */
+
+#if DEBUG_THREADS
+ GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
+#endif
+}
+
+GC_bool GC_thr_initialized = FALSE;
+
+# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
+volatile GC_thread GC_threads[THREAD_TABLE_SZ];
+
+/* Add a thread to GC_threads. We assume it wasn't already there. */
+/* Caller holds allocation lock. */
+GC_thread GC_new_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ GC_thread result;
+ static struct GC_Thread_Rep first_thread;
+ static GC_bool first_thread_used = FALSE;
+
+ if (!first_thread_used) {
+ result = &first_thread;
+ first_thread_used = TRUE;
+ /* Dont acquire allocation lock, since we may already hold it. */
+ } else {
+ result = (struct GC_Thread_Rep *)
+ GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
+ }
+ if (result == 0) return(0);
+ result -> id = id;
+ result -> next = GC_threads[hv];
+ GC_threads[hv] = result;
+ /* result -> flags = 0; */
+ return(result);
+}
+
+/* Delete a thread from GC_threads. We assume it is there. */
+/* (The code intentionally traps if it wasn't.) */
+/* Caller holds allocation lock. */
+void GC_delete_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+ register GC_thread prev = 0;
+
+ while (!pthread_equal(p -> id, id)) {
+ prev = p;
+ p = p -> next;
+ }
+ if (prev == 0) {
+ GC_threads[hv] = p -> next;
+ } else {
+ prev -> next = p -> next;
+ }
+}
+
+/* If a thread has been joined, but we have not yet */
+/* been notified, then there may be more than one thread */
+/* in the table with the same pthread id. */
+/* This is OK, but we need a way to delete a specific one. */
+void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+ register GC_thread prev = 0;
+
+ while (p != gc_id) {
+ prev = p;
+ p = p -> next;
+ }
+ if (prev == 0) {
+ GC_threads[hv] = p -> next;
+ } else {
+ prev -> next = p -> next;
+ }
+}
+
+/* Return a GC_thread corresponding to a given thread_t. */
+/* Returns 0 if it's not there. */
+/* Caller holds allocation lock or otherwise inhibits */
+/* updates. */
+/* If there is more than one thread with the given id we */
+/* return the most recent one. */
+GC_thread GC_lookup_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+
+ while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
+ return(p);
+}
+
+/* Caller holds allocation lock. */
+void GC_stop_world()
+{
+ pthread_t my_thread = pthread_self();
+ register int i;
+ register GC_thread p;
+ register int n_live_threads = 0;
+ register int result;
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread) {
+ if (p -> flags & FINISHED) continue;
+ n_live_threads++;
+ #if DEBUG_THREADS
+ GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
+ #endif
+ result = pthread_kill(p -> id, SIG_SUSPEND);
+ switch(result) {
+ case ESRCH:
+ /* Not really there anymore. Possible? */
+ n_live_threads--;
+ break;
+ case 0:
+ break;
+ default:
+ ABORT("pthread_kill failed");
+ }
+ }
+ }
+ }
+ for (i = 0; i < n_live_threads; i++) {
+ sem_wait(&GC_suspend_ack_sem);
+ }
+ #if DEBUG_THREADS
+ GC_printf1("World stopped 0x%x\n", pthread_self());
+ #endif
+}
+
+/* Caller holds allocation lock. */
+void GC_start_world()
+{
+ pthread_t my_thread = pthread_self();
+ register int i;
+ register GC_thread p;
+ register int n_live_threads = 0;
+ register int result;
+
+# if DEBUG_THREADS
+ GC_printf0("World starting\n");
+# endif
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread) {
+ if (p -> flags & FINISHED) continue;
+ n_live_threads++;
+ #if DEBUG_THREADS
+ GC_printf1("Sending restart signal to 0x%x\n", p -> id);
+ #endif
+ result = pthread_kill(p -> id, SIG_RESTART);
+ switch(result) {
+ case ESRCH:
+ /* Not really there anymore. Possible? */
+ n_live_threads--;
+ break;
+ case 0:
+ break;
+ default:
+ ABORT("pthread_kill failed");
+ }
+ }
+ }
+ }
+ #if DEBUG_THREADS
+ GC_printf0("World started\n");
+ #endif
+}
+
+/* We hold allocation lock. We assume the world is stopped. */
+void GC_push_all_stacks()
+{
+ register int i;
+ register GC_thread p;
+ register ptr_t sp = GC_approx_sp();
+ register ptr_t lo, hi;
+ pthread_t me = pthread_self();
+
+ if (!GC_thr_initialized) GC_thr_init();
+ #if DEBUG_THREADS
+ GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
+ #endif
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> flags & FINISHED) continue;
+ if (pthread_equal(p -> id, me)) {
+ lo = GC_approx_sp();
+ } else {
+ lo = p -> stack_ptr;
+ }
+ if ((p -> flags & MAIN_THREAD) == 0) {
+ if (pthread_equal(p -> id, me)) {
+ hi = GC_linux_thread_top_of_stack();
+ } else {
+ hi = p -> stack_end;
+ }
+ } else {
+ /* The original stack. */
+ hi = GC_stackbottom;
+ }
+ #if DEBUG_THREADS
+ GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
+ (unsigned long) p -> id,
+ (unsigned long) lo, (unsigned long) hi);
+ #endif
+ GC_push_all_stack(lo, hi);
+ }
+ }
+}
+
+
+/* We hold the allocation lock. */
+void GC_thr_init()
+{
+ GC_thread t;
+ struct sigaction act;
+
+ GC_thr_initialized = TRUE;
+
+ if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
+ ABORT("sem_init failed");
+
+ act.sa_flags = SA_RESTART;
+ if (sigfillset(&act.sa_mask) != 0) {
+ ABORT("sigfillset() failed");
+ }
+ /* SIG_RESTART is unmasked by the handler when necessary. */
+ act.sa_handler = GC_suspend_handler;
+ if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
+ ABORT("Cannot set SIG_SUSPEND handler");
+ }
+
+ act.sa_handler = GC_restart_handler;
+ if (sigaction(SIG_RESTART, &act, NULL) != 0) {
+  	ABORT("Cannot set SIG_RESTART handler");
+ }
+
+ /* Add the initial thread, so we can stop it. */
+ t = GC_new_thread(pthread_self());
+ t -> stack_ptr = (ptr_t)(&t);
+ t -> flags = DETACHED | MAIN_THREAD;
+}
+
+int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+ sigset_t fudged_set;
+
+ if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
+ fudged_set = *set;
+ sigdelset(&fudged_set, SIG_SUSPEND);
+ set = &fudged_set;
+ }
+ return(pthread_sigmask(how, set, oset));
+}
+
+struct start_info {
+ void *(*start_routine)(void *);
+ void *arg;
+};
+
+void GC_thread_exit_proc(void *dummy)
+{
+ GC_thread me;
+
+ LOCK();
+ me = GC_lookup_thread(pthread_self());
+ if (me -> flags & DETACHED) {
+ GC_delete_thread(pthread_self());
+ } else {
+ me -> flags |= FINISHED;
+ }
+ UNLOCK();
+}
+
+int GC_pthread_join(pthread_t thread, void **retval)
+{
+ int result;
+ GC_thread thread_gc_id;
+
+ LOCK();
+ thread_gc_id = GC_lookup_thread(thread);
+ /* This is guaranteed to be the intended one, since the thread id */
+	/* can't have been recycled by pthreads.                    */
+ UNLOCK();
+ result = pthread_join(thread, retval);
+ LOCK();
+ /* Here the pthread thread id may have been recycled. */
+ GC_delete_gc_thread(thread, thread_gc_id);
+ UNLOCK();
+ return result;
+}
+
+void * GC_start_routine(void * arg)
+{
+ struct start_info * si = arg;
+ void * result;
+ GC_thread me;
+
+ LOCK();
+ me = GC_lookup_thread(pthread_self());
+ UNLOCK();
+ pthread_cleanup_push(GC_thread_exit_proc, 0);
+# ifdef DEBUG_THREADS
+ GC_printf1("Starting thread 0x%x\n", pthread_self());
+ GC_printf1("pid = %ld\n", (long) getpid());
+ GC_printf1("sp = 0x%lx\n", (long) &arg);
+# endif
+ result = (*(si -> start_routine))(si -> arg);
+#if DEBUG_THREADS
+ GC_printf1("Finishing thread 0x%x\n", pthread_self());
+#endif
+ me -> status = result;
+ me -> flags |= FINISHED;
+ pthread_cleanup_pop(1);
+ /* This involves acquiring the lock, ensuring that we can't exit */
+ /* while a collection that thinks we're alive is trying to stop */
+ /* us. */
+ return(result);
+}
+
+int
+GC_pthread_create(pthread_t *new_thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg)
+{
+ int result;
+ GC_thread t;
+ pthread_t my_new_thread;
+ void * stack;
+ size_t stacksize;
+ pthread_attr_t new_attr;
+ int detachstate;
+ word my_flags = 0;
+ struct start_info * si = GC_malloc(sizeof(struct start_info));
+
+ if (0 == si) return(ENOMEM);
+ si -> start_routine = start_routine;
+ si -> arg = arg;
+ LOCK();
+ if (!GC_thr_initialized) GC_thr_init();
+ if (NULL == attr) {
+ stack = 0;
+ (void) pthread_attr_init(&new_attr);
+ } else {
+ new_attr = *attr;
+ }
+ pthread_attr_getdetachstate(&new_attr, &detachstate);
+ if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
+ result = pthread_create(&my_new_thread, &new_attr, GC_start_routine, si);
+ /* No GC can start until the thread is registered, since we hold */
+ /* the allocation lock. */
+ if (0 == result) {
+ t = GC_new_thread(my_new_thread);
+ t -> flags = my_flags;
+ t -> stack_ptr = 0;
+ t -> stack_end = 0;
+ if (0 != new_thread) *new_thread = my_new_thread;
+ }
+ UNLOCK();
+ /* pthread_attr_destroy(&new_attr); */
+ return(result);
+}
+
+GC_bool GC_collecting = 0;
+ /* A hint that we're in the collector and */
+ /* holding the allocation lock for an */
+ /* extended period. */
+
+/* Reasonably fast spin locks. Basically the same implementation */
+/* as STL alloc.h. This isn't really the right way to do this. */
+/* but until the POSIX scheduling mess gets straightened out ... */
+
+volatile unsigned int GC_allocate_lock = 0;
+
+
+void GC_lock()
+{
+# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
+# define high_spin_max 1000 /* spin cycles for multiprocessor */
+ static unsigned spin_max = low_spin_max;
+ unsigned my_spin_max;
+ static unsigned last_spins = 0;
+ unsigned my_last_spins;
+ volatile unsigned junk;
+# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
+ int i;
+
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ return;
+ }
+ junk = 0;
+ my_spin_max = spin_max;
+ my_last_spins = last_spins;
+ for (i = 0; i < my_spin_max; i++) {
+ if (GC_collecting) goto yield;
+ if (i < my_last_spins/2 || GC_allocate_lock) {
+ PAUSE;
+ continue;
+ }
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ /*
+ * got it!
+ * Spinning worked. Thus we're probably not being scheduled
+ * against the other process with which we were contending.
+ * Thus it makes sense to spin longer the next time.
+ */
+ last_spins = i;
+ spin_max = high_spin_max;
+ return;
+ }
+ }
+ /* We are probably being scheduled against the other process. Sleep. */
+ spin_max = low_spin_max;
+yield:
+ for (i = 0;; ++i) {
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ return;
+ }
+# define SLEEP_THRESHOLD 12
+ /* nanosleep(<= 2ms) just spins under Linux. We */
+ /* want to be careful to avoid that behavior. */
+ if (i < SLEEP_THRESHOLD) {
+ sched_yield();
+ } else {
+ struct timespec ts;
+
+ if (i > 26) i = 26;
+ /* Don't wait for more than about 60msecs, even */
+ /* under extreme contention. */
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1 << i;
+ nanosleep(&ts, 0);
+ }
+ }
+}
+
+# endif /* LINUX_THREADS */
+
# endif /* MACOS */
# if defined(I386) &&!defined(OS2) &&!defined(SVR4) &&!defined(MSWIN32) \
- && !defined(SCO) && !(defined(LINUX) && defined(__ELF__)) \
- && !defined(DOS4GW)
+ && !defined(SCO) && !defined(SCO_ELF) && !(defined(LINUX) \
+ && defined(__ELF__)) && !defined(DOS4GW)
/* I386 code, generic code does not appear to work */
/* It does appear to work under OS2, and asms dont */
/* This is used for some 38g UNIX variants and for CYGWIN32 */
__asm add esp,4
# endif
-# if defined(I386) && (defined(SVR4) || defined(SCO))
+# if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
/* I386 code, SVR4 variant, generic code does not appear to work */
asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
-bool GC_alloc_reclaim_list(kind)
+GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
struct hblk ** result = (struct hblk **)
ptr_t result;
DCL_LOCK_STATE;
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
result = GC_generic_malloc_inner(lb, k);
extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
void GC_extend_size_map(); /* in misc.c. */
-bool GC_alloc_reclaim_list(); /* in malloc.c */
+GC_bool GC_alloc_reclaim_list(); /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h */
register ptr_t result;
DCL_LOCK_STATE;
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
result = GC_generic_malloc_inner_ignore_off_page(lb,k);
register struct obj_kind * kind = GC_obj_kinds + k;
DCL_LOCK_STATE;
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
opp = &(kind -> ok_freelist[lw]);
return(op);
}
lw = ALIGNED_WORDS(lb);
- GC_invoke_finalizers();
+ GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
opp = &(GC_obj_kinds[k].ok_freelist[lw]);
mark_state_t GC_mark_state = MS_NONE;
-bool GC_mark_stack_too_small = FALSE;
+GC_bool GC_mark_stack_too_small = FALSE;
-bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
+GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
/* objects in the heap? */
-bool GC_collection_in_progress()
+GC_bool GC_collection_in_progress()
{
return(GC_mark_state != MS_NONE);
}
clear_mark_bit_from_hdr(hhdr, word_no);
}
-bool GC_is_marked(p)
+GC_bool GC_is_marked(p)
ptr_t p;
{
register struct hblk *h = HBLKPTR(p);
/* Perform a small amount of marking. */
/* We try to touch roughly a page of memory. */
/* Return TRUE if we just finished a mark phase. */
-bool GC_mark_some()
+GC_bool GC_mark_some()
{
switch(GC_mark_state) {
case MS_NONE:
}
-bool GC_mark_stack_empty()
+GC_bool GC_mark_stack_empty()
{
return(GC_mark_stack_top < GC_mark_stack);
}
/* Returns NIL without black listing if current points to a block */
/* with IGNORE_OFF_PAGE set. */
/*ARGSUSED*/
-word GC_find_start(current, hhdr)
+# ifdef PRINT_BLACK_LIST
+ word GC_find_start(current, hhdr, source)
+ word source;
+# else
+ word GC_find_start(current, hhdr)
+# define source 0
+# endif
register word current;
register hdr * hhdr;
{
if ((word *)orig - (word *)current
>= (ptrdiff_t)(hhdr->hb_sz)) {
/* Pointer past the end of the block */
- GC_ADD_TO_BLACK_LIST_NORMAL(orig);
+ GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
return(0);
}
return(current);
} else {
- GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
return(0);
}
# else
- GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
return(0);
# endif
+# undef source
}
void GC_invalidate_mark_state()
credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
while (descr != 0) {
if ((signed_word)descr < 0) {
- current = *current_p++;
- descr <<= 1;
- if ((ptr_t)current < least_ha) continue;
- if ((ptr_t)current >= greatest_ha) continue;
- PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
- } else {
- descr <<= 1;
- current_p++;
+ current = *current_p;
+ if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit,
+ current_p, exit1);
+ }
}
+ descr <<= 1;
+ ++ current_p;
}
continue;
case DS_PROC:
limit -= 1;
while (current_p <= limit) {
current = *current_p;
+ if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg,
+ mark_stack_limit, current_p, exit2);
+ }
current_p = (word *)((char *)current_p + ALIGNMENT);
- if ((ptr_t)current < least_ha) continue;
- if ((ptr_t)current >= greatest_ha) continue;
- PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
}
}
GC_mark_stack_top = GC_mark_stack_top_reg;
# endif
/* As above, but argument passed preliminary test. */
-void GC_push_one_checked(p, interior_ptrs)
+# ifdef PRINT_BLACK_LIST
+ void GC_push_one_checked(p, interior_ptrs, source)
+ ptr_t source;
+# else
+ void GC_push_one_checked(p, interior_ptrs)
+# define source 0
+# endif
register word p;
-register bool interior_ptrs;
+register GC_bool interior_ptrs;
{
register word r;
register hdr * hhdr;
/* displ is the word index within the block. */
if (hhdr == 0) {
if (interior_ptrs) {
- GC_add_to_black_list_stack(p);
+# ifdef PRINT_BLACK_LIST
+ GC_add_to_black_list_stack(p, source);
+# else
+ GC_add_to_black_list_stack(p);
+# endif
} else {
- GC_ADD_TO_BLACK_LIST_NORMAL(p);
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+# undef source /* In case we had to define it. */
}
} else {
if (!mark_bit_from_hdr(hhdr, displ)) {
if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
-void GC_print_trace(word gc_no, bool lock)
+void GC_print_trace(word gc_no, GC_bool lock)
{
int i;
struct trace_entry *p;
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
-bool GC_block_was_dirty(h, hhdr)
+GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
# ifndef MSWIN32
struct roots * r_next;
# endif
- bool r_tmp;
+ GC_bool r_tmp;
/* Delete before registering new dynamic libraries */
};
/* Primarily for debugging support: */
/* Is the address p in one of the registered static */
/* root sections? */
-bool GC_is_static_root(p)
+GC_bool GC_is_static_root(p)
ptr_t p;
{
static int last_root_set = 0;
/* reregistering dynamic libraries. */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
-bool tmp;
+GC_bool tmp;
{
struct roots * old;
return;
}
next_index = next - excl_table;
- for (i = excl_table_entries - 1; i >= next_index; --i) {
- excl_table[i+1] = excl_table[i];
+ for (i = excl_table_entries; i > next_index; --i) {
+ excl_table[i] = excl_table[i-1];
}
} else {
next_index = excl_table_entries;
*/
void GC_push_roots(all)
-bool all;
+GC_bool all;
{
register int i;
# define RAOFF FRAMESZ-SZREG
# define GPOFF FRAMESZ-(2*SZREG)
NESTED(GC_push_regs, FRAMESZ, ra)
+ .mask 0x80000000,-SZREG # inform debugger of saved ra loc
move t0,gp
SETUP_GPX(t8)
PTR_SUBU sp,FRAMESZ
# ifdef WIN32_THREADS
GC_API CRITICAL_SECTION GC_allocate_ml;
# else
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
# ifdef UNDEFINED
pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
# endif
GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
-bool GC_debugging_started = FALSE;
+GC_bool GC_debugging_started = FALSE;
/* defined here so we don't have to load debug_malloc.o */
void (*GC_check_heap)() = (void (*)())0;
ptr_t GC_stackbottom = 0;
-bool GC_dont_gc = 0;
+GC_bool GC_dont_gc = 0;
-bool GC_quiet = 0;
+GC_bool GC_quiet = 0;
/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
}
#endif
-extern ptr_t GC_approx_sp(); /* in mark_rts.c */
-
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
/* Make sure r points to the beginning of the object */
r &= ~(WORDS_TO_BYTES(1) - 1);
{
- register int offset =
- (char *)r - (char *)(HBLKPTR(r)) - HDR_BYTES;
+ register int offset = (char *)r - (char *)(HBLKPTR(r));
register signed_word sz = candidate_hdr -> hb_sz;
# ifdef ALL_INTERIOR_POINTERS
return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
}
-bool GC_is_initialized = FALSE;
-
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
- extern void GC_thr_init();
-#endif
+GC_bool GC_is_initialized = FALSE;
void GC_init()
{
/* We need dirty bits in order to find live stack sections. */
GC_dirty_init();
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
GC_thr_init();
# endif
-# if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
if (GC_stackbottom == 0) {
GC_stackbottom = GC_get_stack_base();
}
GC_setpagesize();
# ifdef MSWIN32
{
- extern bool GC_is_win32s();
+ extern GC_bool GC_is_win32s();
/* VirtualProtect is not functional under win32s. */
if (GC_is_win32s()) goto out;
}
#endif
+#ifdef NEED_CALLINFO
+
+void GC_print_callers (info)
+struct callinfo info[NFRAMES];
+{
+ register int i,j;
+
+# if NFRAMES == 1
+ GC_err_printf0("\tCaller at allocation:\n");
+# else
+ GC_err_printf0("\tCall chain at allocation:\n");
+# endif
+ for (i = 0; i < NFRAMES; i++) {
+ if (info[i].ci_pc == 0) break;
+# if NARGS > 0
+ GC_err_printf0("\t\targs: ");
+ for (j = 0; j < NARGS; j++) {
+ if (j != 0) GC_err_printf0(", ");
+ GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
+ ~(info[i].ci_arg[j]));
+ }
+ GC_err_printf0("\n");
+# endif
+ GC_err_printf1("\t\t##PC##= 0x%X\n", info[i].ci_pc);
+ }
+}
+
+#endif /* SAVE_CALL_CHAIN */
+
# ifdef SRC_M3
void GC_enable()
{
*prev;
word *last_object; /* points to last object in new hblk */
register struct hblk *h; /* the new heap block */
- register bool clear = GC_obj_kinds[kind].ok_init;
+ register GC_bool clear = GC_obj_kinds[kind].ok_init;
# ifdef PRINTSTATS
if ((sizeof (struct hblk)) > HBLKSIZE) {
/* Add a heap block map for objects of size sz to obj_map. */
/* Return FALSE on failure. */
-bool GC_add_map_entry(sz)
+GC_bool GC_add_map_entry(sz)
word sz;
{
register unsigned obj_start;
# define __KERNEL__
# include <asm/signal.h>
# undef __KERNEL__
-# elif (LINUX_VERSION_CODE < 0x20100)
-# include <asm/sigcontext.h>
+# else
+ /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
+ /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
+ /* prototypes, so we have to include the top-level sigcontext.h to */
+ /* make sure the former gets defined to be the latter if appropriate. */
+# include <features.h>
+# if 2 <= __GLIBC__
+# include <sigcontext.h>
+# else /* not 2 <= __GLIBC__ */
+ /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
+ /* one. Check LINUX_VERSION_CODE to see which we should reference. */
+# include <asm/sigcontext.h>
+# endif /* 2 <= __GLIBC__ */
# endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
# include <unistd.h>
# endif
# endif
+
# include <stdio.h>
# include <signal.h>
{
extern ptr_t GC_find_limit();
extern char **_environ;
+ /* This may need to be environ, without the underscore, for */
+ /* some versions. */
GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
}
#endif
# define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif
-static bool mask_initialized = FALSE;
+static GC_bool mask_initialized = FALSE;
static SIGSET_T new_mask;
}
# else
-# if defined(MPROTECT_VDB) || defined(PROC_VDB)
+# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
void GC_setpagesize()
{
GC_page_size = GETPAGESIZE();
typedef void (*handler)();
# endif
-# ifdef SUNOS5SIGS
- static struct sigaction oldact;
+# if defined(SUNOS5SIGS) || defined(IRIX5)
+ static struct sigaction old_segv_act;
+ static struct sigaction old_bus_act;
# else
static handler old_segv_handler, old_bus_handler;
# endif
void GC_setup_temporary_fault_handler()
{
-# ifdef SUNOS5SIGS
+# if defined(SUNOS5SIGS) || defined(IRIX5)
struct sigaction act;
act.sa_handler = GC_fault_handler;
- act.sa_flags = SA_RESTART | SA_SIGINFO | SA_NODEFER;
+ act.sa_flags = SA_RESTART | SA_NODEFER;
/* The presence of SA_NODEFER represents yet another gross */
/* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
/* interact correctly with -lthread. We hide the confusion */
/* signal mask. */
(void) sigemptyset(&act.sa_mask);
- (void) sigaction(SIGSEGV, &act, &oldact);
+# ifdef IRIX_THREADS
+ /* Older versions have a bug related to retrieving and */
+ /* and setting a handler at the same time. */
+ (void) sigaction(SIGSEGV, 0, &old_segv_act);
+ (void) sigaction(SIGSEGV, &act, 0);
+# else
+ (void) sigaction(SIGSEGV, &act, &old_segv_act);
+# ifdef _sigargs /* Irix 5.x, not 6.x */
+ /* Under 5.x, we may get SIGBUS. */
+ /* Pthreads doesn't exist under 5.x, so we don't */
+ /* have to worry in the threads case. */
+ (void) sigaction(SIGBUS, &act, &old_bus_act);
+# endif
+# endif /* IRIX_THREADS */
# else
old_segv_handler = signal(SIGSEGV, GC_fault_handler);
# ifdef SIGBUS
void GC_reset_fault_handler()
{
-# ifdef SUNOS5SIGS
- (void) sigaction(SIGSEGV, &oldact, 0);
+# if defined(SUNOS5SIGS) || defined(IRIX5)
+ (void) sigaction(SIGSEGV, &old_segv_act, 0);
+# ifdef _sigargs /* Irix 5.x, not 6.x */
+ (void) sigaction(SIGBUS, &old_bus_act, 0);
+# endif
# else
(void) signal(SIGSEGV, old_segv_handler);
# ifdef SIGBUS
/* the smallest location q s.t. [q,p] is addressible (!up). */
ptr_t GC_find_limit(p, up)
ptr_t p;
- bool up;
+ GC_bool up;
{
static VOLATILE ptr_t result;
/* Needs to be static, since otherwise it may not be */
/* all real work is done by GC_register_dynamic_libraries. Under */
/* win32s, we cannot find the data segments associated with dll's. */
/* We rgister the main data segment here. */
- bool GC_win32s = FALSE; /* We're running under win32s. */
+ GC_bool GC_win32s = FALSE; /* We're running under win32s. */
- bool GC_is_win32s()
+ GC_bool GC_is_win32s()
{
DWORD v = GetVersion();
/* Is p the start of either the malloc heap, or of one of our */
/* heap sections? */
- bool GC_is_heap_base (ptr_t p)
+ GC_bool GC_is_heap_base (ptr_t p)
{
register unsigned i;
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
- caddr_t cur_brk = sbrk(0);
+ caddr_t cur_brk = (caddr_t)sbrk(0);
caddr_t result;
SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
static caddr_t my_brk_val = 0;
if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
if (lsbs != 0) {
- if(sbrk(GC_page_size - lsbs) == (caddr_t)(-1)) return(0);
+ if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
}
if (cur_brk == my_brk_val) {
/* Use the extra block we allocated last time. */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
- static bool initialized = FALSE;
+ static GC_bool initialized = FALSE;
static int fd;
void *result;
static ptr_t last_addr = HEAP_START;
fd = open("/dev/zero", O_RDONLY);
initialized = TRUE;
}
+ if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
MAP_PRIVATE | MAP_FIXED, fd, 0/* offset */);
if (result == MAP_FAILED) return(0);
# endif /* SRC_M3 */
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined LINUX_THREADS
extern void GC_push_all_stacks();
* or write only to the stack.
*/
-bool GC_dirty_maintained = FALSE;
+GC_bool GC_dirty_maintained = FALSE;
# ifdef DEFAULT_VDB
/* of the pages overlapping h are dirty. This routine may err on the */
/* side of labelling pages as dirty (and this implementation does). */
/*ARGSUSED*/
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
return(TRUE);
/* Could any valid GC heap pointer ever have been written to this page? */
/*ARGSUSED*/
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
return(TRUE);
if (SIG_OK && CODE_OK) {
register struct hblk * h =
(struct hblk *)((word)addr & ~(GC_page_size-1));
- bool in_allocd_block;
+ GC_bool in_allocd_block;
# ifdef SUNOS5SIGS
/* Address is only within the correct physical page. */
set_pht_entry_from_index(GC_dirty_pages, index);
}
UNPROTECT(h, GC_page_size);
-# if defined(IRIX5) || defined(OSF1) || defined(LINUX)
+# if defined(OSF1) || defined(LINUX)
/* These reset the signal handler each time by default. */
signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
# endif
{
register struct hblk * h_trunc;
register unsigned i;
- register bool found_clean;
+ register GC_bool found_clean;
if (!GC_dirty_maintained) return;
h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
void GC_dirty_init()
{
-#if defined(SUNOS5SIGS)
+#if defined(SUNOS5SIGS) || defined(IRIX5)
struct sigaction act, oldact;
- act.sa_sigaction = GC_write_fault_handler;
- act.sa_flags = SA_RESTART | SA_SIGINFO;
+# ifdef IRIX5
+ act.sa_flags = SA_RESTART;
+ act.sa_handler = GC_write_fault_handler;
+# else
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ act.sa_sigaction = GC_write_fault_handler;
+# endif
(void)sigemptyset(&act.sa_mask);
#endif
# ifdef PRINTSTATS
# endif
}
# endif
-# if defined(IRIX5) || defined(OSF1) || defined(SUNOS4) || defined(LINUX)
+# if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
if (GC_old_segv_handler == SIG_IGN) {
GC_err_printf0("Previously ignored segmentation violation!?");
# endif
}
# endif
-# if defined(SUNOS5SIGS)
- sigaction(SIGSEGV, &act, &oldact);
- if (oldact.sa_flags & SA_SIGINFO) {
+# if defined(SUNOS5SIGS) || defined(IRIX5)
+# ifdef IRIX_THREADS
+ sigaction(SIGSEGV, 0, &oldact);
+ sigaction(SIGSEGV, &act, 0);
+# else
+ sigaction(SIGSEGV, &act, &oldact);
+# endif
+# if defined(_sigargs)
+ /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
+ /* sa_sigaction. */
+ GC_old_segv_handler = oldact.sa_handler;
+# else /* Irix 6.x or SUNOS5SIGS */
+ if (oldact.sa_flags & SA_SIGINFO) {
GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
- } else {
+ } else {
GC_old_segv_handler = oldact.sa_handler;
- }
+ }
+# endif
if (GC_old_segv_handler == SIG_IGN) {
GC_err_printf0("Previously ignored segmentation violation!?");
GC_old_segv_handler = SIG_DFL;
GC_protect_heap();
}
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk * h;
{
register word index = PHT_HASH(h);
GC_begin_syscall();
GC_unprotect_range(buf, (word)nbyte);
# ifdef IRIX5
- /* Indirect system call exists, but is undocumented, and */
- /* always seems to return EINVAL. There seems to be no */
- /* general way to wrap system calls, since the system call */
- /* convention appears to require an immediate argument for */
- /* the system call number, and building the required code */
- /* in the data segment also seems dangerous. We can fake it */
- /* for read; anything else is up to the client. */
+ /* Indirect system call may not always be easily available. */
+ /* We could call _read, but that would interfere with the */
+ /* libpthread interception of read. */
{
struct iovec iov;
#endif /* !MSWIN32 */
/*ARGSUSED*/
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
return(TRUE);
#undef READ
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
register word index = PHT_HASH(h);
- register bool result;
+ register GC_bool result;
result = get_pht_entry_from_index(GC_grungy_pages, index);
# ifdef SOLARIS_THREADS
return(result);
}
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
register word index = PHT_HASH(h);
- register bool result;
+ register GC_bool result;
result = get_pht_entry_from_index(GC_written_pages, index);
# ifdef SOLARIS_THREADS
}
}
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
#endif /* SAVE_CALL_CHAIN */
#endif /* SPARC */
-#ifdef SAVE_CALL_CHAIN
-
-void GC_print_callers (info)
-struct callinfo info[NFRAMES];
-{
- register int i,j;
-
- GC_err_printf0("\tCall chain at allocation:\n");
- for (i = 0; i < NFRAMES; i++) {
- if (info[i].ci_pc == 0) break;
- GC_err_printf0("\t\targs: ");
- for (j = 0; j < NARGS; j++) {
- if (j != 0) GC_err_printf0(", ");
- GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
- ~(info[i].ci_arg[j]));
- }
- GC_err_printf1("\n\t\t##PC##= 0x%X\n", info[i].ci_pc);
- }
-}
-
-#endif /* SAVE_CALL_CHAIN */
typedef struct {
PCR_ERes (*ed_proc)(void *p, size_t size, PCR_Any data);
- bool ed_pointerfree;
+ GC_bool ed_pointerfree;
PCR_ERes ed_fail_code;
PCR_Any ed_client_data;
} enumerate_data;
GC_DummyShutdownProc /* mmp_shutdown */
};
-bool GC_use_debug = 0;
+GC_bool GC_use_debug = 0;
void GC_pcr_install()
{
if( !PCR_Base_TestPCRArg("-nogc") ) {
GC_quiet = ( PCR_Base_TestPCRArg("-gctrace") ? 0 : 1 );
- GC_use_debug = (bool)PCR_Base_TestPCRArg("-debug_alloc");
+ GC_use_debug = (GC_bool)PCR_Base_TestPCRArg("-debug_alloc");
GC_init();
if( !PCR_Base_TestPCRArg("-nogc_incremental") ) {
/*
GC_default_is_visible_print_proc;
/* Could p be a stack address? */
-bool GC_on_stack(p)
+GC_bool GC_on_stack(p)
ptr_t p;
{
# ifdef THREADS
if (GC_on_stack(p)) return(p);
hhdr = HDR((word)p);
if (hhdr == 0) {
- bool result;
+ GC_bool result;
if (GC_is_static_root(p)) return(p);
/* Else do it again correctly: */
/* Number of words of memory reclaimed */
# ifdef FIND_LEAK
-static report_leak(p, sz)
+static void report_leak(p, sz)
ptr_t p;
word sz;
{
# define FOUND_FREE(hblk, word_no) \
if (abort_if_found) { \
- report_leak((long)hblk + WORDS_TO_BYTES(word_no), \
+ report_leak((ptr_t)hblk + WORDS_TO_BYTES(word_no), \
HDR(hblk) -> hb_sz); \
}
# else
* memory.
*/
-bool GC_block_empty(hhdr)
+GC_bool GC_block_empty(hhdr)
register hdr * hhdr;
{
register word *p = (word *)(&(hhdr -> hb_marks[0]));
ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
GC_freehblk(hbp);
}
} else {
- bool empty = GC_block_empty(hhdr);
+ GC_bool empty = GC_block_empty(hhdr);
if (abort_if_found) {
GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
} else if (empty) {
* recently reclaimed, and discard the rest.
* Stop_func may be 0.
*/
-bool GC_reclaim_all(stop_func, ignore_old)
+GC_bool GC_reclaim_all(stop_func, ignore_old)
GC_stop_func stop_func;
-bool ignore_old;
+GC_bool ignore_old;
{
register word sz;
register int kind;
* Modified Peter C. for Solaris Posix Threads.
*/
/* Boehm, September 14, 1994 4:44 pm PDT */
-/* $Id: solaris_pthreads.c,v 1.10 1997/05/13 23:09:09 peterc Exp $ */
+/* $Id: solaris_pthreads.c,v 1.1 1999/04/07 14:56:06 tromey Exp $ */
# if defined(_SOLARIS_PTHREADS)
# include "gc_priv.h"
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# include <errno.h>
+# include "solaris_threads.h"
+# include <stdio.h>
#undef pthread_join
#undef pthread_create
# else
#ifndef LINT
- int GC_no_sunOS_threads;
+ int GC_no_sunOS_pthreads;
#endif
# endif /* SOLARIS_THREADS */
char buf[30];
prstatus_t status;
register int i;
- bool changed;
+ GC_bool changed;
lwpid_t me = _lwp_self();
if (GC_main_proc_fd == -1) {
{
int lwp_fd;
register int i;
- bool changed;
+ GC_bool changed;
lwpid_t me = _lwp_self();
# define PARANOID
if (i >= max_lwps) ABORT("Too many lwps");
}
-bool GC_multithreaded = 0;
+GC_bool GC_multithreaded = 0;
void GC_stop_world()
{
void GC_thr_init(void);
-bool GC_thr_initialized = FALSE;
+GC_bool GC_thr_initialized = FALSE;
size_t GC_min_stack_sz;
int hv = ((word)id) % THREAD_TABLE_SZ;
GC_thread result;
static struct GC_Thread_Rep first_thread;
- static bool first_thread_used = FALSE;
+ static GC_bool first_thread_used = FALSE;
if (!first_thread_used) {
result = &first_thread;
}
-extern ptr_t GC_approx_sp();
-
/* We hold allocation lock. We assume the world is stopped. */
void GC_push_all_stacks()
{
LOCK();
if (wait_for == 0) {
register int i;
- register bool thread_exists;
+ register GC_bool thread_exists;
for (;;) {
thread_exists = FALSE;
} * GC_thread;
extern GC_thread GC_new_thread(thread_t id);
- extern bool GC_thr_initialized;
+ extern GC_bool GC_thr_initialized;
extern volatile GC_thread GC_threads[];
extern size_t GC_min_stack_sz;
extern size_t GC_page_sz;
/* Invariant while this is running: GC_changing_list_current */
/* points at a word containing 0. */
/* Returns FALSE on failure. */
-bool GC_compact_changing_list()
+GC_bool GC_compact_changing_list()
{
register GC_PTR *p, *q;
register word count = 0;
# else
register GC_PTR * my_current = GC_changing_list_current;
# endif
- register bool tried_quick;
+ register GC_bool tried_quick;
DCL_LOCK_STATE;
if (*my_current == p) {
}
}
-bool GC_page_was_changed(h)
+GC_bool GC_page_was_changed(h)
struct hblk * h;
{
register word index = PHT_HASH(h);
# include <assert.h> /* Not normally used, but handy for debugging. */
# include "gc.h"
# include "gc_typed.h"
-# include "gc_priv.h" /* For output and some statistics */
+# include "gc_priv.h" /* For output, locking, and some statistics */
# include "config.h"
# ifdef MSWIN32
# include <synch.h>
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
# include <pthread.h>
# endif
static mutex_t incr_lock;
mutex_lock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&incr_lock);
# endif
# ifdef SOLARIS_THREADS
mutex_unlock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
pthread_mutex_unlock(&incr_lock);
# endif
# ifdef WIN32_THREADS
static mutex_t incr_lock;
mutex_lock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&incr_lock);
# endif
# ifdef SOLARIS_THREADS
mutex_unlock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
pthread_mutex_unlock(&incr_lock);
# endif
# ifdef WIN32_THREADS
void * alloc8bytes()
{
+# ifdef SMALL_CONFIG
+ return(GC_malloc(8));
+# else
void ** my_free_list_ptr;
void * my_free_list;
*my_free_list_ptr = GC_NEXT(my_free_list);
GC_NEXT(my_free_list) = 0;
return(my_free_list);
+# endif
}
#else
# endif
GC_descr d3 = GC_make_descriptor(&bm_large, 32);
GC_descr d4 = GC_make_descriptor(bm_huge, 320);
- GC_word * x = GC_malloc_explicitly_typed(2000, d4);
+ GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
register int i;
old = 0;
LOCK();
n_tests++;
UNLOCK();
- /* GC_printf1("Finished %x\n", pthread_self()); */
+ /* GC_printf1("Finished %x\n", pthread_self()); */
}
void check_heap_stats()
unsigned long max_heap_sz;
register int i;
int still_live;
+ int late_finalize_count = 0;
if (sizeof(char *) > 4) {
max_heap_sz = 13000000;
while (GC_collect_a_little()) { }
for (i = 0; i < 16; i++) {
GC_gcollect();
+ late_finalize_count += GC_invoke_finalizers();
}
(void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
(void)GC_printf2("Finalized %lu/%lu objects - ",
(unsigned long)finalized_count,
(unsigned long)finalizable_count);
+# ifdef FINALIZE_ON_DEMAND
+ if (finalized_count != late_finalize_count) {
+ (void)GC_printf0("Demand finalization error\n");
+ FAIL;
+ }
+# endif
if (finalized_count > finalizable_count
|| finalized_count < finalizable_count/2) {
(void)GC_printf0("finalization is probably broken\n");
i = finalizable_count - finalized_count - still_live;
if (0 != i) {
(void)GC_printf2
- ("%lu disappearing links remain and %lu more objects "
- "were not finalized\n",
+ ("%lu disappearing links remain and %lu more objects were not finalized\n",
(unsigned long) still_live, (unsigned long)i);
if (i > 10) {
GC_printf0("\tVery suspicious!\n");
}
-#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) && !defined(IRIX_THREADS) || defined(LINT)
+#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
+ && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) || defined(LINT)
#ifdef MSWIN32
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
#else
}
#endif
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void * thr_run_one_test(void * arg)
{
run_one_test();
# ifdef IRIX_THREADS
/* Force a larger stack to be preallocated */
/* Since the initial cant always grow later. */
- *((char *)&code - 1024*1024) = 0; /* Require 1 Mb */
+ *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
# endif /* IRIX_THREADS */
pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, 1000000);
+# ifdef IRIX_THREADS
+ pthread_attr_setstacksize(&attr, 1000000);
+# endif
n_tests = 0;
- GC_enable_incremental();
+# ifdef MPROTECT_VDB
+ GC_enable_incremental();
+ (void) GC_printf0("Switched to incremental mode\n");
+ (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
+# endif
(void) GC_set_warn_proc(warn_proc);
if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
(void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
return(0);
}
#endif /* pthreads */
-#endif /* SOLARIS_THREADS || IRIX_THREADS */
+#endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS */
extern "C" {
#include "gc_priv.h"
}
-# ifdef MSWIN32
-# include <windows.h>
-# endif
+#ifdef MSWIN32
+# include <windows.h>
+#endif
#define my_assert( e ) \
if (0 == argv[ argc ]) break;}
#else
-int main( int argc, char* argv[] ) {
+# ifdef MACOS
+ int main() {
+# else
+ int main( int argc, char* argv[] ) {
+# endif
#endif
+# if defined(MACOS) // MacOS
+ char* argv_[] = {"test_cpp", "10"}; // doesn't
+ argv = argv_; // have a
+ argc = sizeof(argv_)/sizeof(argv_[0]); // commandline
+# endif
int i, iters, n;
-# ifndef __GNUC__
+# if !defined(__GNUC__) && !defined(MACOS)
int *x = (int *)alloc::allocate(sizeof(int));
*x = 29;
if (0 == i % 10) {
B::Deleting( 1 );
delete b;
- B::Deleting( 0 );}}
+ B::Deleting( 0 );}
+# ifdef FINALIZE_ON_DEMAND
+ GC_invoke_finalizers();
+# endif
+ }
/* Make sure the uncollectable As and Bs are still there. */
for (i = 0; i < 1000; i++) {
b->Test( i );
B::Deleting( 1 );
delete b;
- B::Deleting( 0 );}
+ B::Deleting( 0 );
+# ifdef FINALIZE_ON_DEMAND
+ GC_invoke_finalizers();
+# endif
+
+ }
/* Make sure most of the finalizable Cs, Ds, and Fs have
gone away. */
D::Test();
F::Test();}
-# ifndef __GNUC__
+# if !defined(__GNUC__) && !defined(MACOS)
my_assert (29 == x[3]);
# endif
GC_printf0( "The test appears to have succeeded.\n" );
int main()
{
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
printf("-lpthread\n");
# endif
# ifdef SOLARIS_THREADS
# define EXTRA_BYTES (sizeof(word))
# endif
-bool GC_explicit_typing_initialized = FALSE;
+GC_bool GC_explicit_typing_initialized = FALSE;
int GC_explicit_kind; /* Object kind for objects with indirect */
/* (possibly extended) descriptors. */
/* can be described by a BITMAP_BITS sized bitmap. */
typedef struct {
word ed_bitmap; /* lsb corresponds to first word. */
- bool ed_continued; /* next entry is continuation. */
+ GC_bool ed_continued; /* next entry is continuation. */
} ext_descr;
/* Array descriptors. GC_array_mark_proc understands these. */
if (bm & 1) {
current = *current_p;
if ((ptr_t)current >= least_ha && (ptr_t)current <= greatest_ha) {
- PUSH_CONTENTS(current, mark_stack_ptr, mark_stack_limit);
+ PUSH_CONTENTS(current, mark_stack_ptr,
+ mark_stack_limit, current_p, exit1);
}
}
}
if (last_set_bit < 0) return(0 /* no pointers */);
# if ALIGNMENT == CPP_WORDSZ/8
{
- register bool all_bits_set = TRUE;
+ register GC_bool all_bits_set = TRUE;
for (i = 0; i < last_set_bit; i++) {
if (!GC_get_bit(bm, i)) {
all_bits_set = FALSE;
#define GC_VERSION_MAJOR 4
-#define GC_VERSION_MINOR 12
-#define GC_ALPHA_VERSION GC_NOT_ALPHA
+#define GC_VERSION_MINOR 13
+#define GC_ALPHA_VERSION 2
# define GC_NOT_ALPHA 0xff