* blacklst.c: Fix typo ("even though").
* cord/cordbscs.c: Fix typo ("exponentially").
* cord/cordxtra.c: Fix typo ("represented").
* dyn_load.c: Fix typos ("uncollectible", "occurred").
* extra/AmigaOS.c: Fix typos ("specific", "necessary", "always",
"effectiveness").
* finalize.c: Fix typo ("descendants").
* include/cord.h: Fix typo ("idiosyncrasies").
* include/gc.h: Fix typo ("collectible").
* include/gc_allocator.h: Fix typos ("allocator", "[un]collectible").
* include/gc_cpp.h: Fix typos ("[un]collectible").
* mach_dep.c: Fix typo ("erroneously").
* malloc.c: Fix typos ("[un]collectible", "pointer-free",
"initialization").
* mallocx.c: Fix typos ("largely", "pointer-free", "uncollectible").
* mark.c: Fix typos ("[un]collectible", "even though").
* misc.c: Fix typo ("erroneously").
* os_dep.c: Fix typos ("non-addressable", "happening", "pointer-free").
* tests/test.c: Fix typos ("uncollectible", "reversed").
* tests/test_cpp.cc: Fix typos ("[un]collectible").
* typd_mlc.c: Fix typo ("copyright").
* win32_threads.c: Fix typos ("optimization", "uncollectible").
* See the definition of page_hash_table in gc_private.h.
* False hits from the stack(s) are much more dangerous than false hits
* from elsewhere, since the former can pin a large object that spans the
- * block, eventhough it does not start on the dangerous block.
+ * block, even though it does not start on the dangerous block.
*/
/*
* The following functions are concerned with balancing cords.
* Strategy:
* Scan the cord from left to right, keeping the cord scanned so far
- * as a forest of balanced trees of exponentialy decreasing length.
+ * as a forest of balanced trees of exponentially decreasing length.
* When a new subtree needs to be added to the forest, we concatenate all
* shorter ones to the new tree in the appropriate order, and then insert
* the result into the forest.
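As a simplified sketch of that strategy, the following illustration assumes power-of-two length classes rather than the Fibonacci-based bounds cordbscs.c actually uses; the node, cat, and forest_add names are hypothetical stand-ins, not library code:

#include <stddef.h>
#include <stdlib.h>

struct node { struct node *left, *right; size_t len; };

static struct node *cat(struct node *a, struct node *b) /* NULL means empty */
{
    struct node *r;

    if (a == 0) return b;
    if (b == 0) return a;
    r = (struct node *)malloc(sizeof *r);
    r -> left = a; r -> right = b; r -> len = a -> len + b -> len;
    return r;
}

#define MAX_SLOTS 48
static struct node *forest[MAX_SLOTS]; /* slot i holds a tree of length in [2^i, 2^(i+1)) */

static void forest_add(struct node *x)
{
    struct node *sum = 0;
    int i = 0;

    if (x == 0) return;
    /* Concatenate every shorter (more recently scanned, i.e. rightmost) */
    /* tree onto the left of the new subtree, preserving text order.     */
    while (((size_t)1 << (i + 1)) <= x -> len) {
        sum = cat(forest[i], sum);
        forest[i] = 0;
        ++i;
    }
    sum = cat(sum, x);
    /* Insert the result, merging upward while the target slot is taken. */
    for (;;) {
        if (forest[i] == 0 && sum -> len < ((size_t)1 << (i + 1))) {
            forest[i] = sum;
            break;
        }
        sum = cat(forest[i], sum);
        forest[i] = 0;
        ++i;
    }
}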
c = getc(f);
if (c == 0) {
/* Append the right number of NULs */
- /* Note that any string of NULs is rpresented in 4 words, */
+ /* Note that any string of NULs is represented in 4 words, */
/* independent of its length. */
register size_t count = 1;
/* away pointers in pieces of the stack segment that we */
/* don't scan. We work around this */
/* by treating anything allocated by libpthread as */
- /* uncollectable, as we do in some other cases. */
+ /* uncollectible, as we do in some other cases. */
/* A specifically identified problem is that */
/* thread stacks contain pointers to dynamic thread */
/* vectors, which may be reused due to thread caching. */
/* Get info about next shared library */
status = shl_get(index, &shl_desc);
- /* Check if this is the end of the list or if some error occured */
+ /* Check if this is the end of the list or if some error occurred */
if (status != 0) {
# ifdef GC_HPUX_THREADS
/* I've seen errno values of 0. The man page is not clear */
/******************************************************************
- AmigaOS-spesific routines for GC.
+ AmigaOS-specific routines for GC.
This file is normally included from os_dep.c
******************************************************************/
/******************************************************************
- Amiga-spesific routines to obtain memory, and force GC to give
+ Amiga-specific routines to obtain memory, and force GC to give
back fast-mem whenever possible.
  These hacks make gc-programs go many times faster when
- the amiga is low on memory, and are therefore strictly necesarry.
+ the Amiga is low on memory, and are therefore strictly necessary.
-Kjetil S. Matheussen, 2000.
******************************************************************/
/*
- * Allways set to the last size of memory tried to be allocated.
+ * Always set to the size of the last attempted allocation.
* Needed to ensure allocation when the size is bigger than 100000.
*
*/
#endif
-/* The allocating-functions defined inside the amiga-blocks in gc.h is called
+/* The allocating functions defined inside the Amiga blocks in gc.h are called
* via these functions.
*/
void *GC_amiga_allocwrapper_any(size_t size,void *(*AllocFunction)(size_t size2)){
void *ret,*ret2;
- GC_amiga_dontalloc=TRUE; // Pretty tough thing to do, but its indeed necesarry.
+ GC_amiga_dontalloc=TRUE; // Pretty tough thing to do, but it's indeed necessary.
latestsize=size;
ret=(*AllocFunction)(size);
#ifdef GC_AMIGA_RETRY
else{
/* We got chip-mem. Better try again and again and again etc., we might get fast-mem sooner or later... */
- /* Using gctest to check the effectiviness of doing this, does seldom give a very good result. */
+ /* Using gctest to check the effectiveness of doing this seldom gives a very good result. */
/* However, real programs don't normally rapidly allocate and deallocate. */
// printf("trying to force... %d bytes... ",size);
if(
/* Type of mark procedure used for marking from finalizable object. */
/* This procedure normally does not mark the object, only its */
-/* descendents. */
+/* descendants. */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
#define HASH3(addr,size,log_size) \
/* the correct buffer size. */
/* 4. Most of the conversions are implemented through the native */
/* vsprintf. Hence they are usually no faster, and */
-/* idiosyncracies of the native printf are preserved. However, */
+/* idiosyncrasies of the native printf are preserved. However, */
/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
/* the result shares the original structure. This may make them */
/* very efficient in some unusual applications. */
/* new object is cleared. GC_malloc_stubborn promises that no changes */
/* to the object will occur after GC_end_stubborn_change has been */
/* called on the result of GC_malloc_stubborn. GC_malloc_uncollectable */
-/* allocates an object that is scanned for pointers to collectable */
-/* objects, but is not itself collectable. The object is scanned even */
+/* allocates an object that is scanned for pointers to collectible */
+/* objects, but is not itself collectible. The object is scanned even */
/* if it does not appear to be reachable. GC_malloc_uncollectable and */
/* GC_free called on the resulting object implicitly update */
/* GC_non_gc_bytes appropriately. */
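A minimal usage sketch of the calls described above; the roots structure and function name below are made-up examples, not part of the library:

#include "gc.h"

struct roots { void *objs[16]; };

void uncollectable_example(void)
{
    struct roots *r;

    GC_INIT();
    /* Scanned for pointers, but never reclaimed automatically: */
    r = (struct roots *)GC_malloc_uncollectable(sizeof(struct roots));
    r -> objs[0] = GC_malloc(64);   /* collectible; kept alive via *r */
    /* ... use r ... */
    GC_free(r);                     /* must be released explicitly */
}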
/* allocated by GC_malloc or friends. Obj may also be */
/* NULL or point to something outside GC heap (in this */
/* case, fn is ignored, *ofn and *ocd are set to NULL). */
- /* Note that any garbage collectable object referenced */
+ /* Note that any garbage collectible object referenced */
/* by cd will be considered accessible until the */
/* finalizer is invoked. */
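For illustration, a small sketch of registering a finalizer through this interface; the wrapper type and on_reclaim function are hypothetical:

#include <stdio.h>
#include "gc.h"

struct wrapper { int id; };

static void GC_CALLBACK on_reclaim(void *obj, void *client_data)
{
    printf("wrapper %d reclaimed, cd = %p\n",
           ((struct wrapper *)obj) -> id, client_data);
}

void finalizer_example(void)
{
    struct wrapper *w = (struct wrapper *)GC_malloc(sizeof(struct wrapper));
    GC_finalization_proc old_fn;
    void *old_cd;

    w -> id = 42;
    /* The client data (here 0) stays accessible until on_reclaim runs. */
    GC_register_finalizer(w, on_reclaim, 0, &old_fn, &old_cd);
}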
/*
* This implements standard-conforming allocators that interact with
- * the garbage collector. Gc_alloctor<T> allocates garbage-collectable
+ * the garbage collector. Gc_allocator<T> allocates garbage-collectible
* objects of type T. Traceable_allocator<T> allocates objects that
* are not themselves garbage collected, but are scanned by the
- * collector for pointers to collectable objects. Traceable_alloc
+ * collector for pointers to collectible objects. Traceable_allocator
* should be used for explicitly managed STL containers that may
- * point to collectable objects.
+ * point to collectible objects.
*
* This code was derived from an earlier version of the GNU C++ standard
* library, which itself was derived from the SGI STL implementation.
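A brief usage sketch of the two allocators named above (the element types are arbitrary illustration):

#include <vector>
#include "gc_allocator.h"

/* Element storage lives in the collectible GC heap: */
std::vector<int, gc_allocator<int> > gc_vec;

/* Storage is scanned for pointers but never collected; suitable when the */
/* container is managed explicitly yet may refer to collectible objects:  */
std::vector<void *, traceable_allocator<void *> > root_vec;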
Garbage Collection for C++", by John R. Elis and David L. Detlefs
(ftp://ftp.parc.xerox.com/pub/ellis/gc).
-All heap-allocated objects are either "collectable" or
-"uncollectable". Programs must explicitly delete uncollectable
+All heap-allocated objects are either "collectible" or
+"uncollectible". Programs must explicitly delete uncollectible
objects, whereas the garbage collector will automatically delete
-collectable objects when it discovers them to be inaccessible.
-Collectable objects may freely point at uncollectable objects and vice
+collectible objects when it discovers them to be inaccessible.
+Collectible objects may freely point at uncollectible objects and vice
versa.
-Objects allocated with the built-in "::operator new" are uncollectable.
+Objects allocated with the built-in "::operator new" are uncollectible.
-Objects derived from class "gc" are collectable. For example:
+Objects derived from class "gc" are collectible. For example:
class A: public gc {...};
- A* a = new A; // a is collectable.
+ A* a = new A; // a is collectible.
-Collectable instances of non-class types can be allocated using the GC
+Collectible instances of non-class types can be allocated using the GC
(or UseGC) placement:
typedef int A[ 10 ];
A* a = new (GC) A;
-Uncollectable instances of classes derived from "gc" can be allocated
+Uncollectible instances of classes derived from "gc" can be allocated
using the NoGC placement:
class A: public gc {...};
- A* a = new (NoGC) A; // a is uncollectable.
+ A* a = new (NoGC) A; // a is uncollectible.
-The new(PointerFreeGC) syntax allows the allocation of collectable
+The new(PointerFreeGC) syntax allows the allocation of collectible
objects that are not scanned by the collector. This is useful if you
are allocating compressed data, bitmaps, or network packets. (In
the latter case, it may remove danger of unfriendly network packets
intentionally containing values that cause spurious memory retention.)
-Both uncollectable and collectable objects can be explicitly deleted
+Both uncollectible and collectible objects can be explicitly deleted
with "delete", which invokes an object's destructors and frees its
storage immediately.
-A collectable object may have a clean-up function, which will be
+A collectible object may have a clean-up function, which will be
invoked when the collector discovers the object to be inaccessible.
An object derived from "gc_cleanup" or containing a member derived
from "gc_cleanup" has a default clean-up function that invokes the
storage released, B will then become inaccessible and will have its
clean-up invoked. If A points at B and B points to A, forming a
cycle, then that's considered a storage leak, and neither will be
-collectable. See the interface gc.h for low-level facilities for
+collectible. See the interface gc.h for low-level facilities for
handling such cycles of objects with clean-up.
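A compact sketch of the default clean-up behavior just described; the Buffer class is invented for illustration:

#include "gc_cpp.h"

class Buffer: public gc_cleanup {
  public:
    ~Buffer() { /* runs when the collector finds the object inaccessible */ }
};

void cleanup_example()
{
    Buffer* b = new Buffer;     // collectible, with default clean-up
    b = 0;                      // now unreachable; destructor runs later
}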
The collector cannot guarantee that it will find all inaccessible
If your compiler doesn't support "operator new[]", beware that an
array of type T, where T is derived from "gc", may or may not be
-allocated as a collectable object (it depends on the compiler). Use
-the explicit GC placement to make the array collectable. For example:
+allocated as a collectible object (it depends on the compiler). Use
+the explicit GC placement to make the array collectible. For example:
class A: public gc {...};
- A* a1 = new A[ 10 ]; // collectable or uncollectable?
- A* a2 = new (GC) A[ 10 ]; // collectable
+ A* a1 = new A[ 10 ]; // collectible or uncollectible?
+ A* a2 = new (GC) A[ 10 ]; // collectible.
-3. The destructors of collectable arrays of objects derived from
+3. The destructors of collectible arrays of objects derived from
"gc_cleanup" will not be invoked properly. For example:
class A: public gc_cleanup {...};
GC_NS_QUALIFY(GCCleanUpFunc) cleanup = 0,
void* clientData = 0 );
/*
- Allocates a collectable or uncollected object, according to the
+ Allocates a collectible or uncollectible object, according to the
value of "gcp".
- For collectable objects, if "cleanup" is non-null, then when the
+ For collectible objects, if "cleanup" is non-null, then when the
allocated object "obj" becomes inaccessible, the collector will
invoke the function "cleanup( obj, clientData )" but will not
invoke the object's destructors. It is an error to explicitly
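To make the clean-up variant concrete, a small sketch using the placement form declared above; Packet and release_packet are hypothetical names, and UseGC is the collectible placement:

#include "gc_cpp.h"

struct Packet { int len; };

static void release_packet(void* obj, void* clientData) {
    /* invoked instead of ~Packet() when the object becomes inaccessible */
    (void)obj; (void)clientData;
}

Packet* make_packet() {
    return new (UseGC, release_packet, 0) Packet;
}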
# if defined(M68K) && defined(AMIGA)
/* This function is not static because it could also be */
- /* errorneously defined in .S file, so this error would be caught */
+ /* erroneously defined in .S file, so this error would be caught */
/* by the linker. */
void GC_push_regs(void)
{
}
}
-/* Allocate lb bytes of atomic (pointerfree) data */
+/* Allocate lb bytes of atomic (pointer-free) data. */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
}
}
-/* Allocate lb bytes of pointerful, traced, but not collectable data */
+/* Allocate lb bytes of pointerful, traced, but not collectible data. */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
void *op;
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
- /*
- * Thread initialisation can call malloc before
- * we're ready for it.
- * It's not clear that this is enough to help matters.
- * The thread implementation may well call malloc at other
- * inopportune times.
- */
+ /* Thread initialization can call malloc before we're ready for it. */
+ /* It's not clear that this is enough to help matters. */
+ /* The thread implementation may well call malloc at other */
+ /* inopportune times. */
if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
return((void *)REDIRECT_MALLOC(lb));
return NULL;
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
/* libpthread allocated some memory that is only pointed to by */
- /* mmapped thread stacks. Make sure it's not collectable. */
+ /* mmapped thread stacks. Make sure it is not collectible. */
{
static GC_bool lib_bounds_set = FALSE;
ptr_t caller = (ptr_t)__builtin_return_address(0);
{
/* Don't bother with initialization checks. If nothing */
/* has been initialized, the check fails, and that's safe, */
- /* since we haven't allocated uncollectable objects either. */
+ /* since we have not allocated uncollectible objects either. */
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
return result;
}
-/* This one exists largerly to redirect posix_memalign for leaks finding. */
+/* This one exists largely to redirect posix_memalign for finding leaks. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
/* Check alignment properly. */
}
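A minimal sketch of calling it, mirroring the standard posix_memalign convention of returning 0 on success; the function name is made up:

#include "gc.h"

int aligned_alloc_example(void)
{
    void *p = 0;

    if (GC_posix_memalign(&p, 64, 1024) != 0) return -1;
    /* p is now 64-byte aligned, collectible memory. */
    return 0;
}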
#ifdef ATOMIC_UNCOLLECTABLE
- /* Allocate lb bytes of pointerfree, untraced, uncollectable data */
+ /* Allocate lb bytes of pointer-free, untraced, uncollectible data */
/* This is normally roughly equivalent to the system malloc. */
/* But it may be useful if malloc is redefined. */
GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
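For illustration, a short sketch of using the pointer-free, uncollectible variant; the dup_label helper is hypothetical:

#include <string.h>
#include "gc.h"

char *dup_label(const char *s)
{
    size_t n = strlen(s) + 1;
    /* Neither scanned for pointers nor reclaimed automatically: */
    char *copy = (char *)GC_malloc_atomic_uncollectable(n);

    if (copy != 0) memcpy(copy, s, n);
    return copy;    /* the caller releases it with GC_free() */
}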
static struct hblk * scan_ptr;
STATIC GC_bool GC_objects_are_marked = FALSE;
- /* Are there collectable marked objects in the heap? */
+ /* Are there collectible marked objects in the heap? */
/* Is a collection in progress? Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the */
hhdr -> hb_n_marks = 0;
}
-/* Set all mark bits in the header. Used for uncollectable blocks. */
+/* Set all mark bits in the header. Used for uncollectible blocks. */
GC_INNER void GC_set_hdr_marks(hdr *hhdr)
{
unsigned i;
STATIC struct hblk * GC_push_next_marked(struct hblk *h);
/* Ditto, but also mark from clean pages. */
STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
- /* Ditto, but mark only from uncollectable pages. */
+ /* Ditto, but mark only from uncollectible pages. */
static void alloc_mark_stack(size_t);
/* Try to share the load, since the main stack is empty, */
/* and helper threads are waiting for a refill. */
/* The entries near the bottom of the stack are likely */
- /* to require more work. Thus we return those, eventhough */
+ /* to require more work. Thus we return those, even though */
/* it's harder. */
mse * new_bottom = local_mark_stack
+ (local_top - local_mark_stack)/2;
}
#endif /* !GC_DISABLE_INCREMENTAL */
-/* Similar to above, but for uncollectable pages. Needed since we */
+/* Similar to above, but for uncollectible pages. Needed since we */
/* do not clear marks for such pages, even for full collections. */
STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
void *GC_clear_stack_inner(void *, ptr_t);
#else
/* Clear the stack up to about limit. Return arg. This function is */
- /* not static because it could also be errorneously defined in .S */
+ /* not static because it could also be erroneously defined in .S */
/* file, so this error would be caught by the linker. */
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
siglongjmp(GC_jmp_buf_openbsd, 1);
}
- /* Return the first non-addressible location > p or bound. */
+ /* Return the first non-addressable location > p or bound. */
/* Requires the allocation lock. */
STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
{
* SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
* This means that clients must ensure that system calls don't write
* to the write-protected heap. Probably the best way to do this is to
- * ensure that system calls write at most to POINTERFREE objects in the
+ * ensure that system calls write at most to pointer-free objects in the
* heap, and do even that only if we are on a platform on which those
* are not protected. Another alternative is to wrap system calls
* (see example for read below), but the current implementation holds
* applications.
* We assume the page size is a multiple of HBLKSIZE.
- * We prefer them to be the same. We avoid protecting POINTERFREE
+ * We prefer them to be the same. We avoid protecting pointer-free
* objects only if they are the same.
*/
# ifdef DARWIN
}
/* We assume that either the world is stopped or it's OK to lose dirty */
-/* bits while this is happenning (as in GC_enable_incremental). */
+/* bits while this is happening (as in GC_enable_incremental). */
GC_INNER void GC_read_dirty(void)
{
# if defined(GWW_VDB)
}
#endif /* GC_GCJ_SUPPORT */
-/* To check uncollectable allocation we build lists with disguised cdr */
+/* To check uncollectible allocation we build lists with disguised cdr */
/* pointers, and make sure they don't go away. */
sexpr uncollectable_ints(int low, int up)
{
h[1999] = gcj_ints(1,200);
for (i = 0; i < 51; ++i)
h[1999] = gcj_reverse(h[1999]);
- /* Leave it as the reveresed list for now. */
+ /* Leave it as the reversed list for now. */
# else
h[1999] = ints(1,200);
# endif
#endif
class A {public:
- /* An uncollectable class. */
+ /* An uncollectible class. */
A( int iArg ): i( iArg ) {}
void Test( int iArg ) {
class B: public GC_NS_QUALIFY(gc), public A { public:
- /* A collectable class. */
+ /* A collectible class. */
B( int j ): A( j ) {}
~B() {
class C: public GC_NS_QUALIFY(gc_cleanup), public A { public:
- /* A collectable class with cleanup and virtual multiple inheritance. */
+ /* A collectible class with cleanup and virtual multiple inheritance. */
C( int levelArg ): A( levelArg ), level( levelArg ) {
nAllocated++;
class D: public GC_NS_QUALIFY(gc) { public:
- /* A collectable class with a static member function to be used as
+ /* A collectible class with a static member function to be used as
an explicit clean-up function supplied to ::new. */
D( int iArg ): i( iArg ) {
class E: public GC_NS_QUALIFY(gc_cleanup) { public:
- /* A collectable class with clean-up for use by F. */
+ /* A collectible class with clean-up for use by F. */
E() {
nAllocated++;}
class F: public E {public:
- /* A collectable class with clean-up, a base with clean-up, and a
+ /* A collectible class with clean-up, a base with clean-up, and a
member with clean-up. */
F() {
for (iters = 1; iters <= n; iters++) {
GC_printf( "Starting iteration %d\n", iters );
- /* Allocate some uncollectable As and disguise their pointers.
+ /* Allocate some uncollectible As and disguise their pointers.
Later we'll check to see if the objects are still there. We're
- checking to make sure these objects really are uncollectable. */
+ checking to make sure these objects really are uncollectible. */
GC_word as[ 1000 ];
GC_word bs[ 1000 ];
for (i = 0; i < 1000; i++) {
(void)f;
if (0 == i % 10) delete c;}
- /* Allocate a very large number of collectable As and Bs and
+ /* Allocate a very large number of collectible As and Bs and
drop the references to them immediately, forcing many
collections. */
for (i = 0; i < 1000000; i++) {
# endif
}
- /* Make sure the uncollectable As and Bs are still there. */
+ /* Make sure the uncollectible As and Bs are still there. */
for (i = 0; i < 1000; i++) {
A* a = (A*) Undisguise( as[ i ] );
B* b = (B*) Undisguise( bs[ i ] );
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * opyright (c) 1999-2000 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2000 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
/* Push all registers that might point into the heap. Frame */
/* pointer registers are included in case client code was */
- /* compiled with the 'omit frame pointer' optimisation. */
+ /* compiled with the 'omit frame pointer' optimization. */
# define PUSH1(reg) GC_push_one((word)context.reg)
# define PUSH2(r1,r2) (PUSH1(r1), PUSH1(r2))
# define PUSH4(r1,r2,r3,r4) (PUSH2(r1,r2), PUSH2(r3,r4))
start = si -> start_routine;
start_arg = si -> arg;
- GC_free(si); /* was allocated uncollectable */
+ GC_free(si); /* was allocated uncollectible */
pthread_cleanup_push(GC_thread_exit_proc, (void *)me);
result = (*start)(start_arg);