+
+Tue Oct 10 16:58:37 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * darwin_stop_world.c: patch from Allan Hsu <allan@counterpop.net>
+ to avoid using memory just after freeing it.
+
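	A minimal sketch of the corrected pattern, with simplified types;
	get_snapshot() and lists_differ() are hypothetical stand-ins for
	task_threads() and GC_suspend_thread_list(), and the real change is
	in the darwin_stop_world.c hunk further down. The point is that the
	previous thread list must stay alive until it has been compared
	against the current one:

	    #include <stdlib.h>

	    typedef int item_t;

	    extern item_t *get_snapshot(int *count);              /* hypothetical */
	    extern int lists_differ(item_t *cur, int curcount,
	                            item_t *prev, int prevcount); /* hypothetical */

	    void stop_loop(void)
	    {
	        item_t *prev_list = NULL, *cur_list;
	        int prevcount = 0, curcount, changes;

	        do {
	            cur_list = get_snapshot(&curcount);
	            changes = lists_differ(cur_list, curcount,
	                                   prev_list, prevcount);
	            /* prev_list is dead only after the comparison; freeing
	               it before this point was the use-after-free. */
	            free(prev_list);
	            prev_list = cur_list;
	            prevcount = curcount;
	        } while (changes);

	        free(cur_list);  /* release the final snapshot */
	    }
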
2006-08-31 Zoltan Varga <vargaz@freemail.hu>

	* libgc.vcproj: Define GC_INSIDE_DLL on VC build too. Patch from
	Bryan Berg (bryan AT imeem DOT com).
2005-10-11 Zoltan Varga <vargaz@gmail.com>
-
+
* pthread_support.c (GC_thread_deregister_foreign): Make this return
void instead of void*.
-
+
+2005-09-29 Raja R Harinath <harinath@gmail.com>
+
+ * configure.host: Fix syntax error.
+
2005-08-24 Zoltan Varga <vargaz@gmail.com>

	* win32_threads.c: If GC_INSIDE_DLL is defined, define the DllMain
	function for automatic registration of threads in the mono executable.
2004-07-27 John Merryweather Cooper <john_m_cooper@yahoo.com>
- # configure.in: Fix compiler flags selection to be correct for
- both FreeBSD 4.x (aka -STABLE) and FreeBSD 5.x (aka -CURRENT).
+
+ * configure.in: Fix to properly select flags for pthreads
+ on FreeBSD 4.x (aka -STABLE) and 5.x (aka -CURRENT).
2004-07-01 Zoltan Varga <vargaz@freemail.hu>

	* include/install-sh doc/install-sh: New files.
+2004-06-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * include/private/gc_priv.h (SMALL_OBJ): Add Hans' fix for crashes
+ seen on SPARC64.
+
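	The effect of that one-character fix (the gc_priv.h hunk appears
	further down) is easiest to see with concrete numbers; the values
	below are made up for illustration, the real MAXOBJBYTES and
	EXTRA_BYTES depend on the build configuration:

	    #include <stdio.h>
	    #include <stddef.h>

	    #define MAXOBJBYTES 2048                 /* hypothetical */
	    #define EXTRA_BYTES 8                    /* hypothetical */
	    #define SMALL_OBJ_OLD(bytes) ((bytes) <  (MAXOBJBYTES - EXTRA_BYTES))
	    #define SMALL_OBJ_NEW(bytes) ((bytes) <= (MAXOBJBYTES - EXTRA_BYTES))

	    int main(void)
	    {
	        size_t boundary = MAXOBJBYTES - EXTRA_BYTES;   /* 2040 */
	        /* Old: 2040 < 2040 is false.  New: 2040 <= 2040 is true,
	           so a request of exactly the boundary size is now
	           classified as a small object. */
	        printf("old: %d, new: %d\n",
	               (int)SMALL_OBJ_OLD(boundary), (int)SMALL_OBJ_NEW(boundary));
	        return 0;
	    }
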
Thu Jun 24 15:39:00 CEST 2004 Paolo Molaro <lupus@ximian.com>

	* alloc.c: use Hans' fix for bug #59557.
rs6000_mach_dep.s sparc_mach_dep.S sparc_netbsd_mach_dep.s \
sparc_sunos4_mach_dep.s ia64_save_regs_in_stack.s
+libmonogc_static_la_SOURCES = $(libmonogc_la_SOURCES)
+libmonogc_static_la_LIBADD = $(libmonogc_la_LIBADD)
+libmonogc_static_la_LDFLAGS = -static
+
EXTRA_DIST += alpha_mach_dep.S mips_sgi_mach_dep.s sparc_mach_dep.S
AM_CFLAGS = @GC_CFLAGS@
gc_cflags=""
if test :"$GCC": = :yes: ; then
+ :
else
case "$host" in
hppa*-*-hpux* )
prev_list = NULL;
prevcount = 0;
do {
- int result;
+ int result;
kern_result = task_threads(my_task, &act_list, &listcount);
- result = GC_suspend_thread_list(act_list, listcount,
- prev_list, prevcount);
- changes = result;
- prev_list = act_list;
- prevcount = listcount;
- if(kern_result == KERN_SUCCESS) {
- int i;
+ if(kern_result == KERN_SUCCESS) {
+ result = GC_suspend_thread_list(act_list, listcount,
+ prev_list, prevcount);
+ changes = result;
- for(i = 0; i < listcount; i++)
- mach_port_deallocate(my_task, act_list[i]);
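+		  /* prev_list may be released only now, after it has
+		     been compared against act_list above. */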
+ if(prev_list != NULL) {
+ for(i = 0; i < prevcount; i++)
+ mach_port_deallocate(my_task, prev_list[i]);
+
+ vm_deallocate(my_task, (vm_address_t)prev_list, sizeof(thread_t) * prevcount);
+ }
- vm_deallocate(my_task, (vm_address_t)act_list, sizeof(thread_t) * listcount);
- }
+ prev_list = act_list;
+ prevcount = listcount;
+ }
} while (changes);
-
+
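+	  /* act_list still holds the final snapshot from task_threads();
+	     release its ports and the list storage itself. */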
+ for(i = 0; i < listcount; i++)
+ mach_port_deallocate(my_task, act_list[i]);
+
+ vm_deallocate(my_task, (vm_address_t)act_list, sizeof(thread_t) * listcount);
+
# ifdef MPROTECT_VDB
if(GC_incremental) {
* These routines normally require an explicit call to GC_init(), though
* that may be done from a constructor function.
*/
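A hedged usage sketch of the requirement stated above; only GC_init()
and GC_local_malloc() are taken from these headers, the rest is
illustrative and assumes a build with THREAD_LOCAL_ALLOC:

    #include "gc.h"
    #include "gc_local_alloc.h"

    int main(void)
    {
        GC_init();  /* explicit initialization before any local alloc */
        int *p = (int *)GC_local_malloc(64 * sizeof(int));
        return p == 0;  /* memory is reclaimed by the collector */
    }
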
-
-#ifndef GC_LOCAL_ALLOC_H
-#define GC_LOCAL_ALLOC_H
-
+
#ifndef _GC_H
# include "gc.h"
#endif
# include "gc_gcj.h"
#endif
+#ifndef GC_LOCAL_ALLOC_H
+#define GC_LOCAL_ALLOC_H
+
/* We assume ANSI C for this interface. */
GC_PTR GC_local_malloc(size_t bytes);
#if defined(GC_GCJ_SUPPORT)
GC_PTR GC_local_gcj_malloc(size_t bytes,
void * ptr_to_struct_containing_descr);
+ GC_PTR GC_local_gcj_fast_malloc(size_t lw,
+ void * ptr_to_struct_containing_descr);
#endif
# ifdef GC_DEBUG
# define GC_LOCAL_MALLOC_ATOMIC(s) GC_debug_malloc_atomic(s,GC_EXTRAS)
# ifdef GC_GCJ_SUPPORT
# define GC_LOCAL_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
+# define GC_LOCAL_GCJ_FAST_MALLOC(s,d) GC_debug_gcj_fast_malloc(s,d,GC_EXTRAS)
# endif
# else
# define GC_LOCAL_MALLOC(s) GC_local_malloc(s)
# define GC_LOCAL_MALLOC_ATOMIC(s) GC_local_malloc_atomic(s)
# ifdef GC_GCJ_SUPPORT
# define GC_LOCAL_GCJ_MALLOC(s,d) GC_local_gcj_malloc(s,d)
+# define GC_LOCAL_GCJ_FAST_MALLOC(s,d) GC_local_gcj_fast_malloc(s,d)
# endif
# endif
# define GC_MALLOC_ATOMIC(s) GC_LOCAL_MALLOC_ATOMIC(s)
# ifdef GC_GCJ_SUPPORT
# undef GC_GCJ_MALLOC
+# undef GC_GCJ_FAST_MALLOC
# define GC_GCJ_MALLOC(s,d) GC_LOCAL_GCJ_MALLOC(s,d)
+# define GC_GCJ_FAST_MALLOC(s,d) GC_LOCAL_GCJ_FAST_MALLOC(s,d)
# endif
# endif
# define VOLATILE
#endif
-#if 0 /* defined(__GNUC__) doesn't work yet */
+#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
+/* This doesn't work in some earlier gcc versions */
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
/* Equivalent to (expr), but predict that usually (expr)==outcome. */
#else
# define EXPECT(expr, outcome) (expr)
-#endif /* __GNUC__ */
+#endif
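For illustration, a hypothetical free-list pop using EXPECT the way the
macro is intended to be used; with gcc >= 3 and optimization enabled,
the refill branch is laid out off the hot path (fetch() is not a libgc
function):

    static void *fetch(void **freelist)
    {
        void *ptr = *freelist;
        if (EXPECT(ptr != 0, 1)) {       /* predicted common case */
            *freelist = *(void **)ptr;   /* pop the free-list head */
            return ptr;
        }
        return 0;                        /* rare case: caller refills */
    }
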
# ifndef GC_LOCKS_H
# include "gc_locks.h"
# else
# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
# endif
-# define SMALL_OBJ(bytes) ((bytes) < (MAXOBJBYTES - EXTRA_BYTES))
+# define SMALL_OBJ(bytes) ((bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
# ifndef MIN_WORDS
/* MIN_WORDS is the size of the smallest allocated object. */
}
}
+/* Similar to GC_local_gcj_malloc, but the size is in words, and we don't */
+/* adjust it. The size is assumed to be such that it can be */
+/* allocated as a small object. */
+void * GC_local_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
+{
+ ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
+ -> gcj_freelists + lw;
+ ptr_t my_entry = *my_fl;
+
+ GC_ASSERT(GC_gcj_malloc_initialized);
+
+ if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
+ GC_PTR result = (GC_PTR)my_entry;
+ GC_ASSERT(!GC_incremental);
+ /* We assert that any concurrent marker will stop us. */
+ /* Thus it is impossible for a mark procedure to see the */
+ /* allocation of the next object, but to see this object */
+ /* still containing a free list pointer. Otherwise the */
+ /* marker might find a random "mark descriptor". */
+ *(volatile ptr_t *)my_fl = obj_link(my_entry);
+ /* We must update the freelist before we store the pointer. */
+ /* Otherwise a GC at this point would see a corrupted */
+ /* free list. */
+ /* A memory barrier is probably never needed, since the */
+ /* action of stopping this thread will cause prior writes */
+ /* to complete. */
+ GC_ASSERT(((void * volatile *)result)[1] == 0);
+ *(void * volatile *)result = ptr_to_struct_containing_descr;
+ return result;
+ } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
+ if (!GC_incremental) *my_fl = my_entry + lw + 1;
+ /* In the incremental case, we always have to take this */
+ /* path. Thus we leave the counter alone. */
+ return GC_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
+ } else {
+ GC_generic_malloc_many(BYTES_FROM_INDEX(lw), GC_gcj_kind, my_fl);
+ if (*my_fl == 0) return GC_oom_fn(BYTES_FROM_INDEX(lw));
+ return GC_local_gcj_fast_malloc(lw, ptr_to_struct_containing_descr);
+ }
+}
+
#endif /* GC_GCJ_SUPPORT */
# else /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */
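
A sketch of how a client might call the new entry point; the vtable
layout and descriptor value below are made up (in gcj they come from
the compiler), GC_init_gcj_malloc() must have been called once
beforehand, and lw is a word count already known to fit a small object,
since the callee does not adjust it:

    #include "gc.h"
    #include "gc_gcj.h"
    #include "gc_local_alloc.h"

    struct fake_vtable {      /* hypothetical layout */
        void *first_word;
        GC_word descr;        /* mark descriptor read by the collector */
    };

    static struct fake_vtable vt = { 0, 0 /* e.g. a simple descriptor */ };

    void *alloc_obj(size_t words)
    {
        return GC_local_gcj_fast_malloc(words, &vt);
    }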