return fn;
}
-/*
- * Stop the world garbage collection. Assumes lock held. If stop_func is
- * not GC_never_stop_func then abort if stop_func returns TRUE.
- * Return TRUE if we successfully completed the collection.
- */
+/* Stop the world garbage collection. If stop_func is not */
+/* GC_never_stop_func then abort if stop_func returns TRUE. */
+/* Return TRUE if we successfully completed the collection. */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
# ifndef SMALL_CONFIG
CLOCK_TYPE current_time;
# endif
ASSERT_CANCEL_DISABLED();
+ GC_ASSERT(I_HOLD_LOCK());
if (GC_dont_gc || (*stop_func)()) return FALSE;
if (GC_on_collection_event)
GC_on_collection_event(GC_EVENT_START);
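/* Illustrative sketch, not part of the patch: the GC_ASSERT(I_HOLD_LOCK())  */
/* checks added here only expand to code when assertions are enabled, so the */
/* lock-held contract is verified in debug builds and costs nothing          */
/* otherwise.  The gc_priv.h definition is approximately:                    */
#ifdef GC_ASSERTIONS
# define GC_ASSERT(expr) \
            do { \
              if (!(expr)) { \
                GC_err_printf("Assertion failure: %s:%d\n", \
                              __FILE__, __LINE__); \
                ABORT("assertion failure"); \
              } \
            } while (0)
#else
# define GC_ASSERT(expr)
#endif
/* Assertion-enabled builds are typically produced with                      */
/* ./configure --enable-gc-assertions or by compiling with -DGC_ASSERTIONS.  */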
    word old_size = ((log_old_size == -1)? 0: ((word)1 << log_old_size));
word new_size = (word)1 << log_new_size;
/* FIXME: Power of 2 size often gets rounded up to one more page. */
- struct hash_chain_entry **new_table = (struct hash_chain_entry **)
- GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
- (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
+ struct hash_chain_entry **new_table;
+ GC_ASSERT(I_HOLD_LOCK());
+ new_table = (struct hash_chain_entry **)
+ GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
+ (size_t)new_size * sizeof(struct hash_chain_entry *),
+ NORMAL);
if (new_table == 0) {
if (*table == 0) {
ABORT("Insufficient space for initial table allocation");
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
-/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
ptr_t result;
GC_bool retry = FALSE;
+ GC_ASSERT(I_HOLD_LOCK());
lb = ROUNDUP_GRANULE_SIZE(lb);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
}
/* Allocate a large block of size lb bytes. Clear if appropriate. */
-/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
- ptr_t result = GC_alloc_large(lb, k, flags);
+ ptr_t result;
word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
- if (0 == result) return 0;
- if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
+ GC_ASSERT(I_HOLD_LOCK());
+ result = GC_alloc_large(lb, k, flags);
+ if (result != NULL
+ && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
/* Clear the whole block, in case of GC_realloc call. */
BZERO(result, n_blocks * HBLKSIZE);
}
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
-/* First a version that assumes we already */
-/* hold lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
+ GC_ASSERT(I_HOLD_LOCK());
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
size_t lg = GC_size_map[lb];
}
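/* For code outside the collector, one public way to run a function with   */
/* the allocation lock held (so that I_HOLD_LOCK() is satisfied) is        */
/* GC_call_with_alloc_lock.  A minimal sketch; the callback and function   */
/* names below are hypothetical:                                           */
#include "gc.h"

static void * GC_CALLBACK run_with_lock(void *client_data)
{
    /* The allocator lock is held for the duration of this callback.       */
    return client_data;
}

void call_with_lock_example(void)
{
    (void)GC_call_with_alloc_lock(run_with_lock, NULL);
}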
/* Allocate a composite object of size n bytes. The caller guarantees */
-/* that pointers past the first page are not relevant. Caller holds */
-/* allocation lock. */
+/* that pointers past the first page are not relevant. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
+ GC_ASSERT(I_HOLD_LOCK());
if (lb <= HBLKSIZE)
return(GC_generic_malloc_inner(lb, k));
lb_adjusted = ADD_SLOP(lb);
/* Helper procedures for new kind creation. */
GC_API void ** GC_CALL GC_new_free_list_inner(void)
{
- void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
- PTRFREE);
- if (result == 0) ABORT("Failed to allocate freelist for new kind");
+ void *result;
+
+ GC_ASSERT(I_HOLD_LOCK());
+ result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1) * sizeof(ptr_t), PTRFREE);
+ if (NULL == result) ABORT("Failed to allocate freelist for new kind");
BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
return result;
}
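/* For comparison, the corresponding public entry point takes the lock and */
/* then calls the "_inner" variant, which is exactly the contract the new  */
/* assertion checks.  An approximate sketch of the usual GC_new_free_list  */
/* wrapper, assuming the standard bdwgc locking macros:                    */
GC_API void ** GC_CALL GC_new_free_list(void)
{
    void **result;
    DCL_LOCK_STATE;

    LOCK();
    result = GC_new_free_list_inner();
    UNLOCK();
    return result;
}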