From b7b1004a2aedaee00bc7baefd191268dbe9574a8 Mon Sep 17 00:00:00 2001
From: Ivan Maidanski
Date: Thu, 22 Oct 2015 01:52:39 +0300
Subject: [PATCH] Add assertion on lock status to GC_alloc_large and its
 callers (code refactoring)

* alloc.c (GC_try_to_collect_inner): Remove comment about expected
lock status; add assertion about holding the allocation lock.
* finalize.c (GC_grow_table): Likewise.
* malloc.c (GC_alloc_large, GC_alloc_large_and_clear,
GC_generic_malloc_inner, GC_generic_malloc_inner_ignore_off_page):
Likewise.
* misc.c (GC_new_free_list_inner): Likewise.
---
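Note for reviewers (this annotation sits below the "---" marker and is
not part of the commit itself): GC_ASSERT compiles to nothing unless
the collector is built with assertions enabled (e.g. configuring with
--enable-gc-assertions, which defines GC_ASSERTIONS), so the new checks
add no cost to ordinary builds.  The sketch below is an illustration,
not part of the patch: it shows the locking discipline the assertions
enforce, namely that the *_inner allocation routines never take the
allocation lock themselves, so code inside the collector must hold it
across the call.  my_kind_alloc is a hypothetical helper; LOCK, UNLOCK
and I_HOLD_LOCK are the collector's internal macros from
private/gc_priv.h (in single-threaded builds they typically reduce to
no-ops, with I_HOLD_LOCK() trivially true).

    #include "private/gc_priv.h"

    /* Hypothetical helper, for illustration only: acquire the       */
    /* allocation lock, call the inner routine, release the lock.    */
    /* Without the LOCK()/UNLOCK() pair, an assertion-enabled build  */
    /* would now abort inside GC_generic_malloc_inner.               */
    void * my_kind_alloc(size_t lb, int kind)
    {
      void *result;

      LOCK();                   /* I_HOLD_LOCK() becomes true here */
      result = GC_generic_malloc_inner(lb, kind);
      UNLOCK();
      return result;
    }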
 alloc.c    |  9 ++++-----
 finalize.c |  9 ++++++---
 malloc.c   | 18 +++++++++---------
 misc.c     |  8 +++++---
 4 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/alloc.c b/alloc.c
index 220ce866..f043f059 100644
--- a/alloc.c
+++ b/alloc.c
@@ -428,11 +428,9 @@ GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
     return fn;
 }
 
-/*
- * Stop the world garbage collection.  Assumes lock held.  If stop_func is
- * not GC_never_stop_func then abort if stop_func returns TRUE.
- * Return TRUE if we successfully completed the collection.
- */
+/* Stop the world garbage collection.  If stop_func is not             */
+/* GC_never_stop_func then abort if stop_func returns TRUE.            */
+/* Return TRUE if we successfully completed the collection.            */
 GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
 {
 #   ifndef SMALL_CONFIG
@@ -440,6 +438,7 @@ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
       CLOCK_TYPE current_time;
 #   endif
     ASSERT_CANCEL_DISABLED();
+    GC_ASSERT(I_HOLD_LOCK());
     if (GC_dont_gc || (*stop_func)()) return FALSE;
     if (GC_on_collection_event)
       GC_on_collection_event(GC_EVENT_START);
diff --git a/finalize.c b/finalize.c
index 73e50a37..192de4de 100644
--- a/finalize.c
+++ b/finalize.c
@@ -103,10 +103,13 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
     word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
     word new_size = (word)1 << log_new_size;
     /* FIXME: Power of 2 size often gets rounded up to one more page. */
-    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
-        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
-            (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
+    struct hash_chain_entry **new_table;
+    GC_ASSERT(I_HOLD_LOCK());
+    new_table = (struct hash_chain_entry **)
+        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
+            (size_t)new_size * sizeof(struct hash_chain_entry *),
+            NORMAL);
     if (new_table == 0) {
       if (*table == 0) {
         ABORT("Insufficient space for initial table allocation");
diff --git a/malloc.c b/malloc.c
index 4bc0a613..0f35fe24 100644
--- a/malloc.c
+++ b/malloc.c
@@ -37,7 +37,6 @@ GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
 /* Allocate a large block of size lb bytes.                            */
 /* The block is not cleared.                                           */
 /* Flags is 0 or IGNORE_OFF_PAGE.                                      */
-/* We hold the allocation lock.                                        */
 /* EXTRA_BYTES were already added to lb.                               */
 GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
 {
@@ -46,6 +45,7 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
     struct hblk * h;
     word n_blocks;
     ptr_t result;
     GC_bool retry = FALSE;
 
+    GC_ASSERT(I_HOLD_LOCK());
     lb = ROUNDUP_GRANULE_SIZE(lb);
     n_blocks = OBJ_SZ_TO_BLOCKS(lb);
     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
@@ -79,15 +79,16 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
 }
 
 /* Allocate a large block of size lb bytes.  Clear if appropriate.     */
-/* We hold the allocation lock.                                        */
 /* EXTRA_BYTES were already added to lb.                               */
 STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
 {
-    ptr_t result = GC_alloc_large(lb, k, flags);
+    ptr_t result;
     word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
 
-    if (0 == result) return 0;
-    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
+    GC_ASSERT(I_HOLD_LOCK());
+    result = GC_alloc_large(lb, k, flags);
+    if (result != NULL
+        && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
       /* Clear the whole block, in case of GC_realloc call. */
       BZERO(result, n_blocks * HBLKSIZE);
     }
@@ -98,12 +99,11 @@ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
 /* Should not be used to directly to allocate  */
 /* objects such as STUBBORN objects that       */
 /* require special handling on allocation.     */
-/* First a version that assumes we already     */
-/* hold lock:                                  */
 GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
 {
     void *op;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if(SMALL_OBJ(lb)) {
         struct obj_kind * kind = GC_obj_kinds + k;
         size_t lg = GC_size_map[lb];
@@ -146,13 +146,13 @@ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
 }
 
 /* Allocate a composite object of size n bytes.  The caller guarantees */
-/* that pointers past the first page are not relevant.  Caller holds   */
-/* allocation lock.                                                    */
+/* that pointers past the first page are not relevant.                 */
 GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
 {
     word lb_adjusted;
     void * op;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (lb <= HBLKSIZE) return(GC_generic_malloc_inner(lb, k));
     lb_adjusted = ADD_SLOP(lb);
diff --git a/misc.c b/misc.c
index 88885e30..17f65cf6 100644
--- a/misc.c
+++ b/misc.c
@@ -1810,9 +1810,11 @@ GC_API int GC_CALL GC_is_disabled(void)
 /* Helper procedures for new kind creation. */
 GC_API void ** GC_CALL GC_new_free_list_inner(void)
 {
-    void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
-                                      PTRFREE);
-    if (result == 0) ABORT("Failed to allocate freelist for new kind");
+    void *result;
+
+    GC_ASSERT(I_HOLD_LOCK());
+    result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1) * sizeof(ptr_t), PTRFREE);
+    if (NULL == result) ABORT("Failed to allocate freelist for new kind");
     BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
     return result;
 }
-- 
2.40.0