From f9c8aa30003f00ee27f4df0a34decf77a2918d4f Mon Sep 17 00:00:00 2001
From: Ivan Maidanski
Date: Tue, 20 Sep 2016 00:07:47 +0300
Subject: [PATCH] Fix malloc routines to prevent size value wrap-around

See issue #135 on Github.

* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page, GC_debug_malloc_stubborn,
GC_debug_malloc_atomic, GC_debug_malloc_uncollectable,
GC_debug_malloc_atomic_uncollectable): Use SIZET_SAT_ADD (instead of
"+" operator) to add extra bytes to lb value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE, ROUNDUP_PAGESIZE_IF_MMAP):
Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to size_t.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only if
no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantee no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
---
 allchblk.c                 |  4 +--
 alloc.c                    | 15 ++++++-----
 dbg_mlc.c                  | 25 ++++++++++--------
 fnlz_mlc.c                 |  2 +-
 gcj_mlc.c                  |  3 ++-
 headers.c                  |  2 +-
 include/private/gc_priv.h  | 46 +++++++++++++++++++++++----------
 include/private/gcconfig.h | 34 ++++++++++++++----------
 malloc.c                   | 16 +++----------
 mallocx.c                  | 11 ++++----
 misc.c                     |  6 ++---
 os_dep.c                   | 53 +++++++++++++++++++++-----------------
 typd_mlc.c                 | 18 ++++---------
 13 files changed, 128 insertions(+), 107 deletions(-)

diff --git a/allchblk.c b/allchblk.c
index 387aa447..638e6bbe 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -583,7 +583,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
                         /* split.                                      */
 
     GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
-    blocks = OBJ_SZ_TO_BLOCKS(sz);
+    blocks = OBJ_SZ_TO_BLOCKS_CHECKED(sz);
     if ((signed_word)(blocks * HBLKSIZE) < 0) {
       return 0;
     }
@@ -646,7 +646,7 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
     signed_word size_needed;    /* number of bytes in requested objects */
     signed_word size_avail;     /* bytes available in this block        */
 
-    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
+    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
 
     /* search for a big enough block in free list */
     for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
diff --git a/alloc.c b/alloc.c
index 3da99025..99247669 100644
--- a/alloc.c
+++ b/alloc.c
@@ -1240,25 +1240,28 @@ GC_word GC_max_retries = 0;
 /* Returns FALSE on failure.  */
 GC_INNER GC_bool GC_expand_hp_inner(word n)
 {
-    word bytes;
+    size_t bytes;
     struct hblk * space;
     word expansion_slop;        /* Number of bytes by which we expect the */
                                 /* heap to expand soon.                   */
 
     if (n < MINHINCR) n = MINHINCR;
-    bytes = ROUNDUP_PAGESIZE(n * HBLKSIZE);
-    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
+    bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
+    if (GC_max_heapsize != 0
+        && (GC_max_heapsize < (word)bytes
+            || GC_heapsize > GC_max_heapsize - (word)bytes)) {
         /* Exceeded self-imposed limit */
         return(FALSE);
     }
     space = GET_MEM(bytes);
     GC_add_to_our_memory((ptr_t)space, bytes);
     if (space == 0) {
-        WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n", bytes);
+        WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n",
+             (word)bytes);
         return(FALSE);
     }
     GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
-                      TO_KiB_UL(GC_heapsize + bytes),
+                      TO_KiB_UL(GC_heapsize + (word)bytes),
                       (unsigned long)GC_bytes_allocd);
     /* Adjust heap limits generously for blacklisting to work better.   */
     /* GC_add_to_heap performs minimal adjustment needed for            */
@@ -1268,7 +1271,7 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
         || (GC_last_heap_addr != 0
             && (word)GC_last_heap_addr < (word)space)) {
         /* Assume the heap is growing up */
-        word new_limit = (word)space + bytes + expansion_slop;
+        word new_limit = (word)space + (word)bytes + expansion_slop;
         if (new_limit > (word)space) {
           GC_greatest_plausible_heap_addr =
             (void *)GC_max((word)GC_greatest_plausible_heap_addr,
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 02e36def..614ac8c7 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -515,7 +515,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc(size_t lb,
     /* Note that according to malloc() specification, if size is 0 then */
     /* malloc() returns either NULL, or a unique pointer value that can */
     /* later be successfully passed to free(). We always do the latter. */
-    result = GC_malloc(lb + DEBUG_BYTES);
+    result = GC_malloc(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 #   ifdef GC_ADD_CALLER
       if (s == NULL) {
         GC_caller_func_offset(ra, &s, &i);
@@ -536,7 +536,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc(size_t lb,
 GC_API GC_ATTR_MALLOC void * GC_CALL
     GC_debug_malloc_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
 {
-    void * result = GC_malloc_ignore_off_page(lb + DEBUG_BYTES);
+    void * result = GC_malloc_ignore_off_page(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_ignore_off_page(%lu)"
@@ -553,7 +553,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
 GC_API GC_ATTR_MALLOC void * GC_CALL
     GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
 {
-    void * result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
+    void * result = GC_malloc_atomic_ignore_off_page(
+                                SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
@@ -569,7 +570,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
 
 STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 {
-    void * result = GC_generic_malloc(lb + DEBUG_BYTES, knd);
+    void * result = GC_generic_malloc(SIZET_SAT_ADD(lb, DEBUG_BYTES), knd);
 
     if (NULL == result) {
         GC_err_printf(
@@ -592,7 +593,8 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 /* we already hold the GC lock.                                        */
 GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k)
 {
-    void * result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
+    void * result = GC_generic_malloc_inner(
+                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
 
     if (result == 0) {
         GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
@@ -610,7 +612,7 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
                                                                 int k)
 {
     void * result = GC_generic_malloc_inner_ignore_off_page(
-                                                lb + DEBUG_BYTES, k);
+                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
 
     if (result == 0) {
         GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
@@ -629,7 +631,7 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_stubborn(size_t lb,
                                                               GC_EXTRA_PARAMS)
 {
-    void * result = GC_malloc_stubborn(lb + DEBUG_BYTES);
+    void * result = GC_malloc_stubborn(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_stubborn(%lu)"
@@ -692,7 +694,7 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_atomic(size_t lb,
                                                             GC_EXTRA_PARAMS)
 {
-    void * result = GC_malloc_atomic(lb + DEBUG_BYTES);
+    void * result = GC_malloc_atomic(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_atomic(%lu) returning NULL (%s:%d)\n",
@@ -770,7 +772,8 @@ GC_API GC_ATTR_MALLOC char * GC_CALL GC_debug_strndup(const char *str,
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_uncollectable(size_t lb,
                                                                 GC_EXTRA_PARAMS)
 {
-    void * result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
+    void * result = GC_malloc_uncollectable(
+                                SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_uncollectable(%lu)"
@@ -788,8 +791,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_uncollectable(size_t lb,
 GC_API GC_ATTR_MALLOC void * GC_CALL
     GC_debug_malloc_atomic_uncollectable(size_t lb, GC_EXTRA_PARAMS)
 {
-    void * result =
-        GC_malloc_atomic_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
+    void * result = GC_malloc_atomic_uncollectable(
+                                SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
 
     if (result == 0) {
         GC_err_printf("GC_debug_malloc_atomic_uncollectable(%lu)"
diff --git a/fnlz_mlc.c b/fnlz_mlc.c
index c6ba7aca..fdab2124 100644
--- a/fnlz_mlc.c
+++ b/fnlz_mlc.c
@@ -89,7 +89,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_finalized_malloc(size_t lb,
     word *op;
 
     GC_ASSERT(done_init);
-    op = GC_malloc_kind(lb + sizeof(word), GC_finalized_kind);
+    op = GC_malloc_kind(SIZET_SAT_ADD(lb, sizeof(word)), GC_finalized_kind);
     if (EXPECT(NULL == op, FALSE))
         return NULL;
     *op = (word)fclos | FINALIZER_CLOSURE_FLAG;
diff --git a/gcj_mlc.c b/gcj_mlc.c
index 50a99bf9..787a3c53 100644
--- a/gcj_mlc.c
+++ b/gcj_mlc.c
@@ -209,7 +209,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_gcj_malloc(size_t lb,
     /* confuse the backtrace.                                   */
     LOCK();
     maybe_finalize();
-    result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
+    result = GC_generic_malloc_inner(SIZET_SAT_ADD(lb, DEBUG_BYTES),
+                                     GC_gcj_debug_kind);
     if (result == 0) {
         GC_oom_func oom_fn = GC_oom_fn;
         UNLOCK();
diff --git a/headers.c b/headers.c
index 057d989f..c8f7f640 100644
--- a/headers.c
+++ b/headers.c
@@ -118,7 +118,7 @@ static ptr_t scratch_free_ptr = 0;
 GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
 {
     ptr_t result = scratch_free_ptr;
-    word bytes_to_get;
+    size_t bytes_to_get;
 
     bytes = ROUNDUP_GRANULE_SIZE(bytes);
     for (;;) {
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index a9b05514..2d4631a4 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -90,6 +90,20 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
                         /* byte displacements and which can be used    */
                         /* for address comparisons.                    */
 
+#ifndef SIZE_MAX
+# include <limits.h>
+#endif
+#ifdef SIZE_MAX
+# define GC_SIZE_MAX SIZE_MAX
+#else
+# define GC_SIZE_MAX (~(size_t)0)
+#endif
+
+/* Saturated addition of size_t values.  Used to avoid value wrap      */
+/* around on overflow.  The arguments should have no side effects.     */
+#define SIZET_SAT_ADD(a, b) \
+            ((a) < GC_SIZE_MAX - (b) ? (a) + (b) : GC_SIZE_MAX)
+
 #ifndef GCCONFIG_H
 # include "gcconfig.h"
 #endif
@@ -285,9 +299,9 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 # ifdef LINT2
     /* Explicitly instruct the code analysis tool that                 */
     /* GC_all_interior_pointers is assumed to have only 0 or 1 value.  */
-#   define EXTRA_BYTES (GC_all_interior_pointers? 1 : 0)
+#   define EXTRA_BYTES ((size_t)(GC_all_interior_pointers? 1 : 0))
 # else
-#   define EXTRA_BYTES GC_all_interior_pointers
+#   define EXTRA_BYTES (size_t)GC_all_interior_pointers
 # endif
 # define MAX_EXTRA_BYTES 1
 #else
@@ -757,6 +771,7 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
 # define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
 # define HBLKSIZE ((size_t)CPP_HBLKSIZE)
 
+#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
 
 /* Max size objects supported by freelist (larger objects are  */
 /* allocated directly with allchblk(), by rounding to the next */
@@ -785,12 +800,12 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
 # define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
 
 /* Round up allocation size (in bytes) to a multiple of a granule.     */
-#define ROUNDUP_GRANULE_SIZE(bytes) \
-                (((bytes) + (GRANULE_BYTES - 1)) & ~(GRANULE_BYTES - 1))
+#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
+                (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))
 
 /* Round up byte allocation requests to integral number of words, etc. */
-# define ROUNDED_UP_GRANULES(n) \
-        BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
+# define ROUNDED_UP_GRANULES(lb) /* lb should have no side-effect */ \
+        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))
 # if MAX_EXTRA_BYTES == 0
 #   define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), TRUE)
 # else
@@ -800,7 +815,8 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
     /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.      */
    /* But we try to avoid looking up EXTRA_BYTES.                      */
 # endif
-# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
+# define ADD_SLOP(lb) /* lb should have no side-effect */ \
+                SIZET_SAT_ADD(lb, EXTRA_BYTES)
 # ifndef MIN_WORDS
 #   define MIN_WORDS 2 /* FIXME: obsolete */
 # endif
@@ -1018,9 +1034,11 @@ struct hblk {
 
 # define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
 
-# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ((sz) + HBLKSIZE-1)
+# define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
+# define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
+                                divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))
     /* Size of block (in units of HBLKSIZE) needed to hold objects of   */
-    /* given sz (in bytes).                                             */
+    /* given lb (in bytes).  The checked variant prevents wrap around.  */
 
 /* Object free list link */
 # define obj_link(p) (*(void **)(p))
@@ -1422,18 +1440,18 @@ GC_EXTERN word GC_n_heap_sects;  /* Number of separately added heap      */
                                  /* sections.                            */
 #endif
 
-GC_EXTERN word GC_page_size;
+GC_EXTERN size_t GC_page_size;
 
 /* Round up allocation size to a multiple of a page size.      */
 /* GC_setpagesize() is assumed to be already invoked.          */
-#define ROUNDUP_PAGESIZE(bytes) \
-                (((bytes) + GC_page_size - 1) & ~(GC_page_size - 1))
+#define ROUNDUP_PAGESIZE(lb) /* lb should have no side-effect */ \
+                (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1))
 
 /* Same as above but used to make GET_MEM() argument safe.     */
 #ifdef MMAP_SUPPORTED
-# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) ROUNDUP_PAGESIZE(bytes)
+# define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb)
 #else
-# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) (bytes)
+# define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb)
 #endif
 
 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index ee84664e..bd556087 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -3169,7 +3169,8 @@
   /* usually makes it possible to merge consecutively allocated        */
   /* chunks.  It also avoids unintended recursion with                 */
   /* REDIRECT_MALLOC macro defined.                                    */
-  /* GET_MEM() returns a HLKSIZE aligned chunk.                        */
+  /* GET_MEM() argument should be of size_t type and have              */
+  /* no side-effect.  GET_MEM() returns HBLKSIZE-aligned chunk;        */
   /* 0 is taken to mean failure.                                       */
   /* In case of MMAP_SUPPORTED, the argument must also be              */
   /* a multiple of a physical page size.                               */
@@ -3179,45 +3180,52 @@
 struct hblk;    /* See gc_priv.h.       */
 # if defined(PCR)
     char * real_malloc(size_t bytes);
-#   define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)(bytes) + GC_page_size) \
+#   define GET_MEM(bytes) HBLKPTR(real_malloc(SIZET_SAT_ADD(bytes, \
+                                                            GC_page_size)) \
                                   + GC_page_size-1)
 # elif defined(OS2)
     void * os2_alloc(size_t bytes);
-#   define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)(bytes) \
-                                  + GC_page_size) + GC_page_size-1)
+#   define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc( \
+                                            SIZET_SAT_ADD(bytes, \
+                                                          GC_page_size)) \
+                                  + GC_page_size-1)
 # elif defined(NEXT) || defined(DOS4GW) || defined(NONSTOP) \
        || (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) \
        || (defined(SOLARIS) && !defined(USE_MMAP)) || defined(RTEMS) \
       || defined(__CC_ARM)
 #   define GET_MEM(bytes) HBLKPTR((size_t)calloc(1, \
-                                          (size_t)(bytes) + GC_page_size) \
+                                            SIZET_SAT_ADD(bytes, \
+                                                          GC_page_size)) \
                                   + GC_page_size - 1)
 # elif defined(MSWIN32) || defined(CYGWIN32)
-  ptr_t GC_win32_get_mem(GC_word bytes);
+  ptr_t GC_win32_get_mem(size_t bytes);
 # define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
 # elif defined(MACOS)
 #   if defined(USE_TEMPORARY_MEMORY)
       Ptr GC_MacTemporaryNewPtr(size_t size, Boolean clearMemory);
-#     define GET_MEM(bytes) HBLKPTR( \
-                        GC_MacTemporaryNewPtr((bytes) + GC_page_size, true) \
+#     define GET_MEM(bytes) HBLKPTR(GC_MacTemporaryNewPtr( \
+                                            SIZET_SAT_ADD(bytes, \
+                                                          GC_page_size), true) \
                         + GC_page_size-1)
 #   else
-#     define GET_MEM(bytes) HBLKPTR(NewPtrClear((bytes) + GC_page_size) \
+#     define GET_MEM(bytes) HBLKPTR(NewPtrClear(SIZET_SAT_ADD(bytes, \
+                                            GC_page_size)) \
                         + GC_page_size-1)
 #   endif
 # elif defined(MSWINCE)
-  ptr_t GC_wince_get_mem(GC_word bytes);
+  ptr_t GC_wince_get_mem(size_t bytes);
 # define GET_MEM(bytes) (struct hblk *)GC_wince_get_mem(bytes)
 # elif defined(AMIGA) && defined(GC_AMIGA_FASTALLOC)
   void *GC_amiga_get_mem(size_t bytes);
-# define GET_MEM(bytes) HBLKPTR((size_t) \
-                          GC_amiga_get_mem((size_t)(bytes) + GC_page_size) \
+# define GET_MEM(bytes) HBLKPTR((size_t)GC_amiga_get_mem( \
+                                            SIZET_SAT_ADD(bytes, \
+                                                          GC_page_size)) \
                           + GC_page_size-1)
 # elif defined(SN_TARGET_PS3)
   void *ps3_get_mem(size_t bytes);
 # define GET_MEM(bytes) (struct hblk*)ps3_get_mem(bytes)
 # else
-  ptr_t GC_unix_get_mem(GC_word bytes);
+  ptr_t GC_unix_get_mem(size_t bytes);
 # define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
 # endif
 #endif /* GC_PRIVATE_H */
diff --git a/malloc.c b/malloc.c
index adf5cc10..9b72114a 100644
--- a/malloc.c
+++ b/malloc.c
@@ -48,7 +48,7 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
 
     GC_ASSERT(I_HOLD_LOCK());
     lb = ROUNDUP_GRANULE_SIZE(lb);
-    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
+    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb);
     if (!EXPECT(GC_is_initialized, TRUE)) {
       DCL_LOCK_STATE;
       UNLOCK(); /* just to unset GC_lock_holder */
@@ -89,12 +89,13 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
 STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
 {
     ptr_t result;
-    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
 
     GC_ASSERT(I_HOLD_LOCK());
     result = GC_alloc_large(lb, k, flags);
     if (result != NULL
           && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
+        word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
+
         /* Clear the whole block, in case of GC_realloc call. */
         BZERO(result, n_blocks * HBLKSIZE);
     }
@@ -205,8 +206,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
 
         lg = ROUNDED_UP_GRANULES(lb);
         lb_rounded = GRANULES_TO_BYTES(lg);
-        if (lb_rounded < lb)
-            return((*GC_get_oom_fn())(lb));
         n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
         init = GC_obj_kinds[k].ok_init;
         LOCK();
@@ -418,15 +417,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
   }
 # endif /* GC_LINUX_THREADS */
 
-# include <limits.h>
-# ifdef SIZE_MAX
-#   define GC_SIZE_MAX SIZE_MAX
-# else
-#   define GC_SIZE_MAX (~(size_t)0)
-# endif
-
-# define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
-
 void * calloc(size_t n, size_t lb)
 {
     if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
diff --git a/mallocx.c b/mallocx.c
index 4d697e99..740650a3 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -188,8 +188,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
     GC_ASSERT(k < MAXOBJKINDS);
     lg = ROUNDED_UP_GRANULES(lb);
     lb_rounded = GRANULES_TO_BYTES(lg);
-    if (lb_rounded < lb)
-        return((*GC_get_oom_fn())(lb));
     n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
     init = GC_obj_kinds[k].ok_init;
     if (EXPECT(GC_have_errors, FALSE))
@@ -446,8 +444,11 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
 {
     void *result;
 
-    GC_generic_malloc_many(ROUNDUP_GRANULE_SIZE(lb + EXTRA_BYTES),
-                           NORMAL, &result);
+    /* Add EXTRA_BYTES and round up to a multiple of a granule. */
+    lb = SIZET_SAT_ADD(lb, EXTRA_BYTES + GRANULE_BYTES - 1)
+            & ~(GRANULE_BYTES - 1);
+
+    GC_generic_malloc_many(lb, NORMAL, &result);
     return result;
 }
 
@@ -471,7 +472,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
     }
     /* We could also try to make sure that the real rounded-up object size */
     /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
-    new_lb = lb + align - 1;
+    new_lb = SIZET_SAT_ADD(lb, align - 1);
     result = GC_malloc(new_lb);
     /* It is OK not to check result for NULL as in that case    */
     /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
diff --git a/misc.c b/misc.c
index 9e480dc9..d0b3400d 100644
--- a/misc.c
+++ b/misc.c
@@ -230,13 +230,13 @@ GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED)
 /* quantization algorithm (but we precompute it).                      */
 STATIC void GC_init_size_map(void)
 {
-    int i;
+    size_t i;
 
     /* Map size 0 to something bigger.                  */
     /* This avoids problems at lower levels.            */
     GC_size_map[0] = 1;
     for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
-        GC_size_map[i] = (unsigned)ROUNDED_UP_GRANULES(i);
+        GC_size_map[i] = ROUNDED_UP_GRANULES(i);
 #       ifndef _MSC_VER
           GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
           /* Seems to tickle bug in VC++ 2008 for AMD64 */
@@ -684,7 +684,7 @@ GC_API void GC_CALL GC_get_heap_usage_safe(GC_word *pheap_size,
     }
     /* At this execution point, GC_setpagesize() and GC_init_win32()   */
     /* must already be called (for GET_MEM() to work correctly).       */
-    content = (char *)GET_MEM(ROUNDUP_PAGESIZE_IF_MMAP(len + 1));
+    content = (char *)GET_MEM(ROUNDUP_PAGESIZE_IF_MMAP((size_t)len + 1));
     if (content == NULL) {
       CloseHandle(hFile);
       return; /* allocation failure */
diff --git a/os_dep.c b/os_dep.c
index 6dbcd2f2..9a489125 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -682,7 +682,7 @@ struct o32_obj {
 # endif  /* OS/2 */
 
 /* Find the page size */
-GC_INNER word GC_page_size = 0;
+GC_INNER size_t GC_page_size = 0;
 
 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
 # ifndef VER_PLATFORM_WIN32_CE
@@ -698,7 +698,7 @@ GC_INNER word GC_page_size = 0;
   GC_INNER void GC_setpagesize(void)
   {
     GetSystemInfo(&GC_sysinfo);
-    GC_page_size = GC_sysinfo.dwPageSize;
+    GC_page_size = (size_t)GC_sysinfo.dwPageSize;
 #   if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
       {
         OSVERSIONINFO verInfo;
@@ -789,7 +789,7 @@ GC_INNER word GC_page_size = 0;
   GC_INNER void GC_setpagesize(void)
   {
 #   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
-      GC_page_size = GETPAGESIZE();
+      GC_page_size = (size_t)GETPAGESIZE();
       if (!GC_page_size) ABORT("getpagesize failed");
 #   else
       /* It's acceptable to fake it.    */
@@ -2032,7 +2032,7 @@ void GC_register_data_segments(void)
   extern char* GC_get_private_path_and_zero_file(void);
 #endif
 
-STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
+STATIC ptr_t GC_unix_mmap_get_mem(size_t bytes)
 {
     void *result;
     static ptr_t last_addr = HEAP_START;
@@ -2064,13 +2064,14 @@ STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
 
 #   undef IGNORE_PAGES_EXECUTABLE
     if (result == MAP_FAILED) return(0);
-    last_addr = (ptr_t)ROUNDUP_PAGESIZE((word)result + bytes);
+    last_addr = (ptr_t)(((word)result + bytes + GC_page_size - 1)
+                        & ~(GC_page_size - 1));
 #   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't      */
        /* usable by arbitrary C code, since one-past-end pointers      */
        /* don't work, so we discard it and try again.                  */
-       munmap(result, (size_t)(-GC_page_size) - (size_t)result);
+       munmap(result, ~GC_page_size - (size_t)result + 1);
                        /* Leave last page mapped, so we can't repeat.  */
        return GC_unix_mmap_get_mem(bytes);
      }
@@ -2086,13 +2087,13 @@ STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
 # endif  /* MMAP_SUPPORTED */
 
 #if defined(USE_MMAP)
-  ptr_t GC_unix_get_mem(word bytes)
+  ptr_t GC_unix_get_mem(size_t bytes)
   {
     return GC_unix_mmap_get_mem(bytes);
   }
 #else /* !USE_MMAP */
 
-STATIC ptr_t GC_unix_sbrk_get_mem(word bytes)
+STATIC ptr_t GC_unix_sbrk_get_mem(size_t bytes)
 {
   ptr_t result;
 # ifdef IRIX5
@@ -2109,7 +2110,7 @@ STATIC ptr_t GC_unix_sbrk_get_mem(word bytes)
         goto out;
     }
     if (lsbs != 0) {
-        if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
+        if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs) == (ptr_t)(-1)) {
             result = 0;
             goto out;
         }
@@ -2133,7 +2134,7 @@ STATIC ptr_t GC_unix_sbrk_get_mem(word bytes)
     return(result);
 }
 
-ptr_t GC_unix_get_mem(word bytes)
+ptr_t GC_unix_get_mem(size_t bytes)
 {
 # if defined(MMAP_SUPPORTED)
     /* By default, we try both sbrk and mmap, in that order.   */
@@ -2179,7 +2180,7 @@ void * os2_alloc(size_t bytes)
 # endif /* OS2 */
 
 #ifdef MSWINCE
-  ptr_t GC_wince_get_mem(word bytes)
+  ptr_t GC_wince_get_mem(size_t bytes)
   {
     ptr_t result = 0; /* initialized to prevent warning. */
     word i;
@@ -2198,8 +2199,9 @@ void * os2_alloc(size_t bytes)
 
     if (i == GC_n_heap_bases) {
       /* Reserve more pages */
-      word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
-                       & ~(GC_sysinfo.dwAllocationGranularity-1);
+      size_t res_bytes =
+        SIZET_SAT_ADD(bytes, (size_t)GC_sysinfo.dwAllocationGranularity-1)
+        & ~((size_t)GC_sysinfo.dwAllocationGranularity-1);
       /* If we ever support MPROTECT_VDB here, we will probably need to    */
       /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
      /* never spans regions.  It seems to be OK for a VirtualFree          */
@@ -2249,7 +2251,7 @@ void * os2_alloc(size_t bytes)
 #   define GC_mem_top_down 0
 # endif /* !GC_USE_MEM_TOP_DOWN */
 
-  ptr_t GC_win32_get_mem(word bytes)
+  ptr_t GC_win32_get_mem(size_t bytes)
   {
     ptr_t result;
 
@@ -2261,8 +2263,8 @@ void * os2_alloc(size_t bytes)
       /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.     */
      /* There are also unconfirmed rumors of other             */
      /* problems, so we dodge the issue.                       */
-      result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
-      result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
+      result = (ptr_t)(((word)GlobalAlloc(0, SIZET_SAT_ADD(bytes, HBLKSIZE))
+                                + HBLKSIZE - 1) & ~(HBLKSIZE - 1));
     } else
 # endif /* else */
     {
@@ -2291,12 +2293,13 @@ void * os2_alloc(size_t bytes)
       /* available.  Otherwise we waste resources or possibly  */
      /* cause VirtualAlloc to fail (observed in Windows 2000   */
      /* SP2).                                                  */
-      result = (ptr_t) VirtualAlloc(NULL, bytes + VIRTUAL_ALLOC_PAD,
-                                    GetWriteWatch_alloc_flag
+      result = (ptr_t) VirtualAlloc(NULL,
+                                    SIZET_SAT_ADD(bytes, VIRTUAL_ALLOC_PAD),
+                                    GetWriteWatch_alloc_flag
                                     | (MEM_COMMIT | MEM_RESERVE)
                                     | GC_mem_top_down,
-                                    GC_pages_executable ? PAGE_EXECUTE_READWRITE :
-                                                          PAGE_READWRITE);
+                                    GC_pages_executable ? PAGE_EXECUTE_READWRITE :
+                                                          PAGE_READWRITE);
 #     undef IGNORE_PAGES_EXECUTABLE
     }
 # endif /* USE_WINALLOC */
@@ -2359,7 +2362,8 @@ void * os2_alloc(size_t bytes)
 /* Return 0 if the block is too small to make this feasible.   */
 STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
 {
-    ptr_t result = (ptr_t)ROUNDUP_PAGESIZE((word)start);
+    ptr_t result = (ptr_t)(((word)start + GC_page_size - 1)
+                            & ~(GC_page_size - 1));
 
     if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
     return result;
@@ -3132,7 +3136,7 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
         char * addr = (char *) (exc_info -> ExceptionRecord
                                 -> ExceptionInformation[1]);
 #   endif
-    unsigned i;
+    size_t i;
 
     if (SIG_OK && CODE_OK) {
       register struct hblk * h =
@@ -3263,7 +3267,8 @@ GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
 #   endif
     if (!GC_dirty_maintained) return;
     h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
-    h_end = (struct hblk *)ROUNDUP_PAGESIZE((word)(h + nblocks));
+    h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size - 1)
+                            & ~(GC_page_size - 1));
     if (h_end == h_trunc + 1 &&
       get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
       /* already marked dirty, and hence unprotected. */
*/ @@ -4197,7 +4202,7 @@ catch_exception_raise(mach_port_t exception_port GC_ATTR_UNUSED, kern_return_t r; char *addr; struct hblk *h; - unsigned int i; + size_t i; thread_state_flavor_t flavor = DARWIN_EXC_STATE; mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT; DARWIN_EXC_STATE_T exc_state; diff --git a/typd_mlc.c b/typd_mlc.c index 01a2ddd4..b0699c5f 100644 --- a/typd_mlc.c +++ b/typd_mlc.c @@ -584,7 +584,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_explicitly_typed(size_t lb, size_t lg; GC_ASSERT(GC_explicit_typing_initialized); - lb += TYPD_EXTRA_BYTES; + lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES); op = GC_malloc_kind(lb, GC_explicit_kind); if (EXPECT(NULL == op, FALSE)) return NULL; @@ -601,7 +601,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL DCL_LOCK_STATE; GC_ASSERT(GC_explicit_typing_initialized); - lb += TYPD_EXTRA_BYTES; + lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES); if (SMALL_OBJ(lb)) { GC_DBG_COLLECT_AT_MALLOC(lb); lg = GC_size_map[lb]; @@ -629,15 +629,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL return((void *) op); } -#include -#ifdef SIZE_MAX -# define GC_SIZE_MAX SIZE_MAX -#else -# define GC_SIZE_MAX (~(size_t)0) -#endif - -#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1) - GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n, size_t lb, GC_descr d) { @@ -660,10 +651,11 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n, case SIMPLE: return GC_malloc_explicitly_typed(lb, simple_descr); case LEAF: - lb += sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES; + lb = SIZET_SAT_ADD(lb, + sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES); break; case COMPLEX: - lb += TYPD_EXTRA_BYTES; + lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES); break; } op = GC_malloc_kind(lb, GC_array_kind); -- 2.40.0