op = GC_finalized_objfreelist[lg];
if (EXPECT(0 == op, FALSE)) {
UNLOCK();
- op = GC_generic_malloc((word)lb, GC_finalized_kind);
+ op = GC_generic_malloc(lb, GC_finalized_kind);
if (NULL == op)
return NULL;
/* GC_generic_malloc has extended the size map for us. */
} else {
size_t op_sz;
- op = GC_generic_malloc((word)lb, GC_finalized_kind);
+ op = GC_generic_malloc(lb, GC_finalized_kind);
if (NULL == op)
return NULL;
op_sz = GC_size(op);
GC_DBG_COLLECT_AT_MALLOC(lb);
if (SMALL_OBJ(lb)) {
LOCK();
- result = GC_generic_malloc_inner((word)lb, k);
+ result = GC_generic_malloc_inner(lb, k);
UNLOCK();
} else {
size_t lg;
UNLOCK();
} else {
UNLOCK();
- op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ op = GC_generic_malloc(lb, UNCOLLECTABLE);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
- return((void *) op);
} else {
hdr * hhdr;
- op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ op = GC_generic_malloc(lb, UNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
# endif
hhdr -> hb_n_marks = 1;
UNLOCK();
- return((void *) op);
}
+ return op;
}
#ifdef REDIRECT_MALLOC
switch(knd) {
# ifdef STUBBORN_ALLOC
case STUBBORN:
- return(GC_malloc_stubborn((size_t)lb));
+ return GC_malloc_stubborn(lb);
# endif
case PTRFREE:
- return(GC_malloc_atomic((size_t)lb));
+ return GC_malloc_atomic(lb);
case NORMAL:
- return(GC_malloc((size_t)lb));
+ return GC_malloc(lb);
case UNCOLLECTABLE:
- return(GC_malloc_uncollectable((size_t)lb));
+ return GC_malloc_uncollectable(lb);
# ifdef ATOMIC_UNCOLLECTABLE
case AUNCOLLECTABLE:
- return(GC_malloc_atomic_uncollectable((size_t)lb));
+ return GC_malloc_atomic_uncollectable(lb);
# endif /* ATOMIC_UNCOLLECTABLE */
default:
- return(GC_generic_malloc(lb,knd));
+ return GC_generic_malloc(lb, knd);
}
}
DCL_LOCK_STATE;
if (SMALL_OBJ(lb))
- return(GC_generic_malloc((word)lb, k));
+ return GC_generic_malloc(lb, k);
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
/* Allocate lb bytes of traced (pointer-containing, NORMAL kind)       */
/* storage for which the client promises to keep a pointer to or near  */
/* the object's beginning (so interior-pointer recognition past the    */
/* first page is not required).  Thin wrapper over                     */
/* GC_generic_malloc_ignore_off_page; returns NULL on failure.         */
GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, NORMAL);
}
/* Same as GC_malloc_ignore_off_page, but for pointer-free (PTRFREE    */
/* kind) storage: the collector will not scan the object's contents    */
/* for pointers.  Returns NULL on failure.                             */
GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, PTRFREE);
}
/* Increment GC_bytes_allocd from code that doesn't have direct access */