* src/atomic_ops_malloc.c (AO_malloc_large): Add assertion that the
stored size is greater than LOG_MAX_SIZE.
* src/atomic_ops_malloc.c (AO_malloc): Add assertions for log_size.
* tests/test_malloc.c (LOG_MAX_SIZE, CHUNK_SIZE): New macros (copied
from atomic_ops_malloc.c).
* tests/test_malloc.c (main): Call AO_free(NULL), AO_free(AO_malloc(0)),
AO_free(AO_malloc(CHUNK_SIZE-sizeof(AO_t)+1)); add comment.
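
For context: each object managed by atomic_ops_malloc.c carries a one-word
header at offset -sizeof(AO_t), holding the binary log of the block size for
small objects and the byte size itself (a multiple of CHUNK_SIZE) for large
ones. AO_free can dispatch correctly only if every stored large size exceeds
LOG_MAX_SIZE; the new assertion checks exactly that. A minimal standalone
sketch of the invariant (hypothetical helper names, not the library code):

  #include <assert.h>
  #include <stddef.h>

  #define LOG_MAX_SIZE 16
  #define CHUNK_SIZE ((size_t)1 << LOG_MAX_SIZE)

  /* Hypothetical dispatch test: a header word holding log_sz         */
  /* (at most LOG_MAX_SIZE) marks a small object; one holding a       */
  /* nonzero multiple of CHUNK_SIZE marks a large object.             */
  static int is_large(size_t header)
  {
    return header > LOG_MAX_SIZE;
  }

  int main(void)
  {
    size_t v;

    for (v = 0; v <= LOG_MAX_SIZE; v++)
      assert(!is_large(v));                 /* small-object tags */
    for (v = CHUNK_SIZE; v <= 8 * CHUNK_SIZE; v += CHUNK_SIZE)
      assert(is_large(v));                  /* large-object sizes */
    return 0;
  }
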
/* The header will force us to waste ALIGNMENT bytes, incl. header. */
/* Round to multiple of CHUNK_SIZE. */
sz = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1);
+ assert(sz > LOG_MAX_SIZE);
result = get_mmaped(sz);
if (result == 0) return 0;
result += ALIGNMENT;
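
For reference, the rounding above produces the smallest multiple of
CHUNK_SIZE with room for the request plus the ALIGNMENT-byte header
(CHUNK_SIZE is a power of two, so masking with ~(CHUNK_SIZE - 1) truncates
to a chunk boundary after the add). A quick self-contained check; the
SIZET_SAT_ADD below is the usual saturating add, and ALIGNMENT 16 is
assumed only for illustration:

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  #define LOG_MAX_SIZE 16
  #define CHUNK_SIZE ((size_t)1 << LOG_MAX_SIZE)
  #define ALIGNMENT 16    /* assumed value, for illustration only */
  #define SIZET_SAT_ADD(a, b) \
                ((a) < SIZE_MAX - (b) ? (a) + (b) : SIZE_MAX)

  int main(void)
  {
    size_t sz;

    /* AO_malloc_large only sees sz > CHUNK_SIZE - sizeof(AO_t),      */
    /* but the rounding invariants hold for any positive sz.          */
    for (sz = 1; sz < 4 * CHUNK_SIZE; sz++) {
      size_t rounded = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1)
                       & ~(CHUNK_SIZE - 1);

      assert(rounded % CHUNK_SIZE == 0);    /* chunk multiple */
      assert(rounded >= sz + ALIGNMENT);    /* fits header + payload */
      assert(rounded > LOG_MAX_SIZE);       /* the asserted invariant */
    }
    return 0;
  }

If the saturating add tops out at SIZE_MAX, the masked result is still a
chunk multiple, and the subsequent get_mmaped call is then expected to fail.
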
if (sz > CHUNK_SIZE - sizeof(AO_t))
return AO_malloc_large(sz);
log_sz = msb(sz + (sizeof(AO_t) - 1));
+ assert(log_sz <= LOG_MAX_SIZE);
+ assert(((size_t)1 << log_sz) >= sz + sizeof(AO_t));
result = AO_stack_pop(AO_free_list+log_sz);
while (0 == result) {
void * chunk = get_chunk();
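
The two assertions encode AO_malloc's sizing invariant: msb() returns the
1-based position of the most significant set bit, so (size_t)1 << msb(x) > x
for nonzero x (this is what the second assertion relies on). Hence the free
list picked by log_sz holds blocks with room for the header word plus the
requested payload, and log_sz never indexes past the last list. A standalone
sketch of the check, with a hypothetical portable msb() and sizeof(size_t)
standing in for sizeof(AO_t):

  #include <assert.h>
  #include <stddef.h>

  #define LOG_MAX_SIZE 16
  #define CHUNK_SIZE ((size_t)1 << LOG_MAX_SIZE)

  /* Hypothetical stand-in for msb() in atomic_ops_malloc.c: 1-based  */
  /* index of the most significant set bit, so that                   */
  /* (size_t)1 << msb(x) > x for any nonzero x.                       */
  static unsigned msb(size_t x)
  {
    unsigned result = 0;

    for (; x != 0; x >>= 1)
      result++;
    return result;
  }

  int main(void)
  {
    size_t sz;
    size_t ao_t_size = sizeof(size_t);  /* stand-in for sizeof(AO_t) */

    /* Same range AO_malloc handles without AO_malloc_large. */
    for (sz = 1; sz <= CHUNK_SIZE - ao_t_size; sz++) {
      unsigned log_sz = msb(sz + (ao_t_size - 1));

      assert(log_sz <= LOG_MAX_SIZE);
      assert(((size_t)1 << log_sz) >= sz + ao_t_size);
    }
    return 0;
  }
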
return arg; /* use arg to suppress compiler warning */
}
+#ifndef LOG_MAX_SIZE
+# define LOG_MAX_SIZE 16
+#endif
+
+#define CHUNK_SIZE (1 << LOG_MAX_SIZE)
+
int main(int argc, char **argv) {
int nthreads;
printf("Performing %d reversals of %d element lists in %d threads\n",
N_REVERSALS, LIST_LENGTH, nthreads);
AO_malloc_enable_mmap();
+
+ /* Test various corner cases. */
+ AO_free(NULL);
+ AO_free(AO_malloc(0));
+ AO_free(AO_malloc(CHUNK_SIZE - (sizeof(AO_t)-1))); /* large alloc */
+
run_parallel(nthreads, run_one_test, dummy_test, "AO_malloc/AO_free");
return 0;
}
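
A note on the corner cases exercised above: AO_malloc hands any request
bigger than CHUNK_SIZE - sizeof(AO_t) to AO_malloc_large, so the request of
CHUNK_SIZE - (sizeof(AO_t)-1) bytes is the smallest one that takes the mmap
path, while AO_malloc(0) is still served from the smallest free list. A
compact check of both boundaries, reusing the hypothetical msb() from the
earlier sketch (sizeof(size_t) again stands in for sizeof(AO_t)):

  #include <assert.h>
  #include <stddef.h>

  #define LOG_MAX_SIZE 16
  #define CHUNK_SIZE ((size_t)1 << LOG_MAX_SIZE)

  static unsigned msb(size_t x)   /* same hypothetical msb() as above */
  {
    unsigned result = 0;

    for (; x != 0; x >>= 1)
      result++;
    return result;
  }

  int main(void)
  {
    size_t ao_t_size = sizeof(size_t);  /* stand-in for sizeof(AO_t) */
    size_t sz = CHUNK_SIZE - (ao_t_size - 1);  /* the test's request */

    /* One byte past the free-list range: the would-be log size      */
    /* exceeds LOG_MAX_SIZE, so AO_malloc must take the mmap path.   */
    assert(sz > CHUNK_SIZE - ao_t_size);
    assert(msb(sz + (ao_t_size - 1)) > LOG_MAX_SIZE);

    /* A zero-byte request still fits the smallest list: a block of  */
    /* (size_t)1 << msb(ao_t_size - 1) bytes holds the header even   */
    /* with no payload.                                              */
    assert(((size_t)1 << msb(ao_t_size - 1)) >= ao_t_size);
    return 0;
  }
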