From a18bf5fc9303e2739ee7026bdb3bbf272bf4baa8 Mon Sep 17 00:00:00 2001 From: Xinchen Hui Date: Wed, 16 Jul 2014 16:35:48 +0800 Subject: [PATCH] stash --- Zend/zend.h | 4 +- Zend/zend_alloc.c | 4027 ++++++++++++++++++++--------------------- Zend/zend_alloc.h | 105 +- Zend/zend_variables.c | 12 +- 4 files changed, 2112 insertions(+), 2036 deletions(-) diff --git a/Zend/zend.h b/Zend/zend.h index 1021829c3b..bfdbf5b9dc 100644 --- a/Zend/zend.h +++ b/Zend/zend.h @@ -303,8 +303,10 @@ typedef enum { !(EG(current_execute_data)->prev_execute_data->opline->result_type & EXT_TYPE_UNUSED)) #if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__INTEL_COMPILER) && !defined(DARWIN) && !defined(__hpux) && !defined(_AIX) && !defined(__osf__) +# define ZEND_NORETURN __attribute__((noreturn)) void zend_error_noreturn(int type, const char *format, ...) __attribute__ ((noreturn)); #else +# define ZEND_NORETURN # define zend_error_noreturn zend_error #endif @@ -744,7 +746,7 @@ END_EXTERN_C() ZEND_ASSERT(Z_ISREF_P(_z)); \ ref = Z_REF_P(_z); \ ZVAL_COPY_VALUE(_z, &ref->val); \ - efree(ref); \ + efree_size(ref, sizeof(zend_reference)); \ } while (0) #define SEPARATE_STRING(zv) do { \ diff --git a/Zend/zend_alloc.c b/Zend/zend_alloc.c index bb3e66c989..eb440786fa 100644 --- a/Zend/zend_alloc.c +++ b/Zend/zend_alloc.c @@ -20,6 +20,39 @@ /* $Id$ */ +/* + * zend_alloc is designed to be a modern, CPU cache-friendly memory manager + * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations. + * + * All allocations are split into 3 categories: + * + * Huge - the size is greater than the CHUNK size (~2M by default); allocation is + * performed using mmap(). + * + * Large - a number of pages (4096 bytes each by default) inside a CHUNK. Large blocks + * are always aligned on a page boundary. + * + * Small - less than half of the page size. Small sizes are rounded up to the nearest + * greater predefined small size (there are 30 predefined sizes: + * 2, 4, 8, 16, 24, 32, ... 2048). Small blocks are allocated from + * RUNs. Each RUN is allocated as a single page or a few consecutive pages. + * Allocation inside RUNs is implemented by fetching the first element from + * a list of free elements, or the first bit from a bitset of free elements. + * + * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge memory + * blocks are always aligned to a CHUNK boundary, so it is easy to determine + * the CHUNK that owns a given pointer. Regular CHUNKs reserve a single + * page at the start for bookkeeping. It contains a bitset of free pages, + * bitsets of available RUNs for the predefined small sizes, a map of pages that + * records how each page in this CHUNK is used, etc. + * + * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition it + * provides specialized and optimized routines to allocate blocks of predefined + * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.). + * The library uses C preprocessor tricks that substitute calls to emalloc() + * with more specialized routines when the requested size is known. + */ + #include "zend.h" #include "zend_alloc.h" #include "zend_globals.h" @@ -37,17 +70,9 @@ # include #endif -#ifndef ZEND_MM_HEAP_PROTECTION -# define ZEND_MM_HEAP_PROTECTION ZEND_DEBUG -#endif - -#ifndef ZEND_MM_SAFE_UNLINKING -# define ZEND_MM_SAFE_UNLINKING 1 -#endif - -#ifndef ZEND_MM_COOKIES -# define ZEND_MM_COOKIES ZEND_DEBUG -#endif +#include +#include +#include #ifdef _WIN64 # define PTR_FMT "0x%0.16I64x" @@ -77,11 +102,7 @@ void zend_debug_alloc_output(char *format, ...)
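/*
 * Illustrative sketch, not part of the patch: the design comment above says an
 * allocation is routed by size (huge / large / small) and that CHUNK alignment
 * makes the owning CHUNK of any pointer computable by masking. The standalone
 * demo below shows both ideas; the EXAMPLE_* names and the exact limits
 * (~2M chunk, 2048-byte largest small bin, both taken from the comment) are
 * assumptions for illustration, not Zend API.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CHUNK_SIZE (2 * 1024 * 1024)  /* ~2M, per the design comment */
#define EXAMPLE_MAX_SMALL  2048               /* largest small bin, per the comment */

/* Category routing: huge -> own mmap(), large -> pages in a chunk, small -> bin run. */
static const char *example_category(size_t size)
{
    if (size > EXAMPLE_CHUNK_SIZE) return "huge (own mmap)";
    if (size > EXAMPLE_MAX_SMALL)  return "large (pages inside a chunk)";
    return "small (run of a predefined bin size)";
}

/* Owning chunk of a pointer: round the address down to the chunk alignment. */
static uintptr_t example_chunk_base(const void *ptr)
{
    return (uintptr_t)ptr & ~((uintptr_t)EXAMPLE_CHUNK_SIZE - 1);
}

int main(void)
{
    int on_stack;
    printf("100 B  -> %s\n", example_category(100));
    printf("100 KB -> %s\n", example_category(100 * 1024));
    printf("4 MB   -> %s\n", example_category(4 * 1024 * 1024));
    printf("chunk base of %p is 0x%lx\n", (void *)&on_stack,
           (unsigned long)example_chunk_base(&on_stack));
    return 0;
}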
} #endif -#if (defined (__GNUC__) && __GNUC__ > 2 ) && !defined(__INTEL_COMPILER) && !defined(DARWIN) && !defined(__hpux) && !defined(_AIX) -static void zend_mm_panic(const char *message) __attribute__ ((noreturn)); -#endif - -static void zend_mm_panic(const char *message) +static ZEND_NORETURN void zend_mm_panic(const char *message) { fprintf(stderr, "%s\n", message); /* See http://support.microsoft.com/kb/190351 */ @@ -98,11 +119,6 @@ static void zend_mm_panic(const char *message) /* Storage Manager */ /*******************/ -#ifdef ZEND_WIN32 -# define HAVE_MEM_WIN32 /* use VirtualAlloc() to allocate memory */ -#endif -#define HAVE_MEM_MALLOC /* use malloc() to allocate segments */ - #include #include #if HAVE_LIMITS_H @@ -111,7 +127,7 @@ static void zend_mm_panic(const char *message) #include #include -#if defined(HAVE_MEM_MMAP_ANON) || defined(HAVE_MEM_MMAP_ZERO) +#ifndef _WIN32 # ifdef HAVE_MREMAP # ifndef _GNU_SOURCE # define _GNU_SOURCE @@ -132,2282 +148,2148 @@ static void zend_mm_panic(const char *message) # ifndef MAP_FAILED # define MAP_FAILED ((void*)-1) # endif +# ifndef MAP_POPULATE +# define MAP_POPULATE 0 +#endif #endif -static zend_mm_storage* zend_mm_mem_dummy_init(void *params) -{ - return malloc(sizeof(zend_mm_storage)); -} - -static void zend_mm_mem_dummy_dtor(zend_mm_storage *storage) -{ - free(storage); -} - -static void zend_mm_mem_dummy_compact(zend_mm_storage *storage) -{ -} - -#if defined(HAVE_MEM_MMAP_ANON) || defined(HAVE_MEM_MMAP_ZERO) +/****************/ +/* Heap Manager */ +/****************/ -static zend_mm_segment* zend_mm_mem_mmap_realloc(zend_mm_storage *storage, zend_mm_segment* segment, size_t size) -{ - zend_mm_segment *ret; -#ifdef HAVE_MREMAP -#if defined(__NetBSD__) - /* NetBSD 5 supports mremap but takes an extra newp argument */ - ret = (zend_mm_segment*)mremap(segment, segment->size, segment, size, MREMAP_MAYMOVE); -#else - ret = (zend_mm_segment*)mremap(segment, segment->size, size, MREMAP_MAYMOVE); +#ifndef ZEND_MM_STAT +# define ZEND_MM_STAT 1 /* track current and peak memory usage */ #endif - if (ret == MAP_FAILED) { +#ifndef ZEND_MM_LIMIT +# define ZEND_MM_LIMIT 1 /* support for user-defined memory limit */ #endif - ret = storage->handlers->_alloc(storage, size); - if (ret) { - memcpy(ret, segment, size > segment->size ? 
segment->size : size); - storage->handlers->_free(storage, segment); - } -#ifdef HAVE_MREMAP - } +#ifndef ZEND_MM_CUSTOM +# define ZEND_MM_CUSTOM 1 /* support for custom memory allocator */ + /* USE_ZEND_ALLOC=0 may switch to system malloc() */ #endif - return ret; -} - -static void zend_mm_mem_mmap_free(zend_mm_storage *storage, zend_mm_segment* segment) -{ - munmap((void*)segment, segment->size); -} - +#ifndef ZEND_MM_SPEC_CT +# define ZEND_MM_SPEC_CT 0 /* use compile-time specializer */ #endif - -#ifdef HAVE_MEM_MMAP_ANON - -static zend_mm_segment* zend_mm_mem_mmap_anon_alloc(zend_mm_storage *storage, size_t size) -{ - zend_mm_segment *ret = (zend_mm_segment*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); - if (ret == MAP_FAILED) { - ret = NULL; - } - return ret; -} - -# define ZEND_MM_MEM_MMAP_ANON_DSC {"mmap_anon", zend_mm_mem_dummy_init, zend_mm_mem_dummy_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_mmap_anon_alloc, zend_mm_mem_mmap_realloc, zend_mm_mem_mmap_free} - +#ifndef ZEND_MM_SPEC_RT +# define ZEND_MM_SPEC_RT 0 /* use run-time specializer */ +#endif +#ifndef ZEND_MM_ERROR +# define ZEND_MM_ERROR 1 /* report system errors */ #endif -#ifdef HAVE_MEM_MMAP_ZERO - -static int zend_mm_dev_zero_fd = -1; - -static zend_mm_storage* zend_mm_mem_mmap_zero_init(void *params) -{ - if (zend_mm_dev_zero_fd == -1) { - zend_mm_dev_zero_fd = open("/dev/zero", O_RDWR, S_IRUSR | S_IWUSR); - } - if (zend_mm_dev_zero_fd >= 0) { - return malloc(sizeof(zend_mm_storage)); - } else { - return NULL; - } -} - -static void zend_mm_mem_mmap_zero_dtor(zend_mm_storage *storage) -{ - close(zend_mm_dev_zero_fd); - free(storage); -} - -static zend_mm_segment* zend_mm_mem_mmap_zero_alloc(zend_mm_storage *storage, size_t size) -{ - zend_mm_segment *ret = (zend_mm_segment*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, zend_mm_dev_zero_fd, 0); - if (ret == MAP_FAILED) { - ret = NULL; - } - return ret; -} +#ifndef ZEND_MM_CHECK +# define ZEND_MM_CHECK(condition, message) do { \ + if (UNEXPECTED(!(condition))) { \ + zend_mm_panic(message); \ + } \ + } while (0) +#endif -# define ZEND_MM_MEM_MMAP_ZERO_DSC {"mmap_zero", zend_mm_mem_mmap_zero_init, zend_mm_mem_mmap_zero_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_mmap_zero_alloc, zend_mm_mem_mmap_realloc, zend_mm_mem_mmap_free} +typedef unsigned int zend_mm_page_info; /* 4-byte integer */ +//???typedef unsigned int zend_mm_bitset; /* 4-byte or 8-byte integer */ +typedef unsigned long zend_mm_bitset; /* 4-byte or 8-byte integer */ -#endif +#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \ + (((size_t)(size)) & ((alignment) - 1)) +#define ZEND_MM_ALIGNED_BASE(size, alignment) \ + (((size_t)(size)) & ~((alignment) - 1)) +#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \ + (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1)) +#define ZEND_MM_SIZE_TO_NUM(size, alignment) \ + (((size_t)(size) + ((alignment) - 1)) / (alignment)) -#ifdef HAVE_MEM_WIN32 +#define ZEND_MM_BITSET_LEN (sizeof(zend_mm_bitset) * 8) /* 32 or 64 */ +#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */ +#define ZEND_MM_ELEMENTS_LEN (ZEND_MM_PAGE_SIZE / ZEND_MM_MIN_SMALL_SIZE / ZEND_MM_BITSET_LEN) -static zend_mm_storage* zend_mm_mem_win32_init(void *params) -{ - HANDLE heap = HeapCreate(HEAP_NO_SERIALIZE, 0, 0); - zend_mm_storage* storage; +typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */ - if (heap == NULL) { - return NULL; - } - storage = (zend_mm_storage*)malloc(sizeof(zend_mm_storage)); - if (storage == 
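/*
 * Illustrative sketch, not part of the patch: worked examples for the
 * ZEND_MM_ALIGNED_* helper macros introduced above. The macro bodies are
 * restated verbatim so the check compiles standalone; the numeric expectations
 * in the asserts are hand-computed examples, not values from the patch.
 */
#include <assert.h>
#include <stddef.h>

#define ZEND_MM_ALIGNED_OFFSET(size, alignment)  (((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment)    (((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment)     (((size_t)(size) + ((alignment) - 1)) / (alignment))

int main(void)
{
    /* offset/base split an address against a power-of-two alignment */
    assert(ZEND_MM_ALIGNED_OFFSET(0x12345, 0x1000) == 0x345);
    assert(ZEND_MM_ALIGNED_BASE(0x12345, 0x1000) == 0x12000);

    /* SIZE_EX rounds a byte count up to the next multiple of the alignment */
    assert(ZEND_MM_ALIGNED_SIZE_EX(13, 8) == 16);
    assert(ZEND_MM_ALIGNED_SIZE_EX(16, 8) == 16);

    /* SIZE_TO_NUM counts how many alignment-sized units are needed,
     * e.g. how many 4096-byte pages a large allocation occupies */
    assert(ZEND_MM_SIZE_TO_NUM(8192, 4096) == 2);
    assert(ZEND_MM_SIZE_TO_NUM(8193, 4096) == 3);
    return 0;
}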
NULL) { - HeapDestroy(heap); - return NULL; - } - storage->data = (void*) heap; - return storage; -} +#define ZEND_MM_FREE_LIST_END 0 -static void zend_mm_mem_win32_dtor(zend_mm_storage *storage) -{ - HeapDestroy((HANDLE)storage->data); - free(storage); -} +#define ZEND_MM_IS_FRUM 0x00000000 +#define ZEND_MM_IS_LRUN 0x00000400 +#define ZEND_MM_IS_SRUN 0x00000800 +#define ZEND_MM_IS_NRUN 0x00000c00 -static void zend_mm_mem_win32_compact(zend_mm_storage *storage) -{ - HeapDestroy((HANDLE)storage->data); - storage->data = (void*)HeapCreate(HEAP_NO_SERIALIZE, 0, 0); -} +#define ZEND_MM_LRUN_PAGES_MASK 0x000003ff +#define ZEND_MM_LRUN_PAGES_OFFSET 0 -static zend_mm_segment* zend_mm_mem_win32_alloc(zend_mm_storage *storage, size_t size) -{ - return (zend_mm_segment*) HeapAlloc((HANDLE)storage->data, HEAP_NO_SERIALIZE, size); -} +#define ZEND_MM_NRUN_PAGES_MASK 0x000003ff +#define ZEND_MM_NRUN_PAGES_OFFSET 0 -static void zend_mm_mem_win32_free(zend_mm_storage *storage, zend_mm_segment* segment) -{ - HeapFree((HANDLE)storage->data, HEAP_NO_SERIALIZE, segment); -} +#define ZEND_MM_SRUN_BIN_NUM_MASK 0x0000001f +#define ZEND_MM_SRUN_BITSET_MASK 0xffff0000 +#define ZEND_MM_SRUN_BIN_NUM_OFFSET 0 +#define ZEND_MM_SRUN_BITSET_OFFSET 16 -static zend_mm_segment* zend_mm_mem_win32_realloc(zend_mm_storage *storage, zend_mm_segment* segment, size_t size) -{ - return (zend_mm_segment*) HeapReAlloc((HANDLE)storage->data, HEAP_NO_SERIALIZE, segment, size); -} +#define ZEND_MM_LRUN_PAGES(info) (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET) +#define ZEND_MM_NRUN_PAGES(info) (((info) & ZEND_MM_NRUN_PAGES_MASK) >> ZEND_MM_NRUN_PAGES_OFFSET) +#define ZEND_MM_SRUN_BIN_NUM(info) (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET) +#define ZEND_MM_SRUN_BITSET(info) (((info) & ZEND_MM_SRUN_BITSET_MASK) >> ZEND_MM_SRUN_BITSET_OFFSET) -# define ZEND_MM_MEM_WIN32_DSC {"win32", zend_mm_mem_win32_init, zend_mm_mem_win32_dtor, zend_mm_mem_win32_compact, zend_mm_mem_win32_alloc, zend_mm_mem_win32_realloc, zend_mm_mem_win32_free} +#define ZEND_MM_FRUN() ZEND_MM_IS_FRUN +#define ZEND_MM_LRUN(count) (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET)) +#define ZEND_MM_NRUN(num) (ZEND_MM_IS_NRUN | ((num) << ZEND_MM_NRUN_PAGES_OFFSET)) +#define ZEND_MM_SRUN(bin_num) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET)) -#endif +#define ZEND_MM_SRUN_BITSET_SET(num, set) \ + (ZEND_MM_SRUN(num) | ((set) << ZEND_MM_SRUN_BITSET_OFFSET)) -#ifdef HAVE_MEM_MALLOC +#define ZEND_MM_SRUN_BITSET_UPDATE(info, set) \ + ((info) | ((set) << ZEND_MM_SRUN_BITSET_OFFSET)) -static zend_mm_segment* zend_mm_mem_malloc_alloc(zend_mm_storage *storage, size_t size) -{ - return (zend_mm_segment*)malloc(size); -} +#define ZEND_MM_SRUN_BITSET_INCL(info, num) \ + ((info) | (1 << ((num) + ZEND_MM_SRUN_BITSET_OFFSET))) -static zend_mm_segment* zend_mm_mem_malloc_realloc(zend_mm_storage *storage, zend_mm_segment *ptr, size_t size) -{ - return (zend_mm_segment*)realloc(ptr, size); -} +#define ZEND_MM_SRUN_BITSET_EXCL(info, num) \ + ((info) & ~(1 << ((num) + ZEND_MM_SRUN_BITSET_OFFSET))) -static void zend_mm_mem_malloc_free(zend_mm_storage *storage, zend_mm_segment *ptr) -{ - free(ptr); -} +#define ZEND_MM_BINS 30 -# define ZEND_MM_MEM_MALLOC_DSC {"malloc", zend_mm_mem_dummy_init, zend_mm_mem_dummy_dtor, zend_mm_mem_dummy_compact, zend_mm_mem_malloc_alloc, zend_mm_mem_malloc_realloc, zend_mm_mem_malloc_free} +typedef struct _zend_mm_page zend_mm_page; +typedef struct _zend_mm_bin zend_mm_bin; +typedef struct 
_zend_mm_free_list zend_mm_free_list; +typedef struct _zend_mm_chunk zend_mm_chunk; +typedef struct _zend_mm_huge_list zend_mm_huge_list; -#endif +struct _zend_mm_page { + char bytes[ZEND_MM_PAGE_SIZE]; +}; -static const zend_mm_mem_handlers mem_handlers[] = { -#ifdef HAVE_MEM_WIN32 - ZEND_MM_MEM_WIN32_DSC, -#endif -#ifdef HAVE_MEM_MALLOC - ZEND_MM_MEM_MALLOC_DSC, -#endif -#ifdef HAVE_MEM_MMAP_ANON - ZEND_MM_MEM_MMAP_ANON_DSC, -#endif -#ifdef HAVE_MEM_MMAP_ZERO - ZEND_MM_MEM_MMAP_ZERO_DSC, +struct _zend_mm_chunk { + zend_mm_heap *heap; + zend_mm_chunk *next; + zend_mm_chunk *prev; + int free_pages; + int free_tail; + int num; +#if ZEND_DEBUG && defined(ZTS) + THREAD_T thread_id; + char reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3 + sizeof(THREAD_T))]; +#else + char reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)]; #endif - {NULL, NULL, NULL, NULL, NULL, NULL} + zend_mm_page_map free_map; /* 64 B */ + zend_mm_page_map small_map[ZEND_MM_BINS]; /* 1920 B (ZEND_MM_BINS <= 30) */ + zend_mm_page_info map[ZEND_MM_PAGES]; /* 2 KB = 512 * 4 */ }; -# define ZEND_MM_STORAGE_DTOR() heap->storage->handlers->dtor(heap->storage) -# define ZEND_MM_STORAGE_ALLOC(size) heap->storage->handlers->_alloc(heap->storage, size) -# define ZEND_MM_STORAGE_REALLOC(ptr, size) heap->storage->handlers->_realloc(heap->storage, ptr, size) -# define ZEND_MM_STORAGE_FREE(ptr) heap->storage->handlers->_free(heap->storage, ptr) - -/****************/ -/* Heap Manager */ -/****************/ - -#define MEM_BLOCK_VALID 0x7312F8DC -#define MEM_BLOCK_FREED 0x99954317 -#define MEM_BLOCK_CACHED 0xFB8277DC -#define MEM_BLOCK_GUARD 0x2A8FCC84 -#define MEM_BLOCK_LEAK 0x6C5E8F2D - -/* mm block type */ -typedef struct _zend_mm_block_info { -#if ZEND_MM_COOKIES - size_t _cookie; +#if ZEND_DEBUG && defined(ZTS) +# define ZEND_MM_CHECK_THREAD_ID(chunk) do { \ + ZEND_MM_CHECK((chunk)->thread_id == tsrm_thread_id(), "zend_mm_heap corrupted"); \ + } while (0) +#else +# define ZEND_MM_CHECK_THREAD_ID(chunk) do { \ + } while (0); #endif - size_t _size; - size_t _prev; -} zend_mm_block_info; #if ZEND_DEBUG - typedef struct _zend_mm_debug_info { - const char *filename; - uint lineno; - const char *orig_filename; - uint orig_lineno; - size_t size; -#if ZEND_MM_HEAP_PROTECTION - unsigned int start_magic; -#endif + size_t size; + const char *filename; + const char *orig_filename; + uint lineno; + uint orig_lineno; } zend_mm_debug_info; +#endif -#elif ZEND_MM_HEAP_PROTECTION - -typedef struct _zend_mm_debug_info { - size_t size; - unsigned int start_magic; -} zend_mm_debug_info; +struct _zend_mm_bin { + int next_free; + int num_used; +}; -#endif +struct _zend_mm_free_list { + short next_free; +}; -typedef struct _zend_mm_block { - zend_mm_block_info info; +struct _zend_mm_huge_list { + void *ptr; + size_t size; + zend_mm_huge_list *next; #if ZEND_DEBUG - unsigned int magic; -# ifdef ZTS - THREAD_T thread_id; -# endif - zend_mm_debug_info debug; -#elif ZEND_MM_HEAP_PROTECTION - zend_mm_debug_info debug; + zend_mm_debug_info dbg; #endif -} zend_mm_block; +}; -typedef struct _zend_mm_small_free_block { - zend_mm_block_info info; -#if ZEND_DEBUG - unsigned int magic; -# ifdef ZTS - THREAD_T thread_id; -# endif -#endif - struct _zend_mm_free_block *prev_free_block; - struct _zend_mm_free_block *next_free_block; -} zend_mm_small_free_block; +struct _zend_mm_heap { + zend_mm_chunk *main_chunk; + zend_mm_chunk *cached_chunks; + int chunks_count; + int peak_chunks_count; + int cached_chunks_count; + double avg_chunks_count; +#if ZEND_MM_STAT || 
ZEND_MM_LIMIT + size_t real_size; +#endif +#if ZEND_MM_STAT + size_t real_peak; +#endif +#if ZEND_MM_LIMIT + size_t limit; + int overflow; +#endif +#if ZEND_MM_STAT + size_t size; + size_t peak; +#endif +#if ZEND_MM_CUSTOM + int use_custom_heap; + void *(*_malloc)(size_t); + void (*_free)(void*); + void *(*_realloc)(void*, size_t); +#endif + zend_mm_huge_list *huge_list; + zend_mm_bin *cache[ZEND_MM_BINS]; +}; -typedef struct _zend_mm_free_block { - zend_mm_block_info info; +static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap, + const char *format, + size_t limit, #if ZEND_DEBUG - unsigned int magic; -# ifdef ZTS - THREAD_T thread_id; -# endif + const char *filename, + uint lineno, #endif - struct _zend_mm_free_block *prev_free_block; - struct _zend_mm_free_block *next_free_block; - - struct _zend_mm_free_block **parent; - struct _zend_mm_free_block *child[2]; -} zend_mm_free_block; - -#define ZEND_MM_NUM_BUCKETS (sizeof(size_t) << 3) - -#define ZEND_MM_CACHE 1 -#define ZEND_MM_CACHE_SIZE (ZEND_MM_NUM_BUCKETS * 4 * 1024) + size_t size) +{ + TSRMLS_FETCH(); -#ifndef ZEND_MM_CACHE_STAT -# define ZEND_MM_CACHE_STAT 0 + heap->overflow = 1; + zend_try { + zend_error_noreturn(E_ERROR, + format, + limit, +#if ZEND_DEBUG + filename, + lineno, #endif + size); + } zend_catch { + } zend_end_try(); + heap->overflow = 0; + zend_bailout(); + exit(1); +} -struct _zend_mm_heap { - int use_zend_alloc; - void *(*_malloc)(size_t); - void (*_free)(void*); - void *(*_realloc)(void*, size_t); - size_t free_bitmap; - size_t large_free_bitmap; - size_t block_size; - size_t compact_size; - zend_mm_segment *segments_list; - zend_mm_storage *storage; - size_t real_size; - size_t real_peak; - size_t limit; - size_t size; - size_t peak; - size_t reserve_size; - void *reserve; - int overflow; - int internal; -#if ZEND_MM_CACHE - unsigned int cached; - zend_mm_free_block *cache[ZEND_MM_NUM_BUCKETS]; -#endif - zend_mm_free_block *free_buckets[ZEND_MM_NUM_BUCKETS*2]; - zend_mm_free_block *large_free_buckets[ZEND_MM_NUM_BUCKETS]; - zend_mm_free_block *rest_buckets[2]; - int rest_count; -#if ZEND_MM_CACHE_STAT - struct { - int count; - int max_count; - int hit; - int miss; - } cache_stat[ZEND_MM_NUM_BUCKETS+1]; -#endif +#define _BIN_DATA_SIZE(num, size, offset, elements, pages, x, y) size, +static const unsigned int bin_data_size[] = { + ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y) }; -#define ZEND_MM_SMALL_FREE_BUCKET(heap, index) \ - (zend_mm_free_block*) ((char*)&heap->free_buckets[index * 2] + \ - sizeof(zend_mm_free_block*) * 2 - \ - sizeof(zend_mm_small_free_block)) - -#define ZEND_MM_REST_BUCKET(heap) \ - (zend_mm_free_block*)((char*)&heap->rest_buckets[0] + \ - sizeof(zend_mm_free_block*) * 2 - \ - sizeof(zend_mm_small_free_block)) - -#define ZEND_MM_REST_BLOCK ((zend_mm_free_block**)(zend_uintptr_t)(1)) - -#define ZEND_MM_MAX_REST_BLOCKS 16 +#define _BIN_DATA_OFFSET(num, size, offset, elements, pages, x, y) offset, +static const unsigned int bin_data_offset[] = { + ZEND_MM_BINS_INFO(_BIN_DATA_OFFSET, x, y) +}; -#if ZEND_MM_COOKIES +#define _BIN_DATA_ELEMENTS(num, size, offset, elements, pages, x, y) elements, +static const int bin_elements[] = { + ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y) +}; -static unsigned int _zend_mm_cookie = 0; +#define _BIN_DATA_PAGES(num, size, offset, elements, pages, x, y) pages, +static const int bin_pages[] = { + ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y) +}; -# define ZEND_MM_COOKIE(block) \ - (((size_t)(block)) ^ _zend_mm_cookie) -# define ZEND_MM_SET_COOKIE(block) \ - 
(block)->info._cookie = ZEND_MM_COOKIE(block) -# define ZEND_MM_CHECK_COOKIE(block) \ - if (UNEXPECTED((block)->info._cookie != ZEND_MM_COOKIE(block))) { \ - zend_mm_panic("zend_mm_heap corrupted"); \ +static zend_always_inline int zend_mm_small_size_to_bit(size_t size) +{ +#if defined(__GNUC__) + if (UNEXPECTED(size == 0)) { + return 0; + } else { + return sizeof(size) * 8 - __builtin_clzl(size); } #else -# define ZEND_MM_SET_COOKIE(block) -# define ZEND_MM_CHECK_COOKIE(block) + int n = 16; + if (size == 0) return 0; + if (size <= 0x00ff) {n -= 8; size = size << 8;} + if (size <= 0x0fff) {n -= 4; size = size << 4;} + if (size <= 0x3fff) {n -= 2; size = size << 2;} + if (size <= 0x7fff) {n -= 1;} + return n; #endif +} -/* Default memory segment size */ -#define ZEND_MM_SEG_SIZE (256 * 1024) - -/* Reserved space for error reporting in case of memory overflow */ -#define ZEND_MM_RESERVE_SIZE (8*1024) - -#ifdef _WIN64 -# define ZEND_MM_LONG_CONST(x) (x##i64) -#else -# define ZEND_MM_LONG_CONST(x) (x##L) +#ifndef MAX +# define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif -#define ZEND_MM_TYPE_MASK ZEND_MM_LONG_CONST(0x3) - -#define ZEND_MM_FREE_BLOCK ZEND_MM_LONG_CONST(0x0) -#define ZEND_MM_USED_BLOCK ZEND_MM_LONG_CONST(0x1) -#define ZEND_MM_GUARD_BLOCK ZEND_MM_LONG_CONST(0x3) - -#define ZEND_MM_BLOCK(b, type, size) do { \ - size_t _size = (size); \ - (b)->info._size = (type) | _size; \ - ZEND_MM_BLOCK_AT(b, _size)->info._prev = (type) | _size; \ - ZEND_MM_SET_COOKIE(b); \ - } while (0); -#define ZEND_MM_LAST_BLOCK(b) do { \ - (b)->info._size = ZEND_MM_GUARD_BLOCK | ZEND_MM_ALIGNED_HEADER_SIZE; \ - ZEND_MM_SET_MAGIC(b, MEM_BLOCK_GUARD); \ - } while (0); -#define ZEND_MM_BLOCK_SIZE(b) ((b)->info._size & ~ZEND_MM_TYPE_MASK) -#define ZEND_MM_IS_FREE_BLOCK(b) (!((b)->info._size & ZEND_MM_USED_BLOCK)) -#define ZEND_MM_IS_USED_BLOCK(b) ((b)->info._size & ZEND_MM_USED_BLOCK) -#define ZEND_MM_IS_GUARD_BLOCK(b) (((b)->info._size & ZEND_MM_TYPE_MASK) == ZEND_MM_GUARD_BLOCK) - -#define ZEND_MM_NEXT_BLOCK(b) ZEND_MM_BLOCK_AT(b, ZEND_MM_BLOCK_SIZE(b)) -#define ZEND_MM_PREV_BLOCK(b) ZEND_MM_BLOCK_AT(b, -(ssize_t)((b)->info._prev & ~ZEND_MM_TYPE_MASK)) - -#define ZEND_MM_PREV_BLOCK_IS_FREE(b) (!((b)->info._prev & ZEND_MM_USED_BLOCK)) - -#define ZEND_MM_MARK_FIRST_BLOCK(b) ((b)->info._prev = ZEND_MM_GUARD_BLOCK) -#define ZEND_MM_IS_FIRST_BLOCK(b) ((b)->info._prev == ZEND_MM_GUARD_BLOCK) - -/* optimized access */ -#define ZEND_MM_FREE_BLOCK_SIZE(b) (b)->info._size - -/* Aligned header size */ -#define ZEND_MM_ALIGNED_HEADER_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_block)) -#define ZEND_MM_ALIGNED_FREE_HEADER_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_small_free_block)) -#define ZEND_MM_MIN_ALLOC_BLOCK_SIZE ZEND_MM_ALIGNED_SIZE(ZEND_MM_ALIGNED_HEADER_SIZE + END_MAGIC_SIZE) -#define ZEND_MM_ALIGNED_MIN_HEADER_SIZE (ZEND_MM_MIN_ALLOC_BLOCK_SIZE>ZEND_MM_ALIGNED_FREE_HEADER_SIZE?ZEND_MM_MIN_ALLOC_BLOCK_SIZE:ZEND_MM_ALIGNED_FREE_HEADER_SIZE) -#define ZEND_MM_ALIGNED_SEGMENT_SIZE ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_segment)) - -#define ZEND_MM_MIN_SIZE ((ZEND_MM_ALIGNED_MIN_HEADER_SIZE>(ZEND_MM_ALIGNED_HEADER_SIZE+END_MAGIC_SIZE))?(ZEND_MM_ALIGNED_MIN_HEADER_SIZE-(ZEND_MM_ALIGNED_HEADER_SIZE+END_MAGIC_SIZE)):0) - -#define ZEND_MM_MAX_SMALL_SIZE ((ZEND_MM_NUM_BUCKETS<>ZEND_MM_ALIGNMENT_LOG2)-(ZEND_MM_ALIGNED_MIN_HEADER_SIZE>>ZEND_MM_ALIGNMENT_LOG2)) - -#define ZEND_MM_SMALL_SIZE(true_size) (true_size < ZEND_MM_MAX_SMALL_SIZE) - -/* Memory calculations */ -#define ZEND_MM_BLOCK_AT(blk, offset) ((zend_mm_block *) (((char *) 
(blk))+(offset))) -#define ZEND_MM_DATA_OF(p) ((void *) (((char *) (p))+ZEND_MM_ALIGNED_HEADER_SIZE)) -#define ZEND_MM_HEADER_OF(blk) ZEND_MM_BLOCK_AT(blk, -(int)ZEND_MM_ALIGNED_HEADER_SIZE) - -/* Debug output */ -#if ZEND_DEBUG - -# ifdef ZTS -# define ZEND_MM_SET_THREAD_ID(block) \ - ((zend_mm_block*)(block))->thread_id = tsrm_thread_id() -# define ZEND_MM_BAD_THREAD_ID(block) ((block)->thread_id != tsrm_thread_id()) -# else -# define ZEND_MM_SET_THREAD_ID(block) -# define ZEND_MM_BAD_THREAD_ID(block) 0 -# endif - -# define ZEND_MM_VALID_PTR(block) \ - zend_mm_check_ptr(heap, block, 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC) - -# define ZEND_MM_SET_MAGIC(block, val) do { \ - (block)->magic = (val); \ - } while (0) - -# define ZEND_MM_CHECK_MAGIC(block, val) do { \ - if ((block)->magic != (val)) { \ - zend_mm_panic("zend_mm_heap corrupted"); \ - } \ - } while (0) - -# define ZEND_MM_SET_DEBUG_INFO(block, __size, set_valid, set_thread) do { \ - ((zend_mm_block*)(block))->debug.filename = __zend_filename; \ - ((zend_mm_block*)(block))->debug.lineno = __zend_lineno; \ - ((zend_mm_block*)(block))->debug.orig_filename = __zend_orig_filename; \ - ((zend_mm_block*)(block))->debug.orig_lineno = __zend_orig_lineno; \ - ZEND_MM_SET_BLOCK_SIZE(block, __size); \ - if (set_valid) { \ - ZEND_MM_SET_MAGIC(block, MEM_BLOCK_VALID); \ - } \ - if (set_thread) { \ - ZEND_MM_SET_THREAD_ID(block); \ - } \ - } while (0) - -#else - -# define ZEND_MM_VALID_PTR(ptr) EXPECTED(ptr != NULL) - -# define ZEND_MM_SET_MAGIC(block, val) - -# define ZEND_MM_CHECK_MAGIC(block, val) - -# define ZEND_MM_SET_DEBUG_INFO(block, __size, set_valid, set_thread) ZEND_MM_SET_BLOCK_SIZE(block, __size) - +#ifndef MIN +# define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif +#define MAX_EXPECTED_1(a, b) (EXPECTED((a) > (b)) ? (a) : (b)) +#define MAX_EXPECTED_2(a, b) (UNEXPECTED((a) > (b)) ? (a) : (b)) +#define MIN_EXPECTED_1(a, b) (EXPECTED((a) < (b)) ? (a) : (b)) +#define MIN_EXPECTED_2(a, b) (UNEXPECTED((a) < (b)) ? 
(a) : (b)) -#if ZEND_MM_HEAP_PROTECTION - -# define ZEND_MM_CHECK_PROTECTION(block) \ - do { \ - if ((block)->debug.start_magic != _mem_block_start_magic || \ - memcmp(ZEND_MM_END_MAGIC_PTR(block), &_mem_block_end_magic, END_MAGIC_SIZE) != 0) { \ - zend_mm_panic("zend_mm_heap corrupted"); \ - } \ - } while (0) +static zend_always_inline int zend_mm_small_size_to_bin(size_t size) +{ + int n; -# define ZEND_MM_END_MAGIC_PTR(block) \ - (((char*)(ZEND_MM_DATA_OF(block))) + ((zend_mm_block*)(block))->debug.size) + if (UNEXPECTED(size <= 4)) { + return (size > 2); + } else { + n = zend_mm_small_size_to_bit(size - 1); + n = 2 + 4 * MAX_EXPECTED_2(n - 6, 0) + ((size - 1) >> MAX(n - 3, 3)); + } + return n; +} -# define END_MAGIC_SIZE sizeof(unsigned int) +#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size) -# define ZEND_MM_SET_BLOCK_SIZE(block, __size) do { \ - char *p; \ - ((zend_mm_block*)(block))->debug.size = (__size); \ - p = ZEND_MM_END_MAGIC_PTR(block); \ - ((zend_mm_block*)(block))->debug.start_magic = _mem_block_start_magic; \ - memcpy(p, &_mem_block_end_magic, END_MAGIC_SIZE); \ - } while (0) +#define ZEND_MM_PAGE_ADDR(chunk, page_num) \ + ((void*)(((zend_mm_page*)(chunk)) + (page_num))) -static unsigned int _mem_block_start_magic = 0; -static unsigned int _mem_block_end_magic = 0; +/*****************/ +/* OS Allocation */ +/*****************/ -#else +static void *zend_mm_mmap_fixed(void *addr, size_t size) +{ -# if ZEND_DEBUG -# define ZEND_MM_SET_BLOCK_SIZE(block, _size) \ - ((zend_mm_block*)(block))->debug.size = (_size) -# else -# define ZEND_MM_SET_BLOCK_SIZE(block, _size) -# endif +#ifdef _WIN32 + return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. 
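/*
 * Illustrative sketch, not part of the patch: the small-size -> bin-index
 * mapping performed by zend_mm_small_size_to_bin() above. The arithmetic is
 * copied from the patch with the branch-prediction hints dropped; the expected
 * indices in the asserts are worked out by hand from the predefined bin sizes
 * 2, 4, 8, 16, 24, 32, ... 2048 listed in the header comment. Assumes
 * GCC/Clang for __builtin_clzl(), as in the patch's own __GNUC__ branch.
 */
#include <assert.h>
#include <stddef.h>

#define EX_MAX(a, b) (((a) > (b)) ? (a) : (b))

static int ex_small_size_to_bit(size_t size)
{
    /* index of the highest set bit, 1-based (only called with size >= 4 here) */
    return (int)(sizeof(size) * 8) - __builtin_clzl(size);
}

static int ex_small_size_to_bin(size_t size)
{
    int n;
    if (size <= 4) {
        return size > 2;  /* bin 0 holds 1..2 bytes, bin 1 holds 3..4 */
    }
    n = ex_small_size_to_bit(size - 1);
    return 2 + 4 * EX_MAX(n - 6, 0) + (int)((size - 1) >> EX_MAX(n - 3, 3));
}

int main(void)
{
    assert(ex_small_size_to_bin(8)    == 2);   /* bin sizes start 2, 4, 8, ...      */
    assert(ex_small_size_to_bin(16)   == 3);
    assert(ex_small_size_to_bin(17)   == 4);   /* 17 bytes round up to the 24-byte bin */
    assert(ex_small_size_to_bin(33)   == 6);   /* 33 bytes round up to the 40-byte bin */
    assert(ex_small_size_to_bin(2048) == 29);  /* last of the 30 bins (0..29)       */
    return 0;
}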
*/ + void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0); -# define ZEND_MM_CHECK_PROTECTION(block) + if (ptr == MAP_FAILED) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno)); +#endif + return NULL; + } else if (ptr != addr) { + if (munmap(ptr, size) != 0) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno)); +#endif + } + return NULL; + } + return ptr; +#endif +} -# define END_MAGIC_SIZE 0 +static void *zend_mm_mmap(size_t size) +{ +#ifdef _WIN32 + void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + + if (ptr == NULL) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nVirtualAlloc() failed: [%d]\n", GetLastError()); +#endif + return NULL; + } + return ptr; +#else + void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0); + if (ptr == MAP_FAILED) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno)); #endif + return NULL; + } + return ptr; +#endif +} -#if ZEND_MM_SAFE_UNLINKING -# define ZEND_MM_CHECK_BLOCK_LINKAGE(block) \ - if (UNEXPECTED((block)->info._size != ZEND_MM_BLOCK_AT(block, ZEND_MM_FREE_BLOCK_SIZE(block))->info._prev) || \ - UNEXPECTED(!UNEXPECTED(ZEND_MM_IS_FIRST_BLOCK(block)) && \ - UNEXPECTED(ZEND_MM_PREV_BLOCK(block)->info._size != (block)->info._prev))) { \ - zend_mm_panic("zend_mm_heap corrupted"); \ +static void zend_mm_munmap(void *addr, size_t size) +{ +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nVirtualFree() failed: [%d]\n", GetLastError()); +#endif } -#define ZEND_MM_CHECK_TREE(block) \ - if (UNEXPECTED(*((block)->parent) != (block))) { \ - zend_mm_panic("zend_mm_heap corrupted"); \ +#else + if (munmap(addr, size) != 0) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno)); +#endif } -#else -# define ZEND_MM_CHECK_BLOCK_LINKAGE(block) -# define ZEND_MM_CHECK_TREE(block) #endif +} -#define ZEND_MM_LARGE_BUCKET_INDEX(S) zend_mm_high_bit(S) - -static void *_zend_mm_alloc_int(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE(2); -static void _zend_mm_free_int(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); -static void *_zend_mm_realloc_int(zend_mm_heap *heap, void *p, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_ALLOC_SIZE(3); +/***********/ +/* Bitmask */ +/***********/ -static inline unsigned int zend_mm_high_bit(size_t _size) +/* number of trailing set (1) bits */ +static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset) { -#if defined(__GNUC__) && (defined(__native_client__) || defined(i386)) - unsigned int n; +#if defined(__GNUC__) + return __builtin_ctzl(~bitset); +#else + int n; - __asm__("bsrl %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc"); - return n; -#elif defined(__GNUC__) && defined(__x86_64__) - unsigned long n; + if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN; - __asm__("bsr %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc"); - return (unsigned int)n; -#elif defined(_MSC_VER) && defined(_M_IX86) - __asm { - bsr eax, _size - } -#elif defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__)) - return (8 * SIZEOF_SIZE_T - 1) - __builtin_clzl(_size); -#else - unsigned int n = 0; - while (_size != 0) { - _size = _size >> 1; - n++; + n = 0; +#if 
SIZEOF_LONG == 8 + if (sizeof(zend_mm_bitset) == 8) { + if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> 32;} } - return n-1; +#endif + if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;} + if ((bitset & 0x000000ff) == 0x000000ff) {n += 8; bitset = bitset >> 8;} + if ((bitset & 0x0000000f) == 0x0000000f) {n += 4; bitset = bitset >> 4;} + if ((bitset & 0x00000003) == 0x00000003) {n += 2; bitset = bitset >> 2;} + return n + (bitset & 1); #endif } -static inline unsigned int zend_mm_low_bit(size_t _size) +/* number of trailing zero bits (0x01 -> 1; 0x40 -> 6; 0x00 -> LEN) */ +static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset) { -#if defined(__GNUC__) && (defined(__native_client__) || defined(i386)) - unsigned int n; - - __asm__("bsfl %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc"); - return n; -#elif defined(__GNUC__) && defined(__x86_64__) - unsigned long n; - - __asm__("bsf %1,%0\n\t" : "=r" (n) : "rm" (_size) : "cc"); - return (unsigned int)n; -#elif defined(_MSC_VER) && defined(_M_IX86) - __asm { - bsf eax, _size - } -#elif defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__)) - return __builtin_ctzl(_size); +#if defined(__GNUC__) + return __builtin_ctzl(bitset); #else - static const int offset[16] = {4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0}; - unsigned int n; - unsigned int index = 0; + int n; - n = offset[_size & 15]; - while (n == 4) { - _size >>= 4; - index += n; - n = offset[_size & 15]; - } + if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN; - return index + n; + n = 1; +#if SIZEOF_LONG == 8 + if (sizeof(zend_mm_bitset) == 8) { + if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> 32;} + } +#endif + if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;} + if ((bitset & 0x000000ff) == 0) {n += 8; bitset = bitset >> 8;} + if ((bitset & 0x0000000f) == 0) {n += 4; bitset = bitset >> 4;} + if ((bitset & 0x00000003) == 0) {n += 2; bitset = bitset >> 2;} + return n - (bitset & 1); #endif } -static inline void zend_mm_add_to_free_list(zend_mm_heap *heap, zend_mm_free_block *mm_block) +static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size) { - size_t size; - size_t index; - - ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_FREED); - - size = ZEND_MM_FREE_BLOCK_SIZE(mm_block); - if (EXPECTED(!ZEND_MM_SMALL_SIZE(size))) { - zend_mm_free_block **p; - - index = ZEND_MM_LARGE_BUCKET_INDEX(size); - p = &heap->large_free_buckets[index]; - mm_block->child[0] = mm_block->child[1] = NULL; - if (!*p) { - *p = mm_block; - mm_block->parent = p; - mm_block->prev_free_block = mm_block->next_free_block = mm_block; - heap->large_free_bitmap |= (ZEND_MM_LONG_CONST(1) << index); - } else { - size_t m; + int i = 0; - for (m = size << (ZEND_MM_NUM_BUCKETS - index); ; m <<= 1) { - zend_mm_free_block *prev = *p; + do { + zend_mm_bitset tmp = bitset[i]; + if (tmp != (zend_mm_bitset)-1) { + return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp); + } + i++; + } while (i < size); + return -1; +} - if (ZEND_MM_FREE_BLOCK_SIZE(prev) != size) { - p = &prev->child[(m >> (ZEND_MM_NUM_BUCKETS-1)) & 1]; - if (!*p) { - *p = mm_block; - mm_block->parent = p; - mm_block->prev_free_block = mm_block->next_free_block = mm_block; - break; - } - } else { - zend_mm_free_block *next = prev->next_free_block; +static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size) +{ + int i = 0; - prev->next_free_block = next->prev_free_block = mm_block; - mm_block->next_free_block = next; - 
mm_block->prev_free_block = prev; - mm_block->parent = NULL; - break; - } - } + do { + zend_mm_bitset tmp = bitset[i]; + if (tmp != 0) { + return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp); } - } else { - zend_mm_free_block *prev, *next; + i++; + } while (i < size); + return -1; +} - index = ZEND_MM_BUCKET_INDEX(size); +static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size) +{ + int i = 0; - prev = ZEND_MM_SMALL_FREE_BUCKET(heap, index); - if (prev->prev_free_block == prev) { - heap->free_bitmap |= (ZEND_MM_LONG_CONST(1) << index); + do { + zend_mm_bitset tmp = bitset[i]; + if (tmp != (zend_mm_bitset)-1) { + int n = zend_mm_bitset_nts(tmp); + bitset[i] |= 1 << n; + return i * ZEND_MM_BITSET_LEN + n; } - next = prev->next_free_block; - - mm_block->prev_free_block = prev; - mm_block->next_free_block = next; - prev->next_free_block = next->prev_free_block = mm_block; - } + i++; + } while (i < size); + return -1; } -static inline void zend_mm_remove_from_free_list(zend_mm_heap *heap, zend_mm_free_block *mm_block) +static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit) { - zend_mm_free_block *prev = mm_block->prev_free_block; - zend_mm_free_block *next = mm_block->next_free_block; - - ZEND_MM_CHECK_MAGIC(mm_block, MEM_BLOCK_FREED); - - if (EXPECTED(prev == mm_block)) { - zend_mm_free_block **rp, **cp; + return (bitset[bit / ZEND_MM_BITSET_LEN] & (1L << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0; +} -#if ZEND_MM_SAFE_UNLINKING - if (UNEXPECTED(next != mm_block)) { - zend_mm_panic("zend_mm_heap corrupted"); - } -#endif +static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit) +{ + bitset[bit / ZEND_MM_BITSET_LEN] |= (1L << (bit & (ZEND_MM_BITSET_LEN-1))); +} - rp = &mm_block->child[mm_block->child[1] != NULL]; - prev = *rp; - if (EXPECTED(prev == NULL)) { - size_t index = ZEND_MM_LARGE_BUCKET_INDEX(ZEND_MM_FREE_BLOCK_SIZE(mm_block)); +static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit) +{ + bitset[bit / ZEND_MM_BITSET_LEN] &= ~(1L << (bit & (ZEND_MM_BITSET_LEN-1))); +} - ZEND_MM_CHECK_TREE(mm_block); - *mm_block->parent = NULL; - if (mm_block->parent == &heap->large_free_buckets[index]) { - heap->large_free_bitmap &= ~(ZEND_MM_LONG_CONST(1) << index); - } - } else { - while (*(cp = &(prev->child[prev->child[1] != NULL])) != NULL) { - prev = *cp; - rp = cp; - } - *rp = NULL; - -subst_block: - ZEND_MM_CHECK_TREE(mm_block); - *mm_block->parent = prev; - prev->parent = mm_block->parent; - if ((prev->child[0] = mm_block->child[0])) { - ZEND_MM_CHECK_TREE(prev->child[0]); - prev->child[0]->parent = &prev->child[0]; - } - if ((prev->child[1] = mm_block->child[1])) { - ZEND_MM_CHECK_TREE(prev->child[1]); - prev->child[1]->parent = &prev->child[1]; - } - } +static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len) +{ + if (len == 1) { + zend_mm_bitset_set_bit(bitset, start); } else { - -#if ZEND_MM_SAFE_UNLINKING - if (UNEXPECTED(prev->next_free_block != mm_block) || UNEXPECTED(next->prev_free_block != mm_block)) { - zend_mm_panic("zend_mm_heap corrupted"); - } -#endif - - prev->next_free_block = next; - next->prev_free_block = prev; - - if (EXPECTED(ZEND_MM_SMALL_SIZE(ZEND_MM_FREE_BLOCK_SIZE(mm_block)))) { - if (EXPECTED(prev == next)) { - size_t index = ZEND_MM_BUCKET_INDEX(ZEND_MM_FREE_BLOCK_SIZE(mm_block)); - - if (EXPECTED(heap->free_buckets[index*2] == heap->free_buckets[index*2+1])) { - heap->free_bitmap &= 
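/*
 * Illustrative sketch, not part of the patch: the free_map page bitsets above
 * are scanned with "number of trailing set bits" (zend_mm_bitset_nts) and
 * "number of trailing zero bits" (zend_mm_bitset_ntz). A set bit marks an
 * allocated page, so nts() of a word is directly the index of the first free
 * page within that word. Standalone demo using the same GCC/Clang
 * __builtin_ctzl() the patch uses; all-ones and all-zero words are avoided
 * here because __builtin_ctzl(0) is undefined.
 */
#include <assert.h>

typedef unsigned long ex_bitset;

static int ex_nts(ex_bitset w) { return __builtin_ctzl(~w); }  /* trailing 1s */
static int ex_ntz(ex_bitset w) { return __builtin_ctzl(w); }   /* trailing 0s */

int main(void)
{
    ex_bitset pages = 0x07UL;      /* pages 0..2 allocated, rest of the word free */

    assert(ex_nts(pages) == 3);    /* first free page in this word is page 3 */
    assert(ex_ntz(0x40UL) == 6);   /* first allocated page would be page 6   */

    pages |= 1UL << ex_nts(pages); /* allocate the page that was just found  */
    assert(pages == 0x0fUL);
    return 0;
}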
~(ZEND_MM_LONG_CONST(1) << index); - } + int pos = start / ZEND_MM_BITSET_LEN; + int end = (start + len - 1) / ZEND_MM_BITSET_LEN; + int bit = start & (ZEND_MM_BITSET_LEN - 1); + zend_mm_bitset tmp; + + if (pos != end) { + /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */ + tmp = (zend_mm_bitset)-1 << bit; + bitset[pos++] |= tmp; + while (pos != end) { + /* set all bits */ + bitset[pos++] = (zend_mm_bitset)-1; } - } else if (UNEXPECTED(mm_block->parent == ZEND_MM_REST_BLOCK)) { - heap->rest_count--; - } else if (UNEXPECTED(mm_block->parent != NULL)) { - goto subst_block; + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* set bits from "0" to "end" */ + tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + bitset[pos] |= tmp; + } else { + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* set bits from "bit" to "end" */ + tmp = (zend_mm_bitset)-1 << bit; + tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + bitset[pos] |= tmp; } } } -static inline void zend_mm_add_to_rest_list(zend_mm_heap *heap, zend_mm_free_block *mm_block) +static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len) { - zend_mm_free_block *prev, *next; - - while (heap->rest_count >= ZEND_MM_MAX_REST_BLOCKS) { - zend_mm_free_block *p = heap->rest_buckets[1]; - - if (!ZEND_MM_SMALL_SIZE(ZEND_MM_FREE_BLOCK_SIZE(p))) { - heap->rest_count--; + if (len == 1) { + zend_mm_bitset_reset_bit(bitset, start); + } else { + int pos = start / ZEND_MM_BITSET_LEN; + int end = (start + len - 1) / ZEND_MM_BITSET_LEN; + int bit = start & (ZEND_MM_BITSET_LEN - 1); + zend_mm_bitset tmp; + + if (pos != end) { + /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */ + tmp = ~((1L << bit) - 1); + bitset[pos++] &= ~tmp; + while (pos != end) { + /* set all bits */ + bitset[pos++] = 0; + } + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* reset bits from "0" to "end" */ + tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + bitset[pos] &= ~tmp; + } else { + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* reset bits from "bit" to "end" */ + tmp = (zend_mm_bitset)-1 << bit; + tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + bitset[pos] &= ~tmp; } - prev = p->prev_free_block; - next = p->next_free_block; - prev->next_free_block = next; - next->prev_free_block = prev; - zend_mm_add_to_free_list(heap, p); } +} - if (!ZEND_MM_SMALL_SIZE(ZEND_MM_FREE_BLOCK_SIZE(mm_block))) { - mm_block->parent = ZEND_MM_REST_BLOCK; - heap->rest_count++; +static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len) +{ + if (len == 1) { + return !zend_mm_bitset_is_set(bitset, start); + } else { + int pos = start / ZEND_MM_BITSET_LEN; + int end = (start + len - 1) / ZEND_MM_BITSET_LEN; + int bit = start & (ZEND_MM_BITSET_LEN - 1); + zend_mm_bitset tmp; + + if (pos != end) { + /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */ + tmp = (zend_mm_bitset)-1 << bit; + if ((bitset[pos++] & tmp) != 0) { + return 0; + } + while (pos != end) { + /* set all bits */ + if (bitset[pos++] != 0) { + return 0; + } + } + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* set bits from "0" to "end" */ + tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + return (bitset[pos] & tmp) == 0; + } else { + end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1); + /* set bits from "bit" to "end" */ + tmp = (zend_mm_bitset)-1 << bit; + tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end); + return (bitset[pos] & tmp) == 
0; + } } - - ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_FREED); - - prev = heap->rest_buckets[0]; - next = prev->next_free_block; - mm_block->prev_free_block = prev; - mm_block->next_free_block = next; - prev->next_free_block = next->prev_free_block = mm_block; } -static inline void zend_mm_init(zend_mm_heap *heap) +/**********/ +/* Chunks */ +/**********/ + +static void *zend_mm_chunk_alloc(size_t size, size_t alignment) { - zend_mm_free_block* p; - int i; + void *ptr = zend_mm_mmap(size); - heap->free_bitmap = 0; - heap->large_free_bitmap = 0; -#if ZEND_MM_CACHE - heap->cached = 0; - memset(heap->cache, 0, sizeof(heap->cache)); + if (ptr == NULL) { + return NULL; + } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) { +#ifdef MADV_HUGEPAGE +// madvise(ptr, size, MADV_HUGEPAGE); #endif -#if ZEND_MM_CACHE_STAT - for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) { - heap->cache_stat[i].count = 0; - } + return ptr; + } else { + size_t offset; + + /* chunk has to be aligned */ + zend_mm_munmap(ptr, size); + ptr = zend_mm_mmap(size + alignment - ZEND_MM_PAGE_SIZE); +#ifdef _WIN32 + offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment); + zend_mm_munmap(ptr, size + alignment - ZEND_MM_PAGE_SIZE); + ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size); + offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment); + if (offset != 0) { + zend_mm_munmap(ptr, size); + return NULL; + } + return ptr; +#else + offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment); + if (offset != 0) { + offset = alignment - offset; + zend_mm_munmap(ptr, offset); + ptr = (char*)ptr + offset; + } else { + zend_mm_munmap((char*)ptr + size, alignment - ZEND_MM_PAGE_SIZE); + } +# ifdef MADV_HUGEPAGE +// madvise(ptr, size, MADV_HUGEPAGE); +# endif #endif - p = ZEND_MM_SMALL_FREE_BUCKET(heap, 0); - for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) { - p->next_free_block = p; - p->prev_free_block = p; - p = (zend_mm_free_block*)((char*)p + sizeof(zend_mm_free_block*) * 2); - heap->large_free_buckets[i] = NULL; + return ptr; } - heap->rest_buckets[0] = heap->rest_buckets[1] = ZEND_MM_REST_BUCKET(heap); - heap->rest_count = 0; } -static void zend_mm_del_segment(zend_mm_heap *heap, zend_mm_segment *segment) +static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk) { - zend_mm_segment **p = &heap->segments_list; - - while (*p != segment) { - p = &(*p)->next_segment; - } - *p = segment->next_segment; - heap->real_size -= segment->size; - ZEND_MM_STORAGE_FREE(segment); + chunk->heap = heap; + chunk->next = heap->main_chunk; + chunk->prev = heap->main_chunk->prev; + chunk->prev->next = chunk; + chunk->next->prev = chunk; + /* mark first pages as allocated */ + chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE; + chunk->free_tail = ZEND_MM_FIRST_PAGE; + /* the younger chunks have bigger number */ + chunk->num = chunk->prev->num + 1; +#if ZEND_DEBUG && defined(ZTS) + chunk->thread_id = tsrm_thread_id(); +#endif + /* mark first pages as allocated */ + chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1; + chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE); } -#if ZEND_MM_CACHE -static void zend_mm_free_cache(zend_mm_heap *heap) -{ - int i; +/***********************/ +/* Huge Runs (forward) */ +/***********************/ - for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) { - if (heap->cache[i]) { - zend_mm_free_block *mm_block = heap->cache[i]; +static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size 
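/*
 * Illustrative sketch, not part of the patch: zend_mm_chunk_alloc() above gets
 * a CHUNK-aligned mapping by trying a plain mmap() first and, if the result is
 * not aligned, mapping a larger span and trimming the unaligned head and tail
 * with munmap(). Below is a simplified POSIX-only version of the trimming idea
 * (always over-allocating, no retry fast path); it assumes `alignment` is a
 * power of two and a multiple of the page size, and the ex_* name is
 * hypothetical, not Zend API.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#ifndef MAP_ANON
# define MAP_ANON MAP_ANONYMOUS
#endif

static void *ex_chunk_alloc(size_t size, size_t alignment)
{
    size_t span = size + alignment;              /* enough room for an aligned block */
    char *raw = mmap(NULL, span, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t addr, aligned;
    size_t head, tail;

    if (raw == MAP_FAILED) {
        return NULL;
    }
    addr    = (uintptr_t)raw;
    aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
    head    = aligned - addr;                    /* unaligned prefix             */
    tail    = span - head - size;                /* leftover past the chunk end  */
    if (head) {
        munmap(raw, head);
    }
    if (tail) {
        munmap((char *)aligned + size, tail);
    }
    return (void *)aligned;
}

/* usage: void *chunk = ex_chunk_alloc(2 * 1024 * 1024, 2 * 1024 * 1024); */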
ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); - while (mm_block) { - size_t size = ZEND_MM_BLOCK_SIZE(mm_block); - zend_mm_free_block *q = mm_block->prev_free_block; - zend_mm_block *next_block = ZEND_MM_NEXT_BLOCK(mm_block); +#if ZEND_DEBUG +static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +#else +static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +#endif + +/**************/ +/* Large Runs */ +/**************/ + +#if ZEND_DEBUG +static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#else +static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#endif +{ + zend_mm_chunk *chunk = heap->main_chunk; + int page_num, len; - heap->cached -= size; + while (1) { + if (UNEXPECTED(chunk->free_pages < pages_count)) { + goto not_found; +#if 0 + } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) { + if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) { + goto not_found; + } else { + page_num = chunk->free_tail; + goto found; + } + } else if (0) { + /* First-Fit Search */ + int free_tail = chunk->free_tail; + zend_mm_bitset *bitset = chunk->free_map; + zend_mm_bitset tmp = *(bitset++); + int i = 0; - if (ZEND_MM_PREV_BLOCK_IS_FREE(mm_block)) { - mm_block = (zend_mm_free_block*)ZEND_MM_PREV_BLOCK(mm_block); - size += ZEND_MM_FREE_BLOCK_SIZE(mm_block); - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) mm_block); + while (1) { + /* skip allocated blocks */ + while (tmp == (zend_mm_bitset)-1) { + i += ZEND_MM_BITSET_LEN; + if (i == ZEND_MM_PAGES) { + goto not_found; + } + tmp = *(bitset++); } - if (ZEND_MM_IS_FREE_BLOCK(next_block)) { - size += ZEND_MM_FREE_BLOCK_SIZE(next_block); - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block); + /* find first 0 bit */ + page_num = i + zend_mm_bitset_nts(tmp); + /* reset bits from 0 to "bit" */ + tmp &= tmp + 1; + /* skip free blocks */ + while (tmp == 0) { + i += ZEND_MM_BITSET_LEN; + len = i - page_num; + if (len >= pages_count) { + goto found; + } else if (i >= free_tail) { + goto not_found; + } + tmp = *(bitset++); } - ZEND_MM_BLOCK(mm_block, ZEND_MM_FREE_BLOCK, size); - - if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && - ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_NEXT_BLOCK(mm_block))) { - zend_mm_del_segment(heap, (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE)); - } else { - zend_mm_add_to_free_list(heap, (zend_mm_free_block *) mm_block); + /* find first 1 bit */ + len = (i + zend_mm_bitset_ntz(tmp)) - page_num; + if (len >= pages_count) { + goto found; } - - mm_block = q; + /* set bits from 0 to "bit" */ + tmp |= tmp - 1; } - heap->cache[i] = NULL; -#if ZEND_MM_CACHE_STAT - heap->cache_stat[i].count = 0; #endif - } - } -} -#endif - -#if ZEND_MM_HEAP_PROTECTION || ZEND_MM_COOKIES -static void zend_mm_random(unsigned char *buf, size_t size) /* {{{ */ -{ - size_t i = 0; - unsigned char t; + } else { + /* Best-Fit Search */ + int best = -1; + int best_len = ZEND_MM_PAGES; + int free_tail = chunk->free_tail; + zend_mm_bitset *bitset = chunk->free_map; + zend_mm_bitset tmp = *(bitset++); + int i = 0; -#ifdef ZEND_WIN32 - HCRYPTPROV hCryptProv; - int has_context = 0; - - if 
(!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) { - /* Could mean that the key container does not exist, let try - again by asking for a new one */ - if (GetLastError() == NTE_BAD_KEYSET) { - if (CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) { - has_context = 1; + while (1) { + /* skip allocated blocks */ + while (tmp == (zend_mm_bitset)-1) { + i += ZEND_MM_BITSET_LEN; + if (i == ZEND_MM_PAGES) { + if (best > 0) { + page_num = best; + goto found; + } else { + goto not_found; + } + } + tmp = *(bitset++); + } + /* find first 0 bit */ + page_num = i + zend_mm_bitset_nts(tmp); + /* reset bits from 0 to "bit" */ + tmp &= tmp + 1; + /* skip free blocks */ + while (tmp == 0) { + i += ZEND_MM_BITSET_LEN; + if (i >= free_tail) { + len = ZEND_MM_PAGES - page_num; + if (len >= pages_count && len < best_len) { + chunk->free_tail = page_num + pages_count; + goto found; + } else { + /* set accurate value */ + chunk->free_tail = page_num; + if (best > 0) { + page_num = best; + goto found; + } else { + goto not_found; + } + } + } + tmp = *(bitset++); + } + /* find first 1 bit */ + len = i + zend_mm_bitset_ntz(tmp) - page_num; + if (len >= pages_count) { + if (len == pages_count) { + goto found; + } else if (len < best_len) { + best_len = len; + best = page_num; + } + } + /* set bits from 0 to "bit" */ + tmp |= tmp - 1; } } - } else { - has_context = 1; - } - if (has_context) { - do { - BOOL ret = CryptGenRandom(hCryptProv, size, buf); - CryptReleaseContext(hCryptProv, 0); - if (ret) { - while (i < size && buf[i] != 0) { - i++; + +not_found: + if (chunk->next == heap->main_chunk) { + if (heap->cached_chunks) { + heap->cached_chunks_count--; + chunk = heap->cached_chunks; + heap->cached_chunks = chunk->next; + } else { +#if ZEND_MM_LIMIT + if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) { + if (heap->overflow == 0) { +#if ZEND_DEBUG + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size); +#else + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %lu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count); +#endif + return NULL; + } } - if (i == size) { - return; +#endif + chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE); + if (UNEXPECTED(chunk == NULL)) { + /* insufficient memory */ + return NULL; } - } - } while (0); - } -#elif defined(HAVE_DEV_URANDOM) - int fd = open("/dev/urandom", 0); - - if (fd >= 0) { - if (read(fd, buf, size) == size) { - while (i < size && buf[i] != 0) { - i++; +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size += ZEND_MM_CHUNK_SIZE; +#endif +#if ZEND_MM_STAT + if (heap->real_size > heap->real_peak) { + heap->real_peak = heap->real_size; + } +#endif } - if (i == size) { - close(fd); - return; + heap->chunks_count++; + if (heap->chunks_count > heap->peak_chunks_count) { + heap->peak_chunks_count = heap->chunks_count; } + zend_mm_chunk_init(heap, chunk); + page_num = ZEND_MM_FIRST_PAGE; + len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE; + goto found; + } else { + chunk = chunk->next; } - close(fd); } -#endif - t = (unsigned char)getpid(); - while (i < size) { - do { - buf[i] = ((unsigned char)rand()) ^ t; - } while (buf[i] == 0); - t = buf[i++] << 1; - } -} -/* }}} */ -#endif -/* Notes: - * - This function may alter the block_sizes values to match platform alignment - * - This function does *not* perform sanity checks on the arguments - */ -ZEND_API 
zend_mm_heap *zend_mm_startup_ex(const zend_mm_mem_handlers *handlers, size_t block_size, size_t reserve_size, int internal, void *params) -{ - zend_mm_storage *storage; - zend_mm_heap *heap; - -#if 0 - int i; - - printf("ZEND_MM_ALIGNMENT=%d\n", ZEND_MM_ALIGNMENT); - printf("ZEND_MM_ALIGNMENT_LOG2=%d\n", ZEND_MM_ALIGNMENT_LOG2); - printf("ZEND_MM_MIN_SIZE=%d\n", ZEND_MM_MIN_SIZE); - printf("ZEND_MM_MAX_SMALL_SIZE=%d\n", ZEND_MM_MAX_SMALL_SIZE); - printf("ZEND_MM_ALIGNED_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_HEADER_SIZE); - printf("ZEND_MM_ALIGNED_FREE_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_FREE_HEADER_SIZE); - printf("ZEND_MM_MIN_ALLOC_BLOCK_SIZE=%d\n", ZEND_MM_MIN_ALLOC_BLOCK_SIZE); - printf("ZEND_MM_ALIGNED_MIN_HEADER_SIZE=%d\n", ZEND_MM_ALIGNED_MIN_HEADER_SIZE); - printf("ZEND_MM_ALIGNED_SEGMENT_SIZE=%d\n", ZEND_MM_ALIGNED_SEGMENT_SIZE); - for (i = 0; i < ZEND_MM_MAX_SMALL_SIZE; i++) { - printf("%3d%c: %3ld %d %2ld\n", i, (i == ZEND_MM_MIN_SIZE?'*':' '), (long)ZEND_MM_TRUE_SIZE(i), ZEND_MM_SMALL_SIZE(ZEND_MM_TRUE_SIZE(i)), (long)ZEND_MM_BUCKET_INDEX(ZEND_MM_TRUE_SIZE(i))); +found: + /* mark run as allocated */ + chunk->free_pages -= pages_count; + zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count); + chunk->map[page_num] = ZEND_MM_LRUN(pages_count); + if (page_num == chunk->free_tail) { + chunk->free_tail = page_num + pages_count; } - exit(0); -#endif + return ZEND_MM_PAGE_ADDR(chunk, page_num); +} -#if ZEND_MM_HEAP_PROTECTION - if (_mem_block_start_magic == 0) { - zend_mm_random((unsigned char*)&_mem_block_start_magic, sizeof(_mem_block_start_magic)); - } - if (_mem_block_end_magic == 0) { - zend_mm_random((unsigned char*)&_mem_block_end_magic, sizeof(_mem_block_end_magic)); - } +static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + int pages_count = ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE); +#if ZEND_DEBUG + void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#else + void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #endif -#if ZEND_MM_COOKIES - if (_zend_mm_cookie == 0) { - zend_mm_random((unsigned char*)&_zend_mm_cookie, sizeof(_zend_mm_cookie)); +#if ZEND_MM_STAT + heap->size += pages_count * ZEND_MM_PAGE_SIZE; + if (UNEXPECTED(heap->size > heap->peak)) { + heap->peak = heap->size; } #endif + return ptr; +} - if (zend_mm_low_bit(block_size) != zend_mm_high_bit(block_size)) { - fprintf(stderr, "'block_size' must be a power of two\n"); -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); -#endif - exit(255); - } - storage = handlers->init(params); - if (!storage) { - fprintf(stderr, "Cannot initialize zend_mm storage [%s]\n", handlers->name); -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); +static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count) +{ + chunk->free_pages += pages_count; + if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) { + /* delete chunk */ + chunk->next->prev = chunk->prev; + chunk->prev->next = chunk->next; + heap->chunks_count--; + if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) { + /* delay deletion */ + heap->cached_chunks_count++; + chunk->next = heap->cached_chunks; + heap->cached_chunks = chunk; + } else { +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size -= ZEND_MM_CHUNK_SIZE; #endif - 
exit(255); + if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) { + zend_mm_munmap(chunk, ZEND_MM_CHUNK_SIZE); + } else { +//TODO: select the best chunk to delete??? + chunk->next = heap->cached_chunks->next; + zend_mm_munmap(heap->cached_chunks, ZEND_MM_CHUNK_SIZE); + heap->cached_chunks = chunk; + } + } + } else { + zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count); + chunk->map[page_num] = 0; + if (chunk->free_tail == page_num + pages_count) { + /* this setting may be not accurate */ + chunk->free_tail = page_num; + } } - storage->handlers = handlers; +} - heap = malloc(sizeof(struct _zend_mm_heap)); - if (heap == NULL) { - fprintf(stderr, "Cannot allocate heap for zend_mm storage [%s]\n", handlers->name); -#ifdef PHP_WIN32 - fflush(stderr); +static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count) +{ +#if ZEND_MM_STAT + heap->size -= pages_count * ZEND_MM_PAGE_SIZE; #endif - exit(255); - } - heap->storage = storage; - heap->block_size = block_size; - heap->compact_size = 0; - heap->segments_list = NULL; - zend_mm_init(heap); -# if ZEND_MM_CACHE_STAT - memset(heap->cache_stat, 0, sizeof(heap->cache_stat)); -# endif + zend_mm_free_pages(heap, chunk, page_num, pages_count); +} - heap->use_zend_alloc = 1; - heap->real_size = 0; - heap->overflow = 0; - heap->real_peak = 0; - heap->limit = ZEND_MM_LONG_CONST(1)<<(ZEND_MM_NUM_BUCKETS-2); - heap->size = 0; - heap->peak = 0; - heap->internal = internal; - heap->reserve = NULL; - heap->reserve_size = reserve_size; - if (reserve_size > 0) { - heap->reserve = _zend_mm_alloc_int(heap, reserve_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC); - } - if (internal) { - int i; - zend_mm_free_block *p, *q, *orig; - zend_mm_heap *mm_heap = _zend_mm_alloc_int(heap, sizeof(zend_mm_heap) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC); - - *mm_heap = *heap; - - p = ZEND_MM_SMALL_FREE_BUCKET(mm_heap, 0); - orig = ZEND_MM_SMALL_FREE_BUCKET(heap, 0); - for (i = 0; i < ZEND_MM_NUM_BUCKETS; i++) { - q = p; - while (q->prev_free_block != orig) { - q = q->prev_free_block; - } - q->prev_free_block = p; - q = p; - while (q->next_free_block != orig) { - q = q->next_free_block; - } - q->next_free_block = p; - p = (zend_mm_free_block*)((char*)p + sizeof(zend_mm_free_block*) * 2); - orig = (zend_mm_free_block*)((char*)orig + sizeof(zend_mm_free_block*) * 2); - if (mm_heap->large_free_buckets[i]) { - mm_heap->large_free_buckets[i]->parent = &mm_heap->large_free_buckets[i]; +/**************/ +/* Small Runs */ +/**************/ + +/* separating the following function allows saving 2 instructions on x86 + * on the fast patch - register save/restore in prologue/epilogue + */ +static zend_never_inline void zend_mm_small_run_got_free(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin) +{ + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + int page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; + zend_mm_bin *cached_bin = heap->cache[bin_num]; + + /* mark this run as available for allocation of small blocks */ + zend_mm_bitset_set_bit(chunk->small_map[bin_num], page_num); + + /* put this run into cache if it's lower then the currently cached one */ + if (EXPECTED(cached_bin)) { + zend_mm_chunk *cached_chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(cached_bin, ZEND_MM_CHUNK_SIZE); + if (EXPECTED(chunk == cached_chunk)) { + if (EXPECTED(bin < cached_bin)) { + goto cache_this_bin; } + } else if (EXPECTED(chunk->num < 
cached_chunk->num)) { + goto cache_this_bin; } - mm_heap->rest_buckets[0] = mm_heap->rest_buckets[1] = ZEND_MM_REST_BUCKET(mm_heap); - mm_heap->rest_count = 0; - - free(heap); - heap = mm_heap; + } else { +cache_this_bin: + heap->cache[bin_num] = bin; } - return heap; } -ZEND_API zend_mm_heap *zend_mm_startup(void) +static zend_always_inline void zend_mm_small_run_became_full(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin) { - int i; - size_t seg_size; - char *mem_type = getenv("ZEND_MM_MEM_TYPE"); - char *tmp; - const zend_mm_mem_handlers *handlers; - zend_mm_heap *heap; + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + int page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; - if (mem_type == NULL) { - i = 0; - } else { - for (i = 0; mem_handlers[i].name; i++) { - if (strcmp(mem_handlers[i].name, mem_type) == 0) { - break; - } - } - if (!mem_handlers[i].name) { - fprintf(stderr, "Wrong or unsupported zend_mm storage type '%s'\n", mem_type); - fprintf(stderr, " supported types:\n"); -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); -#endif - for (i = 0; mem_handlers[i].name; i++) { - fprintf(stderr, " '%s'\n", mem_handlers[i].name); - } -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); -#endif - exit(255); - } - } - handlers = &mem_handlers[i]; + zend_mm_bitset_reset_bit(chunk->small_map[bin_num], page_num); + heap->cache[bin_num] = NULL; +} - tmp = getenv("ZEND_MM_SEG_SIZE"); - if (tmp) { - seg_size = zend_atoi(tmp, 0); - if (zend_mm_low_bit(seg_size) != zend_mm_high_bit(seg_size)) { - fprintf(stderr, "ZEND_MM_SEG_SIZE must be a power of two\n"); -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); -#endif - exit(255); - } else if (seg_size < ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE) { - fprintf(stderr, "ZEND_MM_SEG_SIZE is too small\n"); -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); +static zend_always_inline void *zend_mm_alloc_small_list_init(zend_mm_chunk *chunk, int bin_num, int page_num, zend_mm_bin *bin) +{ + int i, n; + zend_mm_free_list *p; + + chunk->map[page_num] = ZEND_MM_SRUN(bin_num); + + /* create a linked list of elements from 1 to last */ + i = bin_data_offset[bin_num] + bin_data_size[bin_num]; + n = bin_data_offset[bin_num] + (bin_elements[bin_num] - 1) * bin_data_size[bin_num]; + bin->next_free = i; + p = (zend_mm_free_list*)((char*)bin + i); + do { + i += bin_data_size[bin_num]; + p->next_free = i; +#if ZEND_DEBUG + do { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + dbg->size = 0; + } while (0); #endif - exit(255); - } - } else { - seg_size = ZEND_MM_SEG_SIZE; - } + p = (zend_mm_free_list*)((char*)p + bin_data_size[bin_num]); + } while (i != n); - heap = zend_mm_startup_ex(handlers, seg_size, ZEND_MM_RESERVE_SIZE, 0, NULL); - if (heap) { - tmp = getenv("ZEND_MM_COMPACT"); - if (tmp) { - heap->compact_size = zend_atoi(tmp, 0); - } else { - heap->compact_size = 2 * 1024 * 1024; - } - } - return heap; + p->next_free = ZEND_MM_FREE_LIST_END; + bin->num_used = 1; + + /* return first element */ + return (char*)bin + bin_data_offset[bin_num]; } -#if ZEND_DEBUG -static long zend_mm_find_leaks(zend_mm_segment *segment, zend_mm_block *b) +static zend_always_inline void *zend_mm_alloc_small_list(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin) { - long 
leaks = 0; - zend_mm_block *p, *q; + void *ptr = (char*)bin + bin->next_free; - p = ZEND_MM_NEXT_BLOCK(b); - while (1) { - if (ZEND_MM_IS_GUARD_BLOCK(p)) { - ZEND_MM_CHECK_MAGIC(p, MEM_BLOCK_GUARD); - segment = segment->next_segment; - if (!segment) { - break; - } - p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - continue; - } - q = ZEND_MM_NEXT_BLOCK(p); - if (q <= p || - (char*)q > (char*)segment + segment->size || - p->info._size != q->info._prev) { - zend_mm_panic("zend_mm_heap corrupted"); - } - if (!ZEND_MM_IS_FREE_BLOCK(p)) { - if (p->magic == MEM_BLOCK_VALID) { - if (p->debug.filename==b->debug.filename && p->debug.lineno==b->debug.lineno) { - ZEND_MM_SET_MAGIC(p, MEM_BLOCK_LEAK); - leaks++; - } -#if ZEND_MM_CACHE - } else if (p->magic == MEM_BLOCK_CACHED) { - /* skip it */ -#endif - } else if (p->magic != MEM_BLOCK_LEAK) { - zend_mm_panic("zend_mm_heap corrupted"); - } - } - p = q; + bin->next_free = ((zend_mm_free_list*)ptr)->next_free; + bin->num_used++; + if (UNEXPECTED(bin->next_free == ZEND_MM_FREE_LIST_END)) { + /* run became full */ + zend_mm_small_run_became_full(heap, bin_num, bin); } - return leaks; + return ptr; } -static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC) +static zend_always_inline int zend_mm_free_small_list(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin, void *ptr) { - zend_mm_segment *segment = heap->segments_list; - zend_mm_block *p, *q; - zend_uint total = 0; + int old_next_free = bin->next_free; - if (!segment) { - return; - } - p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - while (1) { - q = ZEND_MM_NEXT_BLOCK(p); - if (q <= p || - (char*)q > (char*)segment + segment->size || - p->info._size != q->info._prev) { - zend_mm_panic("zend_mm_heap corrupted"); - } - if (!ZEND_MM_IS_FREE_BLOCK(p)) { - if (p->magic == MEM_BLOCK_VALID) { - long repeated; - zend_leak_info leak; - - ZEND_MM_SET_MAGIC(p, MEM_BLOCK_LEAK); - - leak.addr = ZEND_MM_DATA_OF(p); - leak.size = p->debug.size; - leak.filename = p->debug.filename; - leak.lineno = p->debug.lineno; - leak.orig_filename = p->debug.orig_filename; - leak.orig_lineno = p->debug.orig_lineno; - - zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); - zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC); - repeated = zend_mm_find_leaks(segment, p); - total += 1 + repeated; - if (repeated) { - zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC); - } -#if ZEND_MM_CACHE - } else if (p->magic == MEM_BLOCK_CACHED) { - /* skip it */ +#if ZEND_DEBUG + do { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + dbg->size = 0; + } while (0); #endif - } else if (p->magic != MEM_BLOCK_LEAK) { - zend_mm_panic("zend_mm_heap corrupted"); - } - } - if (ZEND_MM_IS_GUARD_BLOCK(q)) { - segment = segment->next_segment; - if (!segment) { - break; - } - q = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - } - p = q; + + ((zend_mm_free_list*)ptr)->next_free = old_next_free; + bin->next_free = (char*)ptr - (char*)bin; + + bin->num_used--; + if (UNEXPECTED(bin->num_used == 0)) { + /* run became free */ + return 1; } - if (total) { - zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total TSRMLS_CC); + if (UNEXPECTED(old_next_free == ZEND_MM_FREE_LIST_END)) { + /* run was completely full */ + zend_mm_small_run_got_free(heap, bin_num, bin); } + return 0; } -static int 
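/*
 * zend_mm_alloc_small_list_init(), zend_mm_alloc_small_list() and
 * zend_mm_free_small_list() above manage each run as an intrusive free list:
 * every free element stores the byte offset of the next free element, so
 * allocation is a head pop, free is a head push, and num_used tells when the
 * run becomes full or empty again.  A self-contained sketch of the same
 * mechanism; run_t, SLOT_SIZE and LIST_END are illustrative names, not the
 * actual Zend macros, and offsets here are relative to the slots array rather
 * than to the bin header.
 */
#include <stdint.h>

#define SLOT_SIZE  32
#define SLOT_COUNT 8
#define LIST_END   ((uint32_t)-1)

typedef struct {
    uint32_t next_free;      /* offset of first free slot, or LIST_END */
    uint32_t num_used;
    char     slots[SLOT_SIZE * SLOT_COUNT];
} run_t;

static void run_init(run_t *run)
{
    uint32_t off;

    /* thread a free list through all slots */
    for (off = 0; off + SLOT_SIZE < sizeof(run->slots); off += SLOT_SIZE) {
        *(uint32_t*)(run->slots + off) = off + SLOT_SIZE;
    }
    *(uint32_t*)(run->slots + off) = LIST_END;
    run->next_free = 0;
    run->num_used = 0;
}

static void *run_alloc(run_t *run)         /* pop the head of the free list */
{
    char *p;

    if (run->next_free == LIST_END) return 0;   /* run is full */
    p = run->slots + run->next_free;
    run->next_free = *(uint32_t*)p;
    run->num_used++;
    return p;
}

static void run_free(run_t *run, void *ptr)    /* push back onto the list */
{
    *(uint32_t*)ptr = run->next_free;
    run->next_free = (uint32_t)((char*)ptr - run->slots);
    if (--run->num_used == 0) {
        /* whole run is free again; the real allocator returns its pages here */
    }
}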
zend_mm_check_ptr(zend_mm_heap *heap, void *ptr, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +static zend_always_inline void *zend_mm_alloc_small_subpage_init(zend_mm_chunk *chunk, int bin_num, int page_num, zend_mm_bin *bin) { - zend_mm_block *p; - int no_cache_notice = 0; - int had_problems = 0; - int valid_beginning = 1; + chunk->map[page_num] = ZEND_MM_SRUN_BITSET_SET(bin_num, 1); + return (void*)bin; +} - if (silent==2) { - silent = 1; - no_cache_notice = 1; - } else if (silent==3) { - silent = 0; - no_cache_notice = 1; - } - if (!silent) { - TSRMLS_FETCH(); - - zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); - zend_debug_alloc_output("---------------------------------------\n"); - zend_debug_alloc_output("%s(%d) : Block "PTR_FMT" status:\n" ZEND_FILE_LINE_RELAY_CC, ptr); - if (__zend_orig_filename) { - zend_debug_alloc_output("%s(%d) : Actual location (location was relayed)\n" ZEND_FILE_LINE_ORIG_RELAY_CC); - } - if (!ptr) { - zend_debug_alloc_output("NULL\n"); - zend_debug_alloc_output("---------------------------------------\n"); - return 0; - } - } +static zend_always_inline void *zend_mm_alloc_small_subpage(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin) +{ + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + int page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; + zend_mm_page_info info = chunk->map[page_num]; + zend_mm_page_info bitset = ZEND_MM_SRUN_BITSET(info); + int element_num = zend_mm_bitset_nts(bitset); - if (!ptr) { - if (silent) { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } + /* set the right zero bit */ + bitset = bitset | (bitset + 1); + chunk->map[page_num] = ZEND_MM_SRUN_BITSET_UPDATE(info, bitset); + if (UNEXPECTED((1 << (bin_elements[bin_num])) - 1 == bitset)) { + /* run became full */ + zend_mm_small_run_became_full(heap, bin_num, bin); } + return (char*)bin + element_num * bin_data_size[bin_num]; +} - p = ZEND_MM_HEADER_OF(ptr); +static zend_always_inline int zend_mm_free_small_subpage(zend_mm_heap *heap, int bin_num, zend_mm_bin *bin, int element_num) +{ + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + int page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; -#ifdef ZTS - if (ZEND_MM_BAD_THREAD_ID(p)) { - if (!silent) { - zend_debug_alloc_output("Invalid pointer: ((thread_id=0x%0.8X) != (expected=0x%0.8X))\n", (long)p->thread_id, (long)tsrm_thread_id()); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - } -#endif - - if (p->info._size != ZEND_MM_NEXT_BLOCK(p)->info._prev) { - if (!silent) { - zend_debug_alloc_output("Invalid pointer: ((size="PTR_FMT") != (next.prev="PTR_FMT"))\n", p->info._size, ZEND_MM_NEXT_BLOCK(p)->info._prev); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - } - if (p->info._prev != ZEND_MM_GUARD_BLOCK && - ZEND_MM_PREV_BLOCK(p)->info._size != p->info._prev) { - if (!silent) { - zend_debug_alloc_output("Invalid pointer: ((prev="PTR_FMT") != (prev.size="PTR_FMT"))\n", p->info._prev, ZEND_MM_PREV_BLOCK(p)->info._size); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } + if (UNEXPECTED((1 << bin_elements[bin_num]) - 1 == ZEND_MM_SRUN_BITSET(chunk->map[page_num]))) { + /* run was completely 
full */ + zend_mm_small_run_got_free(heap, bin_num, bin); } + chunk->map[page_num] = ZEND_MM_SRUN_BITSET_EXCL(chunk->map[page_num], element_num); + return (ZEND_MM_SRUN_BITSET(chunk->map[page_num]) == 0); +} - if (had_problems) { - zend_debug_alloc_output("---------------------------------------\n"); - return 0; +static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + zend_mm_chunk *chunk; + int page_num; + zend_mm_bin *bin; + +#if ZEND_DEBUG + bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#else + bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#endif + if (UNEXPECTED(bin == NULL)) { + /* insufficient memory */ + return NULL; } - if (!silent) { - zend_debug_alloc_output("%10s\t","Beginning: "); + heap->cache[bin_num] = bin; + chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; + zend_mm_bitset_set_bit(chunk->small_map[bin_num], page_num); + if (bin_pages[bin_num] > 1) { + int i = 1; + do { + chunk->map[page_num+i] = ZEND_MM_NRUN(i); + i++; + } while (i < bin_pages[bin_num]); } - - if (!ZEND_MM_IS_USED_BLOCK(p)) { - if (!silent) { - if (p->magic != MEM_BLOCK_FREED) { - zend_debug_alloc_output("Freed (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_FREED); - } else { - zend_debug_alloc_output("Freed\n"); - } - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - } else if (ZEND_MM_IS_GUARD_BLOCK(p)) { - if (!silent) { - if (p->magic != MEM_BLOCK_FREED) { - zend_debug_alloc_output("Guard (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_FREED); - } else { - zend_debug_alloc_output("Guard\n"); - } - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } + if (UNEXPECTED(bin_data_offset[bin_num] == 0)) { + return zend_mm_alloc_small_subpage_init(chunk, bin_num, page_num, bin); } else { - switch (p->magic) { - case MEM_BLOCK_VALID: - case MEM_BLOCK_LEAK: - if (!silent) { - zend_debug_alloc_output("OK (allocated on %s:%d, %d bytes)\n", p->debug.filename, p->debug.lineno, (int)p->debug.size); - } - break; /* ok */ - case MEM_BLOCK_CACHED: - if (!no_cache_notice) { - if (!silent) { - zend_debug_alloc_output("Cached\n"); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - } - case MEM_BLOCK_FREED: - if (!silent) { - zend_debug_alloc_output("Freed (invalid)\n"); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - break; - case MEM_BLOCK_GUARD: - if (!silent) { - zend_debug_alloc_output("Guard (invalid)\n"); - had_problems = 1; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - break; - default: - if (!silent) { - zend_debug_alloc_output("Unknown (magic=0x%0.8X, expected=0x%0.8X)\n", p->magic, MEM_BLOCK_VALID); - had_problems = 1; - valid_beginning = 0; - } else { - return zend_mm_check_ptr(heap, ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - break; - } + return zend_mm_alloc_small_list_init(chunk, bin_num, page_num, bin); } +} -#if 
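/*
 * For the smallest size classes the run state above fits into the page-map
 * entry itself: a per-run bitset where bit i means "element i is allocated".
 * zend_mm_bitset_nts() (number of trailing set bits) finds the first free
 * element, bitset | (bitset + 1) sets that lowest clear bit, and
 * (1 << elements) - 1 is the "run is full" pattern.  A standalone sketch of
 * those bit tricks, using a portable loop instead of the Zend helpers.
 */
#include <stdint.h>

/* index of the lowest 0 bit == number of trailing 1 bits */
static int trailing_ones(uint32_t x)
{
    int n = 0;
    while (x & 1) { x >>= 1; n++; }
    return n;
}

/* allocate one element from a run of `elements` slots; returns -1 if full */
static int bitset_alloc(uint32_t *bitset, int elements)
{
    uint32_t full = (elements == 32) ? 0xffffffffu : ((1u << elements) - 1);
    int slot;

    if (*bitset == full) return -1;       /* run is full */
    slot = trailing_ones(*bitset);        /* first free element */
    *bitset |= *bitset + 1;               /* set the lowest clear bit */
    return slot;
}

/* release element `slot` again */
static void bitset_free(uint32_t *bitset, int slot)
{
    *bitset &= ~(1u << slot);
}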
ZEND_MM_HEAP_PROTECTION - if (!valid_beginning) { - if (!silent) { - zend_debug_alloc_output("%10s\t", "Start:"); - zend_debug_alloc_output("Unknown\n"); - zend_debug_alloc_output("%10s\t", "End:"); - zend_debug_alloc_output("Unknown\n"); - } - } else { - char *end_magic = ZEND_MM_END_MAGIC_PTR(p); - - if (p->debug.start_magic == _mem_block_start_magic) { - if (!silent) { - zend_debug_alloc_output("%10s\t", "Start:"); - zend_debug_alloc_output("OK\n"); - } - } else { - char *overflow_ptr, *magic_ptr=(char *) &_mem_block_start_magic; - int overflows=0; - int i; - - if (silent) { - return _mem_block_check(ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - had_problems = 1; - overflow_ptr = (char *) &p->debug.start_magic; - i = END_MAGIC_SIZE; - while (--i >= 0) { - if (overflow_ptr[i]!=magic_ptr[i]) { - overflows++; - } - } - zend_debug_alloc_output("%10s\t", "Start:"); - zend_debug_alloc_output("Overflown (magic=0x%0.8X instead of 0x%0.8X)\n", p->debug.start_magic, _mem_block_start_magic); - zend_debug_alloc_output("%10s\t",""); - if (overflows >= END_MAGIC_SIZE) { - zend_debug_alloc_output("At least %d bytes overflown\n", END_MAGIC_SIZE); - } else { - zend_debug_alloc_output("%d byte(s) overflown\n", overflows); - } - } - if (memcmp(end_magic, &_mem_block_end_magic, END_MAGIC_SIZE)==0) { - if (!silent) { - zend_debug_alloc_output("%10s\t", "End:"); - zend_debug_alloc_output("OK\n"); - } - } else { - char *overflow_ptr, *magic_ptr=(char *) &_mem_block_end_magic; - int overflows=0; - int i; - - if (silent) { - return _mem_block_check(ptr, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } - had_problems = 1; - overflow_ptr = (char *) end_magic; +static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + int bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size); + zend_mm_bin *bin; - for (i=0; i < END_MAGIC_SIZE; i++) { - if (overflow_ptr[i]!=magic_ptr[i]) { - overflows++; - } - } +#if ZEND_MM_STAT + heap->size += bin_data_size[bin_num]; + if (UNEXPECTED(heap->size > heap->peak)) { + heap->peak = heap->size; + } +#endif + if (EXPECTED(heap->cache[bin_num])) { + bin = heap->cache[bin_num]; + } else { + zend_mm_chunk *chunk = heap->main_chunk; + int page_num; - zend_debug_alloc_output("%10s\t", "End:"); - zend_debug_alloc_output("Overflown (magic=0x%0.8X instead of 0x%0.8X)\n", *end_magic, _mem_block_end_magic); - zend_debug_alloc_output("%10s\t",""); - if (overflows >= END_MAGIC_SIZE) { - zend_debug_alloc_output("At least %d bytes overflown\n", END_MAGIC_SIZE); - } else { - zend_debug_alloc_output("%d byte(s) overflown\n", overflows); + do { + page_num = zend_mm_bitset_find_one(chunk->small_map[bin_num], ZEND_MM_PAGE_MAP_LEN); + if (EXPECTED(page_num > 0)) { + bin = (zend_mm_bin*)ZEND_MM_PAGE_ADDR(chunk, page_num); + heap->cache[bin_num] = bin; + goto found; } - } + chunk = chunk->next; + } while (chunk != heap->main_chunk); + return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -#endif - if (!silent) { - zend_debug_alloc_output("---------------------------------------\n"); +found: + if (UNEXPECTED(bin_data_offset[bin_num] == 0)) { + return zend_mm_alloc_small_subpage(heap, bin_num, bin); + } else { + return zend_mm_alloc_small_list(heap, bin_num, bin); } - return ((!had_problems) ? 
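/*
 * When the per-class cache is empty, zend_mm_alloc_small() above scans the
 * chunk's small_map[bin_num] bitset with zend_mm_bitset_find_one() to locate a
 * page that already holds a non-full run of the right size before allocating a
 * new run.  A portable sketch of such a "find first set bit in a bitset array"
 * helper; the real helper would use a hardware ctz/bsf instruction.
 */
#include <stdint.h>
#include <stddef.h>

typedef uintptr_t bitset_word;
#define WORD_BITS (sizeof(bitset_word) * 8)

/* returns the index of the first set bit, or -1 if the bitset is empty */
static int bitset_find_one(const bitset_word *set, int words)
{
    int i;

    for (i = 0; i < words; i++) {
        bitset_word w = set[i];
        if (w != 0) {
            int bit = 0;
            while (!(w & 1)) { w >>= 1; bit++; }
            return (int)(i * WORD_BITS + bit);
        }
    }
    return -1;
}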
1 : 0); } -static int zend_mm_check_heap(zend_mm_heap *heap, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, zend_mm_bin *bin, int bin_num) { - zend_mm_segment *segment = heap->segments_list; - zend_mm_block *p, *q; - int errors = 0; + zend_mm_bin *old_bin; - if (!segment) { - return 0; - } - p = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - while (1) { - q = ZEND_MM_NEXT_BLOCK(p); - if (q <= p || - (char*)q > (char*)segment + segment->size || - p->info._size != q->info._prev) { - zend_mm_panic("zend_mm_heap corrupted"); - } - if (!ZEND_MM_IS_FREE_BLOCK(p)) { - if (p->magic == MEM_BLOCK_VALID || p->magic == MEM_BLOCK_LEAK) { - if (!zend_mm_check_ptr(heap, ZEND_MM_DATA_OF(p), (silent?2:3) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC)) { - errors++; - } -#if ZEND_MM_CACHE - } else if (p->magic == MEM_BLOCK_CACHED) { - /* skip it */ +#if ZEND_MM_STAT + heap->size -= bin_data_size[bin_num]; #endif - } else if (p->magic != MEM_BLOCK_LEAK) { - zend_mm_panic("zend_mm_heap corrupted"); - } + + if (UNEXPECTED(bin_data_offset[bin_num] == 0)) { + int element_num = ((char*)ptr - ((char*)bin + bin_data_offset[bin_num])) / bin_data_size[bin_num]; + if (EXPECTED(!zend_mm_free_small_subpage(heap, bin_num, bin, element_num))) { + return; } - if (ZEND_MM_IS_GUARD_BLOCK(q)) { - segment = segment->next_segment; - if (!segment) { - return errors; - } - q = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); + } else { + if (EXPECTED(!zend_mm_free_small_list(heap, bin_num, bin, ptr))) { + return; } - p = q; } -} -#endif -ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, int full_shutdown, int silent TSRMLS_DC) -{ - zend_mm_storage *storage; - zend_mm_segment *segment; - zend_mm_segment *prev; - int internal; + old_bin = heap->cache[bin_num]; + if (old_bin != bin) { + if (UNEXPECTED(!old_bin)) { + /* cache empty run, it may be deleted later by GC */ + heap->cache[bin_num] = bin; + } else { + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE); + int page_num; - if (!heap->use_zend_alloc) { - if (full_shutdown) { - free(heap); +#if 0 + if (UNEXPECTED(old_bin->num_used == 0)) { + zend_mm_chunk *old_chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(old_bin, ZEND_MM_CHUNK_SIZE); + if ((chunk == old_chunk && + ZEND_MM_ALIGNED_OFFSET(old_bin, ZEND_MM_CHUNK_SIZE) > ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE)) || + old_chunk->num > chunk->num) { + heap->cache[bin_num] = bin; + bin = old_bin; + chunk = old_chunk; + } + } +#endif + page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE; + zend_mm_bitset_reset_bit(chunk->small_map[bin_num], page_num); + zend_mm_free_pages(heap, chunk, page_num, bin_pages[bin_num]); } - return; } +} - if (heap->reserve) { -#if ZEND_DEBUG - if (!silent) { - _zend_mm_free_int(heap, heap->reserve ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC); - } -#endif - heap->reserve = NULL; +/******************/ +/* Specialization */ +/******************/ +#if ZEND_MM_SPEC_CT +# define _BIN_ALLOCATOR(_num, _size, _offset, _elements, _pages, x, y) \ + void* ZEND_FASTCALL zend_mm_alloc_small_ ## _size(zend_mm_heap *heap) { \ + return zend_mm_alloc_small(heap, _size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \ } -#if ZEND_MM_CACHE_STAT - if (full_shutdown) { - FILE *f; +ZEND_MM_BINS_INFO(_BIN_ALLOCATOR, x, y) +#endif - f = fopen("zend_mm.log", "w"); - if (f) { - int i,j; - size_t size, true_size, min_size, 
max_size; - int hit = 0, miss = 0; +#if ZEND_MM_SPEC_RT +# define _BIN_ALLOCATOR_I(_num, _size, _offset, _elements, _pages, x, y) \ + static zend_always_inline void *zend_mm_alloc_small_ ## _size ## _i(zend_mm_heap *heap) { \ + return zend_mm_alloc_small(heap, _size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \ + } - fprintf(f, "\nidx min_size max_size true_size max_len hits misses\n"); - size = 0; - while (1) { - true_size = ZEND_MM_TRUE_SIZE(size); - if (ZEND_MM_SMALL_SIZE(true_size)) { - min_size = size; - i = ZEND_MM_BUCKET_INDEX(true_size); - size++; - while (1) { - true_size = ZEND_MM_TRUE_SIZE(size); - if (ZEND_MM_SMALL_SIZE(true_size)) { - j = ZEND_MM_BUCKET_INDEX(true_size); - if (j > i) { - max_size = size-1; - break; - } - } else { - max_size = size-1; - break; - } - size++; - } - hit += heap->cache_stat[i].hit; - miss += heap->cache_stat[i].miss; - fprintf(f, "%2d %8d %8d %9d %8d %8d %8d\n", i, (int)min_size, (int)max_size, ZEND_MM_TRUE_SIZE(max_size), heap->cache_stat[i].max_count, heap->cache_stat[i].hit, heap->cache_stat[i].miss); - } else { - break; - } - } - fprintf(f, " %8d %8d\n", hit, miss); - fprintf(f, " %8d %8d\n", heap->cache_stat[ZEND_MM_NUM_BUCKETS].hit, heap->cache_stat[ZEND_MM_NUM_BUCKETS].miss); - fclose(f); - } +ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_I, x, y) + +# define _BIN_DEALLOCATOR_I(_num, _size, _offset, _elements, _pages, x, y) \ + static zend_always_inline void zend_mm_free_small_ ## _size ## _i(zend_mm_heap *heap, void *ptr, zend_mm_bin *bin) { \ + zend_mm_free_small(heap, ptr, bin, _num); \ } + +ZEND_MM_BINS_INFO(_BIN_DEALLOCATOR_I, x, y) #endif +/********/ +/* Heap */ +/********/ + #if ZEND_DEBUG - if (!silent) { - zend_mm_check_leaks(heap TSRMLS_CC); - } -#endif +static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr) +{ + size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); + zend_mm_chunk *chunk; + int page_num; + zend_mm_page_info info; - internal = heap->internal; - storage = heap->storage; - segment = heap->segments_list; - if (full_shutdown) { - while (segment) { - prev = segment; - segment = segment->next_segment; - ZEND_MM_STORAGE_FREE(prev); - } - heap->segments_list = NULL; - storage->handlers->dtor(storage); - if (!internal) { - free(heap); - } - } else { - if (segment) { -#ifndef ZEND_WIN32 - if (heap->reserve_size) { - while (segment->next_segment) { - prev = segment; - segment = segment->next_segment; - ZEND_MM_STORAGE_FREE(prev); - } - heap->segments_list = segment; - } else { -#endif - do { - prev = segment; - segment = segment->next_segment; - ZEND_MM_STORAGE_FREE(prev); - } while (segment); - heap->segments_list = NULL; -#ifndef ZEND_WIN32 - } -#endif - } - if (heap->compact_size && - heap->real_peak > heap->compact_size) { - storage->handlers->compact(storage); - } - zend_mm_init(heap); - if (heap->segments_list) { - heap->real_size = heap->segments_list->size; - heap->real_peak = heap->segments_list->size; - } else { - heap->real_size = 0; - heap->real_peak = 0; - } - heap->size = 0; - heap->peak = 0; - if (heap->segments_list) { - /* mark segment as a free block */ - zend_mm_free_block *b = (zend_mm_free_block*)((char*)heap->segments_list + ZEND_MM_ALIGNED_SEGMENT_SIZE); - size_t block_size = heap->segments_list->size - ZEND_MM_ALIGNED_SEGMENT_SIZE - ZEND_MM_ALIGNED_HEADER_SIZE; - - ZEND_MM_MARK_FIRST_BLOCK(b); - ZEND_MM_LAST_BLOCK(ZEND_MM_BLOCK_AT(b, block_size)); - ZEND_MM_BLOCK(b, ZEND_MM_FREE_BLOCK, block_size); - zend_mm_add_to_free_list(heap, b); - } - if 
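/*
 * The _BIN_ALLOCATOR* definitions above rely on an X-macro: ZEND_MM_BINS_INFO(m, x, y)
 * expands m once per size class, so a whole family of zend_mm_alloc_small_<size>()
 * helpers is generated from one template and the size becomes a compile-time
 * constant inside each of them.  A toy standalone example of the pattern; the
 * BINS list and the alloc_* names here are invented for illustration only.
 */
#include <stdio.h>

/* one entry per size class: (index, size) */
#define BINS(X) \
    X(0,  8)    \
    X(1, 16)    \
    X(2, 32)

/* generate a specialized function per entry */
#define DEFINE_ALLOC(num, size) \
    static void *alloc_##size(void) { \
        printf("allocating a %d byte block from bin %d\n", size, num); \
        return 0; \
    }
BINS(DEFINE_ALLOC)

/* generate a dispatch switch from the same list */
static void *alloc_dispatch(int bin)
{
#define DISPATCH_CASE(num, size) case num: return alloc_##size();
    switch (bin) {
        BINS(DISPATCH_CASE)
        default: return 0;
    }
#undef DISPATCH_CASE
}

int main(void)
{
    alloc_dispatch(1);   /* calls the generated alloc_16() */
    return 0;
}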
(heap->reserve_size) { - heap->reserve = _zend_mm_alloc_int(heap, heap->reserve_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC); + ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted"); + chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); + page_num = page_offset / ZEND_MM_PAGE_SIZE; + info = chunk->map[page_num]; + ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted"); + ZEND_MM_CHECK_THREAD_ID(chunk); + if (EXPECTED(info & ZEND_MM_IS_SRUN)) { + int bin_num; + + if (UNEXPECTED(info & ZEND_MM_IS_LRUN)) { + /* this is non-first page of a small run */ + page_num -= ZEND_MM_NRUN_PAGES(info); + info = chunk->map[page_num]; } - heap->overflow = 0; + bin_num = ZEND_MM_SRUN_BIN_NUM(info); + return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + } else /* if (info & ZEND_MM_IS_LRUN) */ { + int pages_count = ZEND_MM_LRUN_PAGES(info); + + return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); } } - -static void zend_mm_safe_error(zend_mm_heap *heap, - const char *format, - size_t limit, -#if ZEND_DEBUG - const char *filename, - uint lineno, #endif - size_t size) + +static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - if (heap->reserve) { - _zend_mm_free_int(heap, heap->reserve ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC); - heap->reserve = NULL; - } - if (heap->overflow == 0) { - const char *error_filename; - uint error_lineno; - TSRMLS_FETCH(); - if (zend_is_compiling(TSRMLS_C)) { - zend_string *str = zend_get_compiled_filename(TSRMLS_C); - error_filename = str ? str->val : NULL; - error_lineno = zend_get_compiled_lineno(TSRMLS_C); - } else if (EG(current_execute_data)) { - zend_execute_data *ex = EG(current_execute_data); - - while (ex && (!ex->func || !ZEND_USER_CODE(ex->func->type))) { - ex = ex->prev_execute_data; - } - if (ex) { - error_filename = ex->func->op_array.filename->val; - error_lineno = ex->opline->lineno; - } else { - error_filename = NULL; - error_lineno = 0; - } - } else { - error_filename = NULL; - error_lineno = 0; - } - if (!error_filename) { - error_filename = "Unknown"; - } - heap->overflow = 1; - zend_try { - zend_error_noreturn(E_ERROR, - format, - limit, + void *ptr; #if ZEND_DEBUG - filename, - lineno, -#endif - size); - } zend_catch { - if (heap->overflow == 2) { - fprintf(stderr, "\nFatal error: "); - fprintf(stderr, - format, - limit, + size_t real_size = size; + zend_mm_debug_info *dbg; + + size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)); +#endif + if (size <= ZEND_MM_MAX_SMALL_SIZE) { +#if ZEND_MM_SPEC_RT +# if defined(__GNUC__) && __GNUC__ >= 3 && 0 +# define _BIN_ALLOCATOR_CASE(_num, _size, _offset, _elements, _pages, heap, ptr) \ + label_ ## _size: return zend_mm_alloc_small_ ## _size ## _i(heap); +# define _BIN_ALLOCATOR_LABEL(_num, _size, _offset, _elements, _pages, heap, ptr) \ + &&label_ ## _size, + static const void **labels[] = { + ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_LABEL, x, y) + }; + goto *labels[ZEND_MM_SMALL_SIZE_TO_BIN(size)]; + ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_CASE, heap, ptr); +# else +# define _BIN_ALLOCATOR_CASE(_num, _size, _offset, _elements, _pages, heap, size) \ + case _num: return zend_mm_alloc_small_ ## _size ## _i(heap); + switch (ZEND_MM_SMALL_SIZE_TO_BIN(size)) { + ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_CASE, heap, size); + default: return NULL; + } +# endif +#else + ptr = 
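/*
 * In ZEND_DEBUG builds the allocator above over-allocates every block by
 * ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)) and keeps the requested
 * size and originating file/line in the last bytes of the block, which is
 * what zend_mm_get_debug_info() recovers from the block size.  A minimal
 * sketch of that "metadata trailer" layout; the struct and names are
 * illustrative and the alignment step is omitted.
 */
#include <stdlib.h>

typedef struct {
    size_t      size;        /* size the caller asked for */
    const char *filename;
    int         lineno;
} debug_info;

/* slot_size is the rounded-up size class the block was served from */
static debug_info *trailer_of(void *block, size_t slot_size)
{
    return (debug_info*)((char*)block + slot_size - sizeof(debug_info));
}

static void *debug_alloc(size_t size, size_t *slot_size,
                         const char *file, int line)
{
    void *block;
    debug_info *dbg;

    *slot_size = size + sizeof(debug_info);   /* the real code also aligns this */
    block = malloc(*slot_size);
    if (!block) return 0;
    dbg = trailer_of(block, *slot_size);
    dbg->size = size;
    dbg->filename = file;
    dbg->lineno = line;
    return block;
}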
zend_mm_alloc_small(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #if ZEND_DEBUG - filename, - lineno, + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; #endif - size); - fprintf(stderr, " in %s on line %d\n", error_filename, error_lineno); - } -/* See http://support.microsoft.com/kb/190351 */ -#ifdef PHP_WIN32 - fflush(stderr); + return ptr; #endif - } zend_end_try(); + } else if (size <= ZEND_MM_MAX_LARGE_SIZE) { + ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#if ZEND_DEBUG + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; +#endif + return ptr; } else { - heap->overflow = 2; +#if ZEND_DEBUG + size = real_size; +#endif + return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } - zend_bailout(); } -static zend_mm_free_block *zend_mm_search_large_block(zend_mm_heap *heap, size_t true_size) +static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - zend_mm_free_block *best_fit; - size_t index = ZEND_MM_LARGE_BUCKET_INDEX(true_size); - size_t bitmap = heap->large_free_bitmap >> index; - zend_mm_free_block *p; + size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); - if (bitmap == 0) { - return NULL; - } - - if (UNEXPECTED((bitmap & 1) != 0)) { - /* Search for best "large" free block */ - zend_mm_free_block *rst = NULL; - size_t m; - size_t best_size = -1; - - best_fit = NULL; - p = heap->large_free_buckets[index]; - for (m = true_size << (ZEND_MM_NUM_BUCKETS - index); ; m <<= 1) { - if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) { - return p->next_free_block; - } else if (ZEND_MM_FREE_BLOCK_SIZE(p) >= true_size && - ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) { - best_size = ZEND_MM_FREE_BLOCK_SIZE(p); - best_fit = p; + if (UNEXPECTED(page_offset == 0)) { + if (ptr != NULL) { + zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + } + } else { + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); + int page_num = page_offset / ZEND_MM_PAGE_SIZE; + zend_mm_page_info info = chunk->map[page_num]; + + ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted"); + ZEND_MM_CHECK_THREAD_ID(chunk); + if (EXPECTED(info & ZEND_MM_IS_SRUN)) { + if (UNEXPECTED(info & ZEND_MM_IS_LRUN)) { + /* this is non-first page of a small run */ + page_num -= ZEND_MM_NRUN_PAGES(info); + info = chunk->map[page_num]; } - if ((m & (ZEND_MM_LONG_CONST(1) << (ZEND_MM_NUM_BUCKETS-1))) == 0) { - if (p->child[1]) { - rst = p->child[1]; - } - if (p->child[0]) { - p = p->child[0]; - } else { - break; - } - } else if (p->child[1]) { - p = p->child[1]; - } else { - break; +#if ZEND_MM_SPEC_RT +# if defined(__GNUC__) && __GNUC__ >= 3 && 0 +# define _BIN_DEALLOCATOR_CASE(_num, _size, _offset, _elements, _pages, heap, ptr) \ + label_ ## _size: \ + zend_mm_free_small_ ## _size ## _i(heap, ptr, (zend_mm_bin*)ZEND_MM_PAGE_ADDR(chunk, page_num)); \ + return; +# define _BIN_DEALLOCATOR_LABEL(_num, _size, _offset, _elements, _pages, heap, ptr) \ + &&label_ ## _size, + { + static const void **labels[] = { + ZEND_MM_BINS_INFO(_BIN_DEALLOCATOR_LABEL, x, 
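/*
 * Because every chunk (and every huge mapping) is aligned to ZEND_MM_CHUNK_SIZE,
 * zend_mm_free_heap() above classifies any pointer with two mask operations:
 * an offset of zero inside the aligned window means a huge allocation, otherwise
 * the masked base is the owning chunk and offset / page size is the index into
 * its page map.  A standalone sketch of that arithmetic; the constants are
 * illustrative stand-ins for the Zend macros.
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE (2u * 1024 * 1024)   /* must be a power of two */
#define PAGE_SIZE  4096u

static void classify(void *ptr)
{
    uintptr_t addr   = (uintptr_t)ptr;
    uintptr_t offset = addr & (CHUNK_SIZE - 1);              /* ZEND_MM_ALIGNED_OFFSET */
    uintptr_t base   = addr & ~(uintptr_t)(CHUNK_SIZE - 1);  /* ZEND_MM_ALIGNED_BASE */

    if (offset == 0) {
        printf("%p: chunk-aligned => huge allocation\n", ptr);
    } else {
        printf("%p: belongs to chunk %p, page %lu\n",
               ptr, (void*)base, (unsigned long)(offset / PAGE_SIZE));
    }
}

int main(void)
{
    classify((void*)(uintptr_t)0x40000000u);                        /* aligned: huge */
    classify((void*)(uintptr_t)(0x40000000u + 5 * PAGE_SIZE + 64)); /* page 5 of a chunk */
    return 0;
}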
y) + }; + goto *labels[ZEND_MM_SRUN_BIN_NUM(info)]; + ZEND_MM_BINS_INFO(_BIN_DEALLOCATOR_CASE, heap, ptr); } - } - - for (p = rst; p; p = p->child[p->child[0] != NULL]) { - if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) { - return p->next_free_block; - } else if (ZEND_MM_FREE_BLOCK_SIZE(p) > true_size && - ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) { - best_size = ZEND_MM_FREE_BLOCK_SIZE(p); - best_fit = p; +# else +# define _BIN_DEALLOCATOR_CASE(_num, _size, _offset, _elements, _pages, heap, ptr) \ + case _num: \ + zend_mm_free_small_ ## _size ## _i(heap, ptr, (zend_mm_bin*)ZEND_MM_PAGE_ADDR(chunk, page_num)); \ + break; + switch (ZEND_MM_SRUN_BIN_NUM(info)) { + ZEND_MM_BINS_INFO(_BIN_DEALLOCATOR_CASE, heap, ptr); + default: + break; } - } - - if (best_fit) { - return best_fit->next_free_block; - } - bitmap = bitmap >> 1; - if (!bitmap) { - return NULL; - } - index++; - } +# endif +#else + zend_mm_free_small(heap, ptr, (zend_mm_bin*)ZEND_MM_PAGE_ADDR(chunk, page_num), ZEND_MM_SRUN_BIN_NUM(info)); +#endif + } else { + int pages_count = ZEND_MM_LRUN_PAGES(info); - /* Search for smallest "large" free block */ - best_fit = p = heap->large_free_buckets[index + zend_mm_low_bit(bitmap)]; - while ((p = p->child[p->child[0] != NULL])) { - if (ZEND_MM_FREE_BLOCK_SIZE(p) < ZEND_MM_FREE_BLOCK_SIZE(best_fit)) { - best_fit = p; + ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted"); + zend_mm_free_large(heap, chunk, page_num, pages_count); } } - return best_fit->next_free_block; } -static void *_zend_mm_alloc_int(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - zend_mm_free_block *best_fit; - size_t true_size = ZEND_MM_TRUE_SIZE(size); - size_t block_size; - size_t remaining_size; - size_t segment_size; - zend_mm_segment *segment; - int keep_rest = 0; -#ifdef ZEND_SIGNALS - TSRMLS_FETCH(); -#endif + size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); - HANDLE_BLOCK_INTERRUPTIONS(); - - if (EXPECTED(ZEND_MM_SMALL_SIZE(true_size))) { - size_t index = ZEND_MM_BUCKET_INDEX(true_size); - size_t bitmap; - - if (UNEXPECTED(true_size < size)) { - goto out_of_memory; - } -#if ZEND_MM_CACHE - if (EXPECTED(heap->cache[index] != NULL)) { - /* Get block from cache */ -#if ZEND_MM_CACHE_STAT - heap->cache_stat[index].count--; - heap->cache_stat[index].hit++; -#endif - best_fit = heap->cache[index]; - heap->cache[index] = best_fit->prev_free_block; - heap->cached -= true_size; - ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_CACHED); - ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 0); - HANDLE_UNBLOCK_INTERRUPTIONS(); - return ZEND_MM_DATA_OF(best_fit); - } -#if ZEND_MM_CACHE_STAT - heap->cache_stat[index].miss++; -#endif -#endif - - bitmap = heap->free_bitmap >> index; - if (bitmap) { - /* Found some "small" free block that can be used */ - index += zend_mm_low_bit(bitmap); - best_fit = heap->free_buckets[index*2]; -#if ZEND_MM_CACHE_STAT - heap->cache_stat[ZEND_MM_NUM_BUCKETS].hit++; -#endif - goto zend_mm_finished_searching_for_block; + if (UNEXPECTED(page_offset == 0)) { + return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + } else { + zend_mm_chunk *chunk; +#if 0 && ZEND_DEBUG + zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr); + return dbg->size; +#else + int page_num; + zend_mm_page_info info; + + chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, 
ZEND_MM_CHUNK_SIZE); + page_num = page_offset / ZEND_MM_PAGE_SIZE; + info = chunk->map[page_num]; + ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted"); + ZEND_MM_CHECK_THREAD_ID(chunk); + if (EXPECTED(info & ZEND_MM_IS_SRUN)) { + if (UNEXPECTED(info & ZEND_MM_IS_LRUN)) { + /* this is non-first page of a small run */ + page_num -= ZEND_MM_NRUN_PAGES(info); + info = chunk->map[page_num]; + } + return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)]; + } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ { + return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE; } +#endif } +} -#if ZEND_MM_CACHE_STAT - heap->cache_stat[ZEND_MM_NUM_BUCKETS].miss++; +static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + size_t page_offset; + size_t old_size; + size_t new_size; + void *ret; +#if ZEND_DEBUG + size_t real_size; + zend_mm_debug_info *dbg; #endif - best_fit = zend_mm_search_large_block(heap, true_size); - - if (!best_fit && heap->real_size >= heap->limit - heap->block_size) { - zend_mm_free_block *p = heap->rest_buckets[0]; - size_t best_size = -1; - - while (p != ZEND_MM_REST_BUCKET(heap)) { - if (UNEXPECTED(ZEND_MM_FREE_BLOCK_SIZE(p) == true_size)) { - best_fit = p; - goto zend_mm_finished_searching_for_block; - } else if (ZEND_MM_FREE_BLOCK_SIZE(p) > true_size && - ZEND_MM_FREE_BLOCK_SIZE(p) < best_size) { - best_size = ZEND_MM_FREE_BLOCK_SIZE(p); - best_fit = p; - } - p = p->prev_free_block; + page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); + if (UNEXPECTED(page_offset == 0)) { + if (UNEXPECTED(ptr == NULL)) { + return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } - } - - if (!best_fit) { - if (true_size > heap->block_size - (ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE)) { - /* Make sure we add a memory block which is big enough, - segment must have header "size" and trailer "guard" block */ - segment_size = true_size + ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE; - segment_size = (segment_size + (heap->block_size-1)) & ~(heap->block_size-1); - keep_rest = 1; - } else { - segment_size = heap->block_size; - } - - if (segment_size < true_size || - heap->real_size + segment_size > heap->limit) { - /* Memory limit overflow */ -#if ZEND_MM_CACHE - zend_mm_free_cache(heap); + old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#if ZEND_DEBUG + real_size = size; + size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)); #endif - HANDLE_UNBLOCK_INTERRUPTIONS(); + if (size > ZEND_MM_MAX_LARGE_SIZE) { #if ZEND_DEBUG - zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size); + size = real_size; +#endif + new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE); + if (new_size == old_size) { +#if ZEND_DEBUG + zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #else - zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %lu bytes)", heap->limit, size); + zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #endif - } - - segment = (zend_mm_segment *) ZEND_MM_STORAGE_ALLOC(segment_size); - - if (!segment) { - /* Storage manager cannot allocate memory */ -#if ZEND_MM_CACHE - zend_mm_free_cache(heap); + return ptr; +#ifndef 
_WIN32 + } else if (new_size < old_size) { + /* unmup tail */ + zend_mm_munmap((char*)ptr + new_size, old_size - new_size); +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size -= old_size - new_size; +#endif +#if ZEND_MM_STAT + heap->size -= old_size - new_size; #endif -out_of_memory: - HANDLE_UNBLOCK_INTERRUPTIONS(); #if ZEND_DEBUG - zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size); + zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #else - zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, size); + zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); #endif - return NULL; - } - - heap->real_size += segment_size; - if (heap->real_size > heap->real_peak) { - heap->real_peak = heap->real_size; + return ptr; + } else /* if (new_size > old_size) */ { +#if ZEND_MM_LIMIT + if (heap->real_size + (new_size - old_size) > heap->limit) { + if (heap->overflow == 0) { +#if ZEND_DEBUG + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size); +#else + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %lu bytes)", heap->limit, size); +#endif + return NULL; + } + } +#endif + /* try to map tail right after this block */ + if (zend_mm_mmap_fixed((char*)ptr + old_size, new_size - old_size)) { +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size += new_size - old_size; +#endif +#if ZEND_MM_STAT + heap->size += new_size - old_size; +#endif +#if ZEND_DEBUG + zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#else + zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#endif + return ptr; + } +#endif + } } + } else { + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); + int page_num = page_offset / ZEND_MM_PAGE_SIZE; + zend_mm_page_info info = chunk->map[page_num]; +#if ZEND_DEBUG + size_t real_size = size; - segment->size = segment_size; - segment->next_segment = heap->segments_list; - heap->segments_list = segment; - - best_fit = (zend_mm_free_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - ZEND_MM_MARK_FIRST_BLOCK(best_fit); - - block_size = segment_size - ZEND_MM_ALIGNED_SEGMENT_SIZE - ZEND_MM_ALIGNED_HEADER_SIZE; - - ZEND_MM_LAST_BLOCK(ZEND_MM_BLOCK_AT(best_fit, block_size)); + size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)); +#endif - } else { -zend_mm_finished_searching_for_block: - /* remove from free list */ - ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_FREED); - ZEND_MM_CHECK_COOKIE(best_fit); - ZEND_MM_CHECK_BLOCK_LINKAGE(best_fit); - zend_mm_remove_from_free_list(heap, best_fit); + ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted"); + ZEND_MM_CHECK_THREAD_ID(chunk); + if (info & ZEND_MM_IS_SRUN) { + int old_bin_num, bin_num; - block_size = ZEND_MM_FREE_BLOCK_SIZE(best_fit); + if (info & ZEND_MM_IS_LRUN) { + /* this is non-first page of a small run */ + page_num -= ZEND_MM_NRUN_PAGES(info); + info = chunk->map[page_num]; + } + old_bin_num = ZEND_MM_SRUN_BIN_NUM(info); + old_size = bin_data_size[old_bin_num]; + bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size); + if (old_bin_num == 
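/*
 * For huge blocks the realloc path above first rounds the new size up to a
 * page multiple (ZEND_MM_ALIGNED_SIZE_EX), then tries to stay in place:
 * shrinking just unmaps the tail of the existing anonymous mapping, and
 * growing attempts to map the pages immediately after it (zend_mm_mmap_fixed)
 * before giving up and copying.  A POSIX-only sketch of the shrink case, which
 * needs no extra OS support; names are illustrative and error handling is
 * trimmed.
 */
#include <sys/mman.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

/* round a request up to a whole number of pages */
static size_t page_align(size_t size)
{
    return (size + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
}

/* shrink an anonymous mapping of old_size bytes down to new_size bytes */
static void huge_shrink(void *ptr, size_t old_size, size_t new_size)
{
    old_size = page_align(old_size);
    new_size = page_align(new_size);
    if (new_size < old_size) {
        /* releasing a page-aligned tail of a mapping is always allowed */
        munmap((char*)ptr + new_size, old_size - new_size);
    }
}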
bin_num) { +#if ZEND_DEBUG + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; +#endif + return ptr; + } + } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ { + ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted"); + old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE; + if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) { + new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE); + if (new_size == old_size) { +#if ZEND_DEBUG + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; +#endif + return ptr; + } else if (new_size < old_size) { + /* free tail pages */ + int new_pages_count = new_size / ZEND_MM_PAGE_SIZE; + int rest_pages_count = (old_size - new_size) / ZEND_MM_PAGE_SIZE; + +#if ZEND_MM_STAT + heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE; +#endif + chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count); + chunk->free_pages += rest_pages_count; + zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count); +#if ZEND_DEBUG + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; +#endif + return ptr; + } else /* if (new_size > old_size) */ { + int new_pages_count = new_size / ZEND_MM_PAGE_SIZE; + int old_pages_count = old_size / ZEND_MM_PAGE_SIZE; + + /* try to allocate tail pages after this block */ + if (page_num + new_pages_count <= ZEND_MM_PAGES && + zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) { +#if ZEND_MM_STAT + heap->size += (new_size - old_size); + if (heap->size > heap->peak) { + heap->peak = heap->size; + } +#endif + chunk->free_pages -= new_pages_count - old_pages_count; + zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count); + chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count); +#if ZEND_DEBUG + dbg = zend_mm_get_debug_info(heap, ptr); + dbg->size = real_size; + dbg->filename = __zend_filename; + dbg->orig_filename = __zend_orig_filename; + dbg->lineno = __zend_lineno; + dbg->orig_lineno = __zend_orig_lineno; +#endif + return ptr; + } + } + } + } +#if ZEND_DEBUG + size = real_size; +#endif } - remaining_size = block_size - true_size; + /* Naive reallocation */ + old_size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + memcpy(ret, ptr, MIN(old_size, size)); + zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return ret; +} - if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) { - true_size = block_size; - ZEND_MM_BLOCK(best_fit, ZEND_MM_USED_BLOCK, true_size); - } else { - zend_mm_free_block *new_free_block; +/*********************/ +/* Huge Runs (again) */ +/*********************/ + +#if ZEND_DEBUG +static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#else +static void zend_mm_add_huge_block(zend_mm_heap *heap, void 
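/*
 * For page-based (large) runs the same realloc path grows in place when the
 * chunk's free_map shows that the pages directly after the run are still free:
 * it checks the range, marks it used, and rewrites the run length in
 * map[page_num].  A small sketch of the two bitmap range operations this relies
 * on (zend_mm_bitset_is_free_range / zend_mm_bitset_set_range equivalents),
 * written bit-by-bit for clarity where the real code works on whole words.
 */
#include <stdint.h>

typedef uint64_t bitset_word;
#define WORD_BITS 64

/* 1 bit per page; a set bit means "page is in use" */
static int range_is_free(const bitset_word *map, int start, int count)
{
    int i;

    for (i = start; i < start + count; i++) {
        if (map[i / WORD_BITS] & ((bitset_word)1 << (i % WORD_BITS))) {
            return 0;      /* some page in the range is already used */
        }
    }
    return 1;
}

static void range_set(bitset_word *map, int start, int count)
{
    int i;

    for (i = start; i < start + count; i++) {
        map[i / WORD_BITS] |= (bitset_word)1 << (i % WORD_BITS);
    }
}

/* grow a run of old_pages starting at page_num to new_pages, if possible */
static int grow_in_place(bitset_word *map, int total_pages,
                         int page_num, int old_pages, int new_pages)
{
    if (page_num + new_pages > total_pages ||
        !range_is_free(map, page_num + old_pages, new_pages - old_pages)) {
        return 0;          /* caller falls back to allocate + copy + free */
    }
    range_set(map, page_num + old_pages, new_pages - old_pages);
    return 1;
}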
*ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#endif +{ + zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + list->ptr = ptr; + list->size = size; + list->next = heap->huge_list; +#if ZEND_DEBUG + list->dbg.size = dbg_size; + list->dbg.filename = __zend_filename; + list->dbg.orig_filename = __zend_orig_filename; + list->dbg.lineno = __zend_lineno; + list->dbg.orig_lineno = __zend_orig_lineno; +#endif + heap->huge_list = list; +} - /* prepare new free block */ - ZEND_MM_BLOCK(best_fit, ZEND_MM_USED_BLOCK, true_size); - new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(best_fit, true_size); - ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size); +static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + zend_mm_huge_list *prev = NULL; + zend_mm_huge_list *list = heap->huge_list; + while (list != NULL) { + if (list->ptr == ptr) { + size_t size; - /* add the new free block to the free list */ - if (EXPECTED(!keep_rest)) { - zend_mm_add_to_free_list(heap, new_free_block); - } else { - zend_mm_add_to_rest_list(heap, new_free_block); + if (prev) { + prev->next = list->next; + } else { + heap->huge_list = list->next; + } + size = list->size; + zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return size; } + prev = list; + list = list->next; } + ZEND_MM_CHECK(0, "zend_mm_heap corrupted"); + return 0; +} - ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 1); - - heap->size += true_size; - if (heap->peak < heap->size) { - heap->peak = heap->size; +static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + zend_mm_huge_list *list = heap->huge_list; + while (list != NULL) { + if (list->ptr == ptr) { + return list->size; + } + list = list->next; } - - HANDLE_UNBLOCK_INTERRUPTIONS(); - - return ZEND_MM_DATA_OF(best_fit); + ZEND_MM_CHECK(0, "zend_mm_heap corrupted"); + return 0; } - -static void _zend_mm_free_int(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#if ZEND_DEBUG +static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#else +static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +#endif { - zend_mm_block *mm_block; - zend_mm_block *next_block; - size_t size; -#ifdef ZEND_SIGNALS - TSRMLS_FETCH(); + zend_mm_huge_list *list = heap->huge_list; + while (list != NULL) { + if (list->ptr == ptr) { + list->size = size; +#if ZEND_DEBUG + list->dbg.size = dbg_size; + list->dbg.filename = __zend_filename; + list->dbg.orig_filename = __zend_orig_filename; + list->dbg.lineno = __zend_lineno; + list->dbg.orig_lineno = __zend_orig_lineno; #endif - if (!ZEND_MM_VALID_PTR(p)) { - return; + return; + } + list = list->next; } +} - HANDLE_BLOCK_INTERRUPTIONS(); - - mm_block = ZEND_MM_HEADER_OF(p); - size = ZEND_MM_BLOCK_SIZE(mm_block); - ZEND_MM_CHECK_PROTECTION(mm_block); +static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE); + void *ptr; -#if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION - memset(ZEND_MM_DATA_OF(mm_block), 0x5a, mm_block->debug.size); +#if ZEND_MM_LIMIT + if (heap->real_size + new_size > heap->limit) { + if 
(heap->overflow == 0) { +#if ZEND_DEBUG + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size); +#else + zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %lu bytes)", heap->limit, size); #endif - -#if ZEND_MM_CACHE - if (EXPECTED(ZEND_MM_SMALL_SIZE(size)) && EXPECTED(heap->cached < ZEND_MM_CACHE_SIZE)) { - size_t index = ZEND_MM_BUCKET_INDEX(size); - zend_mm_free_block **cache = &heap->cache[index]; - - ((zend_mm_free_block*)mm_block)->prev_free_block = *cache; - *cache = (zend_mm_free_block*)mm_block; - heap->cached += size; - ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_CACHED); -#if ZEND_MM_CACHE_STAT - if (++heap->cache_stat[index].count > heap->cache_stat[index].max_count) { - heap->cache_stat[index].max_count = heap->cache_stat[index].count; + return NULL; } -#endif - HANDLE_UNBLOCK_INTERRUPTIONS(); - return; } #endif - - heap->size -= size; - - next_block = ZEND_MM_BLOCK_AT(mm_block, size); - if (ZEND_MM_IS_FREE_BLOCK(next_block)) { - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block); - size += ZEND_MM_FREE_BLOCK_SIZE(next_block); + ptr = zend_mm_chunk_alloc(new_size, ZEND_MM_CHUNK_SIZE); + if (UNEXPECTED(ptr == NULL)) { + /* insufficient memory */ + return NULL; } - if (ZEND_MM_PREV_BLOCK_IS_FREE(mm_block)) { - mm_block = ZEND_MM_PREV_BLOCK(mm_block); - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) mm_block); - size += ZEND_MM_FREE_BLOCK_SIZE(mm_block); +#if ZEND_DEBUG + zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#else + zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#endif +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size += new_size; +#endif +#if ZEND_MM_STAT + if (heap->real_size > heap->real_peak) { + heap->real_peak = heap->real_size; } - if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && - ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(mm_block, size))) { - zend_mm_del_segment(heap, (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE)); - } else { - ZEND_MM_BLOCK(mm_block, ZEND_MM_FREE_BLOCK, size); - zend_mm_add_to_free_list(heap, (zend_mm_free_block *) mm_block); + heap->size += new_size; + if (heap->size > heap->peak) { + heap->peak = heap->size; } - HANDLE_UNBLOCK_INTERRUPTIONS(); +#endif + return ptr; } -static void *_zend_mm_realloc_int(zend_mm_heap *heap, void *p, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - zend_mm_block *mm_block = ZEND_MM_HEADER_OF(p); - zend_mm_block *next_block; - size_t true_size; - size_t orig_size; - void *ptr; -#ifdef ZEND_SIGNALS - TSRMLS_FETCH(); + size_t size; + + ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted"); + size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + zend_mm_munmap(ptr, size); +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size -= size; #endif - if (UNEXPECTED(!p) || !ZEND_MM_VALID_PTR(p)) { - return _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - } +#if ZEND_MM_STAT + heap->size -= size; +#endif +} - HANDLE_BLOCK_INTERRUPTIONS(); +/******************/ +/* Initialization */ +/******************/ - mm_block = ZEND_MM_HEADER_OF(p); - true_size = ZEND_MM_TRUE_SIZE(size); - orig_size = 
ZEND_MM_BLOCK_SIZE(mm_block); - ZEND_MM_CHECK_PROTECTION(mm_block); +zend_mm_heap *zend_mm_init(void) +{ + zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE); + zend_mm_heap *heap; - if (UNEXPECTED(true_size < size)) { - goto out_of_memory; + if (UNEXPECTED(chunk == NULL)) { +#if ZEND_MM_ERROR + fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno)); +#endif + return NULL; } + heap = (zend_mm_heap*)ZEND_MM_PAGE_ADDR(chunk, ZEND_MM_FIRST_PAGE); + chunk->heap = heap; + chunk->next = chunk; + chunk->prev = chunk; + chunk->free_pages = ZEND_MM_PAGES - (ZEND_MM_FIRST_PAGE + 1); + chunk->free_tail = ZEND_MM_FIRST_PAGE + 1; + chunk->num = 0; +#if ZEND_DEBUG && defined(ZTS) + chunk->thread_id = tsrm_thread_id(); +#endif + chunk->free_map[0] = (1L << (ZEND_MM_FIRST_PAGE + 1)) - 1; + chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE); + chunk->map[ZEND_MM_FIRST_PAGE] = ZEND_MM_LRUN(1); + heap->main_chunk = chunk; + heap->cached_chunks = NULL; + heap->chunks_count = 1; + heap->peak_chunks_count = 1; + heap->cached_chunks_count = 0; + heap->avg_chunks_count = 1.0; +#if ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size = ZEND_MM_CHUNK_SIZE; +#endif +#if ZEND_MM_STAT + heap->real_peak = ZEND_MM_CHUNK_SIZE; + heap->size = 0; + heap->peak = 0; +#endif +#if ZEND_MM_LIMIT + heap->limit = (-1L >> 1); + heap->overflow = 0; +#endif +#if ZEND_MM_CUSTOM + heap->use_custom_heap = 0; +#endif + heap->huge_list = NULL; + return heap; +} - if (true_size <= orig_size) { - size_t remaining_size = orig_size - true_size; - - if (remaining_size >= ZEND_MM_ALIGNED_MIN_HEADER_SIZE) { - zend_mm_free_block *new_free_block; +#if ZEND_DEBUG +/******************/ +/* Leak detection */ +/******************/ - next_block = ZEND_MM_BLOCK_AT(mm_block, orig_size); - if (ZEND_MM_IS_FREE_BLOCK(next_block)) { - remaining_size += ZEND_MM_FREE_BLOCK_SIZE(next_block); - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block); - } +static long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak) +{ + int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]); + long count = 0; - /* prepare new free block */ - ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size); - new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size); + if (bin_data_offset[bin_num] == 0) { + if (ZEND_MM_SRUN_BITSET(p->map[i]) != 0) { + for (; j < bin_elements[bin_num]; j++) { + if (ZEND_MM_SRUN_BITSET(p->map[i]) & (1 << j)) { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); - ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size); + if (dbg->filename == leak->filename && + dbg->lineno == leak->lineno) { - /* add the new free block to the free list */ - zend_mm_add_to_free_list(heap, new_free_block); - heap->size += (true_size - orig_size); + count++; + p->map[i] = ZEND_MM_SRUN_BITSET_EXCL(p->map[i], j); + if (ZEND_MM_SRUN_BITSET(p->map[i]) == 0) { + break; + } + } + } + } } - ZEND_MM_SET_DEBUG_INFO(mm_block, size, 0, 0); - HANDLE_UNBLOCK_INTERRUPTIONS(); - return p; + } else { + zend_mm_bin *bin = (zend_mm_bin*)((char*)p + ZEND_MM_PAGE_SIZE * i); + + if (bin->num_used > 0) { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_offset[bin_num] + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + + while (j < bin_elements[bin_num]) { + if (dbg->size 
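/*
 * zend_mm_init() above bootstraps the allocator out of its own first chunk:
 * the chunk's own header occupies the leading page(s), the zend_mm_heap
 * structure is placed in page ZEND_MM_FIRST_PAGE, and
 * free_map[0] = (1L << (ZEND_MM_FIRST_PAGE + 1)) - 1 marks exactly those
 * leading pages as used before any request is served.  A tiny sketch of that
 * "mark the first n pages used" mask; the assertion assumes ZEND_MM_FIRST_PAGE
 * is 1, which is only an illustrative guess.
 */
#include <stdint.h>
#include <assert.h>

/* build a bitmask with the n lowest bits set: pages 0 .. n-1 are reserved */
static uint64_t reserve_first_pages(int n)
{
    assert(n > 0 && n < 64);
    return ((uint64_t)1 << n) - 1;
}

int main(void)
{
    /* page 0 = chunk header, page 1 = heap structure => first two pages used */
    assert(reserve_first_pages(2) == 0x3);
    return 0;
}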
!= 0 && + dbg->filename == leak->filename && + dbg->lineno == leak->lineno) { + + count++; + dbg->size = 0; + dbg->filename = NULL; + dbg->lineno = 0; + bin->num_used--; + if (bin->num_used == 0) { + break; + } + } + j++; + dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]); + } + } } + return count; +} -#if ZEND_MM_CACHE - if (ZEND_MM_SMALL_SIZE(true_size)) { - size_t index = ZEND_MM_BUCKET_INDEX(true_size); - - if (heap->cache[index] != NULL) { - zend_mm_free_block *best_fit; - zend_mm_free_block **cache; - -#if ZEND_MM_CACHE_STAT - heap->cache_stat[index].count--; - heap->cache_stat[index].hit++; -#endif - best_fit = heap->cache[index]; - heap->cache[index] = best_fit->prev_free_block; - ZEND_MM_CHECK_MAGIC(best_fit, MEM_BLOCK_CACHED); - ZEND_MM_SET_DEBUG_INFO(best_fit, size, 1, 0); - - ptr = ZEND_MM_DATA_OF(best_fit); - -#if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION - memcpy(ptr, p, mm_block->debug.size); -#else - memcpy(ptr, p, orig_size - ZEND_MM_ALIGNED_HEADER_SIZE); -#endif +static long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak) +{ + long count = 0; - heap->cached -= true_size - orig_size; + do { + while (i < p->free_tail) { + if (zend_mm_bitset_is_set(p->free_map, i)) { + if (p->map[i] & ZEND_MM_IS_SRUN) { + int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]); + zend_mm_find_leaks_small(p, i, 0, leak); + i += bin_pages[bin_num]; + } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ { + int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]); + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); - index = ZEND_MM_BUCKET_INDEX(orig_size); - cache = &heap->cache[index]; + if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) { + count++; + } + zend_mm_bitset_reset_range(p->free_map, i, i + pages_count); + i += pages_count; + } + } else { + i++; + } + } + p = p->next; + } while (p != heap->main_chunk); + return count; +} - ((zend_mm_free_block*)mm_block)->prev_free_block = *cache; - *cache = (zend_mm_free_block*)mm_block; - ZEND_MM_SET_MAGIC(mm_block, MEM_BLOCK_CACHED); -#if ZEND_MM_CACHE_STAT - if (++heap->cache_stat[index].count > heap->cache_stat[index].max_count) { - heap->cache_stat[index].max_count = heap->cache_stat[index].count; - } -#endif +static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC) +{ + zend_mm_huge_list *list; + zend_mm_chunk *p; + zend_leak_info leak; + long repeated = 0; + zend_uint total = 0; + int i, j; - HANDLE_UNBLOCK_INTERRUPTIONS(); - return ptr; - } - } -#endif + /* find leaked huge blocks and free them */ + list = heap->huge_list; + while (list) { + zend_mm_huge_list *q = list; - next_block = ZEND_MM_BLOCK_AT(mm_block, orig_size); + heap->huge_list = list->next; - if (ZEND_MM_IS_FREE_BLOCK(next_block)) { - ZEND_MM_CHECK_COOKIE(next_block); - ZEND_MM_CHECK_BLOCK_LINKAGE(next_block); - if (orig_size + ZEND_MM_FREE_BLOCK_SIZE(next_block) >= true_size) { - size_t block_size = orig_size + ZEND_MM_FREE_BLOCK_SIZE(next_block); - size_t remaining_size = block_size - true_size; + leak.addr = list->ptr; + leak.size = list->dbg.size; + leak.filename = list->dbg.filename; + leak.orig_filename = list->dbg.orig_filename; + leak.lineno = list->dbg.lineno; + leak.orig_lineno = list->dbg.orig_lineno; + + zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); + zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC); +//??? 
repeated = zend_mm_find_leaks(segment, p); + total += 1 + repeated; + if (repeated) { + zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC); + } + + list = list->next; + zend_mm_munmap(q->ptr, q->size); + zend_mm_free_heap(heap, q, NULL, 0, NULL, 0); + } + + /* for each chunk */ + p = heap->main_chunk; + do { + i = ZEND_MM_FIRST_PAGE; + if (p == heap->main_chunk) { + /* first block in main chunk contains the heap itself */ + i++; + } + while (i < p->free_tail) { + if (zend_mm_bitset_is_set(p->free_map, i)) { + if (p->map[i] & ZEND_MM_IS_SRUN) { + int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]); + + if (bin_data_offset[bin_num] == 0) { + if (ZEND_MM_SRUN_BITSET(p->map[i]) != 0) { + for (j = 0; j < bin_elements[bin_num]; j++) { + if (ZEND_MM_SRUN_BITSET(p->map[i]) & (1 << j)) { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + + leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j); + leak.size = dbg->size; + leak.filename = dbg->filename; + leak.orig_filename = dbg->orig_filename; + leak.lineno = dbg->lineno; + leak.orig_lineno = dbg->orig_lineno; + + zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); + zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC); + + p->map[i] = ZEND_MM_SRUN_BITSET_EXCL(p->map[i], j); + + repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) + + zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak); + total += 1 + repeated; + if (repeated) { + zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC); + } + if (ZEND_MM_SRUN_BITSET(p->map[i]) == 0) { + break; + } + } + } + } + } else { + zend_mm_bin *bin = (zend_mm_bin*)((char*)p + ZEND_MM_PAGE_SIZE * i); + + if (bin->num_used > 0) { + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_offset[bin_num] + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + + j = 0; + while (j < bin_elements[bin_num]) { + if (dbg->size != 0) { + leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_offset[bin_num] + bin_data_size[bin_num] * j); + leak.size = dbg->size; + leak.filename = dbg->filename; + leak.orig_filename = dbg->orig_filename; + leak.lineno = dbg->lineno; + leak.orig_lineno = dbg->orig_lineno; + + zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); + zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC); + + dbg->size = 0; + dbg->filename = NULL; + dbg->lineno = 0; + bin->num_used--; + + repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) + + zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak); + total += 1 + repeated; + if (repeated) { + zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC); + } + + if (bin->num_used == 0) { + break; + } + } + dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]); + j++; + } + } + } + i += bin_pages[bin_num]; + } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ { + int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]); + zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))); + + leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i); + leak.size = dbg->size; + leak.filename = dbg->filename; + leak.orig_filename = dbg->orig_filename; + leak.lineno = 
dbg->lineno; + leak.orig_lineno = dbg->orig_lineno; + + zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC); + zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC); - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block); + zend_mm_bitset_reset_range(p->free_map, i, i + pages_count); + i += pages_count; - if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) { - true_size = block_size; - ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size); - } else { - zend_mm_free_block *new_free_block; - - /* prepare new free block */ - ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size); - new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size); - ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size); - - /* add the new free block to the free list */ - if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && - ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(new_free_block, remaining_size))) { - zend_mm_add_to_rest_list(heap, new_free_block); - } else { - zend_mm_add_to_free_list(heap, new_free_block); + repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak); + total += 1 + repeated; + if (repeated) { + zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC); + } } + } else { + i++; } - ZEND_MM_SET_DEBUG_INFO(mm_block, size, 0, 0); - heap->size = heap->size + true_size - orig_size; - if (heap->peak < heap->size) { - heap->peak = heap->size; - } - HANDLE_UNBLOCK_INTERRUPTIONS(); - return p; - } else if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && - ZEND_MM_IS_GUARD_BLOCK(ZEND_MM_BLOCK_AT(next_block, ZEND_MM_FREE_BLOCK_SIZE(next_block)))) { - zend_mm_remove_from_free_list(heap, (zend_mm_free_block *) next_block); - goto realloc_segment; - } - } else if (ZEND_MM_IS_FIRST_BLOCK(mm_block) && ZEND_MM_IS_GUARD_BLOCK(next_block)) { - zend_mm_segment *segment; - zend_mm_segment *segment_copy; - size_t segment_size; - size_t block_size; - size_t remaining_size; - -realloc_segment: - /* segment size, size of block and size of guard block */ - if (true_size > heap->block_size - (ZEND_MM_ALIGNED_SEGMENT_SIZE + ZEND_MM_ALIGNED_HEADER_SIZE)) { - segment_size = true_size+ZEND_MM_ALIGNED_SEGMENT_SIZE+ZEND_MM_ALIGNED_HEADER_SIZE; - segment_size = (segment_size + (heap->block_size-1)) & ~(heap->block_size-1); - } else { - segment_size = heap->block_size; } - - segment_copy = (zend_mm_segment *) ((char *)mm_block - ZEND_MM_ALIGNED_SEGMENT_SIZE); - if (segment_size < true_size || - heap->real_size + segment_size - segment_copy->size > heap->limit) { - if (ZEND_MM_IS_FREE_BLOCK(next_block)) { - zend_mm_add_to_free_list(heap, (zend_mm_free_block *) next_block); - } -#if ZEND_MM_CACHE - zend_mm_free_cache(heap); -#endif - HANDLE_UNBLOCK_INTERRUPTIONS(); -#if ZEND_DEBUG - zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted at %s:%d (tried to allocate %ld bytes)", heap->limit, __zend_filename, __zend_lineno, size); -#else - zend_mm_safe_error(heap, "Allowed memory size of %ld bytes exhausted (tried to allocate %ld bytes)", heap->limit, size); + p = p->next; + } while (p != heap->main_chunk); + if (total) { + zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total TSRMLS_CC); + } +} #endif - return NULL; - } - segment = ZEND_MM_STORAGE_REALLOC(segment_copy, segment_size); - if (!segment) { -#if ZEND_MM_CACHE - zend_mm_free_cache(heap); +void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC) +{ + zend_mm_chunk *p; + zend_mm_huge_list *list; + +#if ZEND_MM_CUSTOM + if (heap->use_custom_heap) { + 
return; + } #endif -out_of_memory: - HANDLE_UNBLOCK_INTERRUPTIONS(); + #if ZEND_DEBUG - zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %ld bytes)", heap->real_size, __zend_filename, __zend_lineno, size); -#else - zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %ld bytes)", heap->real_size, size); + if (!silent) { + zend_mm_check_leaks(heap TSRMLS_CC); + } #endif - return NULL; - } - heap->real_size += segment_size - segment->size; - if (heap->real_size > heap->real_peak) { - heap->real_peak = heap->real_size; - } - segment->size = segment_size; - - if (segment != segment_copy) { - zend_mm_segment **seg = &heap->segments_list; - while (*seg != segment_copy) { - seg = &(*seg)->next_segment; - } - *seg = segment; - mm_block = (zend_mm_block *) ((char *) segment + ZEND_MM_ALIGNED_SEGMENT_SIZE); - ZEND_MM_MARK_FIRST_BLOCK(mm_block); - } - - block_size = segment_size - ZEND_MM_ALIGNED_SEGMENT_SIZE - ZEND_MM_ALIGNED_HEADER_SIZE; - remaining_size = block_size - true_size; - - /* setup guard block */ - ZEND_MM_LAST_BLOCK(ZEND_MM_BLOCK_AT(mm_block, block_size)); - - if (remaining_size < ZEND_MM_ALIGNED_MIN_HEADER_SIZE) { - true_size = block_size; - ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size); - } else { - zend_mm_free_block *new_free_block; - - /* prepare new free block */ - ZEND_MM_BLOCK(mm_block, ZEND_MM_USED_BLOCK, true_size); - new_free_block = (zend_mm_free_block *) ZEND_MM_BLOCK_AT(mm_block, true_size); - ZEND_MM_BLOCK(new_free_block, ZEND_MM_FREE_BLOCK, remaining_size); - - /* add the new free block to the free list */ - zend_mm_add_to_rest_list(heap, new_free_block); - } - - ZEND_MM_SET_DEBUG_INFO(mm_block, size, 1, 1); - - heap->size = heap->size + true_size - orig_size; - if (heap->peak < heap->size) { - heap->peak = heap->size; - } - - HANDLE_UNBLOCK_INTERRUPTIONS(); - return ZEND_MM_DATA_OF(mm_block); + /* free huge blocks */ + list = heap->huge_list; + while (list) { + zend_mm_huge_list *q = list; + list = list->next; + zend_mm_munmap(q->ptr, q->size); } - - ptr = _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); -#if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION - memcpy(ptr, p, mm_block->debug.size); -#else - memcpy(ptr, p, orig_size - ZEND_MM_ALIGNED_HEADER_SIZE); + + /* move all chunks except of the first one into the cache */ + p = heap->main_chunk->next; + while (p != heap->main_chunk) { + zend_mm_chunk *q = p->next; + p->next = heap->cached_chunks; + heap->cached_chunks = p; + p = q; + heap->chunks_count--; + heap->cached_chunks_count++; + } + + if (full) { + /* free all cached chunks */ + while (heap->cached_chunks) { + p = heap->cached_chunks; + heap->cached_chunks = p->next; + zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE); + } + /* free the first chunk */ + zend_mm_munmap(heap->main_chunk, ZEND_MM_CHUNK_SIZE); + } else { + /* free some cached chunks to keep average count */ + heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0; + while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count && + heap->cached_chunks) { + p = heap->cached_chunks; + heap->cached_chunks = p->next; + zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE); + heap->cached_chunks_count--; + } + /* clear cached chunks */ + p = heap->cached_chunks; + while (p != NULL) { + zend_mm_chunk *q = p->next; + memset(p, 0, sizeof(zend_mm_chunk)); + p->next = q; + p = q; + } + + /* reinitialize the first chunk and heap */ + heap->chunks_count = 1; + heap->peak_chunks_count = 1; +#if 
ZEND_MM_STAT || ZEND_MM_LIMIT + heap->real_size = ZEND_MM_CHUNK_SIZE; +#endif +#if ZEND_MM_STAT + heap->real_peak = ZEND_MM_CHUNK_SIZE; + heap->size = 0; + heap->peak = 0; #endif - _zend_mm_free_int(heap, p ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - HANDLE_UNBLOCK_INTERRUPTIONS(); - return ptr; + heap->huge_list = NULL; + memset(heap->cache, 0, sizeof(heap->cache)); + p = heap->main_chunk; + memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE); + p->heap = heap; + p->next = p; + p->prev = p; +#if ZEND_DEBUG && defined(ZTS) + p->thread_id = tsrm_thread_id(); +#endif + p->free_pages = ZEND_MM_PAGES - (ZEND_MM_FIRST_PAGE + 1); + p->free_tail = ZEND_MM_FIRST_PAGE + 1; + p->free_map[0] = (1L << (ZEND_MM_FIRST_PAGE + 1)) - 1; + p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE); + p->map[ZEND_MM_FIRST_PAGE] = ZEND_MM_LRUN(1); + } } +/**************/ +/* PUBLIC API */ +/**************/ + ZEND_API void *_zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - return _zend_mm_alloc_int(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API void _zend_mm_free(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - _zend_mm_free_int(heap, p ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API void *_zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +void *_zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - return _zend_mm_realloc_int(heap, ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return zend_mm_realloc_heap(heap, ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API size_t _zend_mm_block_size(zend_mm_heap *heap, void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API size_t _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - zend_mm_block *mm_block; + return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +} - if (!ZEND_MM_VALID_PTR(p)) { - return 0; - } - mm_block = ZEND_MM_HEADER_OF(p); - ZEND_MM_CHECK_PROTECTION(mm_block); -#if ZEND_DEBUG || ZEND_MM_HEAP_PROTECTION - return mm_block->debug.size; -#else - return ZEND_MM_BLOCK_SIZE(mm_block); +#if ZEND_MM_SPEC_CT +# define _BIN_ALLOCATOR_SELECTOR_START(_num, _size, _offset, _elements, _pages, heap, size) \ + ((size <= _size) ? zend_mm_alloc_small_ ## _size(heap) : +# define _BIN_ALLOCATOR_SELECTOR_END(_num, _size, _offset, _elements, _pages, heap, size) \ + ) + +# define ZEND_MM_ALLOCATOR(heap, size) \ + ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_SELECTOR_START, heap, size) \ + ((size <= ZEND_MM_MAX_LARGE_SIZE) ? zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC) : zend_mm_alloc_huge(heap, size)) \ + ZEND_MM_BINS_INFO(_BIN_ALLOCATOR_SELECTOR_END, heap, size) + +# define zend_mm_alloc(size) \ + (__builtin_constant_p(size) ? 
ZEND_MM_ALLOCATOR(_heap, size) : zend_mm_alloc_heap(_heap, size)) #endif -} /**********************/ /* Allocation Manager */ @@ -2427,46 +2309,152 @@ static zend_alloc_globals alloc_globals; ZEND_API int is_zend_mm(TSRMLS_D) { - return AG(mm_heap)->use_zend_alloc; +#if ZEND_MM_CUSTOM + return !AG(mm_heap)->use_custom_heap; +#else + return 1; +#endif +} + +#if !ZEND_DEBUG +#undef _emalloc + +#if ZEND_MM_CUSTOM +# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \ + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \ + return AG(mm_heap)->_malloc(size); \ + } \ + } while (0) +# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \ + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \ + AG(mm_heap)->_free(ptr); \ + return; \ + } \ + } while (0) +#else +# define ZEND_MM_CUSTOM_ALLOCATOR(size) +# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) +#endif + +# define _ZEND_BIN_ALLOCATOR(_num, _size, _offset, _elements, _pages, x, y) \ + ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \ + TSRMLS_FETCH(); \ + ZEND_MM_CUSTOM_ALLOCATOR(_size); \ + return zend_mm_alloc_small(AG(mm_heap), _size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \ + } + +ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y) + +ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +{ + TSRMLS_FETCH(); + + ZEND_MM_CUSTOM_ALLOCATOR(size); + return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +} + +ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size) +{ + TSRMLS_FETCH(); + + ZEND_MM_CUSTOM_ALLOCATOR(size); + return zend_mm_alloc_huge(AG(mm_heap), size); +} + +# define _ZEND_BIN_FREE(_num, _size, _offset, _elements, _pages, x, y) \ + ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \ + TSRMLS_FETCH(); \ + ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \ + { \ + size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \ + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \ + int page_num = page_offset / ZEND_MM_PAGE_SIZE; \ + ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \ + ZEND_MM_CHECK_THREAD_ID(chunk); \ + if (bin_pages[_num] > 1) { \ + zend_mm_page_info info = chunk->map[page_num]; \ + if (info & ZEND_MM_IS_LRUN) { \ + page_num -= ZEND_MM_NRUN_PAGES(info); \ + } \ + } \ + ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \ + ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \ + zend_mm_free_small(AG(mm_heap), ptr, (zend_mm_bin*)ZEND_MM_PAGE_ADDR(chunk, page_num), _num); \ + } \ + } + +ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y) + +ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size) +{ + TSRMLS_FETCH(); + + ZEND_MM_CUSTOM_DEALLOCATOR(ptr); + { + size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); + zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); + int page_num = page_offset / ZEND_MM_PAGE_SIZE; + int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE; + + ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted"); + ZEND_MM_CHECK_THREAD_ID(chunk); + ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN); + ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count); + zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count); + } +} + +ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size) +{ + TSRMLS_FETCH(); + + ZEND_MM_CUSTOM_DEALLOCATOR(ptr); + // TODO: use size??? 
+ zend_mm_free_huge(AG(mm_heap), ptr); } +#endif -ZEND_API void *_emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { TSRMLS_FETCH(); - if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) { +#if ZEND_MM_CUSTOM + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { return AG(mm_heap)->_malloc(size); } - return _zend_mm_alloc_int(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#endif + return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API void _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { TSRMLS_FETCH(); - if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) { +#if ZEND_MM_CUSTOM + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { AG(mm_heap)->_free(ptr); return; } - _zend_mm_free_int(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +#endif + zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API void *_erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { TSRMLS_FETCH(); - if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) { + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { return AG(mm_heap)->_realloc(ptr, size); } - return _zend_mm_realloc_int(AG(mm_heap), ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return zend_mm_realloc_heap(AG(mm_heap), ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } -ZEND_API size_t _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - if (UNEXPECTED(!AG(mm_heap)->use_zend_alloc)) { + if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { return 0; } - return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); + return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } #if defined(__GNUC__) && (defined(__native_client__) || defined(i386)) @@ -2589,28 +2577,28 @@ static inline size_t safe_address(size_t nmemb, size_t size, size_t offset) #endif -ZEND_API void *_safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { return emalloc_rel(safe_address(nmemb, size, offset)); } -ZEND_API void *_safe_malloc(size_t nmemb, size_t size, size_t offset) +ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset) { return pemalloc(safe_address(nmemb, size, offset), 1); } -ZEND_API void *_safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { return erealloc_rel(ptr, safe_address(nmemb, size, offset)); } -ZEND_API void *_safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset) +ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset) { return perealloc(ptr, safe_address(nmemb, size, offset), 1); } -ZEND_API 
void *_ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { void *p; #ifdef ZEND_SIGNALS @@ -2628,7 +2616,7 @@ ZEND_API void *_ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LI return p; } -ZEND_API char *_estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { int length; char *p; @@ -2649,7 +2637,7 @@ ZEND_API char *_estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) return p; } -ZEND_API char *_estrndup(const char *s, uint length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) +ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, uint length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { char *p; #ifdef ZEND_SIGNALS @@ -2670,7 +2658,7 @@ ZEND_API char *_estrndup(const char *s, uint length ZEND_FILE_LINE_DC ZEND_FILE_ } -ZEND_API char *zend_strndup(const char *s, uint length) +ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, uint length) { char *p; #ifdef ZEND_SIGNALS @@ -2695,31 +2683,35 @@ ZEND_API char *zend_strndup(const char *s, uint length) ZEND_API int zend_set_memory_limit(size_t memory_limit TSRMLS_DC) { - AG(mm_heap)->limit = (memory_limit >= AG(mm_heap)->block_size) ? memory_limit : AG(mm_heap)->block_size; - +#if ZEND_MM_LIMIT + AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE; +#endif return SUCCESS; } ZEND_API size_t zend_memory_usage(int real_usage TSRMLS_DC) { +#if ZEND_MM_STAT if (real_usage) { return AG(mm_heap)->real_size; } else { size_t usage = AG(mm_heap)->size; -#if ZEND_MM_CACHE - usage -= AG(mm_heap)->cached; -#endif return usage; } +#endif + return 0; } ZEND_API size_t zend_memory_peak_usage(int real_usage TSRMLS_DC) { +#if ZEND_MM_STAT if (real_usage) { return AG(mm_heap)->real_peak; } else { return AG(mm_heap)->peak; } +#endif + return 0; } ZEND_API void shutdown_memory_manager(int silent, int full_shutdown TSRMLS_DC) @@ -2729,18 +2721,20 @@ ZEND_API void shutdown_memory_manager(int silent, int full_shutdown TSRMLS_DC) static void alloc_globals_ctor(zend_alloc_globals *alloc_globals TSRMLS_DC) { +#if ZEND_MM_CUSTOM char *tmp = getenv("USE_ZEND_ALLOC"); if (tmp && !zend_atoi(tmp, 0)) { - alloc_globals->mm_heap = malloc(sizeof(struct _zend_mm_heap)); - memset(alloc_globals->mm_heap, 0, sizeof(struct _zend_mm_heap)); - alloc_globals->mm_heap->use_zend_alloc = 0; + alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap)); + memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap)); + alloc_globals->mm_heap->use_custom_heap = 1; alloc_globals->mm_heap->_malloc = malloc; alloc_globals->mm_heap->_free = free; alloc_globals->mm_heap->_realloc = realloc; - } else { - alloc_globals->mm_heap = zend_mm_startup(); + return; } +#endif + alloc_globals->mm_heap = zend_mm_init(); } #ifdef ZTS @@ -2764,13 +2758,14 @@ ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap TSRMLS_DC) zend_mm_heap *old_heap; old_heap = AG(mm_heap); - AG(mm_heap) = new_heap; - return old_heap; + AG(mm_heap) = (zend_mm_heap*)new_heap; + return (zend_mm_heap*)old_heap; } ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap) { - return heap->storage; +// return heap->storage;??? 
+ return NULL; } ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap, @@ -2778,40 +2773,44 @@ ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap, void (*_free)(void*), void* (*_realloc)(void*, size_t)) { - heap->use_zend_alloc = 0; - heap->_malloc = _malloc; - heap->_free = _free; - heap->_realloc = _realloc; +#if ZEND_MM_CUSTOM + zend_mm_heap *_heap = (zend_mm_heap*)heap; + + _heap->use_custom_heap = 1; + _heap->_malloc = _malloc; + _heap->_free = _free; + _heap->_realloc = _realloc; +#endif } #if ZEND_DEBUG ZEND_API int _mem_block_check(void *ptr, int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - TSRMLS_FETCH(); - - if (!AG(mm_heap)->use_zend_alloc) { +// TSRMLS_FETCH(); +// +// if (!AG(mm_heap)->use_zend_alloc) { return 1; - } - return zend_mm_check_ptr(AG(mm_heap), ptr, silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +// } +// return zend_mm_check_ptr(AG(mm_heap), ptr, silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); } ZEND_API void _full_mem_check(int silent ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) { - int errors; - TSRMLS_FETCH(); - - if (!AG(mm_heap)->use_zend_alloc) { - return; - } - - zend_debug_alloc_output("------------------------------------------------\n"); - zend_debug_alloc_output("Full Memory Check at %s:%d\n" ZEND_FILE_LINE_RELAY_CC); - - errors = zend_mm_check_heap(AG(mm_heap), silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); - - zend_debug_alloc_output("End of full memory check %s:%d (%d errors)\n" ZEND_FILE_LINE_RELAY_CC, errors); - zend_debug_alloc_output("------------------------------------------------\n"); +// int errors; +// TSRMLS_FETCH(); +// +// if (!AG(mm_heap)->use_zend_alloc) { +// return; +// } +// +// zend_debug_alloc_output("------------------------------------------------\n"); +// zend_debug_alloc_output("Full Memory Check at %s:%d\n" ZEND_FILE_LINE_RELAY_CC); +// +// errors = zend_mm_check_heap(AG(mm_heap), silent ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); +// +// zend_debug_alloc_output("End of full memory check %s:%d (%d errors)\n" ZEND_FILE_LINE_RELAY_CC, errors); +// zend_debug_alloc_output("------------------------------------------------\n"); } #endif diff --git a/Zend/zend_alloc.h b/Zend/zend_alloc.h index 609db22dac..931318934e 100644 --- a/Zend/zend_alloc.h +++ b/Zend/zend_alloc.h @@ -52,24 +52,98 @@ typedef struct _zend_leak_info { BEGIN_EXTERN_C() -ZEND_API char *zend_strndup(const char *s, unsigned int length) ZEND_ATTRIBUTE_MALLOC; - -ZEND_API void *_emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE(1); -ZEND_API void *_safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; -ZEND_API void *_safe_malloc(size_t nmemb, size_t size, size_t offset) ZEND_ATTRIBUTE_MALLOC; -ZEND_API void _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); -ZEND_API void *_ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE2(1,2); -ZEND_API void *_erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_ALLOC_SIZE(2); -ZEND_API void *_safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); -ZEND_API void *_safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset); -ZEND_API char *_estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; -ZEND_API 
char *_estrndup(const char *s, unsigned int length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; -ZEND_API size_t _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, unsigned int length) ZEND_ATTRIBUTE_MALLOC; + +ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE(1); +ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; +ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset) ZEND_ATTRIBUTE_MALLOC; +ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE2(1,2); +ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_ALLOC_SIZE(2); +ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); +ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset); +ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; +ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, unsigned int length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) ZEND_ATTRIBUTE_MALLOC; +ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC); + +#include "zend_alloc_sizes.h" + +/* _emalloc() & _efree() specialization */ +#if !ZEND_DEBUG && !defined(_WIN32) + +# define _ZEND_BIN_ALLOCATOR_DEF(_num, _size, _offset, _elements, _pages, x, y) \ + ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) ZEND_ATTRIBUTE_MALLOC; + +ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR_DEF, x, y) + +ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE(1); +ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size) ZEND_ATTRIBUTE_MALLOC ZEND_ATTRIBUTE_ALLOC_SIZE(1); + +# define _ZEND_BIN_ALLOCATOR_SELECTOR_START(_num, _size, _offset, _elements, _pages, size, y) \ + ((size <= _size) ? _emalloc_ ## _size() : +# define _ZEND_BIN_ALLOCATOR_SELECTOR_END(_num, _size, _offset, _elements, _pages, size, y) \ + ) + +# define ZEND_ALLOCATOR(size) \ + ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR_SELECTOR_START, size, y) \ + ((size <= ZEND_MM_MAX_LARGE_SIZE) ? _emalloc_large(size) : _emalloc_huge(size)) \ + ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR_SELECTOR_END, size, y) + +# define _emalloc(size) \ + (__builtin_constant_p(size) ? 
\ + ZEND_ALLOCATOR(size) \ + : \ + _emalloc(size) \ + ) + +# define _ZEND_BIN_DEALLOCATOR_DEF(_num, _size, _offset, _elements, _pages, x, y) \ + ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *); + +ZEND_MM_BINS_INFO(_ZEND_BIN_DEALLOCATOR_DEF, x, y) + +ZEND_API void ZEND_FASTCALL _efree_large(void *, size_t size); +ZEND_API void ZEND_FASTCALL _efree_huge(void *, size_t size); + +# define _ZEND_BIN_DEALLOCATOR_SELECTOR_START(_num, _size, _offset, _elements, _pages, ptr, size) \ + if (size <= _size) { _efree_ ## _size(ptr); } else + +# define ZEND_DEALLOCATOR(ptr, size) \ + ZEND_MM_BINS_INFO(_ZEND_BIN_DEALLOCATOR_SELECTOR_START, ptr, size) \ + if (size <= ZEND_MM_MAX_LARGE_SIZE) { _efree_large(ptr, size); } \ + else { _efree_huge(ptr, size); } + +# define efree_size(ptr, size) do { \ + if (__builtin_constant_p(size)) { \ + ZEND_DEALLOCATOR(ptr, size) \ + } else { \ + _efree(ptr); \ + } \ + } while (0) +# define efree_size_rel(ptr, size) \ + efree_size(ptr, size) + +#else + +# define efree_size(ptr, size) \ + efree(ptr) +# define efree_size_rel(ptr, size) \ + efree_rel(ptr) + +#define _emalloc_large _emalloc +#define _emalloc_huge _emalloc +#define _efree_large _efree +#define _efree_huge _efree + +#endif /* Standard wrapper macros */ #define emalloc(size) _emalloc((size) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) +#define emalloc_large(size) _emalloc_large((size) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) +#define emalloc_huge(size) _emalloc_huge((size) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) #define safe_emalloc(nmemb, size, offset) _safe_emalloc((nmemb), (size), (offset) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) #define efree(ptr) _efree((ptr) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) +#define efree_large(ptr) _efree_large((ptr) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) +#define efree_huge(ptr) _efree_huge((ptr) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) #define ecalloc(nmemb, size) _ecalloc((nmemb), (size) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) #define erealloc(ptr, size) _erealloc((ptr), (size), 0 ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) #define safe_erealloc(ptr, nmemb, size, offset) _safe_erealloc((ptr), (nmemb), (size), (offset) ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC) @@ -122,6 +196,7 @@ inline static void * __zend_realloc(void *p, size_t len) #define pemalloc(size, persistent) ((persistent)?__zend_malloc(size):emalloc(size)) #define safe_pemalloc(nmemb, size, offset, persistent) ((persistent)?_safe_malloc(nmemb, size, offset):safe_emalloc(nmemb, size, offset)) #define pefree(ptr, persistent) ((persistent)?free(ptr):efree(ptr)) +#define pefree_size(ptr, size, persistent) ((persistent)?free(ptr):efree_size(ptr, size)) #define pecalloc(nmemb, size, persistent) ((persistent)?__zend_calloc((nmemb), (size)):ecalloc((nmemb), (size))) #define perealloc(ptr, size, persistent) ((persistent)?__zend_realloc((ptr), (size)):erealloc((ptr), (size))) #define safe_perealloc(ptr, nmemb, size, offset, persistent) ((persistent)?_safe_realloc((ptr), (nmemb), (size), (offset)):safe_erealloc((ptr), (nmemb), (size), (offset))) @@ -163,13 +238,13 @@ END_EXTERN_C() (ht) = (HashTable *) emalloc(sizeof(HashTable)) #define FREE_HASHTABLE(ht) \ - efree(ht) + efree_size(ht, sizeof(HashTable)) #define ALLOC_HASHTABLE_REL(ht) \ (ht) = (HashTable *) emalloc_rel(sizeof(HashTable)) #define FREE_HASHTABLE_REL(ht) \ - efree_rel(ht) + efree_size_rel(ht, sizeof(HashTable)) /* Heap functions */ typedef struct _zend_mm_heap zend_mm_heap; diff --git a/Zend/zend_variables.c b/Zend/zend_variables.c index 
b512c45672..e586eee831 100644 --- a/Zend/zend_variables.c +++ b/Zend/zend_variables.c @@ -46,7 +46,7 @@ ZEND_API void _zval_dtor_func(zend_refcounted *p ZEND_FILE_LINE_DC) GC_TYPE(arr) = IS_NULL; zend_hash_destroy(&arr->ht); GC_REMOVE_FROM_BUFFER(arr); - efree(arr); + efree_size(arr, sizeof(zend_array)); } break; } @@ -54,7 +54,7 @@ ZEND_API void _zval_dtor_func(zend_refcounted *p ZEND_FILE_LINE_DC) zend_ast_ref *ast = (zend_ast_ref*)p; zend_ast_destroy(ast->ast); - efree(ast); + efree_size(ast, sizeof(zend_ast_ref)); break; } case IS_OBJECT: { @@ -78,7 +78,7 @@ ZEND_API void _zval_dtor_func(zend_refcounted *p ZEND_FILE_LINE_DC) zend_reference *ref = (zend_reference*)p; if (--GC_REFCOUNT(ref) == 0) { zval_ptr_dtor(&ref->val); - efree(ref); + efree_size(ref, sizeof(zend_reference)); } break; } @@ -106,7 +106,7 @@ ZEND_API void _zval_dtor_func_for_ptr(zend_refcounted *p ZEND_FILE_LINE_DC) GC_TYPE(arr) = IS_NULL; zend_hash_destroy(&arr->ht); GC_REMOVE_FROM_BUFFER(arr); - efree(arr); + efree_size(arr, sizeof(zend_array)); } break; } @@ -114,7 +114,7 @@ ZEND_API void _zval_dtor_func_for_ptr(zend_refcounted *p ZEND_FILE_LINE_DC) zend_ast_ref *ast = (zend_ast_ref*)p; zend_ast_destroy(ast->ast); - efree(ast); + efree_size(ast, sizeof(zend_ast_ref)); break; } case IS_OBJECT: { @@ -136,7 +136,7 @@ ZEND_API void _zval_dtor_func_for_ptr(zend_refcounted *p ZEND_FILE_LINE_DC) zend_reference *ref = (zend_reference*)p; zval_ptr_dtor(&ref->val); - efree(ref); + efree_size(ref, sizeof(zend_reference)); break; } default: -- 2.40.0
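
The size-class specialization added in zend_alloc.h above hinges on __builtin_constant_p(size): when a call site passes a compile-time-constant size (as the zend_variables.c hunks now do with efree_size(ref, sizeof(zend_reference)) and efree_size(arr, sizeof(zend_array))), the selector macro chain collapses into a single direct call to the matching _efree_<size>()/_emalloc_<size>() routine, and runtime sizes fall back to the generic allocator. The standalone sketch below illustrates only that dispatch pattern under stated assumptions: it compiles with GCC/Clang, and the demo_* functions, DEMO_FREE_SIZE macro, and struct demo_ref are hypothetical illustrative names, not part of the Zend API or of this patch.

/*
 * Minimal sketch of compile-time size-class dispatch in the style of the
 * efree_size()/ZEND_DEALLOCATOR() macros above. All names here are
 * illustrative only. Requires a compiler providing __builtin_constant_p.
 */
#include <stdio.h>
#include <stdlib.h>

static void demo_free_16(void *ptr)      { printf("specialized 16-byte free\n"); free(ptr); }
static void demo_free_32(void *ptr)      { printf("specialized 32-byte free\n"); free(ptr); }
static void demo_free_generic(void *ptr) { printf("generic free\n");             free(ptr); }

/* If `size` is a compile-time constant, the dead branches are folded away and
 * the call site reduces to one direct call; otherwise the generic path is used. */
#define DEMO_FREE_SIZE(ptr, size) do {                          \
        if (__builtin_constant_p(size)) {                       \
            if      ((size) <= 16) demo_free_16(ptr);           \
            else if ((size) <= 32) demo_free_32(ptr);           \
            else                   demo_free_generic(ptr);      \
        } else {                                                \
            demo_free_generic(ptr);                             \
        }                                                       \
    } while (0)

struct demo_ref { void *val; int refcount; };   /* stand-in for a small fixed-size struct */

int main(void)
{
    void  *a = malloc(sizeof(struct demo_ref));
    void  *b = malloc(64);
    size_t runtime_size = (size_t)rand() % 64 + 1;
    void  *c = malloc(runtime_size);

    DEMO_FREE_SIZE(a, sizeof(struct demo_ref)); /* constant size (16 on typical 64-bit ABIs): specialized path */
    DEMO_FREE_SIZE(b, 64);                      /* constant but larger than any small class: generic path */
    DEMO_FREE_SIZE(c, runtime_size);            /* size unknown at compile time: generic path */
    return 0;
}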