granicus.if.org Git - php/commitdiff
Reverting push to wrong repo
author     Rasmus Lerdorf <rasmus@lerdorf.com>
           Wed, 23 Oct 2019 21:34:12 +0000 (14:34 -0700)
committer  Rasmus Lerdorf <rasmus@lerdorf.com>
           Wed, 23 Oct 2019 21:34:12 +0000 (14:34 -0700)
diff --git a/Zend/zend_alloc.c b/Zend/zend_alloc.c
index a1d3ad680fa915d9fc680c112f5a82c8d55a2f27..21ccf85049645abf754dc939dc198a993210b553 100644
--- a/Zend/zend_alloc.c
+++ b/Zend/zend_alloc.c
@@ -195,11 +195,6 @@ typedef struct  _zend_mm_free_slot zend_mm_free_slot;
 typedef struct  _zend_mm_chunk     zend_mm_chunk;
 typedef struct  _zend_mm_huge_list zend_mm_huge_list;
 
-/*
- * 0 means disabled
- * 1 means huge pages
- * 2 means transparent huge pages
- */
 int zend_mm_use_huge_pages = 0;
 
 /*
@@ -234,13 +229,6 @@ int zend_mm_use_huge_pages = 0;
  *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
  */
 
-/*
- * For environments where mmap is expensive it can be
- * worthwhile to avoid mmap/munmap churn by raising
- * the minimum number of chunks in emalloc
- */
-int zend_mm_min_chunks = 0;
-
 struct _zend_mm_heap {
 #if ZEND_MM_CUSTOM
        int                use_custom_heap;
@@ -474,7 +462,7 @@ static void *zend_mm_mmap(size_t size)
        void *ptr;
 
 #ifdef MAP_HUGETLB
-       if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) {
+       if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
                if (ptr != MAP_FAILED) {
                        return ptr;
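
(For context: before the revert, only the value 1 requested explicit huge pages here, while 2 was reserved for the transparent variant further down.) A minimal standalone sketch of the try-MAP_HUGETLB-then-fall-back pattern this hunk touches; CHUNK_SIZE, use_huge_pages and chunk_mmap are illustrative stand-ins, not PHP's actual symbols:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define CHUNK_SIZE (2 * 1024 * 1024)  /* illustrative; PHP uses ZEND_MM_CHUNK_SIZE */

    static int use_huge_pages = 1;        /* stand-in for zend_mm_use_huge_pages */

    /* Try an explicit huge-page mapping first, then fall back to a normal one. */
    static void *chunk_mmap(size_t size)
    {
        void *ptr;

    #ifdef MAP_HUGETLB
        if (use_huge_pages && size == CHUNK_SIZE) {
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (ptr != MAP_FAILED) {
                return ptr;  /* got a huge-page backed mapping */
            }
            /* fall through: e.g. no huge pages reserved in the kernel pool */
        }
    #endif
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return ptr == MAP_FAILED ? NULL : ptr;
    }

    int main(void)
    {
        void *p = chunk_mmap(CHUNK_SIZE);
        printf("mapped at %p\n", p);
        if (p) munmap(p, CHUNK_SIZE);
        return 0;
    }

On Linux the explicit MAP_HUGETLB path only succeeds if huge pages have been reserved (e.g. via vm.nr_hugepages), which is why the fallback mapping is unconditional.
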
@@ -681,7 +669,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
                return NULL;
        } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
 #ifdef MADV_HUGEPAGE
-               if (zend_mm_use_huge_pages == 2) {
+               if (zend_mm_use_huge_pages) {
                        madvise(ptr, size, MADV_HUGEPAGE);
                }
 #endif
@@ -714,7 +702,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
                        zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
                }
 # ifdef MADV_HUGEPAGE
-               if (zend_mm_use_huge_pages == 2) {
+               if (zend_mm_use_huge_pages) {
                        madvise(ptr, size, MADV_HUGEPAGE);
                }
 # endif
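
Both madvise hunks follow the same shape: once a chunk is suitably aligned, a MADV_HUGEPAGE hint asks the kernel to back it with transparent huge pages. A hedged, self-contained sketch of that hint (sizes illustrative, not PHP's allocator):

    #include <stdio.h>
    #include <sys/mman.h>

    #define CHUNK_SIZE (2 * 1024 * 1024)  /* illustrative */

    int main(void)
    {
        /* An anonymous mapping the kernel may later back with huge pages. */
        void *ptr = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            return 1;
        }
    #ifdef MADV_HUGEPAGE
        /* Ask for transparent huge pages; this is a hint, not a guarantee. */
        if (madvise(ptr, CHUNK_SIZE, MADV_HUGEPAGE) != 0) {
            perror("madvise");
        }
    #endif
        munmap(ptr, CHUNK_SIZE);
        return 0;
    }
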
@@ -2282,7 +2270,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
                zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
        } else {
                /* free some cached chunks to keep average count */
-               heap->avg_chunks_count = MAX((heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks);
+               heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
                while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
                       heap->cached_chunks) {
                        p = heap->cached_chunks;
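
The replaced line also drops the zend_mm_min_chunks floor the reverted commit had introduced: the cache target is again a plain running average of the previous target and the last request's peak chunk count. A small sketch of how that average evolves, with invented peak values:

    #include <stdio.h>

    int main(void)
    {
        double avg = 0.0;
        int peaks[] = {8, 8, 4, 2, 2};  /* invented per-request peak chunk counts */

        for (int i = 0; i < 5; i++) {
            /* same update the allocator applies at each request shutdown */
            avg = (avg + (double)peaks[i]) / 2.0;
            printf("request %d: peak=%d avg=%.3f\n", i + 1, peaks[i], avg);
        }
        /* cached chunks are then freed while cached_chunks_count + 0.9 > avg */
        return 0;
    }
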
@@ -2290,7 +2278,6 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
                        zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
                        heap->cached_chunks_count--;
                }
-
                /* clear cached chunks */
                p = heap->cached_chunks;
                while (p != NULL) {
@@ -2772,16 +2759,8 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
 #endif
 
        tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
-    if (tmp) {
-               zend_mm_use_huge_pages = zend_atoi(tmp, 0);
-               if (zend_mm_use_huge_pages > 2) {
-                       zend_mm_use_huge_pages = 1;
-               }
-       }
-
-       tmp = getenv("USE_ZEND_MIN_CHUNKS");
-       if (tmp) {
-               zend_mm_min_chunks = zend_atoi(tmp, 0);
+       if (tmp && zend_atoi(tmp, 0)) {
+               zend_mm_use_huge_pages = 1;
        }
        alloc_globals->mm_heap = zend_mm_init();
 }
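
After the revert, USE_ZEND_ALLOC_HUGE_PAGES is back to a boolean switch, and the USE_ZEND_MIN_CHUNKS variable is gone entirely. A standalone sketch of the same getenv pattern, using plain atoi in place of PHP's zend_atoi:

    #include <stdio.h>
    #include <stdlib.h>

    static int use_huge_pages = 0;  /* stand-in for zend_mm_use_huge_pages */

    int main(void)
    {
        /* Any non-zero numeric value turns the feature on; 1 and 2 no longer differ. */
        const char *tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
        if (tmp && atoi(tmp)) {
            use_huge_pages = 1;
        }
        printf("huge pages %s\n", use_huge_pages ? "enabled" : "disabled");
        return 0;
    }

Running it as USE_ZEND_ALLOC_HUGE_PAGES=1 ./a.out versus plain ./a.out shows the toggle.
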