1 /*-------------------------------------------------------------------------
4 * Allocation set definitions.
6 * AllocSet is our standard implementation of the abstract MemoryContext
10 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
11 * Portions Copyright (c) 1994, Regents of the University of California
14 * src/backend/utils/mmgr/aset.c
17 * This is a new (Feb. 05, 1999) implementation of the allocation set
18 * routines. AllocSet...() does not use OrderedSet...() any more.
19 * Instead it manages allocations in a block pool by itself, combining
20 * many small allocations in a few bigger blocks. AllocSetFree() normally
21 * doesn't free() memory really. It just adds the free'd area to some
22 * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 * at once on AllocSetReset(), which happens when the memory context gets
27 * Performance improvement from Tom Lane, 8/99: for extremely large request
28 * sizes, we do want to be able to give the memory back to free() as soon
29 * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 * freelist entries that might never be usable. This is specially needed
31 * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 * the previous instances of the block were guaranteed to be wasted until
33 * AllocSetReset() under the old way.
35 * Further improvement 12/00: as the code stood, request sizes in the
36 * midrange between "small" and "large" were handled very inefficiently,
37 * because any sufficiently large free chunk would be used to satisfy a
38 * request, even if it was much larger than necessary. This led to more
39 * and more wasted space in allocated chunks over time. To fix, get rid
40 * of the midrange behavior: we now handle only "small" power-of-2-size
41 * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 * the number of freelists to change the small/large boundary.
44 *-------------------------------------------------------------------------
49 #include "utils/memdebug.h"
50 #include "utils/memutils.h"
52 /* Define this to detail debug alloc information */
53 /* #define HAVE_ALLOCINFO */
55 /*--------------------
56 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
59 * Note that all chunks in the freelists have power-of-2 sizes. This
60 * improves recyclability: we may waste some space, but the wasted space
61 * should stay pretty constant as requests are made and released.
63 * A request too large for the last freelist is handled by allocating a
64 * dedicated block from malloc(). The block still has a block header and
65 * chunk header, but when the chunk is freed we'll return the whole block
66 * to malloc(), not put it on our freelists.
68 * CAUTION: ALLOC_MINBITS must be large enough so that
69 * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 * or we may fail to align the smallest chunks adequately.
71 * 8-byte alignment is enough on all currently known machines.
73 * With the current parameters, request sizes up to 8K are treated as chunks,
74 * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
75 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
76 * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
77 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
81 #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
82 #define ALLOCSET_NUM_FREELISTS 11
83 #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
84 /* Size of largest chunk that we use a fixed size for */
85 #define ALLOC_CHUNK_FRACTION 4
86 /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
88 /*--------------------
89 * The first block allocated for an allocset has size initBlockSize.
90 * Each time we have to allocate another block, we double the block size
91 * (if possible, and without exceeding maxBlockSize), so as to reduce
92 * the bookkeeping load on malloc().
94 * Blocks allocated to hold oversize chunks do not follow this rule, however;
95 * they are just however big they need to be to hold that single chunk.
99 #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
100 #define ALLOC_CHUNKHDRSZ sizeof(struct AllocChunkData)
102 typedef struct AllocBlockData *AllocBlock; /* forward reference */
103 typedef struct AllocChunkData *AllocChunk;
107 * Aligned pointer which may be a member of an allocation set.
109 typedef void *AllocPointer;
112 * AllocSetContext is our standard implementation of MemoryContext.
114 * Note: header.isReset means there is nothing for AllocSetReset to do.
115 * This is different from the aset being physically empty (empty blocks list)
116 * because we may still have a keeper block. It's also different from the set
117 * being logically empty, because we don't attempt to detect pfree'ing the
120 typedef struct AllocSetContext
122 MemoryContextData header; /* Standard memory-context fields */
123 /* Info about storage allocated in this context: */
124 AllocBlock blocks; /* head of list of blocks in this set */
125 AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
126 /* Allocation parameters for this context: */
127 Size initBlockSize; /* initial block size */
128 Size maxBlockSize; /* maximum block size */
129 Size nextBlockSize; /* next block size to allocate */
130 Size allocChunkLimit; /* effective chunk size limit */
131 AllocBlock keeper; /* if not NULL, keep this block over resets */
134 typedef AllocSetContext *AllocSet;
138 * An AllocBlock is the unit of memory that is obtained by aset.c
139 * from malloc(). It contains one or more AllocChunks, which are
140 * the units requested by palloc() and freed by pfree(). AllocChunks
141 * cannot be returned to malloc() individually, instead they are put
142 * on freelists by pfree() and re-used by the next palloc() that has
143 * a matching request size.
145 * AllocBlockData is the header data for a block --- the usable space
146 * within the block begins at the next alignment boundary.
148 typedef struct AllocBlockData
150 AllocSet aset; /* aset that owns this block */
151 AllocBlock prev; /* prev block in aset's blocks list, if any */
152 AllocBlock next; /* next block in aset's blocks list, if any */
153 char *freeptr; /* start of free space in this block */
154 char *endptr; /* end of space in this block */
159 * The prefix of each piece of memory in an AllocBlock
/*
 * Chunk header: lives immediately before each palloc'd data area.
 * AllocPointerGetChunk/AllocChunkGetPointer (below) convert between the
 * user-visible data pointer and this header by +/- ALLOC_CHUNKHDRSZ.
 */
161 typedef struct AllocChunkData
163 /* size is always the size of the usable space in the chunk */
165 #ifdef MEMORY_CONTEXT_CHECKING
166 /* when debugging memory usage, also store actual requested size */
167 /* this is zero in a free chunk */
/* NOTE(review): padding member for 32-bit platforms with 8-byte MAXALIGN —
 * the member declarations themselves are not visible here; confirm against
 * upstream before relying on the exact layout. */
169 #if MAXIMUM_ALIGNOF > 4 && SIZEOF_VOID_P == 4
173 #endif /* MEMORY_CONTEXT_CHECKING */
175 /* aset is the owning aset if allocated, or the freelist link if free */
178 /* there must not be any padding to reach a MAXALIGN boundary here! */
182 * AllocPointerIsValid
183 * True iff pointer is valid allocation pointer.
185 #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
189 * True iff set is valid allocation set.
191 #define AllocSetIsValid(set) PointerIsValid(set)
/* Map a user data pointer back to its chunk header (header precedes data). */
193 #define AllocPointerGetChunk(ptr) \
194 ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
/* Map a chunk header to the user data pointer that follows it. */
195 #define AllocChunkGetPointer(chk) \
196 ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
199 * These functions implement the MemoryContext API for AllocSet contexts.
201 static void *AllocSetAlloc(MemoryContext context, Size size);
202 static void AllocSetFree(MemoryContext context, void *pointer);
203 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
204 static void AllocSetInit(MemoryContext context);
205 static void AllocSetReset(MemoryContext context);
206 static void AllocSetDelete(MemoryContext context);
207 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
208 static bool AllocSetIsEmpty(MemoryContext context);
209 static void AllocSetStats(MemoryContext context, int level, bool print,
210 MemoryContextCounters *totals);
212 #ifdef MEMORY_CONTEXT_CHECKING
213 static void AllocSetCheck(MemoryContext context);
217 * This is the virtual function table for AllocSet contexts.
/* NOTE(review): most initializer entries of this table are not visible in
 * this excerpt; the order must match MemoryContextMethods — confirm against
 * upstream before editing. */
219 static MemoryContextMethods AllocSetMethods = {
226 AllocSetGetChunkSpace,
229 #ifdef MEMORY_CONTEXT_CHECKING
235 * Table for AllocSetFreeIndex
/* LT16(n) expands to sixteen copies of n, so LogTable256 maps a byte value
 * to floor(log2(value)) + 1; used by AllocSetFreeIndex below. */
237 #define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
239 static const unsigned char LogTable256[256] =
241 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
242 LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
243 LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
/* Debug tracing hooks: real fprintf calls only when HAVE_ALLOCINFO is
 * defined (see top of file); otherwise they expand to nothing. */
250 #ifdef HAVE_ALLOCINFO
251 #define AllocFreeInfo(_cxt, _chunk) \
252 fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
253 (_cxt)->header.name, (_chunk), (_chunk)->size)
254 #define AllocAllocInfo(_cxt, _chunk) \
255 fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
256 (_cxt)->header.name, (_chunk), (_chunk)->size)
258 #define AllocFreeInfo(_cxt, _chunk)
259 #define AllocAllocInfo(_cxt, _chunk)
263 * AllocSetFreeIndex -
265 * Depending on the size of an allocation compute which freechunk
266 * list of the alloc set it belongs to. Caller must have verified
267 * that size <= ALLOC_CHUNK_LIMIT.
271 AllocSetFreeIndex(Size size)
/* Sizes <= the smallest chunk (1 << ALLOC_MINBITS) map to freelist 0. */
277 if (size > (1 << ALLOC_MINBITS))
/* Count 8-byte granules beyond the minimum; -1 makes exact powers of 2
 * land on their own list rather than the next one up. */
279 tsize = (size - 1) >> ALLOC_MINBITS;
282 * At this point we need to obtain log2(tsize)+1, ie, the number of
283 * not-all-zero bits at the right. We used to do this with a
284 * shift-and-count loop, but this function is enough of a hotspot to
285 * justify micro-optimization effort. The best approach seems to be
286 * to use a lookup table. Note that this code assumes that
287 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
/* NOTE(review): 't' is presumably the high byte of tsize (tsize >> 8);
 * its assignment is not visible in this excerpt — confirm upstream. */
291 idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
293 Assert(idx < ALLOCSET_NUM_FREELISTS);
308 * AllocSetContextCreate
309 * Create a new AllocSet context.
311 * parent: parent context, or NULL if top-level context
312 * name: name of context (for debugging only, need not be unique)
313 * minContextSize: minimum context size
314 * initBlockSize: initial allocation block size
315 * maxBlockSize: maximum allocation block size
317 * Notes: the name string will be copied into context-lifespan storage.
318 * Most callers should abstract the context size parameters using a macro
319 * such as ALLOCSET_DEFAULT_SIZES.
322 AllocSetContextCreate(MemoryContext parent,
/* Compile-time layout check: the 'aset' field must be the last thing in
 * AllocChunkData so the data area starts exactly at the MAXALIGN'd header
 * size. */
330 StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
331 MAXALIGN(sizeof(AllocChunkData)),
332 "padding calculation in AllocChunkData is wrong");
335 * First, validate allocation parameters. (If we're going to throw an
336 * error, we should do so before the context is created, not after.) We
337 * somewhat arbitrarily enforce a minimum 1K block size.
339 if (initBlockSize != MAXALIGN(initBlockSize) ||
340 initBlockSize < 1024)
341 elog(ERROR, "invalid initBlockSize for memory context: %zu",
343 if (maxBlockSize != MAXALIGN(maxBlockSize) ||
344 maxBlockSize < initBlockSize ||
345 !AllocHugeSizeIsValid(maxBlockSize)) /* must be safe to double */
346 elog(ERROR, "invalid maxBlockSize for memory context: %zu",
348 if (minContextSize != 0 &&
349 (minContextSize != MAXALIGN(minContextSize) ||
350 minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
351 elog(ERROR, "invalid minContextSize for memory context: %zu",
354 /* Do the type-independent part of context creation */
355 set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
356 sizeof(AllocSetContext),
361 /* Save allocation parameters */
362 set->initBlockSize = initBlockSize;
363 set->maxBlockSize = maxBlockSize;
364 set->nextBlockSize = initBlockSize;
367 * Compute the allocation chunk size limit for this context. It can't be
368 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
369 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
370 * even a significant fraction of it, should be treated as large chunks
371 * too. For the typical case of maxBlockSize a power of 2, the chunk size
372 * limit will be at most 1/8th maxBlockSize, so that given a stream of
373 * requests that are all the maximum chunk size we will waste at most
374 * 1/8th of the allocated space.
376 * We have to have allocChunkLimit a power of two, because the requested
377 * and actually-allocated sizes of any chunk must be on the same side of
378 * the limit, else we get confused about whether the chunk is "big".
380 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
382 StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
383 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
/* Halve the limit until at most ALLOC_CHUNK_FRACTION chunks (plus headers)
 * fit in a maxBlockSize block; keeps the limit a power of two. */
385 set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
386 while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
387 (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
388 set->allocChunkLimit >>= 1;
391 * Grab always-allocated space, if requested
393 if (minContextSize > 0)
395 Size blksize = minContextSize;
398 block = (AllocBlock) malloc(blksize);
/* On malloc failure: dump context stats, then report OOM via ereport. */
401 MemoryContextStats(TopMemoryContext);
403 (errcode(ERRCODE_OUT_OF_MEMORY),
404 errmsg("out of memory"),
405 errdetail("Failed while creating memory context \"%s\".",
409 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
410 block->endptr = ((char *) block) + blksize;
412 block->next = set->blocks;
414 block->next->prev = block;
416 /* Mark block as not to be released at reset time */
419 /* Mark unallocated space NOACCESS; leave the block header alone. */
420 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
421 blksize - ALLOC_BLOCKHDRSZ);
424 return (MemoryContext) set;
429 * Context-type-specific initialization routine.
431 * This is called by MemoryContextCreate() after setting up the
432 * generic MemoryContext fields and before linking the new context
433 * into the context tree. We must do whatever is needed to make the
434 * new context minimally valid for deletion. We must *not* risk
435 * failure --- thus, for example, allocating more memory is not cool.
436 * (AllocSetContextCreate can allocate memory when it gets control
/* Intentionally a no-op: see body comment below. */
440 AllocSetInit(MemoryContext context)
443 * Since MemoryContextCreate already zeroed the context node, we don't
444 * have to do anything here: it's already OK.
450 * Frees all memory which is allocated in the given set.
452 * Actually, this routine has some discretion about what to do.
453 * It should mark all allocated chunks freed, but it need not necessarily
454 * give back all the resources the set owns. Our actual implementation is
455 * that we hang onto any "keeper" block specified for the set. In this way,
456 * we don't thrash malloc() when a context is repeatedly reset after small
457 * allocations, which is typical behavior for per-tuple contexts.
460 AllocSetReset(MemoryContext context)
462 AllocSet set = (AllocSet) context;
465 AssertArg(AllocSetIsValid(set));
467 #ifdef MEMORY_CONTEXT_CHECKING
468 /* Check for corruption and leaks before freeing */
469 AllocSetCheck(context);
472 /* Clear chunk freelists */
473 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
477 /* New blocks list is either empty or just the keeper block */
478 set->blocks = set->keeper;
/* Walk the saved block list, rewinding the keeper and free()ing the rest.
 * NOTE(review): the local that captures the old list head before
 * set->blocks is overwritten is not visible in this excerpt. */
480 while (block != NULL)
482 AllocBlock next = block->next;
484 if (block == set->keeper)
486 /* Reset the block, but don't return it to malloc */
487 char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
489 #ifdef CLOBBER_FREED_MEMORY
490 wipe_mem(datastart, block->freeptr - datastart);
492 /* wipe_mem() would have done this */
493 VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
495 block->freeptr = datastart;
501 /* Normal case, release the block */
502 #ifdef CLOBBER_FREED_MEMORY
503 wipe_mem(block, block->freeptr - ((char *) block));
510 /* Reset block size allocation sequence, too */
511 set->nextBlockSize = set->initBlockSize;
516 * Frees all memory which is allocated in the given set,
517 * in preparation for deletion of the set.
519 * Unlike AllocSetReset, this *must* free all resources of the set.
520 * But note we are not responsible for deleting the context node itself.
523 AllocSetDelete(MemoryContext context)
525 AllocSet set = (AllocSet) context;
526 AllocBlock block = set->blocks;
528 AssertArg(AllocSetIsValid(set));
530 #ifdef MEMORY_CONTEXT_CHECKING
531 /* Check for corruption and leaks before freeing */
532 AllocSetCheck(context);
535 /* Make it look empty, just in case... */
536 MemSetAligned(set->freelist, 0, sizeof(set->freelist));
/* Release every block, including any keeper — deletion keeps nothing. */
540 while (block != NULL)
542 AllocBlock next = block->next;
544 #ifdef CLOBBER_FREED_MEMORY
545 wipe_mem(block, block->freeptr - ((char *) block));
554 * Returns pointer to allocated memory of given size or NULL if
555 * request could not be completed; memory is added to the set.
557 * No request may exceed:
558 * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
559 * All callers use a much-lower limit.
562 AllocSetAlloc(MemoryContext context, Size size)
564 AllocSet set = (AllocSet) context;
571 AssertArg(AllocSetIsValid(set));
574 * If requested size exceeds maximum for chunks, allocate an entire block
/* --- Oversize path: one dedicated single-chunk block from malloc() --- */
577 if (size > set->allocChunkLimit)
579 chunk_size = MAXALIGN(size);
580 blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
581 block = (AllocBlock) malloc(blksize);
/* freeptr == endptr marks the block as exactly full (checked by pfree). */
585 block->freeptr = block->endptr = ((char *) block) + blksize;
587 chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
589 chunk->size = chunk_size;
590 #ifdef MEMORY_CONTEXT_CHECKING
591 /* Valgrind: Will be made NOACCESS below. */
592 chunk->requested_size = size;
593 /* set mark to catch clobber of "unused" space */
594 if (size < chunk_size)
595 set_sentinel(AllocChunkGetPointer(chunk), size);
597 #ifdef RANDOMIZE_ALLOCATED_MEMORY
598 /* fill the allocated space with junk */
599 randomize_mem((char *) AllocChunkGetPointer(chunk), size);
603 * Stick the new block underneath the active allocation block, if any,
604 * so that we don't lose the use of the space remaining therein.
606 if (set->blocks != NULL)
608 block->prev = set->blocks;
609 block->next = set->blocks->next;
611 block->next->prev = block;
612 set->blocks->next = block;
621 AllocAllocInfo(set, chunk);
624 * Chunk's metadata fields remain DEFINED. The requested allocation
625 * itself can be NOACCESS or UNDEFINED; our caller will soon make it
626 * UNDEFINED. Make extra space at the end of the chunk, if any,
629 VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNKHDRSZ,
630 chunk_size - ALLOC_CHUNKHDRSZ);
632 return AllocChunkGetPointer(chunk);
636 * Request is small enough to be treated as a chunk. Look in the
637 * corresponding free list to see if there is a free chunk we could reuse.
638 * If one is found, remove it from the free list, make it again a member
639 * of the alloc set and return its data address.
641 fidx = AllocSetFreeIndex(size);
642 chunk = set->freelist[fidx];
645 Assert(chunk->size >= size);
/* Pop the chunk: a free chunk's 'aset' field doubles as its freelist link. */
647 set->freelist[fidx] = (AllocChunk) chunk->aset;
649 chunk->aset = (void *) set;
651 #ifdef MEMORY_CONTEXT_CHECKING
652 /* Valgrind: Free list requested_size should be DEFINED. */
653 chunk->requested_size = size;
654 VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
655 sizeof(chunk->requested_size));
656 /* set mark to catch clobber of "unused" space */
657 if (size < chunk->size)
658 set_sentinel(AllocChunkGetPointer(chunk), size);
660 #ifdef RANDOMIZE_ALLOCATED_MEMORY
661 /* fill the allocated space with junk */
662 randomize_mem((char *) AllocChunkGetPointer(chunk), size);
665 AllocAllocInfo(set, chunk);
666 return AllocChunkGetPointer(chunk);
670 * Choose the actual chunk size to allocate.
/* Round the request up to the power-of-2 size of its freelist. */
672 chunk_size = (1 << ALLOC_MINBITS) << fidx;
673 Assert(chunk_size >= size);
676 * If there is enough room in the active allocation block, we will put the
677 * chunk into that block. Else must start a new one.
679 if ((block = set->blocks) != NULL)
681 Size availspace = block->endptr - block->freeptr;
683 if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
686 * The existing active (top) block does not have enough room for
687 * the requested allocation, but it might still have a useful
688 * amount of space in it. Once we push it down in the block list,
689 * we'll never try to allocate more space from it. So, before we
690 * do that, carve up its free space into chunks that we can put on
691 * the set's freelists.
693 * Because we can only get here when there's less than
694 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
695 * more than ALLOCSET_NUM_FREELISTS-1 times.
697 while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
699 Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
700 int a_fidx = AllocSetFreeIndex(availchunk);
703 * In most cases, we'll get back the index of the next larger
704 * freelist than the one we need to put this chunk on. The
705 * exception is when availchunk is exactly a power of 2.
707 if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
711 availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
714 chunk = (AllocChunk) (block->freeptr);
716 /* Prepare to initialize the chunk header. */
717 VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
719 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
720 availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
722 chunk->size = availchunk;
723 #ifdef MEMORY_CONTEXT_CHECKING
724 chunk->requested_size = 0; /* mark it free */
726 chunk->aset = (void *) set->freelist[a_fidx];
727 set->freelist[a_fidx] = chunk;
730 /* Mark that we need to create a new block */
736 * Time to create a new regular (multi-chunk) block?
743 * The first such block has size initBlockSize, and we double the
744 * space in each succeeding block, but not more than maxBlockSize.
746 blksize = set->nextBlockSize;
747 set->nextBlockSize <<= 1;
748 if (set->nextBlockSize > set->maxBlockSize)
749 set->nextBlockSize = set->maxBlockSize;
752 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
753 * space... but try to keep it a power of 2.
755 required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
756 while (blksize < required_size)
759 /* Try to allocate it */
760 block = (AllocBlock) malloc(blksize);
763 * We could be asking for pretty big blocks here, so cope if malloc
764 * fails. But give up if there's less than a meg or so available...
/* Retry with progressively smaller blocks (presumably halving blksize;
 * the shrink statement is not visible in this excerpt) until malloc
 * succeeds or we drop below ~1MB. */
766 while (block == NULL && blksize > 1024 * 1024)
769 if (blksize < required_size)
771 block = (AllocBlock) malloc(blksize);
778 block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
779 block->endptr = ((char *) block) + blksize;
782 * If this is the first block of the set, make it the "keeper" block.
783 * Formerly, a keeper block could only be created during context
784 * creation, but allowing it to happen here lets us have fast reset
785 * cycling even for contexts created with minContextSize = 0; that way
786 * we don't have to force space to be allocated in contexts that might
787 * never need any space. Don't mark an oversize block as a keeper,
790 if (set->keeper == NULL && blksize == set->initBlockSize)
793 /* Mark unallocated space NOACCESS. */
794 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
795 blksize - ALLOC_BLOCKHDRSZ);
/* Push the new block on the front of the set's block list. */
798 block->next = set->blocks;
800 block->next->prev = block;
805 * OK, do the allocation
807 chunk = (AllocChunk) (block->freeptr);
809 /* Prepare to initialize the chunk header. */
810 VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
812 block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
813 Assert(block->freeptr <= block->endptr);
815 chunk->aset = (void *) set;
816 chunk->size = chunk_size;
817 #ifdef MEMORY_CONTEXT_CHECKING
818 chunk->requested_size = size;
819 VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
820 sizeof(chunk->requested_size));
821 /* set mark to catch clobber of "unused" space */
822 if (size < chunk->size)
823 set_sentinel(AllocChunkGetPointer(chunk), size);
825 #ifdef RANDOMIZE_ALLOCATED_MEMORY
826 /* fill the allocated space with junk */
827 randomize_mem((char *) AllocChunkGetPointer(chunk), size);
830 AllocAllocInfo(set, chunk);
831 return AllocChunkGetPointer(chunk);
836 * Frees allocated memory; memory is removed from the set.
839 AllocSetFree(MemoryContext context, void *pointer)
841 AllocSet set = (AllocSet) context;
842 AllocChunk chunk = AllocPointerGetChunk(pointer);
844 AllocFreeInfo(set, chunk);
846 #ifdef MEMORY_CONTEXT_CHECKING
847 VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
848 sizeof(chunk->requested_size));
849 /* Test for someone scribbling on unused space in chunk */
850 if (chunk->requested_size < chunk->size)
851 if (!sentinel_ok(pointer, chunk->requested_size))
852 elog(WARNING, "detected write past chunk end in %s %p",
853 set->header.name, chunk);
856 if (chunk->size > set->allocChunkLimit)
859 * Big chunks are certain to have been allocated as single-chunk
860 * blocks. Just unlink that block and return it to malloc().
862 AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
865 * Try to verify that we have a sane block pointer: it should
866 * reference the correct aset, and freeptr and endptr should point
867 * just past the chunk.
869 if (block->aset != set ||
870 block->freeptr != block->endptr ||
871 block->freeptr != ((char *) block) +
872 (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
873 elog(ERROR, "could not find block containing chunk %p", chunk);
875 /* OK, remove block from aset's list and free it */
877 block->prev->next = block->next;
879 set->blocks = block->next;
881 block->next->prev = block->prev;
882 #ifdef CLOBBER_FREED_MEMORY
883 wipe_mem(block, block->freeptr - ((char *) block));
889 /* Normal case, put the chunk into appropriate freelist */
890 int fidx = AllocSetFreeIndex(chunk->size);
/* Link via the 'aset' field, which doubles as the freelist next-pointer
 * in free chunks. */
892 chunk->aset = (void *) set->freelist[fidx];
894 #ifdef CLOBBER_FREED_MEMORY
895 wipe_mem(pointer, chunk->size);
898 #ifdef MEMORY_CONTEXT_CHECKING
899 /* Reset requested_size to 0 in chunks that are on freelist */
900 chunk->requested_size = 0;
902 set->freelist[fidx] = chunk;
908 * Returns new pointer to allocated memory of given size or NULL if
909 * request could not be completed; this memory is added to the set.
910 * Memory associated with given pointer is copied into the new memory,
911 * and the old memory is freed.
913 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
914 * makes our Valgrind client requests less-precise, hazarding false negatives.
915 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
919 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
921 AllocSet set = (AllocSet) context;
922 AllocChunk chunk = AllocPointerGetChunk(pointer);
923 Size oldsize = chunk->size;
925 #ifdef MEMORY_CONTEXT_CHECKING
926 VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
927 sizeof(chunk->requested_size));
928 /* Test for someone scribbling on unused space in chunk */
929 if (chunk->requested_size < oldsize)
930 if (!sentinel_ok(pointer, chunk->requested_size))
931 elog(WARNING, "detected write past chunk end in %s %p",
932 set->header.name, chunk);
936 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
937 * allocated area already is >= the new size. (In particular, we always
938 * fall out here if the requested size is a decrease.)
/* --- Fast path: the existing chunk already has room; keep it in place. --- */
942 #ifdef MEMORY_CONTEXT_CHECKING
943 Size oldrequest = chunk->requested_size;
945 #ifdef RANDOMIZE_ALLOCATED_MEMORY
946 /* We can only fill the extra space if we know the prior request */
947 if (size > oldrequest)
948 randomize_mem((char *) pointer + oldrequest,
952 chunk->requested_size = size;
953 VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
954 sizeof(chunk->requested_size));
957 * If this is an increase, mark any newly-available part UNDEFINED.
958 * Otherwise, mark the obsolete part NOACCESS.
960 if (size > oldrequest)
961 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
964 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
967 /* set mark to catch clobber of "unused" space */
969 set_sentinel(pointer, size);
970 #else /* !MEMORY_CONTEXT_CHECKING */
973 * We don't have the information to determine whether we're growing
974 * the old request or shrinking it, so we conservatively mark the
975 * entire new allocation DEFINED.
977 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
978 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
/* --- Oversize path: grow the dedicated single-chunk block via realloc(). --- */
984 if (oldsize > set->allocChunkLimit)
987 * The chunk must have been allocated as a single-chunk block. Use
988 * realloc() to make the containing block bigger with minimum space
991 AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
996 * Try to verify that we have a sane block pointer: it should
997 * reference the correct aset, and freeptr and endptr should point
998 * just past the chunk.
1000 if (block->aset != set ||
1001 block->freeptr != block->endptr ||
1002 block->freeptr != ((char *) block) +
1003 (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1004 elog(ERROR, "could not find block containing chunk %p", chunk);
1006 /* Do the realloc */
1007 chksize = MAXALIGN(size);
1008 blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1009 block = (AllocBlock) realloc(block, blksize);
1012 block->freeptr = block->endptr = ((char *) block) + blksize;
1014 /* Update pointers since block has likely been moved */
1015 chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
1016 pointer = AllocChunkGetPointer(chunk);
/* Re-link the (possibly moved) block into the set's doubly-linked list. */
1018 block->prev->next = block;
1020 set->blocks = block;
1022 block->next->prev = block;
1023 chunk->size = chksize;
1025 #ifdef MEMORY_CONTEXT_CHECKING
1026 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1027 /* We can only fill the extra space if we know the prior request */
1028 randomize_mem((char *) pointer + chunk->requested_size,
1029 size - chunk->requested_size);
1033 * realloc() (or randomize_mem()) will have left the newly-allocated
1034 * part UNDEFINED, but we may need to adjust trailing bytes from the
1037 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1038 oldsize - chunk->requested_size);
1040 chunk->requested_size = size;
1041 VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
1042 sizeof(chunk->requested_size));
1044 /* set mark to catch clobber of "unused" space */
1045 if (size < chunk->size)
1046 set_sentinel(pointer, size);
1047 #else /* !MEMORY_CONTEXT_CHECKING */
1050 * We don't know how much of the old chunk size was the actual
1051 * allocation; it could have been as small as one byte. We have to be
1052 * conservative and just mark the entire old portion DEFINED.
1054 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1057 /* Make any trailing alignment padding NOACCESS. */
1058 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1065 * Small-chunk case. We just do this by brute force, ie, allocate a
1066 * new chunk and copy the data. Since we know the existing data isn't
1067 * huge, this won't involve any great memcpy expense, so it's not
1068 * worth being smarter. (At one time we tried to avoid memcpy when it
1069 * was possible to enlarge the chunk in-place, but that turns out to
1070 * misbehave unpleasantly for repeated cycles of
1071 * palloc/repalloc/pfree: the eventually freed chunks go into the
1072 * wrong freelist for the next initial palloc request, and so we leak
1073 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1075 AllocPointer newPointer;
1077 /* allocate new chunk */
1078 newPointer = AllocSetAlloc((MemoryContext) set, size);
1080 /* leave immediately if request was not completed */
1081 if (newPointer == NULL)
1085 * AllocSetAlloc() just made the region NOACCESS. Change it to
1086 * UNDEFINED for the moment; memcpy() will then transfer definedness
1087 * from the old allocation to the new. If we know the old allocation,
1088 * copy just that much. Otherwise, make the entire old chunk defined
1089 * to avoid errors as we copy the currently-NOACCESS trailing bytes.
1091 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1092 #ifdef MEMORY_CONTEXT_CHECKING
1093 oldsize = chunk->requested_size;
1095 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1098 /* transfer existing data (certain to fit) */
1099 memcpy(newPointer, pointer, oldsize);
1101 /* free old chunk */
1102 AllocSetFree((MemoryContext) set, pointer);
1109 * AllocSetGetChunkSpace
1110 * Given a currently-allocated chunk, determine the total space
1111 * it occupies (including all memory-allocation overhead).
1114 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
/* Back up from the user-visible data pointer to its chunk header. */
1116 AllocChunk chunk = AllocPointerGetChunk(pointer);
/*
 * Total footprint = the chunk's stored (aligned) payload size plus the
 * per-chunk header; block-level overhead is not attributed to chunks.
 */
1118 return chunk->size + ALLOC_CHUNKHDRSZ;
1123 * Is an allocset empty of any allocated space?
1126 AllocSetIsEmpty(MemoryContext context)
1129 * For now, we say "empty" only if the context is new or just reset. We
1130 * could examine the freelists to determine if all space has been freed,
1131 * but it's not really worth the trouble for present uses of this
/*
 * NOTE(review): isReset is presumably cleared by the context machinery on
 * the first allocation after creation/reset, making this a cheap
 * conservative test — confirm against mcxt.c. The return statements are
 * elided from this excerpt.
 */
1134 if (context->isReset)
1141 * Compute stats about memory consumption of an allocset.
1143 * level: recursion level (0 at top level); used for print indentation.
1144 * print: true to print stats to stderr.
1145 * totals: if not NULL, add stats about this allocset into *totals.
1148 AllocSetStats(MemoryContext context, int level, bool print,
1149 MemoryContextCounters *totals)
1151 AllocSet set = (AllocSet) context;
1153 Size freechunks = 0;
1154 Size totalspace = 0;
/*
 * Pass 1: walk every block.  Each block contributes its whole extent to
 * totalspace; the untouched tail between freeptr and endptr is free space.
 * (nblocks accumulation is elided from this excerpt.)
 */
1159 for (block = set->blocks; block != NULL; block = block->next)
1162 totalspace += block->endptr - ((char *) block);
1163 freespace += block->endptr - block->freeptr;
/*
 * Pass 2: walk every freelist.  A free chunk's payload and its header both
 * count as free space; the chain pointer is stored in the chunk's aset
 * field while the chunk is on a freelist.
 */
1165 for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1169 for (chunk = set->freelist[fidx]; chunk != NULL;
1170 chunk = (AllocChunk) chunk->aset)
1173 freespace += chunk->size + ALLOC_CHUNKHDRSZ;
/*
 * Print path: indent two spaces per recursion level, then emit one summary
 * line for this context.  (The enclosing "if (print)" guard is elided from
 * this excerpt.)
 */
1181 for (i = 0; i < level; i++)
1182 fprintf(stderr, " ");
1184 "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
1185 set->header.name, totalspace, nblocks, freespace, freechunks,
1186 totalspace - freespace);
/*
 * Accumulate this set's numbers into the caller-supplied counters; per the
 * header comment this runs only when totals != NULL (guard elided here).
 */
1191 totals->nblocks += nblocks;
1192 totals->freechunks += freechunks;
1193 totals->totalspace += totalspace;
1194 totals->freespace += freespace;
1199 #ifdef MEMORY_CONTEXT_CHECKING
1203 * Walk through chunks and check consistency of memory.
1205 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1206 * find yourself in an infinite loop when trouble occurs, because this
1207 * routine will be entered again when elog cleanup tries to release memory!
1210 AllocSetCheck(MemoryContext context)
1212 AllocSet set = (AllocSet) context;
1213 char *name = set->header.name;
1214 AllocBlock prevblock;
/* Walk the block list, remembering the previous block to verify back-links. */
1217 for (prevblock = NULL, block = set->blocks;
1219 prevblock = block, block = block->next)
/* Usable region starts just past the block header; blk_used is bytes handed out. */
1221 char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1222 long blk_used = block->freeptr - bpoz;
1227 * Empty block - empty can be keeper-block only
1231 if (set->keeper != block)
1232 elog(WARNING, "problem in alloc set %s: empty block %p",
1237 * Check block header fields
1239 if (block->aset != set ||
1240 block->prev != prevblock ||
1241 block->freeptr < bpoz ||
1242 block->freeptr > block->endptr)
1243 elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
/* Scan the block chunk-by-chunk until we reach the high-water mark. */
1249 while (bpoz < block->freeptr)
1251 AllocChunk chunk = (AllocChunk) bpoz;
1255 chsize = chunk->size; /* aligned chunk size */
/*
 * Under Valgrind, requested_size is normally kept NOACCESS; expose it
 * just long enough to read, then re-hide it for allocated chunks.
 */
1256 VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
1257 sizeof(chunk->requested_size));
1258 dsize = chunk->requested_size; /* real data */
1259 if (dsize > 0) /* not on a free list */
1260 VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
1261 sizeof(chunk->requested_size));
1267 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1268 name, chunk, block);
/* No chunk may be smaller than the smallest freelist size class. */
1269 if (chsize < (1 << ALLOC_MINBITS))
1270 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1271 name, chsize, chunk, block);
1273 /* single-chunk block? */
/* Oversized chunks must be the sole occupant of a dedicated block. */
1274 if (chsize > set->allocChunkLimit &&
1275 chsize + ALLOC_CHUNKHDRSZ != blk_used)
1276 elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1277 name, chunk, block);
1280 * If chunk is allocated, check for correct aset pointer. (If it's
1281 * free, the aset is the freelist pointer, which we can't check as
1284 if (dsize > 0 && chunk->aset != (void *) set)
1285 elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1286 name, block, chunk);
1289 * Check for overwrite of "unallocated" space in chunk
/* The sentinel byte sits just past the requested size within the chunk. */
1291 if (dsize > 0 && dsize < chsize &&
1292 !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1293 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1294 name, block, chunk);
/* Advance to the next chunk header. */
1299 bpoz += ALLOC_CHUNKHDRSZ + chsize;
/*
 * Cross-check: accumulated chunk payloads plus one header apiece must
 * account for exactly the block's used space.  (The blk_data/nchunks
 * accumulation lines are elided from this excerpt.)
 */
1302 if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1303 elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1308 #endif /* MEMORY_CONTEXT_CHECKING */