1 /*-------------------------------------------------------------------------
4 * Generational allocator definitions.
6 * Generation is a custom MemoryContext implementation designed for cases of
7 * chunks with similar lifespan.
9 * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group
12 * src/backend/utils/mmgr/generation.c
15 * This memory context is based on the assumption that the chunks are freed
16 * roughly in the same order as they were allocated (FIFO), or in groups with
17 * similar lifespan (generations - hence the name of the context). This is
18 * typical for various queue-like use cases, i.e. when tuples are constructed,
19 * processed and then thrown away.
21 * The memory context uses a very simple approach to free space management.
22 * Instead of a complex global freelist, each block tracks a number
23 * of allocated and freed chunks. Freed chunks are not reused, and once all
24 * chunks in a block are freed, the whole block is thrown away. When the
25 * chunks allocated in the same block have similar lifespan, this works
26 * very well and is very cheap.
28 * The current implementation only uses a fixed block size - maybe it should
29 * adapt a min/max block size range, and grow the blocks automatically.
30 * It already uses dedicated blocks for oversized chunks.
32 * XXX It might be possible to improve this by keeping a small freelist for
33 * only a small number of recent blocks, but it's not clear it's worth the
34 * additional complexity.
36 *-------------------------------------------------------------------------
41 #include "lib/ilist.h"
42 #include "utils/memdebug.h"
43 #include "utils/memutils.h"
/*
 * Sizes of the block and chunk headers.  The block header is maxaligned so
 * the first chunk starts on an alignment boundary; the chunk header itself
 * must be maxaligned too (enforced by a static assertion below).
 */
#define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ sizeof(GenerationChunk)

typedef struct GenerationBlock GenerationBlock; /* forward reference */
typedef struct GenerationChunk GenerationChunk;

/* Opaque pointer type handed back to palloc() callers */
typedef void *GenerationPointer;
55 * GenerationContext is a simple memory context not reusing allocated chunks,
56 * and freeing blocks once all chunks are freed.
58 typedef struct GenerationContext
60 MemoryContextData header; /* Standard memory-context fields */
62 /* Generational context parameters */
63 Size blockSize; /* standard block size */
65 GenerationBlock *block; /* current (most recently allocated) block */
66 dlist_head blocks; /* list of blocks */
71 * GenerationBlock is the unit of memory that is obtained by generation.c
72 * from malloc(). It contains one or more GenerationChunks, which are
73 * the units requested by palloc() and freed by pfree(). GenerationChunks
74 * cannot be returned to malloc() individually, instead pfree()
75 * updates the free counter of the block and when all chunks in a block
76 * are free the whole block is returned to malloc().
78 * GenerationBlock is the header data for a block --- the usable space
79 * within the block begins at the next alignment boundary.
81 struct GenerationBlock
83 dlist_node node; /* doubly-linked list of blocks */
84 Size blksize; /* allocated size of this block */
85 int nchunks; /* number of chunks in the block */
86 int nfree; /* number of free chunks */
87 char *freeptr; /* start of free space in this block */
88 char *endptr; /* end of space in this block */
93 * The prefix of each piece of memory in a GenerationBlock
95 * Note: to meet the memory context APIs, the payload area of the chunk must
96 * be maxaligned, and the "context" link must be immediately adjacent to the
97 * payload area (cf. GetMemoryChunkContext). We simplify matters for this
98 * module by requiring sizeof(GenerationChunk) to be maxaligned, and then
99 * we can ensure things work by adding any required alignment padding before
100 * the pointer fields. There is a static assertion below that the alignment
103 struct GenerationChunk
105 /* size is always the size of the usable space in the chunk */
107 #ifdef MEMORY_CONTEXT_CHECKING
108 /* when debugging memory usage, also store actual requested size */
109 /* this is zero in a free chunk */
112 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P * 2)
114 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P * 2)
115 #endif /* MEMORY_CONTEXT_CHECKING */
117 /* ensure proper alignment by adding padding if needed */
118 #if (GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
119 char padding[MAXIMUM_ALIGNOF - GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
122 GenerationBlock *block; /* block owning this chunk */
123 GenerationContext *context; /* owning context, or NULL if freed chunk */
124 /* there must not be any padding to reach a MAXALIGN boundary here! */
128 * Only the "context" field should be accessed outside this module.
129 * We keep the rest of an allocated chunk's header marked NOACCESS when using
130 * valgrind. But note that freed chunk headers are kept accessible, for
133 #define GENERATIONCHUNK_PRIVATE_LEN offsetof(GenerationChunk, context)
/*
 * GenerationIsValid
 *		True iff set is valid allocation set.
 */
#define GenerationIsValid(set) PointerIsValid(set)

/* Translate between a user data pointer and its chunk header, and back. */
#define GenerationPointerGetChunk(ptr) \
((GenerationChunk *)(((char *)(ptr)) - Generation_CHUNKHDRSZ))
#define GenerationChunkGetPointer(chk) \
((GenerationPointer *)(((char *)(chk)) + Generation_CHUNKHDRSZ))
147 * These functions implement the MemoryContext API for Generation contexts.
149 static void *GenerationAlloc(MemoryContext context, Size size);
150 static void GenerationFree(MemoryContext context, void *pointer);
151 static void *GenerationRealloc(MemoryContext context, void *pointer, Size size);
152 static void GenerationReset(MemoryContext context);
153 static void GenerationDelete(MemoryContext context);
154 static Size GenerationGetChunkSpace(MemoryContext context, void *pointer);
155 static bool GenerationIsEmpty(MemoryContext context);
156 static void GenerationStats(MemoryContext context,
157 MemoryStatsPrintFunc printfunc, void *passthru,
158 MemoryContextCounters *totals);
160 #ifdef MEMORY_CONTEXT_CHECKING
161 static void GenerationCheck(MemoryContext context);
165 * This is the virtual function table for Generation contexts.
167 static const MemoryContextMethods GenerationMethods = {
173 GenerationGetChunkSpace,
176 #ifdef MEMORY_CONTEXT_CHECKING
/*
 * Tracing support: when built with HAVE_ALLOCINFO, log every allocation and
 * free to stderr; otherwise these macros expand to nothing.
 */
#ifdef HAVE_ALLOCINFO
#define GenerationFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "GenerationFree: %s: %p, %lu\n", \
				(_cxt)->name, (_chunk), (_chunk)->size)
#define GenerationAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "GenerationAlloc: %s: %p, %lu\n", \
				(_cxt)->name, (_chunk), (_chunk)->size)
#else
#define GenerationFreeInfo(_cxt, _chunk)
#define GenerationAllocInfo(_cxt, _chunk)
#endif
204 * GenerationContextCreate
205 * Create a new Generation context.
207 * parent: parent context, or NULL if top-level context
208 * name: name of context (must be statically allocated)
209 * blockSize: generation block size
212 GenerationContextCreate(MemoryContext parent,
216 GenerationContext *set;
218 /* Assert we padded GenerationChunk properly */
219 StaticAssertStmt(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
220 "sizeof(GenerationChunk) is not maxaligned");
221 StaticAssertStmt(offsetof(GenerationChunk, context) + sizeof(MemoryContext) ==
222 Generation_CHUNKHDRSZ,
223 "padding calculation in GenerationChunk is wrong");
226 * First, validate allocation parameters. (If we're going to throw an
227 * error, we should do so before the context is created, not after.) We
228 * somewhat arbitrarily enforce a minimum 1K block size, mostly because
229 * that's what AllocSet does.
231 if (blockSize != MAXALIGN(blockSize) ||
233 !AllocHugeSizeIsValid(blockSize))
234 elog(ERROR, "invalid blockSize for memory context: %zu",
238 * Allocate the context header. Unlike aset.c, we never try to combine
239 * this with the first regular block, since that would prevent us from
240 * freeing the first generation of allocations.
243 set = (GenerationContext *) malloc(MAXALIGN(sizeof(GenerationContext)));
246 MemoryContextStats(TopMemoryContext);
248 (errcode(ERRCODE_OUT_OF_MEMORY),
249 errmsg("out of memory"),
250 errdetail("Failed while creating memory context \"%s\".",
255 * Avoid writing code that can fail between here and MemoryContextCreate;
256 * we'd leak the header if we ereport in this stretch.
259 /* Fill in GenerationContext-specific header fields */
260 set->blockSize = blockSize;
262 dlist_init(&set->blocks);
264 /* Finally, do the type-independent part of context creation */
265 MemoryContextCreate((MemoryContext) set,
271 return (MemoryContext) set;
276 * Frees all memory which is allocated in the given set.
278 * The code simply frees all the blocks in the context - we don't keep any
279 * keeper blocks or anything like that.
282 GenerationReset(MemoryContext context)
284 GenerationContext *set = (GenerationContext *) context;
285 dlist_mutable_iter miter;
287 AssertArg(GenerationIsValid(set));
289 #ifdef MEMORY_CONTEXT_CHECKING
290 /* Check for corruption and leaks before freeing */
291 GenerationCheck(context);
294 dlist_foreach_modify(miter, &set->blocks)
296 GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
298 dlist_delete(miter.cur);
300 #ifdef CLOBBER_FREED_MEMORY
301 wipe_mem(block, block->blksize);
309 Assert(dlist_is_empty(&set->blocks));
314 * Free all memory which is allocated in the given context.
317 GenerationDelete(MemoryContext context)
319 /* Reset to release all the GenerationBlocks */
320 GenerationReset(context);
321 /* And free the context header */
327 * Returns pointer to allocated memory of given size or NULL if
328 * request could not be completed; memory is added to the set.
330 * No request may exceed:
331 * MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
332 * All callers use a much-lower limit.
334 * Note: when using valgrind, it doesn't matter how the returned allocation
335 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
336 * return space that is marked NOACCESS - GenerationRealloc has to beware!
339 GenerationAlloc(MemoryContext context, Size size)
341 GenerationContext *set = (GenerationContext *) context;
342 GenerationBlock *block;
343 GenerationChunk *chunk;
344 Size chunk_size = MAXALIGN(size);
346 /* is it an over-sized chunk? if yes, allocate special block */
347 if (chunk_size > set->blockSize / 8)
349 Size blksize = chunk_size + Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
351 block = (GenerationBlock *) malloc(blksize);
355 /* block with a single (used) chunk */
356 block->blksize = blksize;
360 /* the block is completely full */
361 block->freeptr = block->endptr = ((char *) block) + blksize;
363 chunk = (GenerationChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
364 chunk->block = block;
365 chunk->context = set;
366 chunk->size = chunk_size;
368 #ifdef MEMORY_CONTEXT_CHECKING
369 chunk->requested_size = size;
370 /* set mark to catch clobber of "unused" space */
371 if (size < chunk_size)
372 set_sentinel(GenerationChunkGetPointer(chunk), size);
374 #ifdef RANDOMIZE_ALLOCATED_MEMORY
375 /* fill the allocated space with junk */
376 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
379 /* add the block to the list of allocated blocks */
380 dlist_push_head(&set->blocks, &block->node);
382 GenerationAllocInfo(set, chunk);
384 /* Ensure any padding bytes are marked NOACCESS. */
385 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
388 /* Disallow external access to private part of chunk header. */
389 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
391 return GenerationChunkGetPointer(chunk);
395 * Not an over-sized chunk. Is there enough space in the current block? If
396 * not, allocate a new "regular" block.
400 if ((block == NULL) ||
401 (block->endptr - block->freeptr) < Generation_CHUNKHDRSZ + chunk_size)
403 Size blksize = set->blockSize;
405 block = (GenerationBlock *) malloc(blksize);
410 block->blksize = blksize;
414 block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
415 block->endptr = ((char *) block) + blksize;
417 /* Mark unallocated space NOACCESS. */
418 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
419 blksize - Generation_BLOCKHDRSZ);
421 /* add it to the doubly-linked list of blocks */
422 dlist_push_head(&set->blocks, &block->node);
424 /* and also use it as the current allocation block */
428 /* we're supposed to have a block with enough free space now */
429 Assert(block != NULL);
430 Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
432 chunk = (GenerationChunk *) block->freeptr;
434 /* Prepare to initialize the chunk header. */
435 VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
438 block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
440 Assert(block->freeptr <= block->endptr);
442 chunk->block = block;
443 chunk->context = set;
444 chunk->size = chunk_size;
446 #ifdef MEMORY_CONTEXT_CHECKING
447 chunk->requested_size = size;
448 /* set mark to catch clobber of "unused" space */
449 if (size < chunk->size)
450 set_sentinel(GenerationChunkGetPointer(chunk), size);
452 #ifdef RANDOMIZE_ALLOCATED_MEMORY
453 /* fill the allocated space with junk */
454 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
457 GenerationAllocInfo(set, chunk);
459 /* Ensure any padding bytes are marked NOACCESS. */
460 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
463 /* Disallow external access to private part of chunk header. */
464 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
466 return GenerationChunkGetPointer(chunk);
471 * Update number of chunks in the block, and if all chunks in the block
472 * are now free then discard the block.
475 GenerationFree(MemoryContext context, void *pointer)
477 GenerationContext *set = (GenerationContext *) context;
478 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
479 GenerationBlock *block;
481 /* Allow access to private part of chunk header. */
482 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
484 block = chunk->block;
486 #ifdef MEMORY_CONTEXT_CHECKING
487 /* Test for someone scribbling on unused space in chunk */
488 if (chunk->requested_size < chunk->size)
489 if (!sentinel_ok(pointer, chunk->requested_size))
490 elog(WARNING, "detected write past chunk end in %s %p",
491 ((MemoryContext) set)->name, chunk);
494 #ifdef CLOBBER_FREED_MEMORY
495 wipe_mem(pointer, chunk->size);
498 /* Reset context to NULL in freed chunks */
499 chunk->context = NULL;
501 #ifdef MEMORY_CONTEXT_CHECKING
502 /* Reset requested_size to 0 in freed chunks */
503 chunk->requested_size = 0;
508 Assert(block->nchunks > 0);
509 Assert(block->nfree <= block->nchunks);
511 /* If there are still allocated chunks in the block, we're done. */
512 if (block->nfree < block->nchunks)
516 * The block is empty, so let's get rid of it. First remove it from the
517 * list of blocks, then return it to malloc().
519 dlist_delete(&block->node);
521 /* Also make sure the block is not marked as the current block. */
522 if (set->block == block)
530 * When handling repalloc, we simply allocate a new chunk, copy the data
531 * and discard the old one. The only exception is when the new size fits
532 * into the old chunk - in that case we just update chunk header.
535 GenerationRealloc(MemoryContext context, void *pointer, Size size)
537 GenerationContext *set = (GenerationContext *) context;
538 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
539 GenerationPointer newPointer;
542 /* Allow access to private part of chunk header. */
543 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
545 oldsize = chunk->size;
547 #ifdef MEMORY_CONTEXT_CHECKING
548 /* Test for someone scribbling on unused space in chunk */
549 if (chunk->requested_size < oldsize)
550 if (!sentinel_ok(pointer, chunk->requested_size))
551 elog(WARNING, "detected write past chunk end in %s %p",
552 ((MemoryContext) set)->name, chunk);
556 * Maybe the allocated area already is >= the new size. (In particular,
557 * we always fall out here if the requested size is a decrease.)
559 * This memory context does not use power-of-2 chunk sizing and instead
560 * carves the chunks to be as small as possible, so most repalloc() calls
561 * will end up in the palloc/memcpy/pfree branch.
563 * XXX Perhaps we should annotate this condition with unlikely()?
567 #ifdef MEMORY_CONTEXT_CHECKING
568 Size oldrequest = chunk->requested_size;
570 #ifdef RANDOMIZE_ALLOCATED_MEMORY
571 /* We can only fill the extra space if we know the prior request */
572 if (size > oldrequest)
573 randomize_mem((char *) pointer + oldrequest,
577 chunk->requested_size = size;
580 * If this is an increase, mark any newly-available part UNDEFINED.
581 * Otherwise, mark the obsolete part NOACCESS.
583 if (size > oldrequest)
584 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
587 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
590 /* set mark to catch clobber of "unused" space */
592 set_sentinel(pointer, size);
593 #else /* !MEMORY_CONTEXT_CHECKING */
596 * We don't have the information to determine whether we're growing
597 * the old request or shrinking it, so we conservatively mark the
598 * entire new allocation DEFINED.
600 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
601 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
604 /* Disallow external access to private part of chunk header. */
605 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
610 /* allocate new chunk */
611 newPointer = GenerationAlloc((MemoryContext) set, size);
613 /* leave immediately if request was not completed */
614 if (newPointer == NULL)
616 /* Disallow external access to private part of chunk header. */
617 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
622 * GenerationAlloc() may have returned a region that is still NOACCESS.
623 * Change it to UNDEFINED for the moment; memcpy() will then transfer
624 * definedness from the old allocation to the new. If we know the old
625 * allocation, copy just that much. Otherwise, make the entire old chunk
626 * defined to avoid errors as we copy the currently-NOACCESS trailing
629 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
630 #ifdef MEMORY_CONTEXT_CHECKING
631 oldsize = chunk->requested_size;
633 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
636 /* transfer existing data (certain to fit) */
637 memcpy(newPointer, pointer, oldsize);
640 GenerationFree((MemoryContext) set, pointer);
646 * GenerationGetChunkSpace
647 * Given a currently-allocated chunk, determine the total space
648 * it occupies (including all memory-allocation overhead).
651 GenerationGetChunkSpace(MemoryContext context, void *pointer)
653 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
656 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
657 result = chunk->size + Generation_CHUNKHDRSZ;
658 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
664 * Is a GenerationContext empty of any allocated space?
667 GenerationIsEmpty(MemoryContext context)
669 GenerationContext *set = (GenerationContext *) context;
671 return dlist_is_empty(&set->blocks);
676 * Compute stats about memory consumption of a Generation context.
678 * printfunc: if not NULL, pass a human-readable stats string to this.
679 * passthru: pass this pointer through to printfunc.
680 * totals: if not NULL, add stats about this context into *totals.
682 * XXX freespace only accounts for empty space at the end of the block, not
683 * space of freed chunks (which is unknown).
686 GenerationStats(MemoryContext context,
687 MemoryStatsPrintFunc printfunc, void *passthru,
688 MemoryContextCounters *totals)
690 GenerationContext *set = (GenerationContext *) context;
693 Size nfreechunks = 0;
698 /* Include context header in totalspace */
699 totalspace = MAXALIGN(sizeof(GenerationContext));
701 dlist_foreach(iter, &set->blocks)
703 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
706 nchunks += block->nchunks;
707 nfreechunks += block->nfree;
708 totalspace += block->blksize;
709 freespace += (block->endptr - block->freeptr);
714 char stats_string[200];
716 snprintf(stats_string, sizeof(stats_string),
717 "%zu total in %zd blocks (%zd chunks); %zu free (%zd chunks); %zu used",
718 totalspace, nblocks, nchunks, freespace,
719 nfreechunks, totalspace - freespace);
720 printfunc(context, passthru, stats_string);
725 totals->nblocks += nblocks;
726 totals->freechunks += nfreechunks;
727 totals->totalspace += totalspace;
728 totals->freespace += freespace;
733 #ifdef MEMORY_CONTEXT_CHECKING
737 * Walk through chunks and check consistency of memory.
739 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
740 * find yourself in an infinite loop when trouble occurs, because this
741 * routine will be entered again when elog cleanup tries to release memory!
744 GenerationCheck(MemoryContext context)
746 GenerationContext *gen = (GenerationContext *) context;
747 const char *name = context->name;
750 /* walk all blocks in this context */
751 dlist_foreach(iter, &gen->blocks)
753 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
759 * nfree > nchunks is surely wrong, and we don't expect to see
760 * equality either, because such a block should have gotten freed.
762 if (block->nfree >= block->nchunks)
763 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
764 name, block->nfree, block, block->nchunks);
766 /* Now walk through the chunks and count them. */
769 ptr = ((char *) block) + Generation_BLOCKHDRSZ;
771 while (ptr < block->freeptr)
773 GenerationChunk *chunk = (GenerationChunk *) ptr;
775 /* Allow access to private part of chunk header. */
776 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
778 /* move to the next chunk */
779 ptr += (chunk->size + Generation_CHUNKHDRSZ);
783 /* chunks have both block and context pointers, so check both */
784 if (chunk->block != block)
785 elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
789 * Check for valid context pointer. Note this is an incomplete
790 * test, since palloc(0) produces an allocated chunk with
791 * requested_size == 0.
793 if ((chunk->requested_size > 0 && chunk->context != gen) ||
794 (chunk->context != gen && chunk->context != NULL))
795 elog(WARNING, "problem in Generation %s: bogus context link in block %p, chunk %p",
798 /* now make sure the chunk size is correct */
799 if (chunk->size < chunk->requested_size ||
800 chunk->size != MAXALIGN(chunk->size))
801 elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
804 /* is chunk allocated? */
805 if (chunk->context != NULL)
807 /* check sentinel, but only in allocated blocks */
808 if (chunk->requested_size < chunk->size &&
809 !sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
810 elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
817 * If chunk is allocated, disallow external access to private part
820 if (chunk->context != NULL)
821 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
825 * Make sure we got the expected number of allocated and free chunks
826 * (as tracked in the block header).
828 if (nchunks != block->nchunks)
829 elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
830 name, nchunks, block, block->nchunks);
832 if (nfree != block->nfree)
833 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
834 name, nfree, block, block->nfree);
838 #endif /* MEMORY_CONTEXT_CHECKING */