/* Define the linked list structure. This is used to link free blocks in order
of their memory address. */
+/* This is optimized: the block size is kept in a 24-bit field, so it assumes a
+region is never larger than 16MiB. */
typedef struct A_BLOCK_LINK
{
struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */
- size_t xBlockSize; /*<< The size of the free block. */
- BaseType_t xTag; /*<< Tag of this region */
+ int xBlockSize: 24; /*<< The size of the free block. */
+ int xTag: 7; /*<< Tag of this region */
+ int xAllocated: 1; /*<< 1 if allocated */
} BlockLink_t;
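/* On a typical 32-bit target this header packs into 8 bytes: a 4-byte pointer
plus one 32-bit word shared by the 24-bit size, the 7-bit tag and the 1-bit
allocated flag (this assumes 4-byte pointers and 32-bit int). Every tag value
used in the heap region table must therefore fit in the 7-bit xTag field. */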
// Mux to protect the memory status data.
/* Keeps track of the number of free bytes remaining for each tag, but says
nothing about fragmentation. */
-static size_t xFreeBytesRemaining = 0;
-static size_t xMinimumEverFreeBytesRemaining = 0;
+static size_t xFreeBytesRemaining[HEAPREGIONS_MAX_TAGCOUNT] = {0};
+static size_t xMinimumEverFreeBytesRemaining[HEAPREGIONS_MAX_TAGCOUNT] = {0};
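+/* Free-byte accounting is kept per tag; HEAPREGIONS_MAX_TAGCOUNT sizes these
+arrays and must cover every tag value used in the heap region table. */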
-/* Gets set to the top bit of an size_t type. When this bit in the xBlockSize
-member of an BlockLink_t structure is set then the block belongs to the
-application. When the bit is free the block is still part of the free heap
-space. */
-static size_t xBlockAllocatedBit = 0;
/*-----------------------------------------------------------*/
taskENTER_CRITICAL(&xMallocMutex);
{
- /* Check the requested block size is not so large that the top bit is
- set. The top bit of the block size member of the BlockLink_t structure
- is used to determine who owns the block - the application or the
- kernel, so it must be free. */
- if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+ /* The wanted size is increased so it can contain a BlockLink_t
+ structure in addition to the requested amount of bytes. */
+ if( xWantedSize > 0 )
{
- /* The wanted size is increased so it can contain a BlockLink_t
- structure in addition to the requested amount of bytes. */
- if( xWantedSize > 0 )
- {
- xWantedSize += uxHeapStructSize;
+ xWantedSize += uxHeapStructSize;
- /* Ensure that blocks are always aligned to the required number
- of bytes. */
- if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
- {
- /* Byte alignment required. */
- xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
- }
- else
- {
- mtCOVERAGE_TEST_MARKER();
- }
+ /* Ensure that blocks are always aligned to the required number
+ of bytes. */
+ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
+ {
+ /* Byte alignment required. */
+ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
}
else
{
- mtCOVERAGE_TEST_MARKER();
+ mtCOVERAGE_TEST_MARKER();
}
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
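+ /* Worked example, assuming uxHeapStructSize is 8 and portBYTE_ALIGNMENT is 4
+ (both values depend on the port and on configENABLE_MEMORY_DEBUG): a request
+ for 13 bytes becomes 13 + 8 = 21, which is then rounded up to 24 so that the
+ following block header stays aligned. */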
- if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+ if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining[ tag ] ) )
+ {
+ /* Traverse the list from the start (lowest address) block until
+ one of adequate size is found. */
+ pxPreviousBlock = &xStart;
+ pxBlock = xStart.pxNextFreeBlock;
+ while( ( ( pxBlock->xTag != tag ) || ( pxBlock->xBlockSize < xWantedSize ) ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
- /* Traverse the list from the start (lowest address) block until
- one of adequate size is found. */
- pxPreviousBlock = &xStart;
- pxBlock = xStart.pxNextFreeBlock;
- while( ( ( pxBlock->xTag != tag ) || ( pxBlock->xBlockSize < xWantedSize ) ) && ( pxBlock->pxNextFreeBlock != NULL ) )
- {
-// ets_printf("Block %x -> %x\n", (uint32_t)pxBlock, (uint32_t)pxBlock->pxNextFreeBlock);
+// ets_printf("Block %x -> %x\n", (uint32_t)pxBlock, (uint32_t)pxBlock->pxNextFreeBlock);
- #if (configENABLE_MEMORY_DEBUG == 1)
- {
- mem_check_block(pxBlock);
- }
- #endif
+ #if (configENABLE_MEMORY_DEBUG == 1)
+ {
+ mem_check_block(pxBlock);
+ }
+ #endif
- pxPreviousBlock = pxBlock;
- pxBlock = pxBlock->pxNextFreeBlock;
- }
+ pxPreviousBlock = pxBlock;
+ pxBlock = pxBlock->pxNextFreeBlock;
+ }
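+ /* All regions share a single free list ordered by address; blocks whose tag
+ does not match the requested tag are simply skipped, so this amounts to a
+ first-fit search within the requested tag. */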
+
+ /* If the end marker was not reached then a block of adequate size
+ was found. */
+ if( pxBlock != pxEnd )
+ {
+ /* Return the memory space pointed to - jumping over the
+ BlockLink_t structure at its start. */
+ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + uxHeapStructSize - BLOCK_TAIL_LEN - BLOCK_HEAD_LEN);
- /* If the end marker was not reached then a block of adequate size
- was found. */
- if( pxBlock != pxEnd )
+ /* This block is being returned for use so must be taken out
+ of the list of free blocks. */
+ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+ /* If the block is larger than required it can be split into
+ two. */
+
+ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
{
- /* Return the memory space pointed to - jumping over the
- BlockLink_t structure at its start. */
- pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + uxHeapStructSize - BLOCK_TAIL_LEN - BLOCK_HEAD_LEN);
-
- /* This block is being returned for use so must be taken out
- of the list of free blocks. */
- pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
-
- /* If the block is larger than required it can be split into
- two. */
-
- if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
- {
- /* This block is to be split into two. Create a new
- block following the number of bytes requested. The void
- cast is used to prevent byte alignment warnings from the
- compiler. */
- pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize);
-
- /* Calculate the sizes of two blocks split from the
- single block. */
- pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
- pxNewBlockLink->xTag = tag;
- pxBlock->xBlockSize = xWantedSize;
-
- #if (configENABLE_MEMORY_DEBUG == 1)
- {
- mem_init_dog(pxNewBlockLink);
- }
- #endif
-
-
- /* Insert the new block into the list of free blocks. */
- prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
- }
- else
- {
- mtCOVERAGE_TEST_MARKER();
- }
-
- xFreeBytesRemaining -= pxBlock->xBlockSize;
-
- if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
- {
- xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
- }
- else
- {
- mtCOVERAGE_TEST_MARKER();
- }
-
- /* The block is being returned - it is allocated and owned
- by the application and has no "next" block. */
- pxBlock->xBlockSize |= xBlockAllocatedBit;
- pxBlock->pxNextFreeBlock = NULL;
-
- #if (configENABLE_MEMORY_DEBUG == 1)
- {
- mem_init_dog(pxBlock);
- mem_malloc_block(pxBlock);
- }
- #endif
+ /* This block is to be split into two. Create a new
+ block following the number of bytes requested. The void
+ cast is used to prevent byte alignment warnings from the
+ compiler. */
+ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize);
+
+ /* Calculate the sizes of two blocks split from the
+ single block. */
+ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+ pxNewBlockLink->xTag = tag;
+ pxBlock->xBlockSize = xWantedSize;
+
+ #if (configENABLE_MEMORY_DEBUG == 1)
+ {
+ mem_init_dog(pxNewBlockLink);
+ }
+ #endif
+
+
+ /* Insert the new block into the list of free blocks. */
+ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
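/* Illustrative numbers (assuming the aligned, header-included request is 96
bytes and the free block found is 256 bytes): the block is split into a 96-byte
allocated block and a 160-byte free block that keeps the same tag, provided
160 exceeds heapMINIMUM_BLOCK_SIZE. */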
+
+ xFreeBytesRemaining[ tag ] -= pxBlock->xBlockSize;
+
+ if( xFreeBytesRemaining[ tag ] < xMinimumEverFreeBytesRemaining[ tag ] )
+ {
+ xMinimumEverFreeBytesRemaining[ tag ] = xFreeBytesRemaining[ tag ];
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* The block is being returned - it is allocated and owned
+ by the application and has no "next" block. */
+ pxBlock->xAllocated = 1;
+ pxBlock->pxNextFreeBlock = NULL;
+
+ #if (configENABLE_MEMORY_DEBUG == 1)
+ {
+ mem_init_dog(pxBlock);
+ mem_malloc_block(pxBlock);
+ }
+ #endif
}
else
{
#endif
/* Check the block is actually allocated. */
- configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+ configASSERT( ( pxLink->xAllocated ) != 0 );
configASSERT( pxLink->pxNextFreeBlock == NULL );
- if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+ if( pxLink->xAllocated != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
allocated. */
- pxLink->xBlockSize &= ~xBlockAllocatedBit;
+ pxLink->xAllocated = 0;
taskENTER_CRITICAL(&xMallocMutex);
{
/* Add this block to the list of free blocks. */
- xFreeBytesRemaining += pxLink->xBlockSize;
+ xFreeBytesRemaining[ pxLink->xTag ] += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
}
/*-----------------------------------------------------------*/
-size_t xPortGetFreeHeapSize( void )
+size_t xPortGetFreeHeapSizeTagged( BaseType_t tag )
{
- return xFreeBytesRemaining;
+ return xFreeBytesRemaining[ tag ];
}
/*-----------------------------------------------------------*/
-size_t xPortGetMinimumEverFreeHeapSize( void )
+size_t xPortGetMinimumEverFreeHeapSizeTagged( BaseType_t tag )
{
- return xMinimumEverFreeBytesRemaining;
+ return xMinimumEverFreeBytesRemaining[ tag ];
}
/*-----------------------------------------------------------*/
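/* Example use of the tagged queries (illustrative only; the meaning of each
tag value is defined by the application's heap region table, and tag 0 here is
just a placeholder):

    size_t xFreeNow  = xPortGetFreeHeapSizeTagged( 0 );
    size_t xLowWater = xPortGetMinimumEverFreeHeapSizeTagged( 0 );

xLowWater is the smallest amount of free memory ever observed for that tag,
which is useful when sizing the corresponding region. */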
continue;
}
+ configASSERT(pxHeapRegion->xTag < HEAPREGIONS_MAX_TAGCOUNT);
xTotalRegionSize = pxHeapRegion->xSizeInBytes;
/* Ensure the heap region starts on a correctly aligned boundary. */
}
xTotalHeapSize += pxFirstFreeBlockInRegion->xBlockSize;
+ xMinimumEverFreeBytesRemaining[ pxHeapRegion->xTag ] += pxFirstFreeBlockInRegion->xBlockSize;
+ xFreeBytesRemaining[ pxHeapRegion->xTag ] += pxFirstFreeBlockInRegion->xBlockSize;
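+ /* Each region's usable size is credited to its own tag's counters, so at
+ initialisation the low-water mark for a tag equals that tag's full capacity. */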
/* Move onto the next HeapRegionTagged_t structure. */
xDefinedRegions++;
#endif
}
- xMinimumEverFreeBytesRemaining = xTotalHeapSize;
- xFreeBytesRemaining = xTotalHeapSize;
-
/* Check something was actually defined before it is accessed. */
configASSERT( xTotalHeapSize );
- /* Work out the position of the top bit in a size_t variable. */
- xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );
#if (configENABLE_MEMORY_DEBUG == 1)
{
static mem_dbg_ctl_t g_mem_dbg;
char g_mem_print = 0;
static portMUX_TYPE *g_malloc_mutex = NULL;
-static unsigned int g_alloc_bit;
#define MEM_DEBUG(...)
-void mem_debug_init(size_t size, void *start, void *end, portMUX_TYPE *mutex, unsigned int alloc_bit)
+void mem_debug_init(size_t size, void *start, void *end, portMUX_TYPE *mutex)
{
- MEM_DEBUG("size=%d start=%p end=%p mutex=%p alloc_bit=0x%x\n", size, start, end, mutex, alloc_bit);
+ MEM_DEBUG("size=%d start=%p end=%p mutex=%p%x\n", size, start, end, mutex);
memset(&g_mem_dbg, 0, sizeof(g_mem_dbg));
memset(&g_malloc_list, 0, sizeof(g_malloc_list));
g_malloc_mutex = mutex;
g_heap_struct_size = size;
g_free_list = start;
g_end = end;
- g_alloc_bit = alloc_bit;
}
void mem_debug_push(char type, void *addr)
MEM_DEBUG("push type=%d addr=%p\n", type, addr);
if (g_mem_print){
if (type == DEBUG_TYPE_MALLOC){
- ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size&(~g_alloc_bit), addr);
+ ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size, addr);
} else {
- ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size&(~g_alloc_bit), addr);
+ ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size, addr);
}
} else {
mem_dbg_info_t *info = &g_mem_dbg.info[g_mem_dbg.cnt%DEBUG_MAX_INFO_NUM];
while (b){
d = DEBUG_BLOCK(b);
d->head.task[3] = '\0';
- ets_printf("t=%s s=%u a=%p\n", d->head.task?d->head.task:"", b->size&(~g_alloc_bit), b);
+ ets_printf("t=%s s=%u a=%p\n", d->head.task?d->head.task:"", b->size, b);
b = b->next;
}
taskEXIT_CRITICAL(g_malloc_mutex);
while (b){
debug_b = DEBUG_BLOCK(b);
- ets_printf("%s %p %p %u\n", debug_b->head.task, debug_b, b, b->size&(~g_alloc_bit));
+ ets_printf("%s %p %p %u\n", debug_b->head.task, debug_b, b, b->size);
b = b->next;
}
}
{
os_block_t *b = (os_block_t*)data;
- MEM_DEBUG("mem malloc block data=%p, size=%u\n", data, b->size&(~g_alloc_bit));
+ MEM_DEBUG("mem malloc block data=%p, size=%u\n", data, b->size);
mem_debug_push(DEBUG_TYPE_MALLOC, data);
if (b){
os_block_t *pre = &g_malloc_list;
debug_block_t *debug_b;
- MEM_DEBUG("mem free block data=%p, size=%d\n", data, del->size&(~g_alloc_bit));
+ MEM_DEBUG("mem free block data=%p, size=%d\n", data, del->size);
mem_debug_push(DEBUG_TYPE_FREE, data);
if (!del) {
}
debug_b = DEBUG_BLOCK(del);
- ets_printf("%s %p %p %u already free\n", debug_b->head.task, debug_b, del, del->size&(~g_alloc_bit));
+ ets_printf("%s %p %p %u already free\n", debug_b->head.task, debug_b, del, del->size);
mem_malloc_show();
abort();
}