[Refactor] Optimize and refine the behavior of pvPortRealloc

skb666 2026-01-06 23:56:44 +08:00 committed by Anubhav Rawal
parent 55a7f9a7ac
commit 6e0d95811a
2 changed files with 556 additions and 286 deletions


@@ -427,54 +427,78 @@ void vPortFree( void * pv )
* On failure: NULL
*
* Behavior:
* - When pv is NULL, equivalent to pvPortMalloc(xWantedSize)
* - When xWantedSize is 0, equivalent to vPortFree(pv)
* - Resize strategy:
* 1. If new size <= original size, attempt to shrink the block
* 2. If new size > original size, attempt to expand by merging with adjacent free block
* 3. If in-place resize fails, allocate new block and copy data
* 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize).
* 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL.
* 3) Align the requested size and include the block header size; if the aligned
* size is invalid, return NULL.
* 4) If the aligned requested size is <= current block size, shrink in place and
* insert any sufficiently large remainder as a free block.
* 5) If expansion is required and there are enough free bytes in the heap, try to
* expand into adjacent free blocks in this order:
* - Merge with next free block if it is immediately after the current block.
* - Merge with previous free block if it is immediately before the current block.
* - Merge with both previous and next if combined they provide enough space.
* If none of the above succeed, fall back to allocating a new block, memcpy'ing
* the payload and freeing the old block.
*/
void *pvPortRealloc( void *pv,
size_t xWantedSize )
void * pvPortRealloc( void * pv,
size_t xWantedSize )
{
BlockLink_t *pxBlock;
BlockLink_t *pxPreviousBlock;
BlockLink_t *pxNewBlockLink;
BlockLink_t *pxAdjacentFreeBlock;
void *pvReturn = NULL;
size_t xOriginalSize;
size_t xNewBlockSize;
BlockLink_t * pxBlock;
BlockLink_t * pxNewBlockLink;
BlockLink_t * pxNextFreeBlock;
BlockLink_t * pxPreviousFreeBlock;
BlockLink_t * pxBeforePreviousFreeBlock;
uint8_t * puc;
void * pvReturn = NULL;
size_t xAlignedWantedSize;
size_t xAdditionalRequiredSize;
size_t xCopySize;
size_t xCurrentBlockSize;
size_t xRemainingBlockSize;
size_t xNextBlockSize;
size_t xPreviousBlockSize;
BaseType_t xHasNextBlock;
BaseType_t xHasPreviousBlock;
/* Handle NULL pointer case - equivalent to malloc */
/* Ensure the end marker has been set up. */
configASSERT( pxEnd );
/* If pv is NULL behave like malloc. */
if( pv == NULL )
{
return pvPortMalloc( xWantedSize );
pvReturn = pvPortMalloc( xWantedSize );
goto realloc_exit;
}
/* Handle zero size case - equivalent to free */
/* If requested size is zero behave like free. */
if( xWantedSize == 0 )
{
vPortFree( pv );
return NULL;
pvReturn = NULL;
goto realloc_exit;
}
/* Calculate new block size with overhead (header size and alignment) */
xNewBlockSize = xWantedSize;
if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 )
/* Calculate the internal aligned size including the header. */
xAlignedWantedSize = xWantedSize;
/* Add the header size and check for overflow. */
if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 )
{
xNewBlockSize += xHeapStructSize;
if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
xAlignedWantedSize += xHeapStructSize;
/* Ensure byte alignment. */
if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
{
xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK );
if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 )
xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK );
if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 )
{
xNewBlockSize += xAdditionalRequiredSize;
xAlignedWantedSize += xAdditionalRequiredSize;
}
else
{
return NULL;
/* Overflow -> invalid request. */
xAlignedWantedSize = 0;
}
}
else
@@ -484,165 +508,276 @@ void *pvPortRealloc( void *pv,
}
else
{
return NULL;
xAlignedWantedSize = 0;
}
/* Get the block header from the user pointer and validate it */
pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize );
/* Validate the aligned size. */
if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) )
{
pvReturn = NULL;
goto realloc_exit;
}
/* Get the block header for the allocated block. */
puc = ( uint8_t * ) pv;
puc -= xHeapStructSize;
pxBlock = ( BlockLink_t * ) puc;
heapVALIDATE_BLOCK_POINTER( pxBlock );
if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
{
return NULL;
}
configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) );
/* Calculate the original block size (without the allocated bit)
* Check if there's enough free memory to expand the block */
xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;
if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) )
{
/* Not enough memory to expand the block */
return NULL;
}
/* Current block size without the allocated bit. */
xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;
/* Calculate the amount of user data to copy (excluding the block header).
* The user data size is the block size minus the header size.
* Limit the copy size to the requested size to avoid copying too much data. */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 );
xCopySize = xOriginalSize - xHeapStructSize;
if( xWantedSize < xCopySize )
/* 1) Shrink in place if possible. */
if( xAlignedWantedSize <= xCurrentBlockSize )
{
xCopySize = xWantedSize;
}
xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize;
/* Enter critical section - protect heap structure from concurrent access */
vTaskSuspendAll();
{
/* Case 1: Shrink the block (new size is smaller than or equal to original)
* Check if the remaining space is large enough to create a separate free block */
if( xNewBlockSize <= xOriginalSize )
/* Only split if the remaining space is large enough to form a free block. */
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* Create a new free block from the remaining space */
if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE )
vTaskSuspendAll();
{
pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize );
configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize;
xFreeBytesRemaining += pxNewBlockLink->xBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
heapFREE_BLOCK( pxBlock );
pxBlock->xBlockSize = xNewBlockSize;
/* Set the block to the new size and mark as allocated. */
pxBlock->xBlockSize = xAlignedWantedSize;
heapALLOCATE_BLOCK( pxBlock );
/* Create a new free block from the remainder and insert it. */
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pvReturn = pv;
( void ) xTaskResumeAll();
}
else
{
/* Case 2: Try to expand by merging with next free block */
pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize );
configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 );
/* Remainder too small to split. */
mtCOVERAGE_TEST_MARKER();
}
pvReturn = pv;
goto realloc_exit;
}
/* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */
else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining )
{
vTaskSuspendAll();
{
/* Walk the free list to find the free blocks immediately before and after pxBlock. */
pxBeforePreviousFreeBlock = &xStart;
pxPreviousFreeBlock = &xStart;
pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
/* Traverse the free list to find if the adjacent block is actually free.
* The free list is ordered by address, so we can search efficiently.*/
pxPreviousBlock = &xStart;
while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) &&
( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
{
pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxPreviousBlock );
pxBeforePreviousFreeBlock = pxPreviousFreeBlock;
pxPreviousFreeBlock = pxNextFreeBlock;
pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
}
if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) )
/* Check if next is immediately after current. */
if( ( pxNextFreeBlock != pxEnd ) &&
( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) )
{
configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) );
if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) )
xHasNextBlock = pdTRUE;
}
else
{
xHasNextBlock = pdFALSE;
}
/* Check if previous is immediately before current. */
if( ( pxPreviousFreeBlock != &xStart ) &&
( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) )
{
xHasPreviousBlock = pdTRUE;
}
else
{
xHasPreviousBlock = pdFALSE;
}
/* Compute required extra size and neighbor sizes. */
xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize;
xNextBlockSize = pxNextFreeBlock->xBlockSize;
xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize;
configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 );
configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 );
/* a) If next exists and is large enough, merge with next. */
if( ( xHasNextBlock == pdTRUE ) &&
( xNextBlockSize >= xRemainingBlockSize ) )
{
/* Remove next from free list and update free bytes. */
pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xNextBlockSize;
/* Temporarily free the current block for merging. */
heapFREE_BLOCK( pxBlock );
/* Remaining bytes after creating the requested size. */
xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* Found a suitable adjacent free block that can provide enough space. */
if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize )
{
/* Remove the adjacent free block from the free list and merge it with the allocated block. */
pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock;
xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize;
heapFREE_BLOCK( pxBlock );
pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize;
/* Set block to requested size and insert leftover as a free block. */
pxBlock->xBlockSize = xAlignedWantedSize;
/* If the merged block is larger than needed, split the excess space
* into a new free block. */
if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE )
{
pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize );
configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize;
xFreeBytesRemaining += pxNewBlockLink->xBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
pxBlock->xBlockSize = xNewBlockSize;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
heapALLOCATE_BLOCK( pxBlock );
pvReturn = pv;
/* Update minimum free size statistic if memory was consumed */
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
/* Leftover too small, keep as part of allocated block. */
pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize;
}
/* Mark merged block as allocated. */
heapALLOCATE_BLOCK( pxBlock );
pvReturn = pv;
}
/* b) If previous exists and is large enough, merge with previous (data must be moved). */
else if( ( xHasPreviousBlock == pdTRUE ) &&
( xPreviousBlockSize >= xRemainingBlockSize ) )
{
/* Remove previous from free list and update free bytes. */
pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock;
pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xPreviousBlockSize;
heapFREE_BLOCK( pxBlock );
/* Move the payload forward into the previous block's payload area. */
puc = ( uint8_t * ) pxPreviousFreeBlock;
puc += xHeapStructSize;
/* Ensure memmove length will not underflow. */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );
/* Remaining bytes after creating the requested size. */
xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* previous becomes the allocated block of requested size, insert leftover. */
pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
/* Leftover too small, treat entire previous+current as allocated. */
pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize;
}
heapALLOCATE_BLOCK( pxPreviousFreeBlock );
/* Return the payload pointer in the previous block. */
pvReturn = ( void * ) puc;
}
/* c) If both neighbors exist and combined are large enough, merge both sides (move data). */
else if( ( xHasNextBlock == pdTRUE ) &&
( xHasPreviousBlock == pdTRUE ) &&
( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) )
{
/* Remove both previous and next from the free list and update free bytes. */
pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize;
heapFREE_BLOCK( pxBlock );
/* Move payload forward into previous block's payload area. */
puc = ( uint8_t * ) pxPreviousFreeBlock;
puc += xHeapStructSize;
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );
/* Remaining bytes after allocation. */
xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize;
}
heapALLOCATE_BLOCK( pxPreviousFreeBlock );
pvReturn = ( void * ) puc;
}
else
{
/* None of the merge strategies worked on this path. */
mtCOVERAGE_TEST_MARKER();
}
/* Update historical minimum free bytes. */
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
( void ) xTaskResumeAll();
}
else
{
/* Not enough free bytes in the entire heap to satisfy expansion. */
pvReturn = NULL;
goto realloc_exit;
}
/* Exit critical section - heap structure modification complete */
( void ) xTaskResumeAll();
/* Case 3: If in-place resize failed, allocate a new block and move the data.
* This is more expensive as it involves:
* 1. Allocating a new block with the requested size
* 2. Copying the user data from the old block to the new block
* 3. Freeing the old block
* Note: Statistics are updated by the called functions (malloc and free). */
/* If still NULL, fall back to allocating a new block and copying the payload. */
if( pvReturn == NULL )
{
pvReturn = pvPortMalloc( xWantedSize );
if( pvReturn != NULL )
puc = pvPortMalloc( xWantedSize );
if( puc != NULL )
{
/* Copy user data from old block to new block (up to the smaller of old or new size) */
( void )memcpy( pvReturn, pv, xCopySize );
/* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize );
vPortFree( pv );
pvReturn = ( void * ) puc;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 );
realloc_exit:
/* Ensure returned pointer is properly aligned (NULL also satisfies this). */
configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */
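
For illustration, the contract documented in the Behavior comment above can be exercised as follows. This is a minimal usage sketch, not part of the commit: it assumes a FreeRTOS build of this heap with configSUPPORT_HEAP_REALLOC == 1 and a pvPortRealloc() prototype available through portable.h; the function name prvReallocDemo and the buffer sizes are hypothetical.

#include <string.h>
#include "FreeRTOS.h"
#include "portable.h"

/* Hypothetical demo exercising the NULL, grow and zero-size cases of pvPortRealloc(). */
static void prvReallocDemo( void )
{
    char * pcBuffer;
    char * pcGrown;

    /* pv == NULL: behaves like pvPortMalloc( 32 ). */
    pcBuffer = pvPortRealloc( NULL, 32 );

    if( pcBuffer != NULL )
    {
        ( void ) strcpy( pcBuffer, "hello" );

        /* Grow the block; on success the payload is preserved, either in
         * place (by merging with an adjacent free block) or in a freshly
         * allocated block.  On failure NULL is returned and pcBuffer stays valid. */
        pcGrown = pvPortRealloc( pcBuffer, 128 );

        if( pcGrown != NULL )
        {
            pcBuffer = pcGrown;
            configASSERT( strcmp( pcBuffer, "hello" ) == 0 );
        }

        /* xWantedSize == 0: behaves like vPortFree() and returns NULL. */
        pcBuffer = pvPortRealloc( pcBuffer, 0 );
        configASSERT( pcBuffer == NULL );
    }
}

As with the standard realloc(), the original pointer remains valid when a grow request fails, which is why the sketch only overwrites pcBuffer after a successful call.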


@@ -462,54 +462,78 @@ void vPortFree( void * pv )
* On failure: NULL
*
* Behavior:
* - When pv is NULL, equivalent to pvPortMalloc(xWantedSize)
* - When xWantedSize is 0, equivalent to vPortFree(pv)
* - Resize strategy:
* 1. If new size <= original size, attempt to shrink the block
* 2. If new size > original size, attempt to expand by merging with adjacent free block
* 3. If in-place resize fails, allocate new block and copy data
* 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize).
* 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL.
* 3) Align the requested size and include the block header size; if the aligned
* size is invalid, return NULL.
* 4) If the aligned requested size is <= current block size, shrink in place and
* insert any sufficiently large remainder as a free block.
* 5) If expansion is required and there are enough free bytes in the heap, try to
* expand into adjacent free blocks in this order:
* - Merge with next free block if it is immediately after the current block.
* - Merge with previous free block if it is immediately before the current block.
* - Merge with both previous and next if combined they provide enough space.
* If none of the above succeed, fall back to allocating a new block, memcpy'ing
* the payload and freeing the old block.
*/
void *pvPortRealloc( void *pv,
size_t xWantedSize )
void * pvPortRealloc( void * pv,
size_t xWantedSize )
{
BlockLink_t *pxBlock;
BlockLink_t *pxPreviousBlock;
BlockLink_t *pxNewBlockLink;
BlockLink_t *pxAdjacentFreeBlock;
void *pvReturn = NULL;
size_t xOriginalSize;
size_t xNewBlockSize;
BlockLink_t * pxBlock;
BlockLink_t * pxNewBlockLink;
BlockLink_t * pxNextFreeBlock;
BlockLink_t * pxPreviousFreeBlock;
BlockLink_t * pxBeforePreviousFreeBlock;
uint8_t * puc;
void * pvReturn = NULL;
size_t xAlignedWantedSize;
size_t xAdditionalRequiredSize;
size_t xCopySize;
size_t xCurrentBlockSize;
size_t xRemainingBlockSize;
size_t xNextBlockSize;
size_t xPreviousBlockSize;
BaseType_t xHasNextBlock;
BaseType_t xHasPreviousBlock;
/* Handle NULL pointer case - equivalent to malloc */
/* Ensure the end marker has been set up. */
configASSERT( pxEnd );
/* If pv is NULL behave like malloc. */
if( pv == NULL )
{
return pvPortMalloc( xWantedSize );
pvReturn = pvPortMalloc( xWantedSize );
goto realloc_exit;
}
/* Handle zero size case - equivalent to free */
/* If requested size is zero behave like free. */
if( xWantedSize == 0 )
{
vPortFree( pv );
return NULL;
pvReturn = NULL;
goto realloc_exit;
}
/* Calculate new block size with overhead (header size and alignment) */
xNewBlockSize = xWantedSize;
if( heapADD_WILL_OVERFLOW( xNewBlockSize, xHeapStructSize ) == 0 )
/* Calculate the internal aligned size including the header. */
xAlignedWantedSize = xWantedSize;
/* Add the header size and check for overflow. */
if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 )
{
xNewBlockSize += xHeapStructSize;
if( ( xNewBlockSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
xAlignedWantedSize += xHeapStructSize;
/* Ensure byte alignment. */
if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
{
xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xNewBlockSize & portBYTE_ALIGNMENT_MASK );
if( heapADD_WILL_OVERFLOW( xNewBlockSize, xAdditionalRequiredSize ) == 0 )
xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK );
if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 )
{
xNewBlockSize += xAdditionalRequiredSize;
xAlignedWantedSize += xAdditionalRequiredSize;
}
else
{
return NULL;
/* Overflow -> invalid request. */
xAlignedWantedSize = 0;
}
}
else
@@ -519,165 +543,276 @@ void *pvPortRealloc( void *pv,
}
else
{
return NULL;
xAlignedWantedSize = 0;
}
/* Get the block header from the user pointer and validate it */
pxBlock = ( BlockLink_t * )( ( uint8_t * )pv - xHeapStructSize );
/* Validate the aligned size. */
if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) )
{
pvReturn = NULL;
goto realloc_exit;
}
/* Get the block header for the allocated block. */
puc = ( uint8_t * ) pv;
puc -= xHeapStructSize;
pxBlock = ( BlockLink_t * ) puc;
heapVALIDATE_BLOCK_POINTER( pxBlock );
if( ( heapBLOCK_IS_ALLOCATED( pxBlock ) == 0 ) || ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
{
return NULL;
}
configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) );
/* Calculate the original block size (without the allocated bit)
* Check if there's enough free memory to expand the block */
xOriginalSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;
if( ( xOriginalSize < xNewBlockSize ) && ( xFreeBytesRemaining < ( xNewBlockSize - xOriginalSize ) ) )
{
/* Not enough memory to expand the block */
return NULL;
}
/* Current block size without the allocated bit. */
xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;
/* Calculate the amount of user data to copy (excluding the block header).
* The user data size is the block size minus the header size.
* Limit the copy size to the requested size to avoid copying too much data. */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xOriginalSize, xHeapStructSize ) == 0 );
xCopySize = xOriginalSize - xHeapStructSize;
if( xWantedSize < xCopySize )
/* 1) Shrink in place if possible. */
if( xAlignedWantedSize <= xCurrentBlockSize )
{
xCopySize = xWantedSize;
}
xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize;
/* Enter critical section - protect heap structure from concurrent access */
vTaskSuspendAll();
{
/* Case 1: Shrink the block (new size is smaller than or equal to original)
* Check if the remaining space is large enough to create a separate free block */
if( xNewBlockSize <= xOriginalSize )
/* Only split if the remaining space is large enough to form a free block. */
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* Create a new free block from the remaining space */
if( ( xOriginalSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE )
vTaskSuspendAll();
{
pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize );
configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xOriginalSize - xNewBlockSize;
xFreeBytesRemaining += pxNewBlockLink->xBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
heapFREE_BLOCK( pxBlock );
pxBlock->xBlockSize = xNewBlockSize;
/* Set the block to the new size and mark as allocated. */
pxBlock->xBlockSize = xAlignedWantedSize;
heapALLOCATE_BLOCK( pxBlock );
/* Create a new free block from the remainder and insert it. */
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pvReturn = pv;
( void ) xTaskResumeAll();
}
else
{
/* Case 2: Try to expand by merging with next free block */
pxAdjacentFreeBlock = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xOriginalSize );
configASSERT( ( ( ( size_t )pxAdjacentFreeBlock ) & portBYTE_ALIGNMENT_MASK ) == 0 );
/* Remainder too small to split. */
mtCOVERAGE_TEST_MARKER();
}
pvReturn = pv;
goto realloc_exit;
}
/* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */
else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining )
{
vTaskSuspendAll();
{
/* Walk the free list to find the free blocks immediately before and after pxBlock. */
pxBeforePreviousFreeBlock = &xStart;
pxPreviousFreeBlock = &xStart;
pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
/* Traverse the free list to find if the adjacent block is actually free.
* The free list is ordered by address, so we can search efficiently.*/
pxPreviousBlock = &xStart;
while( ( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) ) &&
( pxPreviousBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
{
pxPreviousBlock = heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxPreviousBlock );
pxBeforePreviousFreeBlock = pxPreviousFreeBlock;
pxPreviousFreeBlock = pxNextFreeBlock;
pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock );
heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
}
if( pxPreviousBlock->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( pxAdjacentFreeBlock ) )
/* Check if next is immediately after current. */
if( ( pxNextFreeBlock != pxEnd ) &&
( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) )
{
configASSERT( heapBLOCK_SIZE_IS_VALID( pxAdjacentFreeBlock->xBlockSize ) );
if( !heapADD_WILL_OVERFLOW( xOriginalSize, pxAdjacentFreeBlock->xBlockSize ) )
xHasNextBlock = pdTRUE;
}
else
{
xHasNextBlock = pdFALSE;
}
/* Check if previous is immediately before current. */
if( ( pxPreviousFreeBlock != &xStart ) &&
( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) )
{
xHasPreviousBlock = pdTRUE;
}
else
{
xHasPreviousBlock = pdFALSE;
}
/* Compute required extra size and neighbor sizes. */
xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize;
xNextBlockSize = pxNextFreeBlock->xBlockSize;
xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize;
configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 );
configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 );
/* a) If next exists and is large enough, merge with next. */
if( ( xHasNextBlock == pdTRUE ) &&
( xNextBlockSize >= xRemainingBlockSize ) )
{
/* Remove next from free list and update free bytes. */
pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xNextBlockSize;
/* Temporarily free the current block for merging. */
heapFREE_BLOCK( pxBlock );
/* Remaining bytes after creating the requested size. */
xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* Found a suitable adjacent free block that can provide enough space. */
if( ( xOriginalSize + pxAdjacentFreeBlock->xBlockSize ) >= xNewBlockSize )
{
/* Remove the adjacent free block from the free list and merge it with the allocated block. */
pxPreviousBlock->pxNextFreeBlock = pxAdjacentFreeBlock->pxNextFreeBlock;
xFreeBytesRemaining -= pxAdjacentFreeBlock->xBlockSize;
heapFREE_BLOCK( pxBlock );
pxBlock->xBlockSize = xOriginalSize + pxAdjacentFreeBlock->xBlockSize;
/* Set block to requested size and insert leftover as a free block. */
pxBlock->xBlockSize = xAlignedWantedSize;
/* If the merged block is larger than needed, split the excess space
* into a new free block. */
if( ( pxBlock->xBlockSize - xNewBlockSize ) > heapMINIMUM_BLOCK_SIZE )
{
pxNewBlockLink = ( BlockLink_t * )( ( ( uint8_t * )pxBlock ) + xNewBlockSize );
configASSERT( ( ( ( size_t )pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xNewBlockSize;
xFreeBytesRemaining += pxNewBlockLink->xBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
pxBlock->xBlockSize = xNewBlockSize;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
heapALLOCATE_BLOCK( pxBlock );
pvReturn = pv;
/* Update minimum free size statistic if memory was consumed */
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
/* Leftover too small, keep as part of allocated block. */
pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize;
}
/* Mark merged block as allocated. */
heapALLOCATE_BLOCK( pxBlock );
pvReturn = pv;
}
/* b) If previous exists and is large enough, merge with previous (data must be moved). */
else if( ( xHasPreviousBlock == pdTRUE ) &&
( xPreviousBlockSize >= xRemainingBlockSize ) )
{
/* Remove previous from free list and update free bytes. */
pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock;
pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xPreviousBlockSize;
heapFREE_BLOCK( pxBlock );
/* Move the payload forward into the previous block's payload area. */
puc = ( uint8_t * ) pxPreviousFreeBlock;
puc += xHeapStructSize;
/* Ensure memmove length will not underflow. */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );
/* Remaining bytes after creating the requested size. */
xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
/* previous becomes the allocated block of requested size, insert leftover. */
pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
/* Leftover too small, treat entire previous+current as allocated. */
pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize;
}
heapALLOCATE_BLOCK( pxPreviousFreeBlock );
/* Return the payload pointer in the previous block. */
pvReturn = ( void * ) puc;
}
/* c) If both neighbors exist and combined are large enough, merge both sides (move data). */
else if( ( xHasNextBlock == pdTRUE ) &&
( xHasPreviousBlock == pdTRUE ) &&
( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) )
{
/* Remove both previous and next from the free list and update free bytes. */
pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize;
heapFREE_BLOCK( pxBlock );
/* Move payload forward into previous block's payload area. */
puc = ( uint8_t * ) pxPreviousFreeBlock;
puc += xHeapStructSize;
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );
/* Remaining bytes after allocation. */
xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize;
if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
{
pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;
pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
pxNewBlockLink->xBlockSize = xRemainingBlockSize;
xFreeBytesRemaining += xRemainingBlockSize;
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize;
}
heapALLOCATE_BLOCK( pxPreviousFreeBlock );
pvReturn = ( void * ) puc;
}
else
{
/* None of the merge strategies worked on this path. */
mtCOVERAGE_TEST_MARKER();
}
/* Update historical minimum free bytes. */
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
( void ) xTaskResumeAll();
}
else
{
/* Not enough free bytes in the entire heap to satisfy expansion. */
pvReturn = NULL;
goto realloc_exit;
}
/* Exit critical section - heap structure modification complete */
( void ) xTaskResumeAll();
/* Case 3: If in-place resize failed, allocate a new block and move the data.
* This is more expensive as it involves:
* 1. Allocating a new block with the requested size
* 2. Copying the user data from the old block to the new block
* 3. Freeing the old block
* Note: Statistics are updated by the called functions (malloc and free). */
/* If still NULL, fall back to allocating a new block and copying the payload. */
if( pvReturn == NULL )
{
pvReturn = pvPortMalloc( xWantedSize );
if( pvReturn != NULL )
puc = pvPortMalloc( xWantedSize );
if( puc != NULL )
{
/* Copy user data from old block to new block (up to the smaller of old or new size) */
( void )memcpy( pvReturn, pv, xCopySize );
/* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */
configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize );
vPortFree( pv );
pvReturn = ( void * ) puc;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
configASSERT( ( ( ( size_t )pvReturn ) & ( size_t )portBYTE_ALIGNMENT_MASK ) == 0 );
realloc_exit:
/* Ensure returned pointer is properly aligned (NULL also satisfies this). */
configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */
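
Separately, the size validation performed at the top of the new pvPortRealloc() (step 3 of the Behavior comment: add the block header, pad to the port's alignment, reject on overflow) can be pictured as a standalone helper. This is only an illustrative sketch under stated assumptions: prvExampleAlignedSize is not part of the commit, the header size is passed in as a parameter instead of using the file-local xHeapStructSize, and SIZE_MAX checks stand in for the heap's heapADD_WILL_OVERFLOW() macro.

#include <stddef.h>
#include <stdint.h>
#include "FreeRTOS.h"

/* Illustrative only: mirrors the rounding pvPortRealloc() performs before it
 * decides whether to shrink, expand in place, or fall back to malloc+copy.
 * Returns 0 for a request that cannot be represented. */
static size_t prvExampleAlignedSize( size_t xWantedSize,
                                     size_t xHeaderSize )
{
    size_t xSize = xWantedSize;
    size_t xPadding;

    /* Account for the block header, rejecting overflow. */
    if( ( SIZE_MAX - xSize ) < xHeaderSize )
    {
        return 0;
    }

    xSize += xHeaderSize;

    /* Round up to the port's byte alignment, again rejecting overflow. */
    if( ( xSize & portBYTE_ALIGNMENT_MASK ) != 0 )
    {
        xPadding = portBYTE_ALIGNMENT - ( xSize & portBYTE_ALIGNMENT_MASK );

        if( ( SIZE_MAX - xSize ) < xPadding )
        {
            return 0;
        }

        xSize += xPadding;
    }

    return xSize;
}

A result of 0 corresponds to the overflow paths in the diff above, where xAlignedWantedSize is set to 0 and pvPortRealloc() returns NULL.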