[Refactor] Optimize and refine the behavior logic of pvPortRealloc

skb666 2026-01-06 23:56:44 +08:00 committed by Anubhav Rawal
parent 55a7f9a7ac
commit 6e0d95811a
2 changed files with 556 additions and 286 deletions
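The refactored function keeps the realloc-style contract spelled out in the comment block below: a NULL pointer degenerates to pvPortMalloc(), a zero size degenerates to vPortFree(), and on success the existing payload is preserved even when the block has to move. A minimal caller-side sketch, assuming configSUPPORT_HEAP_REALLOC is set to 1; the buffer name and sizes are hypothetical and not part of this commit:

    /* Illustrative use only - pxMsg and the sizes are made-up application values. */
    uint8_t * pxMsg = pvPortMalloc( 64 );

    if( pxMsg != NULL )
    {
        uint8_t * pxResized = pvPortRealloc( pxMsg, 256 );  /* Grow; the first 64 bytes are preserved. */

        if( pxResized != NULL )
        {
            pxMsg = pxResized;                    /* The old pointer may have been freed or moved. */
        }

        ( void ) pvPortRealloc( pxMsg, 0 );       /* Size 0 behaves like vPortFree() and returns NULL. */
        pxMsg = NULL;
    }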

View file

@@ -427,54 +427,78 @@ void vPortFree( void * pv )
 * On failure: NULL
 *
 * Behavior:
 * 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize).
 * 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL.
 * 3) Align the requested size and include the block header size; if the aligned
 *    size is invalid, return NULL.
 * 4) If the aligned requested size is <= current block size, shrink in place and
 *    insert any sufficiently large remainder as a free block.
 * 5) If expansion is required and there are enough free bytes in the heap, try to
 *    expand into adjacent free blocks in this order:
 *    - Merge with next free block if it is immediately after the current block.
 *    - Merge with previous free block if it is immediately before the current block.
 *    - Merge with both previous and next if combined they provide enough space.
 *    If none of the above succeed, fall back to allocating a new block, memcpy'ing
 *    the payload and freeing the old block.
 */
void * pvPortRealloc( void * pv,
                      size_t xWantedSize )
{
    BlockLink_t * pxBlock;
    BlockLink_t * pxNewBlockLink;
    BlockLink_t * pxNextFreeBlock;
    BlockLink_t * pxPreviousFreeBlock;
    BlockLink_t * pxBeforePreviousFreeBlock;
    uint8_t * puc;
    void * pvReturn = NULL;
    size_t xAlignedWantedSize;
    size_t xAdditionalRequiredSize;
    size_t xCurrentBlockSize;
    size_t xRemainingBlockSize;
    size_t xNextBlockSize;
    size_t xPreviousBlockSize;
    BaseType_t xHasNextBlock;
    BaseType_t xHasPreviousBlock;

    /* Ensure the end marker has been set up. */
    configASSERT( pxEnd );

    /* If pv is NULL behave like malloc. */
    if( pv == NULL )
    {
        pvReturn = pvPortMalloc( xWantedSize );
        goto realloc_exit;
    }

    /* If requested size is zero behave like free. */
    if( xWantedSize == 0 )
    {
        vPortFree( pv );
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* Calculate the internal aligned size including the header. */
    xAlignedWantedSize = xWantedSize;

    /* Add the header size and check for overflow. */
    if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 )
    {
        xAlignedWantedSize += xHeapStructSize;

        /* Ensure byte alignment. */
        if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
        {
            xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK );

            if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 )
            {
                xAlignedWantedSize += xAdditionalRequiredSize;
            }
            else
            {
                /* Overflow -> invalid request. */
                xAlignedWantedSize = 0;
            }
        }
        else
@@ -484,111 +508,235 @@ void *pvPortRealloc( void *pv,
    }
    else
    {
        xAlignedWantedSize = 0;
    }

    /* Validate the aligned size. */
    if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) )
    {
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* Get the block header for the allocated block. */
    puc = ( uint8_t * ) pv;
    puc -= xHeapStructSize;
    pxBlock = ( BlockLink_t * ) puc;

    heapVALIDATE_BLOCK_POINTER( pxBlock );
    configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) );

    /* Current block size without the allocated bit. */
    xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;

    /* 1) Shrink in place if possible. */
    if( xAlignedWantedSize <= xCurrentBlockSize )
    {
        xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize;

        /* Only split if the remaining space is large enough to form a free block. */
        if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
        {
            vTaskSuspendAll();
            {
                /* Set the block to the new size and mark as allocated. */
                pxBlock->xBlockSize = xAlignedWantedSize;
                heapALLOCATE_BLOCK( pxBlock );

                /* Create a new free block from the remainder and insert it. */
                pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
                configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                xFreeBytesRemaining += xRemainingBlockSize;
                prvInsertBlockIntoFreeList( pxNewBlockLink );
            }
            ( void ) xTaskResumeAll();
        }
        else
        {
            /* Remainder too small to split. */
            mtCOVERAGE_TEST_MARKER();
        }

        pvReturn = pv;
        goto realloc_exit;
    }
    /* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */
    else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining )
    {
        vTaskSuspendAll();
        {
            /* Walk the free list to find the free blocks immediately before and after pxBlock. */
            pxBeforePreviousFreeBlock = &xStart;
            pxPreviousFreeBlock = &xStart;
            pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
            heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );

            while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
            {
                pxBeforePreviousFreeBlock = pxPreviousFreeBlock;
                pxPreviousFreeBlock = pxNextFreeBlock;
                pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock );
                heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
            }

            /* Check if next is immediately after current. */
            if( ( pxNextFreeBlock != pxEnd ) &&
                ( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) )
            {
                xHasNextBlock = pdTRUE;
            }
            else
            {
                xHasNextBlock = pdFALSE;
            }

            /* Check if previous is immediately before current. */
            if( ( pxPreviousFreeBlock != &xStart ) &&
                ( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) )
            {
                xHasPreviousBlock = pdTRUE;
            }
            else
            {
                xHasPreviousBlock = pdFALSE;
            }

            /* Compute required extra size and neighbor sizes. */
            xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize;
            xNextBlockSize = pxNextFreeBlock->xBlockSize;
            xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize;

            configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 );
            configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 );

            /* a) If next exists and is large enough, merge with next. */
            if( ( xHasNextBlock == pdTRUE ) &&
                ( xNextBlockSize >= xRemainingBlockSize ) )
            {
                /* Remove next from free list and update free bytes. */
                pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
                pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xNextBlockSize;

                /* Temporarily free the current block for merging. */
                heapFREE_BLOCK( pxBlock );

                /* Remaining bytes after creating the requested size. */
                xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    /* Set block to requested size and insert leftover as a free block. */
                    pxBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    /* Leftover too small, keep as part of allocated block. */
                    pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize;
                }

                /* Mark merged block as allocated. */
                heapALLOCATE_BLOCK( pxBlock );
                pvReturn = pv;
            }
            /* b) If previous exists and is large enough, merge with previous (data must be moved). */
            else if( ( xHasPreviousBlock == pdTRUE ) &&
                     ( xPreviousBlockSize >= xRemainingBlockSize ) )
            {
                /* Remove previous from free list and update free bytes. */
                pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock;
                pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xPreviousBlockSize;

                heapFREE_BLOCK( pxBlock );

                /* Move the payload forward into the previous block's payload area. */
                puc = ( uint8_t * ) pxPreviousFreeBlock;
                puc += xHeapStructSize;

                /* Ensure memmove length will not underflow. */
                configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
                ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );

                /* Remaining bytes after creating the requested size. */
                xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    /* Previous becomes the allocated block of requested size, insert leftover. */
                    pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    /* Leftover too small, treat entire previous+current as allocated. */
                    pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize;
                }

                heapALLOCATE_BLOCK( pxPreviousFreeBlock );

                /* Return the payload pointer in the previous block. */
                pvReturn = ( void * ) puc;
            }
            /* c) If both neighbors exist and combined are large enough, merge both sides (move data). */
            else if( ( xHasNextBlock == pdTRUE ) &&
                     ( xHasPreviousBlock == pdTRUE ) &&
                     ( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) )
            {
                /* Remove both previous and next from the free list and update free bytes. */
                pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
                pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize;

                heapFREE_BLOCK( pxBlock );

                /* Move payload forward into previous block's payload area. */
                puc = ( uint8_t * ) pxPreviousFreeBlock;
                puc += xHeapStructSize;

                configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
                ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );

                /* Remaining bytes after allocation. */
                xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize;
                }

                heapALLOCATE_BLOCK( pxPreviousFreeBlock );
                pvReturn = ( void * ) puc;
            }
            else
            {
                /* None of the merge strategies worked on this path. */
                mtCOVERAGE_TEST_MARKER();
            }

            /* Update historical minimum free bytes. */
            if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
            {
                xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
@@ -598,51 +746,38 @@ void *pvPortRealloc( void *pv,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        ( void ) xTaskResumeAll();
    }
    else
    {
        /* Not enough free bytes in the entire heap to satisfy expansion. */
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* If still NULL, fall back to allocating a new block and copying the payload. */
    if( pvReturn == NULL )
    {
        puc = pvPortMalloc( xWantedSize );

        if( puc != NULL )
        {
            /* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */
            configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
            ( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize );
            vPortFree( pv );
            pvReturn = ( void * ) puc;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

realloc_exit:
    /* Ensure returned pointer is properly aligned (NULL also satisfies this). */
    configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );

    return pvReturn;
}
#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */
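Before touching the heap, the routine above only normalises the request: it adds the block header, rounds the total up to the port's alignment boundary, and treats any arithmetic overflow as an invalid request. A stand-alone sketch of that arithmetic follows; the ALIGNMENT and HEADER_SIZE constants are assumed values standing in for portBYTE_ALIGNMENT and xHeapStructSize, so this is a model of the logic, not code from the commit:

    #include <stddef.h>
    #include <stdint.h>

    #define ALIGNMENT       8U                    /* Assumed portBYTE_ALIGNMENT. */
    #define ALIGNMENT_MASK  ( ALIGNMENT - 1U )
    #define HEADER_SIZE     8U                    /* Assumed block header size. */

    /* Returns the aligned internal size for a request, or 0 if the request is invalid. */
    static size_t prvAlignedRequestSize( size_t xWantedSize )
    {
        size_t xSize = xWantedSize;

        if( xSize > ( SIZE_MAX - HEADER_SIZE ) )
        {
            return 0;                             /* Adding the header would overflow. */
        }

        xSize += HEADER_SIZE;

        if( ( xSize & ALIGNMENT_MASK ) != 0U )
        {
            size_t xPadding = ALIGNMENT - ( xSize & ALIGNMENT_MASK );

            if( xSize > ( SIZE_MAX - xPadding ) )
            {
                return 0;                         /* Padding to the boundary would overflow. */
            }

            xSize += xPadding;
        }

        return xSize;
    }

With these assumed constants, a 20-byte request becomes 20 + 8 = 28 bytes, padded up to 32; pvPortRealloc then compares that aligned size against the current block size to choose between the shrink and expansion paths.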

View file

@@ -462,54 +462,78 @@ void vPortFree( void * pv )
 * On failure: NULL
 *
 * Behavior:
 * 1) If pv == NULL, behaves like pvPortMalloc(xWantedSize).
 * 2) If xWantedSize == 0, behaves like vPortFree(pv) and returns NULL.
 * 3) Align the requested size and include the block header size; if the aligned
 *    size is invalid, return NULL.
 * 4) If the aligned requested size is <= current block size, shrink in place and
 *    insert any sufficiently large remainder as a free block.
 * 5) If expansion is required and there are enough free bytes in the heap, try to
 *    expand into adjacent free blocks in this order:
 *    - Merge with next free block if it is immediately after the current block.
 *    - Merge with previous free block if it is immediately before the current block.
 *    - Merge with both previous and next if combined they provide enough space.
 *    If none of the above succeed, fall back to allocating a new block, memcpy'ing
 *    the payload and freeing the old block.
 */
void * pvPortRealloc( void * pv,
                      size_t xWantedSize )
{
    BlockLink_t * pxBlock;
    BlockLink_t * pxNewBlockLink;
    BlockLink_t * pxNextFreeBlock;
    BlockLink_t * pxPreviousFreeBlock;
    BlockLink_t * pxBeforePreviousFreeBlock;
    uint8_t * puc;
    void * pvReturn = NULL;
    size_t xAlignedWantedSize;
    size_t xAdditionalRequiredSize;
    size_t xCurrentBlockSize;
    size_t xRemainingBlockSize;
    size_t xNextBlockSize;
    size_t xPreviousBlockSize;
    BaseType_t xHasNextBlock;
    BaseType_t xHasPreviousBlock;

    /* Ensure the end marker has been set up. */
    configASSERT( pxEnd );

    /* If pv is NULL behave like malloc. */
    if( pv == NULL )
    {
        pvReturn = pvPortMalloc( xWantedSize );
        goto realloc_exit;
    }

    /* If requested size is zero behave like free. */
    if( xWantedSize == 0 )
    {
        vPortFree( pv );
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* Calculate the internal aligned size including the header. */
    xAlignedWantedSize = xWantedSize;

    /* Add the header size and check for overflow. */
    if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xHeapStructSize ) == 0 )
    {
        xAlignedWantedSize += xHeapStructSize;

        /* Ensure byte alignment. */
        if( ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
        {
            xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xAlignedWantedSize & portBYTE_ALIGNMENT_MASK );

            if( heapADD_WILL_OVERFLOW( xAlignedWantedSize, xAdditionalRequiredSize ) == 0 )
            {
                xAlignedWantedSize += xAdditionalRequiredSize;
            }
            else
            {
                /* Overflow -> invalid request. */
                xAlignedWantedSize = 0;
            }
        }
        else
@@ -519,111 +543,235 @@ void *pvPortRealloc( void *pv,
    }
    else
    {
        xAlignedWantedSize = 0;
    }

    /* Validate the aligned size. */
    if( ( xAlignedWantedSize == 0 ) || ( heapBLOCK_SIZE_IS_VALID( xAlignedWantedSize ) == 0 ) )
    {
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* Get the block header for the allocated block. */
    puc = ( uint8_t * ) pv;
    puc -= xHeapStructSize;
    pxBlock = ( BlockLink_t * ) puc;

    heapVALIDATE_BLOCK_POINTER( pxBlock );
    configASSERT( heapBLOCK_IS_ALLOCATED( pxBlock ) );

    /* Current block size without the allocated bit. */
    xCurrentBlockSize = pxBlock->xBlockSize & ~heapBLOCK_ALLOCATED_BITMASK;

    /* 1) Shrink in place if possible. */
    if( xAlignedWantedSize <= xCurrentBlockSize )
    {
        xRemainingBlockSize = xCurrentBlockSize - xAlignedWantedSize;

        /* Only split if the remaining space is large enough to form a free block. */
        if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
        {
            vTaskSuspendAll();
            {
                /* Set the block to the new size and mark as allocated. */
                pxBlock->xBlockSize = xAlignedWantedSize;
                heapALLOCATE_BLOCK( pxBlock );

                /* Create a new free block from the remainder and insert it. */
                pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
                configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                xFreeBytesRemaining += xRemainingBlockSize;
                prvInsertBlockIntoFreeList( pxNewBlockLink );
            }
            ( void ) xTaskResumeAll();
        }
        else
        {
            /* Remainder too small to split. */
            mtCOVERAGE_TEST_MARKER();
        }

        pvReturn = pv;
        goto realloc_exit;
    }
    /* 2) Expansion path: try to use adjacent free blocks if overall free bytes suffice. */
    else if( ( xAlignedWantedSize - xCurrentBlockSize ) <= xFreeBytesRemaining )
    {
        vTaskSuspendAll();
        {
            /* Walk the free list to find the free blocks immediately before and after pxBlock. */
            pxBeforePreviousFreeBlock = &xStart;
            pxPreviousFreeBlock = &xStart;
            pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
            heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );

            while( ( pxNextFreeBlock < pxBlock ) && ( pxNextFreeBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
            {
                pxBeforePreviousFreeBlock = pxPreviousFreeBlock;
                pxPreviousFreeBlock = pxNextFreeBlock;
                pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNextFreeBlock->pxNextFreeBlock );
                heapVALIDATE_BLOCK_POINTER( pxNextFreeBlock );
            }

            /* Check if next is immediately after current. */
            if( ( pxNextFreeBlock != pxEnd ) &&
                ( ( ( size_t ) pxBlock + xCurrentBlockSize ) == ( size_t ) pxNextFreeBlock ) )
            {
                xHasNextBlock = pdTRUE;
            }
            else
            {
                xHasNextBlock = pdFALSE;
            }

            /* Check if previous is immediately before current. */
            if( ( pxPreviousFreeBlock != &xStart ) &&
                ( ( ( size_t ) pxPreviousFreeBlock + pxPreviousFreeBlock->xBlockSize ) == ( size_t ) pxBlock ) )
            {
                xHasPreviousBlock = pdTRUE;
            }
            else
            {
                xHasPreviousBlock = pdFALSE;
            }

            /* Compute required extra size and neighbor sizes. */
            xRemainingBlockSize = xAlignedWantedSize - xCurrentBlockSize;
            xNextBlockSize = pxNextFreeBlock->xBlockSize;
            xPreviousBlockSize = pxPreviousFreeBlock->xBlockSize;

            configASSERT( heapBLOCK_SIZE_IS_VALID( xNextBlockSize ) != 0 );
            configASSERT( heapBLOCK_SIZE_IS_VALID( xPreviousBlockSize ) != 0 );

            /* a) If next exists and is large enough, merge with next. */
            if( ( xHasNextBlock == pdTRUE ) &&
                ( xNextBlockSize >= xRemainingBlockSize ) )
            {
                /* Remove next from free list and update free bytes. */
                pxPreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
                pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xNextBlockSize;

                /* Temporarily free the current block for merging. */
                heapFREE_BLOCK( pxBlock );

                /* Remaining bytes after creating the requested size. */
                xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    /* Set block to requested size and insert leftover as a free block. */
                    pxBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    /* Leftover too small, keep as part of allocated block. */
                    pxBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize;
                }

                /* Mark merged block as allocated. */
                heapALLOCATE_BLOCK( pxBlock );
                pvReturn = pv;
            }
            /* b) If previous exists and is large enough, merge with previous (data must be moved). */
            else if( ( xHasPreviousBlock == pdTRUE ) &&
                     ( xPreviousBlockSize >= xRemainingBlockSize ) )
            {
                /* Remove previous from free list and update free bytes. */
                pxBeforePreviousFreeBlock->pxNextFreeBlock = pxPreviousFreeBlock->pxNextFreeBlock;
                pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xPreviousBlockSize;

                heapFREE_BLOCK( pxBlock );

                /* Move the payload forward into the previous block's payload area. */
                puc = ( uint8_t * ) pxPreviousFreeBlock;
                puc += xHeapStructSize;

                /* Ensure memmove length will not underflow. */
                configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
                ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );

                /* Remaining bytes after creating the requested size. */
                xRemainingBlockSize = xCurrentBlockSize + xPreviousBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    /* Previous becomes the allocated block of requested size, insert leftover. */
                    pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    /* Leftover too small, treat entire previous+current as allocated. */
                    pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xPreviousBlockSize;
                }

                heapALLOCATE_BLOCK( pxPreviousFreeBlock );

                /* Return the payload pointer in the previous block. */
                pvReturn = ( void * ) puc;
            }
            /* c) If both neighbors exist and combined are large enough, merge both sides (move data). */
            else if( ( xHasNextBlock == pdTRUE ) &&
                     ( xHasPreviousBlock == pdTRUE ) &&
                     ( ( xNextBlockSize + xPreviousBlockSize ) >= xRemainingBlockSize ) )
            {
                /* Remove both previous and next from the free list and update free bytes. */
                pxBeforePreviousFreeBlock->pxNextFreeBlock = pxNextFreeBlock->pxNextFreeBlock;
                pxNextFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                pxPreviousFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                xFreeBytesRemaining -= xNextBlockSize + xPreviousBlockSize;

                heapFREE_BLOCK( pxBlock );

                /* Move payload forward into previous block's payload area. */
                puc = ( uint8_t * ) pxPreviousFreeBlock;
                puc += xHeapStructSize;

                configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
                ( void ) memmove( puc, pv, xCurrentBlockSize - xHeapStructSize );

                /* Remaining bytes after allocation. */
                xRemainingBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize - xAlignedWantedSize;

                if( xRemainingBlockSize > heapMINIMUM_BLOCK_SIZE )
                {
                    pxPreviousFreeBlock->xBlockSize = xAlignedWantedSize;

                    pxNewBlockLink = ( BlockLink_t * ) ( ( ( uint8_t * ) pxPreviousFreeBlock ) + xAlignedWantedSize );
                    configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
                    pxNewBlockLink->xBlockSize = xRemainingBlockSize;
                    xFreeBytesRemaining += xRemainingBlockSize;
                    prvInsertBlockIntoFreeList( pxNewBlockLink );
                }
                else
                {
                    pxPreviousFreeBlock->xBlockSize = xCurrentBlockSize + xNextBlockSize + xPreviousBlockSize;
                }

                heapALLOCATE_BLOCK( pxPreviousFreeBlock );
                pvReturn = ( void * ) puc;
            }
            else
            {
                /* None of the merge strategies worked on this path. */
                mtCOVERAGE_TEST_MARKER();
            }

            /* Update historical minimum free bytes. */
            if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
            {
                xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
@@ -633,51 +781,38 @@ void *pvPortRealloc( void *pv,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        ( void ) xTaskResumeAll();
    }
    else
    {
        /* Not enough free bytes in the entire heap to satisfy expansion. */
        pvReturn = NULL;
        goto realloc_exit;
    }

    /* If still NULL, fall back to allocating a new block and copying the payload. */
    if( pvReturn == NULL )
    {
        puc = pvPortMalloc( xWantedSize );

        if( puc != NULL )
        {
            /* Copy the old payload (old payload size = xCurrentBlockSize - xHeapStructSize). */
            configASSERT( heapSUBTRACT_WILL_UNDERFLOW( xCurrentBlockSize, xHeapStructSize ) == 0 );
            ( void ) memcpy( puc, pv, xCurrentBlockSize - xHeapStructSize );
            vPortFree( pv );
            pvReturn = ( void * ) puc;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

realloc_exit:
    /* Ensure returned pointer is properly aligned (NULL also satisfies this). */
    configASSERT( ( ( size_t ) pvReturn & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );

    return pvReturn;
}
#endif /* if ( configSUPPORT_HEAP_REALLOC == 1 ) */
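The expansion path in both files reduces to one ordering decision over the two neighbouring free blocks: use the next block if it alone is big enough, otherwise the previous block, otherwise both, otherwise fall back to malloc/copy/free. A minimal model of that decision, detached from the heap bookkeeping (the enum and function name below are illustrative only and are not part of the FreeRTOS API):

    #include <stddef.h>

    typedef enum
    {
        mergeNONE = 0,   /* Fall back to malloc + memcpy + free. */
        mergeNEXT,       /* Strategy a): absorb the free block after the allocation. */
        mergePREV,       /* Strategy b): slide the payload back into the free block before it. */
        mergeBOTH        /* Strategy c): use both neighbours at once. */
    } MergeChoice_t;

    static MergeChoice_t prvChooseMerge( int xHasNext, size_t xNextSize,
                                         int xHasPrev, size_t xPrevSize,
                                         size_t xExtraNeeded )
    {
        if( ( xHasNext != 0 ) && ( xNextSize >= xExtraNeeded ) )
        {
            return mergeNEXT;
        }

        if( ( xHasPrev != 0 ) && ( xPrevSize >= xExtraNeeded ) )
        {
            return mergePREV;
        }

        if( ( xHasNext != 0 ) && ( xHasPrev != 0 ) &&
            ( ( xNextSize + xPrevSize ) >= xExtraNeeded ) )
        {
            return mergeBOTH;
        }

        return mergeNONE;
    }

Strategy a) is tried first because it is the only one that leaves the payload where it is; strategies b) and c) both pay for a memmove() of the existing data before the grown block can be returned.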