Mirror of https://github.com/FreeRTOS/FreeRTOS-Kernel.git
change(freertos/smp): Update stream_buffer.c locking
Updated stream_buffer.c to use granular locking:

- Added xTaskSpinlock and xISRSpinlock to the stream buffer structure.
- Replaced the kernel critical section macros (taskENTER/EXIT_CRITICAL() and taskENTER/EXIT_CRITICAL_FROM_ISR()) with data group critical section macros (sbENTER/EXIT_CRITICAL() and sbENTER/EXIT_CRITICAL_FROM_ISR()).
- Added vStreamBufferEnterCritical(), uxStreamBufferEnterCriticalFromISR(), vStreamBufferExitCritical() and vStreamBufferExitCriticalFromISR(), which the data group critical section macros map to.
- Added prvLockStreamBufferForTasks() and prvUnlockStreamBufferForTasks() to lock (suspend) the stream buffer while executing non-deterministic code.

Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
This commit is contained in:
parent e76ae93c8a
commit 58a5c84990
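Before the diff, a quick orientation: with granular locking enabled, a short deterministic access to a stream buffer is wrapped in the new per-buffer critical section macros, which resolve to vStreamBufferEnterCritical()/vStreamBufferExitCritical(); in non-granular builds they fall back to the kernel-wide taskENTER_CRITICAL()/taskEXIT_CRITICAL(). A minimal sketch of the caller-side pattern (the function below is hypothetical and not part of this commit):

/* Illustration only - prvExampleShortAccess() does not exist in the diff. */
static void prvExampleShortAccess( StreamBuffer_t * const pxStreamBuffer )
{
    sbENTER_CRITICAL( pxStreamBuffer );
    {
        /* Short, bounded access to this buffer's members. Only this buffer's
         * spinlocks are held, so other data groups stay usable on other cores. */
    }
    sbEXIT_CRITICAL( pxStreamBuffer );
}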
@@ -3410,6 +3410,9 @@ typedef struct xSTATIC_STREAM_BUFFER
         void * pvDummy5[ 2 ];
     #endif
     UBaseType_t uxDummy6;
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xDummySpinlock[ 2 ];
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StaticStreamBuffer_t;

 /* Message buffers are built on stream buffers. */
stream_buffer.c (261 changed lines)
@@ -58,6 +58,39 @@
     #error INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 to build stream_buffer.c
 #endif

+/*
+ * Macros to mark the start and end of a critical code region.
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define sbENTER_CRITICAL( pxStreamBuffer )                                   vStreamBufferEnterCritical( pxStreamBuffer )
+    #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer )                          uxStreamBufferEnterCriticalFromISR( pxStreamBuffer )
+    #define sbEXIT_CRITICAL( pxStreamBuffer )                                    vStreamBufferExitCritical( pxStreamBuffer )
+    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )   vStreamBufferExitCriticalFromISR( uxSavedInterruptStatus, pxStreamBuffer )
+#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #define sbENTER_CRITICAL( pxEventBits )                                      taskENTER_CRITICAL();
+    #define sbENTER_CRITICAL_FROM_ISR( pxEventBits )                             taskENTER_CRITICAL_FROM_ISR();
+    #define sbEXIT_CRITICAL( pxEventBits )                                       taskEXIT_CRITICAL();
+    #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer )   taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/*
+ * Macro used to lock and unlock a stream buffer. When a task locks a stream
+ * buffer, the task will have thread safe non-deterministic access to the stream
+ * buffer.
+ * - Concurrent access from other tasks will be blocked by the xTaskSpinlock
+ * - Concurrent access from ISRs will be pended
+ *
+ * When the task unlocks the stream buffer, all pended access attempts are handled.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    #define sbLOCK( pxStreamBuffer )      prvLockStreamBufferForTasks( pxStreamBuffer )
+    #define sbUNLOCK( pxStreamBuffer )    prvUnlockStreamBufferForTasks( pxStreamBuffer )
+#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    #define sbLOCK( pxStreamBuffer )      vTaskSuspendAll()
+    #define sbUNLOCK( pxStreamBuffer )    ( void ) xTaskResumeAll()
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
 /* If the user has not provided application specific Rx notification macros,
  * or #defined the notification macros away, then provide default implementations
  * that uses task notifications. */
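Complementing the sbLOCK()/sbUNLOCK() comment above, a minimal sketch (hypothetical function, not part of the diff) of how the locking pattern is meant to be used around non-deterministic work. While the buffer is locked, other tasks are blocked by xTaskSpinlock and ISR accesses are pended until sbUNLOCK(); in non-granular or single-core builds the macros fall back to vTaskSuspendAll()/xTaskResumeAll(), as defined in the fallback branch above.

/* Illustration only - prvExampleLongAccess() does not exist in the diff. */
static void prvExampleLongAccess( StreamBuffer_t * const pxStreamBuffer )
{
    sbLOCK( pxStreamBuffer );
    {
        /* Non-deterministic work on the buffer, e.g. copying a variable amount
         * of data. Interrupts stay enabled; concurrent ISR access is pended
         * and handled when the buffer is unlocked below. */
    }
    ( void ) sbUNLOCK( pxStreamBuffer );
}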
@@ -65,7 +98,7 @@
     #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
     do \
     { \
-        vTaskSuspendAll(); \
+        sbLOCK( pxStreamBuffer ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
             { \
@@ -76,7 +109,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
             } \
         } \
-        ( void ) xTaskResumeAll(); \
+        ( void ) sbUNLOCK( pxStreamBuffer ); \
     } while( 0 )
 #endif /* sbRECEIVE_COMPLETED */

@@ -105,7 +138,7 @@
     do { \
         UBaseType_t uxSavedInterruptStatus; \
         \
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \
+        uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
             { \
@@ -117,7 +150,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
             } \
         } \
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \
+        sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \
     } while( 0 )
 #endif /* sbRECEIVE_COMPLETED_FROM_ISR */

@@ -145,7 +178,7 @@
  */
 #ifndef sbSEND_COMPLETED
     #define sbSEND_COMPLETED( pxStreamBuffer ) \
-        vTaskSuspendAll(); \
+        sbLOCK( pxStreamBuffer ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
             { \
@@ -156,7 +189,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
             } \
         } \
-        ( void ) xTaskResumeAll()
+        ( void ) sbUNLOCK( pxStreamBuffer )
 #endif /* sbSEND_COMPLETED */

 /* If user has provided a per-instance send completed callback, then
@@ -184,7 +217,7 @@
     do { \
         UBaseType_t uxSavedInterruptStatus; \
         \
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \
+        uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
             { \
@@ -196,7 +229,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
             } \
         } \
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \
+        sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ); \
     } while( 0 )
 #endif /* sbSEND_COMPLETE_FROM_ISR */

@@ -249,8 +282,59 @@ typedef struct StreamBufferDef_t
         StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */
     #endif
     UBaseType_t uxNotificationIndex;                                /* The index we are using for notification, by default tskDEFAULT_INDEX_TO_NOTIFY. */
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        portSPINLOCK_TYPE xTaskSpinlock;
+        portSPINLOCK_TYPE xISRSpinlock;
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 } StreamBuffer_t;

+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+
+    /*
+     * Enters a critical section for a stream buffer. Disables interrupts and takes
+     * both task and ISR spinlocks to ensure thread safety.
+     */
+    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /*
+     * Enters a critical section for a stream buffer from an ISR context. Takes the ISR
+     * spinlock and returns the previous interrupt state.
+     */
+    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /*
+     * Exits a critical section for a stream buffer. Releases spinlocks in reverse order
+     * and conditionally re-enables interrupts and yields if required.
+     */
+    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+
+    /*
+     * Exits a critical section for a stream buffer from an ISR context. Releases the ISR
+     * spinlock and conditionally restores the previous interrupt state.
+     */
+    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                                  StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+
+
+/*
+ * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer
+ * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted
+ * by other tasks after locking the stream buffer, thus allowing the caller to
+ * execute non-deterministic operations.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
+/*
+ * Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption
+ * for the caller.
+ */
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
 /*
  * The number of bytes available to be read from the buffer.
  */
@@ -327,6 +411,131 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
                                           StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION;

 /*-----------------------------------------------------------*/
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vStreamBufferEnterCritical( StreamBuffer_t * const pxStreamBuffer )
+    {
+        portDISABLE_INTERRUPTS();
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            /* Task spinlock is always taken first */
+            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
+
+            /* Take the ISR spinlock next */
+            portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
+
+            /* Increment the critical nesting count */
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static UBaseType_t uxStreamBufferEnterCriticalFromISR( StreamBuffer_t * const pxStreamBuffer )
+    {
+        UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* Take the ISR spinlock */
+        portGET_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
+
+        /* Increment the critical nesting count */
+        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        return uxSavedInterruptStatus;
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void vStreamBufferExitCritical( StreamBuffer_t * const pxStreamBuffer )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Get the xYieldPending status inside the critical section. */
+        BaseType_t xYieldCurrentTask = xTaskUnlockCanYield();
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
+
+        /* Release the task spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xTaskSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portENABLE_INTERRUPTS();
+
+            if( xYieldCurrentTask != pdFALSE )
+            {
+                portYIELD();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) )
+    static void vStreamBufferExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus,
+                                                  StreamBuffer_t * const pxStreamBuffer )
+    {
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U );
+
+        /* Decrement the critical nesting count */
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+        /* Release the ISR spinlock */
+        portRELEASE_SPINLOCK( xCoreID, &( pxStreamBuffer->xISRSpinlock ) );
+
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 )
+        {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        }
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
+    {
+        /* Disable preemption so that the current task cannot be preempted by another task */
+        vTaskPreemptionDisable( NULL );
+
+        /* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks
+         * on other cores from accessing the stream buffer while it is suspended. */
+        portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+    static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer )
+    {
+        /* Release the previously held task spinlock */
+        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) );
+
+        /* Re-enable preemption */
+        vTaskPreemptionEnable( NULL );
+    }
+#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+/*-----------------------------------------------------------*/
+
 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
     StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes,
                                                      size_t xTriggerLevelBytes,
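The implementations above rely on port-provided primitives: portSPINLOCK_TYPE, portINIT_SPINLOCK(), portGET_SPINLOCK(), portRELEASE_SPINLOCK(), portGET_CORE_ID() and the critical-nesting-count macros. For readers without an SMP port at hand, here is a minimal sketch of what a per-core, re-entrant spinlock behind those hooks could look like, built on C11 atomics. Everything prefixed "Example"/"ex" is hypothetical and is not taken from any real FreeRTOS port.

#include <stdatomic.h>
#include <stdint.h>

typedef struct
{
    atomic_int xOwnerCore; /* Core currently holding the lock, -1 when free. */
    uint32_t uxNestCount;  /* Recursion depth for the owning core. */
} ExampleSpinlock_t;

#define exSPINLOCK_FREE    ( -1 )

static inline void vExampleSpinlockInit( ExampleSpinlock_t * pxLock )
{
    atomic_init( &( pxLock->xOwnerCore ), exSPINLOCK_FREE );
    pxLock->uxNestCount = 0U;
}

static inline void vExampleSpinlockGet( int xCoreID,
                                        ExampleSpinlock_t * pxLock )
{
    if( atomic_load( &( pxLock->xOwnerCore ) ) == xCoreID )
    {
        /* Re-entrant acquisition by the owning core only bumps the count,
         * so nested critical sections on the same core do not deadlock. */
        pxLock->uxNestCount++;
    }
    else
    {
        int xExpected = exSPINLOCK_FREE;

        /* Spin until the lock is free, then claim it for this core. */
        while( !atomic_compare_exchange_weak( &( pxLock->xOwnerCore ), &xExpected, xCoreID ) )
        {
            xExpected = exSPINLOCK_FREE;
        }

        pxLock->uxNestCount = 1U;
    }
}

static inline void vExampleSpinlockRelease( int xCoreID,
                                            ExampleSpinlock_t * pxLock )
{
    ( void ) xCoreID;

    pxLock->uxNestCount--;

    if( pxLock->uxNestCount == 0U )
    {
        atomic_store( &( pxLock->xOwnerCore ), exSPINLOCK_FREE );
    }
}

Note the lock-ordering rule visible in the diff above: the task spinlock is always taken before the ISR spinlock, and the two are released in the reverse order, which keeps task-level and ISR-level critical sections from deadlocking against each other.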
@@ -405,6 +614,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
                                           pxSendCompletedCallback,
                                           pxReceiveCompletedCallback );

+            #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+            {
+                portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xTaskSpinlock ) );
+                portINIT_SPINLOCK( &( ( ( StreamBuffer_t * ) pvAllocatedMemory )->xISRSpinlock ) );
+            }
+            #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
             traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xStreamBufferType );
         }
         else
@@ -499,6 +715,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
              * again. */
             pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;

+            #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+            {
+                portINIT_SPINLOCK( &( pxStreamBuffer->xTaskSpinlock ) );
+                portINIT_SPINLOCK( &( pxStreamBuffer->xISRSpinlock ) );
+            }
+            #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
             traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xStreamBufferType );

             /* MISRA Ref 11.3.1 [Misaligned access] */
@@ -614,7 +837,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
     #endif

     /* Can only reset a message buffer if there are no tasks blocked on it. */
-    taskENTER_CRITICAL();
+    sbENTER_CRITICAL( pxStreamBuffer );
     {
         if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) )
         {
@@ -644,7 +867,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
             xReturn = pdPASS;
         }
     }
-    taskEXIT_CRITICAL();
+    sbEXIT_CRITICAL( pxStreamBuffer );

     traceRETURN_xStreamBufferReset( xReturn );

@@ -872,7 +1095,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
         {
             /* Wait until the required number of bytes are free in the message
              * buffer. */
-            taskENTER_CRITICAL();
+            sbENTER_CRITICAL( pxStreamBuffer );
             {
                 xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );

@@ -887,11 +1110,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
                 }
                 else
                 {
-                    taskEXIT_CRITICAL();
+                    sbEXIT_CRITICAL( pxStreamBuffer );
                     break;
                 }
             }
-            taskEXIT_CRITICAL();
+            sbEXIT_CRITICAL( pxStreamBuffer );

             traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
             ( void ) xTaskNotifyWaitIndexed( pxStreamBuffer->uxNotificationIndex, ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@@ -1087,7 +1310,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
     {
         /* Checking if there is data and clearing the notification state must be
          * performed atomically. */
-        taskENTER_CRITICAL();
+        sbENTER_CRITICAL( pxStreamBuffer );
         {
             xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );

@@ -1112,7 +1335,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        sbEXIT_CRITICAL( pxStreamBuffer );

         if( xBytesAvailable <= xBytesToStoreMessageLength )
         {
@@ -1409,7 +1632,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
         /* MISRA Ref 4.7.1 [Return value shall be checked] */
         /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
         /* coverity[misra_c_2012_directive_4_7_violation] */
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer );
         {
             if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )
             {
@@ -1426,7 +1649,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
                 xReturn = pdFALSE;
             }
         }
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+        sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer );

         traceRETURN_xStreamBufferSendCompletedFromISR( xReturn );

@@ -1448,7 +1671,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf
         /* MISRA Ref 4.7.1 [Return value shall be checked] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
         /* coverity[misra_c_2012_directive_4_7_violation] */
-        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer );
         {
             if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )
             {
@@ -1465,7 +1688,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf
                 xReturn = pdFALSE;
             }
         }
-        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+        sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer );

         traceRETURN_xStreamBufferReceiveCompletedFromISR( xReturn );

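None of the above changes the public stream buffer API: the spinlocks are initialised inside the create paths shown earlier and are taken only inside the kernel, so existing application code builds and behaves as before. A minimal usage sketch for reference (standard stream buffer calls; the sizes and names are arbitrary example values and are not part of the diff):

#include "FreeRTOS.h"
#include "stream_buffer.h"

static void prvExampleStreamBufferUser( void )
{
    /* 128-byte buffer; a blocked reader unblocks once at least 1 byte is available. */
    StreamBufferHandle_t xStreamBuffer = xStreamBufferCreate( 128, 1 );

    if( xStreamBuffer != NULL )
    {
        const char pcTxData[] = "hello";
        char pcRxData[ 8 ];

        /* Send and receive exactly as before this commit; the per-buffer
         * locking added here is internal to the kernel. */
        ( void ) xStreamBufferSend( xStreamBuffer, pcTxData, sizeof( pcTxData ), pdMS_TO_TICKS( 10 ) );
        ( void ) xStreamBufferReceive( xStreamBuffer, pcRxData, sizeof( pcRxData ), pdMS_TO_TICKS( 10 ) );
    }
}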