Feature/fixing clang gnu compiler warnings (#620)

* Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558)

* Using single name definition for libraries everywhere. (#558)

* Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571)

* Removing compiler warnings for GNU and Clang. (#571)

* Added documentation on how to consume the kernel from a main project. Added default PORT selection for native POSIX and MINGW platforms.

* Only adding freertos_config if it exists. Removing auto-generation of it from FREERTOS_CONFIG_FILE_DIRECTORY.

* Fixing clang and gnu compiler warnings.

* Adding project information and instructions on how to compile for GNU/Clang.

* Fixing compiler issue with unused variable - no need to declare variable.

* Adding the compiler warnings for Linux builds that the kernel is okay with using.

* Fixing more extra-semi-stmt clang warnings (see the do/while sketch after this list).

* Moving definition of hooks into header files if features are enabled (see the sketch after this list).

* Fixing formatting with uncrustify.

* Fixing merge conflicts with main merge.

* Fixing compiler errors due to merge issues and formatting.

* Fixing Line feeds.

* Adding 'portNORETURN' into portmacro.h (see the sketch after this list). Other updates based on PR feedback.

* Further clean-up of clang and clang-tidy issues.

* Removing compiler specific pragmas from common c files.

* Fixing missing lexicon entry and uncrustify formatting changes.

* Resolving merge issue: multiple definitions of the prototype for prvIdleTask.

* Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control.

* More uncrustify formatting issues.

* Fixing extra bracket in #if statement.
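
The extra-semi-stmt warnings mentioned above come from function-like macros whose bodies are bare { } blocks: the semicolon the caller writes after the macro expands to an empty statement, which clang reports under -Wextra-semi-stmt. Below is a minimal sketch of the problem and of the do { } while( 0 ) wrapper; the macro and struct names are hypothetical, not kernel code.

/* Hypothetical example only - not kernel code. */
typedef struct xSTATS
{
    unsigned long ulTx;
    unsigned long ulRx;
} Stats_t;

/* Bare-block version: the caller's trailing ';' becomes an empty
 * statement that clang reports under -Wextra-semi-stmt. */
#define vClearCountersBroken( pxStats ) \
    {                                   \
        ( pxStats )->ulTx = 0U;         \
        ( pxStats )->ulRx = 0U;         \
    }

/* do/while( 0 ) version: the loop consumes the trailing ';' and keeps
 * the macro well-formed inside an un-braced if/else. */
#define vClearCountersFixed( pxStats )  \
    do {                                \
        ( pxStats )->ulTx = 0U;         \
        ( pxStats )->ulRx = 0U;         \
    } while( 0 )

The prvIncrementQueueTxLock and prvIncrementQueueRxLock hunks in the queue.c diff below apply exactly this wrapper.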
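
For the hook bullet above, a rough sketch of the idea using the idle and tick hooks as examples; the exact header and guard placement chosen by the commit are not reproduced here, so treat this as an illustration rather than the actual change.

/* Sketch only - header location and guard placement are assumptions.
 * When a hook feature is enabled in FreeRTOSConfig.h, the prototype of
 * the application-supplied callback lives in a kernel header instead of
 * being re-declared locally in each C file that references it. */
#if ( configUSE_IDLE_HOOK == 1 )
    void vApplicationIdleHook( void );
#endif

#if ( configUSE_TICK_HOOK == 1 )
    void vApplicationTickHook( void );
#endif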
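
For the portNORETURN bullet, a plausible GCC/Clang-style definition for a port's portmacro.h; other compilers would map it differently or leave it empty, and the exact form used by the kernel port layer may differ.

/* Sketch of a possible port-layer definition (assumption, not the
 * verbatim kernel change). */
#ifndef portNORETURN
    #define portNORETURN    __attribute__( ( noreturn ) )
#endif

/* Example use: tells the compiler this function never returns, which
 * avoids "missing return" and unreachable-code style warnings. */
portNORETURN void vAssertCalledLoop( void )
{
    for( ; ; )
    {
    }
}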

---------

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
phelter authored on 2023-02-23 10:05:04 -08:00; committed by GitHub
parent 5d05601045
commit 8cd5451ad5
38 changed files with 260 additions and 172 deletions

queue.c (64 changes)

@@ -64,14 +64,14 @@
typedef struct QueuePointers
{
- int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
- int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
+ int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
+ int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;
typedef struct SemaphoreData
{
- TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
- UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
+ TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */
+ UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
@@ -95,27 +95,27 @@ typedef struct SemaphoreData
*/
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
- int8_t * pcHead; /*< Points to the beginning of the queue storage area. */
- int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */
+ int8_t * pcHead; /**< Points to the beginning of the queue storage area. */
+ int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */
union
{
- QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
- SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
+ QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */
+ SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
} u;
- List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
- List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
+ List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
+ List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
- volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
- UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
- UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
+ volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
+ UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
+ UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */
- volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
- volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
+ volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
+ volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
- uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
+ uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
#endif
#if ( configUSE_QUEUE_SETS == 1 )
@@ -264,14 +264,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* tasks than the number of tasks in the system.
*/
#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \
- { \
+ do { \
const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \
if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \
{ \
configASSERT( ( cTxLock ) != queueINT8_MAX ); \
( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
} \
- }
+ } while( 0 )
/*
* Macro to increment cRxLock member of the queue data structure. It is
@@ -279,14 +279,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* tasks than the number of tasks in the system.
*/
#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \
- { \
+ do { \
const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \
if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \
{ \
configASSERT( ( cRxLock ) != queueINT8_MAX ); \
( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
} \
- }
+ } while( 0 )
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -1046,7 +1046,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
- UBaseType_t uxSavedInterruptStatus;
+ portBASE_TYPE xSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
@@ -1074,7 +1074,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* read, instead return a flag to say whether a context switch is required or
* not (i.e. has a task with a higher priority than us been woken by this
* post). */
- uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
@@ -1199,7 +1199,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL;
}
}
- portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
return xReturn;
}
@@ -1209,7 +1209,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
- UBaseType_t uxSavedInterruptStatus;
+ portBASE_TYPE xSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;
/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
@@ -1245,7 +1245,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
- uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1365,7 +1365,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL;
}
}
- portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
return xReturn;
}
@@ -1880,7 +1880,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
- UBaseType_t uxSavedInterruptStatus;
+ portBASE_TYPE xSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
@@ -1902,7 +1902,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
- uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1962,7 +1962,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
}
- portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
return xReturn;
}
@@ -1972,7 +1972,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
void * const pvBuffer )
{
BaseType_t xReturn;
- UBaseType_t uxSavedInterruptStatus;
+ portBASE_TYPE xSavedInterruptStatus;
int8_t * pcOriginalReadPosition;
Queue_t * const pxQueue = xQueue;
@@ -1996,7 +1996,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
- uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -2017,7 +2017,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
- portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus );
return xReturn;
}