Add support for running FreeRTOS on Secure Side only in Cortex M33 port. Also, change spaces to tabs.

Gaurav Aggarwal 2019-02-20 00:25:45 +00:00
parent c3c9c12ce2
commit 5849459c65
45 changed files with 6989 additions and 6739 deletions


@@ -26,8 +26,8 @@
*/
/******************************************************************************
See http://www.freertos.org/a00110.html for an explanation of the
definitions contained in this file.
******************************************************************************/
#ifndef FREERTOS_CONFIG_H
@@ -47,123 +47,123 @@
extern uint32_t SystemCoreClock;
/* Cortex M33 port configuration. */
#define configENABLE_MPU 1
#define configENABLE_FPU 1
#define configENABLE_TRUSTZONE 1
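/* Note (illustrative sketch, not part of this commit): the values above are the
 * TrustZone-enabled demo configuration. The "Secure Side only" mode named in
 * the commit message is selected through a separate port option - presumably
 * configRUN_FREERTOS_SECURE_ONLY, the option used by the ARMv8-M ports - in
 * which case a secure-only build would set, roughly:
 *
 *     #define configRUN_FREERTOS_SECURE_ONLY    1
 *     #define configENABLE_TRUSTZONE            0
 */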
/* Constants related to the behaviour of the scheduler. */
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
#define configUSE_PREEMPTION 1
#define configUSE_TIME_SLICING 1
#define configMAX_PRIORITIES ( 5 )
#define configIDLE_SHOULD_YIELD 1
#define configUSE_16_BIT_TICKS 0 /* Only for 8 and 16-bit hardware. */
/* Constants that describe the hardware and memory usage. */
#define configCPU_CLOCK_HZ SystemCoreClock
#define configMINIMAL_STACK_SIZE ( ( uint16_t ) 128 )
#define configMINIMAL_SECURE_STACK_SIZE ( 1024 )
#define configMAX_TASK_NAME_LEN ( 12 )
#define configTOTAL_HEAP_SIZE ( ( size_t ) ( 50 * 1024 ) )
/* Constants that build features in or out. */
#define configUSE_MUTEXES 1
#define configUSE_TICKLESS_IDLE 1
#define configUSE_APPLICATION_TASK_TAG 0
#define configUSE_NEWLIB_REENTRANT 0
#define configUSE_CO_ROUTINES 0
#define configUSE_COUNTING_SEMAPHORES 1
#define configUSE_RECURSIVE_MUTEXES 1
#define configUSE_QUEUE_SETS 0
#define configUSE_TASK_NOTIFICATIONS 1
#define configUSE_TRACE_FACILITY 1
/* Constants that define which hook (callback) functions should be used. */
#define configUSE_IDLE_HOOK 0
#define configUSE_TICK_HOOK 0
#define configUSE_MALLOC_FAILED_HOOK 0
/* Constants provided for debugging and optimisation assistance. */
#define configCHECK_FOR_STACK_OVERFLOW 2
#define configASSERT( x ) if( ( x ) == 0 ) { taskDISABLE_INTERRUPTS(); for( ;; ); }
#define configQUEUE_REGISTRY_SIZE 0
/* Software timer definitions. */
#define configUSE_TIMERS 1
#define configTIMER_TASK_PRIORITY ( 3 )
#define configTIMER_QUEUE_LENGTH 5
#define configTIMER_TASK_STACK_DEPTH ( configMINIMAL_STACK_SIZE )
/* Set the following definitions to 1 to include the API function, or zero
* to exclude the API function. NOTE: Setting an INCLUDE_ parameter to 0 is
* only necessary if the linker does not automatically remove functions that are
* not referenced anyway. */
#define INCLUDE_vTaskPrioritySet 1
#define INCLUDE_uxTaskPriorityGet 1
#define INCLUDE_vTaskDelete 1
#define INCLUDE_vTaskCleanUpResources 0
#define INCLUDE_vTaskSuspend 1
#define INCLUDE_vTaskDelayUntil 1
#define INCLUDE_vTaskDelay 1
#define INCLUDE_uxTaskGetStackHighWaterMark 0
#define INCLUDE_xTaskGetIdleTaskHandle 0
#define INCLUDE_eTaskGetState 1
#define INCLUDE_xTaskResumeFromISR 0
#define INCLUDE_xTaskGetCurrentTaskHandle 1
#define INCLUDE_xTaskGetSchedulerState 0
#define INCLUDE_xSemaphoreGetMutexHolder 0
#define INCLUDE_xTimerPendFunctionCall 1
/* This demo makes use of one or more example stats formatting functions. These
* format the raw data provided by the uxTaskGetSystemState() function in to
* human readable ASCII form. See the notes in the implementation of vTaskList()
* within FreeRTOS/Source/tasks.c for limitations. */
#define configUSE_STATS_FORMATTING_FUNCTIONS 1
/* Dimensions a buffer that can be used by the FreeRTOS+CLI command interpreter.
* See the FreeRTOS+CLI documentation for more information:
* http://www.FreeRTOS.org/FreeRTOS-Plus/FreeRTOS_Plus_CLI/ */
#define configCOMMAND_INT_MAX_OUTPUT_SIZE 2048
/* Interrupt priority configuration follows...................... */
/* Use the system definition, if there is one. */
#ifdef __NVIC_PRIO_BITS
#define configPRIO_BITS __NVIC_PRIO_BITS
#else
#define configPRIO_BITS 3 /* 8 priority levels. */
#endif
/* The lowest interrupt priority that can be used in a call to a "set priority"
* function. */
#define configLIBRARY_LOWEST_INTERRUPT_PRIORITY 0x07
/* The highest interrupt priority that can be used by any interrupt service
* routine that makes calls to interrupt safe FreeRTOS API functions. DO NOT
* CALL INTERRUPT SAFE FREERTOS API FUNCTIONS FROM ANY INTERRUPT THAT HAS A
* HIGHER PRIORITY THAN THIS! (higher priorities are lower numeric values). */
#define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY 5
/* Interrupt priorities used by the kernel port layer itself. These are generic
* to all Cortex-M ports, and do not rely on any particular library functions. */
#define configKERNEL_INTERRUPT_PRIORITY ( configLIBRARY_LOWEST_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )
/* !!!! configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to zero !!!!
* See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. */
#define configMAX_SYSCALL_INTERRUPT_PRIORITY ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )
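/* Note (worked example, not part of this commit): with configPRIO_BITS equal to
 * 3 the two values above evaluate to
 *
 *     configKERNEL_INTERRUPT_PRIORITY      = 0x07 << ( 8 - 3 ) = 0xE0
 *     configMAX_SYSCALL_INTERRUPT_PRIORITY = 0x05 << ( 8 - 3 ) = 0xA0
 *
 * so an interrupt that calls the FromISR FreeRTOS APIs must be assigned a
 * numeric priority of 0xA0 or greater, i.e. a logically equal or lower
 * priority than configMAX_SYSCALL_INTERRUPT_PRIORITY. */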
/* The #ifdef guards against the file being included from IAR assembly files. */
#ifndef __IASMARM__
/* Constants related to the generation of run time stats. */
#define configGENERATE_RUN_TIME_STATS 0
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
#define portGET_RUN_TIME_COUNTER_VALUE() 0
#define configTICK_RATE_HZ ( ( TickType_t ) 100 )
#endif /* __IASMARM__ */
/* Enable static allocation. */
#define configSUPPORT_STATIC_ALLOCATION 1
#endif /* FREERTOS_CONFIG_H */


@@ -36,27 +36,27 @@
/* Externs needed by the MPU setup code. These must match the memory map as
* specified in Scatter-Loading description file (FreeRTOSDemo_ns.sct). */
/* Privileged flash. */
const uint32_t * __privileged_functions_start__ = ( uint32_t * ) ( 0x00200000 );
const uint32_t * __privileged_functions_end__ = ( uint32_t * ) ( 0x00208000 - 0x1 ); /* Last address in privileged Flash region. */
/* Flash containing system calls. */
const uint32_t * __syscalls_flash_start__ = ( uint32_t * ) ( 0x00208000 );
const uint32_t * __syscalls_flash_end__ = ( uint32_t * ) ( 0x00209000 - 0x1 ); /* Last address in Flash region containing system calls. */
/* Unprivileged flash. Note that the section containing
* system calls is unprivileged so that unprivileged tasks
* can make system calls. */
const uint32_t * __unprivileged_flash_start__ = ( uint32_t * ) ( 0x00209000 );
const uint32_t * __unprivileged_flash_end__ = ( uint32_t * ) ( 0x00400000 - 0x1 ); /* Last address in un-privileged Flash region. */
/* The 4K bytes (0x1000) of RAM starting at 0x20200000 are
* privileged access only. This contains kernel data. */
const uint32_t * __privileged_sram_start__ = ( uint32_t * ) ( 0x20200000 );
const uint32_t * __privileged_sram_end__ = ( uint32_t * ) ( 0x20201000 - 0x1 ); /* Last address in privileged RAM. */
;
/* Unprivileged RAM. */
const uint32_t * __unprivileged_sram_start__ = ( uint32_t * ) ( 0x20201000 );
const uint32_t * __unprivileged_sram_end__ = ( uint32_t * ) ( 0x20220000 - 0x1 ); /* Last address in un-privileged RAM. */
/*-----------------------------------------------------------*/
/**
@@ -67,11 +67,11 @@ static void prvCreateTasks( void );
static void prvCreateTasks( void )
{
/* Create tasks for the MPU Demo. */
vStartMPUDemo();
/* Create tasks for the TZ Demo. */
vStartTZDemo();
}
/*-----------------------------------------------------------*/
@@ -79,51 +79,51 @@ static void prvCreateTasks( void )
/* Stack overflow hook. */
void vApplicationStackOverflowHook( TaskHandle_t xTask, signed char *pcTaskName )
{
/* Force an assert. */
configASSERT( pcTaskName == 0 );
}
/*-----------------------------------------------------------*/
/* Non-Secure main. */
int main( void )
{
/* Create tasks. */
prvCreateTasks();
/* Start scheduler. */
vTaskStartScheduler();
/* Should not reach here as the scheduler is already started. */
for( ; ; )
{
}
}
/*-----------------------------------------------------------*/
/* configSUPPORT_STATIC_ALLOCATION is set to 1, so the application must provide an
* implementation of vApplicationGetIdleTaskMemory() to provide the memory that
* is used by the Idle task. */
void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
StackType_t ** ppxIdleTaskStackBuffer,
uint32_t * pulIdleTaskStackSize )
{
/* If the buffers to be provided to the Idle task are declared inside this
* function then they must be declared static - otherwise they will be
* allocated on the stack and so will not exist after this function exits. */
static StaticTask_t xIdleTaskTCB;
static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ] __attribute__((aligned(32)));
/* Pass out a pointer to the StaticTask_t structure in which the Idle
* task's state will be stored. */
*ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
/* Pass out the array that will be used as the Idle task's stack. */
*ppxIdleTaskStackBuffer = uxIdleTaskStack;
/* Pass out the size of the array pointed to by *ppxIdleTaskStackBuffer.
* Note that, as the array is necessarily of type StackType_t,
* configMINIMAL_STACK_SIZE is specified in words, not bytes. */
*pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}
/*-----------------------------------------------------------*/
@@ -131,26 +131,26 @@ void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
* application must provide an implementation of vApplicationGetTimerTaskMemory()
* to provide the memory that is used by the Timer service task. */
void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
StackType_t ** ppxTimerTaskStackBuffer,
uint32_t * pulTimerTaskStackSize )
{
/* If the buffers to be provided to the Timer task are declared inside this
* function then they must be declared static - otherwise they will be
* allocated on the stack and so will not exist after this function exits. */
static StaticTask_t xTimerTaskTCB;
static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ] __attribute__((aligned(32)));
/* Pass out a pointer to the StaticTask_t structure in which the Timer
* task's state will be stored. */
*ppxTimerTaskTCBBuffer = &xTimerTaskTCB;
/* Pass out the array that will be used as the Timer task's stack. */
*ppxTimerTaskStackBuffer = uxTimerTaskStack;
/* Pass out the size of the array pointed to by *ppxTimerTaskStackBuffer.
* Note that, as the array is necessarily of type StackType_t,
* configTIMER_TASK_STACK_DEPTH is specified in words, not bytes. */
*pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
}
/*-----------------------------------------------------------*/
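/* Note (illustrative sketch, not from this commit): the two callbacks above are
 * what the kernel requires when configSUPPORT_STATIC_ALLOCATION is 1. An
 * application task is created from statically allocated memory in the same way
 * with xTaskCreateStatic(); every name below is made up for the example. */
static StackType_t xExampleTaskStack[ configMINIMAL_STACK_SIZE ] __attribute__( ( aligned( 32 ) ) );
static StaticTask_t xExampleTaskTCB;

static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}

void vCreateExampleTask( void )
{
    /* The stack and TCB are supplied by the application, so no heap is used. */
    ( void ) xTaskCreateStatic( prvExampleTask,
                                "Example",
                                configMINIMAL_STACK_SIZE,
                                NULL,
                                tskIDLE_PRIORITY,
                                xExampleTaskStack,
                                &xExampleTaskTCB );
}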
@@ -160,16 +160,16 @@ void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
*/
void MemManage_Handler( void )
{
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, handler_address_const \n"
" bx r1 \n"
" \n"
" handler_address_const: .word vHandleMemoryFault \n"
);
}
/*-----------------------------------------------------------*/


@@ -34,10 +34,10 @@
#include "secure_port_macros.h"
/* Start address of non-secure application. */
#define mainNONSECURE_APP_START_ADDRESS ( 0x200000U )
/* typedef for non-secure Reset Handler. */
typedef void ( *NonSecureResetHandler_t ) ( void ) __attribute__( ( cmse_nonsecure_call ) );
/*-----------------------------------------------------------*/
/* Boot into the non-secure code. */
@@ -46,34 +46,34 @@ void BootNonSecure( uint32_t ulNonSecureStartAddress );
void BootNonSecure( uint32_t ulNonSecureStartAddress )
{
NonSecureResetHandler_t pxNonSecureResetHandler;
/* Main Stack Pointer value for the non-secure side is the first entry in
* the non-secure vector table. Read the first entry and assign the same to
* the non-secure main stack pointer(MSP_NS). */
secureportSET_MSP_NS( *( ( uint32_t * )( ulNonSecureStartAddress ) ) );
/* Non secure Reset Handler is the second entry in the non-secure vector
* table. Read the non-secure reset handler.
*/
pxNonSecureResetHandler = ( NonSecureResetHandler_t )( * ( ( uint32_t * ) ( ( ulNonSecureStartAddress ) + 4U ) ) );
/* Start non-secure software application by jumping to the non-secure Reset
* Handler. */
pxNonSecureResetHandler();
}
/*-----------------------------------------------------------*/
/* Secure main() */
int main( void )
{
/* Boot the non-secure code. */
BootNonSecure( mainNONSECURE_APP_START_ADDRESS );
/* Non-secure software does not return, this code is not executed. */
for( ; ; )
{
/* Should not reach here. */
}
}
/*-----------------------------------------------------------*/


@@ -76,48 +76,48 @@ static void prvROAccessTask( void * pvParameters )
{
uint8_t ucVal;
/* Unused parameters. */
( void ) pvParameters;
for( ; ; )
{
/* This task has RO access to ucSharedMemory and therefore it can read
* it but cannot modify it. */
ucVal = ucSharedMemory[ 0 ];
/* Silence compiler warnings about unused variables. */
( void ) ucVal;
/* Since this task has Read Only access to the ucSharedMemory region,
* writing to it results in Memory Fault. Set ucROTaskFaultTracker[ 0 ]
* to 1 to tell the Memory Fault Handler that this is an expected fault.
* The handler will recover from this fault gracefully by jumping to the
* next instruction. */
ucROTaskFaultTracker[ 0 ] = 1;
/* Illegal access to generate Memory Fault. */
ucSharedMemory[ 0 ] = 0;
/* Wait for a second. */
vTaskDelay( pdMS_TO_TICKS( 1000 ) );
}
}
/*-----------------------------------------------------------*/
static void prvRWAccessTask( void * pvParameters )
{
/* Unused parameters. */
( void ) pvParameters;
for( ; ; )
{
/* This task has RW access to ucSharedMemory and therefore can write to
* it. */
ucSharedMemory[ 0 ] = 0;
/* Wait for a second. */
vTaskDelay( pdMS_TO_TICKS( 1000 ) );
}
}
/*-----------------------------------------------------------*/
@@ -127,38 +127,38 @@ static StackType_t xROAccessTaskStack[ configMINIMAL_STACK_SIZE ] __attribute__(
static StackType_t xRWAccessTaskStack[ configMINIMAL_STACK_SIZE ] __attribute__( ( aligned( 32 ) ) );
TaskParameters_t xROAccessTaskParameters =
{
.pvTaskCode = prvROAccessTask,
.pcName = "ROAccess",
.usStackDepth = configMINIMAL_STACK_SIZE,
.pvParameters = NULL,
.uxPriority = tskIDLE_PRIORITY,
.puxStackBuffer = xROAccessTaskStack,
.xRegions = {
{ ucSharedMemory, 32, tskMPU_REGION_READ_ONLY | tskMPU_REGION_EXECUTE_NEVER },
{ ucROTaskFaultTracker, 32, tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
{ 0, 0, 0 },
}
};
TaskParameters_t xRWAccessTaskParameters =
{
.pvTaskCode = prvRWAccessTask,
.pcName = "RWAccess",
.usStackDepth = configMINIMAL_STACK_SIZE,
.pvParameters = NULL,
.uxPriority = tskIDLE_PRIORITY,
.puxStackBuffer = xRWAccessTaskStack,
.xRegions = {
{ ucSharedMemory, 32, tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
{ 0, 0, 0 },
{ 0, 0, 0 },
}
};
/* Create an unprivileged task with RO access to ucSharedMemory. */
xTaskCreateRestricted( &( xROAccessTaskParameters ), NULL );
/* Create an unprivileged task with RW access to ucSharedMemory. */
xTaskCreateRestricted( &( xRWAccessTaskParameters ), NULL );
}
/*-----------------------------------------------------------*/
@@ -166,27 +166,27 @@ void vHandleMemoryFault( uint32_t * pulFaultStackAddress )
{
uint32_t ulPC;
/* Is this an expected fault? */
if( ucROTaskFaultTracker[ 0 ] == 1 )
{
/* Read program counter. */
ulPC = pulFaultStackAddress[ 6 ];
/* Increment the program counter by 2 to move to the next instruction. */
ulPC += 2;
/* Save the new program counter on the stack. */
pulFaultStackAddress[ 6 ] = ulPC;
/* Mark the fault as handled. */
ucROTaskFaultTracker[ 0 ] = 0;
}
else
{
/* This is an unexpected fault - loop forever. */
for( ; ; )
{
}
}
}
/*-----------------------------------------------------------*/
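/* Note (for reference, not part of this commit): pulFaultStackAddress above
 * points at the exception frame that Cortex-M hardware stacks automatically on
 * exception entry, which is why index 6 holds the return address:
 *
 *     [ 0 ] r0    [ 1 ] r1    [ 2 ] r2    [ 3 ] r3
 *     [ 4 ] r12   [ 5 ] lr    [ 6 ] return address (PC)    [ 7 ] xPSR
 *
 * Adding 2 to the PC skips a 16-bit Thumb instruction; the byte store that
 * faults in prvROAccessTask is expected to be one, but a 32-bit encoding would
 * require adding 4 instead. */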


@@ -44,16 +44,16 @@ secureportNON_SECURE_CALLABLE uint32_t NSCFunction( Callback_t pxCallback )
{
NonSecureCallback_t pxNonSecureCallback;
/* Return function pointer with cleared LSB. */
pxNonSecureCallback = ( NonSecureCallback_t ) cmse_nsfptr_create( pxCallback );
/* Invoke the supplied callback. */
pxNonSecureCallback();
/* Increment the secure side counter. */
ulSecureCounter += 1;
/* Return the secure side counter. */
return ulSecureCounter;
}
/*-----------------------------------------------------------*/
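/* Note (self-contained sketch, not from this commit): NSCFunction() above is a
 * Non-Secure Callable entry point. secureportNON_SECURE_CALLABLE presumably
 * wraps the standard CMSE entry attribute, and cmse_nsfptr_create() from
 * <arm_cmse.h> clears the LSB of the supplied callback so that the indirect
 * call is made with BLXNS and actually switches to the non-secure state. The
 * same pattern without the port macros, using made-up names: */
#include <arm_cmse.h>
#include <stdint.h>

typedef void ( * ExampleNSCallback_t )( void ) __attribute__( ( cmse_nonsecure_call ) );

static uint32_t ulExampleSecureCounter = 0;

__attribute__( ( cmse_nonsecure_entry ) ) uint32_t ulExampleNSCEntry( void ( * pxCallback )( void ) )
{
    ExampleNSCallback_t pxNonSecureCallback;

    /* Clear the LSB - a BLXNS to an address with the LSB set would remain in
     * the secure state. */
    pxNonSecureCallback = ( ExampleNSCallback_t ) cmse_nsfptr_create( pxCallback );
    pxNonSecureCallback();

    ulExampleSecureCounter++;
    return ulExampleSecureCounter;
}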


@@ -71,30 +71,30 @@ void vStartTZDemo( void )
static StackType_t xSecureCallingTaskStack[ configMINIMAL_STACK_SIZE ] __attribute__( ( aligned( 32 ) ) );
TaskParameters_t xSecureCallingTaskParameters =
{
.pvTaskCode = prvSecureCallingTask,
.pcName = "SecCalling",
.usStackDepth = configMINIMAL_STACK_SIZE,
.pvParameters = NULL,
.uxPriority = tskIDLE_PRIORITY,
.puxStackBuffer = xSecureCallingTaskStack,
.xRegions = {
{ ulNonSecureCounter, 32, tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
{ 0, 0, 0 },
{ 0, 0, 0 },
}
};
/* Create an unprivileged task which calls secure functions. */
xTaskCreateRestricted( &( xSecureCallingTaskParameters ), NULL );
}
/*-----------------------------------------------------------*/
static void prvCallback( void )
{
/* This function is called from the secure side. Just increment the counter
* here. The check that this counter keeps incrementing is performed in the
* prvSecureCallingTask. */
ulNonSecureCounter[ 0 ] += 1;
}
/*-----------------------------------------------------------*/
@@ -103,31 +103,31 @@ static void prvSecureCallingTask( void * pvParameters )
uint32_t ulLastSecureCounter = 0, ulLastNonSecureCounter = 0;
uint32_t ulCurrentSecureCounter = 0;
/* This task calls secure side functions. So allocate a secure context for
* it. */
portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
for( ; ; )
{
/* Call the secure side function. It does two things:
* - It calls the supplied function (prvCallback) which in turn
* increments the non-secure counter.
* - It increments the secure counter and returns the incremented value.
* Therefore at the end of this function call both the secure and
* non-secure counters must have been incremented.
*/
ulCurrentSecureCounter = NSCFunction( prvCallback );
/* Make sure that both the counters are incremented. */
configASSERT( ulCurrentSecureCounter == ulLastSecureCounter + 1 );
configASSERT( ulNonSecureCounter[ 0 ] == ulLastNonSecureCounter + 1 );
/* Update the last values for both the counters. */
ulLastSecureCounter = ulCurrentSecureCounter;
ulLastNonSecureCounter = ulNonSecureCounter[ 0 ];
/* Wait for a second. */
vTaskDelay( pdMS_TO_TICKS( 1000 ) );
}
}
/*-----------------------------------------------------------*/

File diff suppressed because it is too large.


@@ -33,349 +33,349 @@
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
" ldr r4, [r3] \n" /* r4 = *r3 i.e. r4 = MAIR0. */
" ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r4, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst2 \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r4, #4 \n" /* r4 = 4. */
" str r4, [r2] \n" /* Program RNR = 4. */
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
" ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r3!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldm r0!, {r1-r4} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
" ldr r5, xSecureContextConst2 \n"
" str r1, [r5] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" msr control, r3 \n" /* Set this task's CONTROL value. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r4 \n" /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldr r4, xSecureContextConst2 \n"
" str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const2: .word 0xe000edc0 \n"
"xRNRConst2: .word 0xe000ed98 \n"
"xRBARConst2: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
:::"r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
"xVTORConst: .word 0xe000ed08 \n"
:: "i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
/*-----------------------------------------------------------*/
uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, PRIMASK \n"
" cpsid i \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* To avoid compiler warnings. The return statement will never be reached,
* but some compilers warn if it is not included, while others won't compile
* if it is. */
return 0;
#endif
}
/*-----------------------------------------------------------*/
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" msr PRIMASK, r0 \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* Just to avoid compiler warning. ulMask is used from the asm code but
* the compiler can't see that. Some compilers generate warnings without
* the following line, while others generate warnings if the line is
* included. */
( void ) ulMask;
#endif
}
/*-----------------------------------------------------------*/
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" mrs r1, psp \n" /* Read PSP in r1. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r2] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" \n"
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r2, r3, #25 \n" /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r2, [r3] \n" /* Read pxCurrentTCB. */
#if( configENABLE_MPU == 1 )
" subs r1, r1, #16 \n" /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mrs r3, control \n" /* r3 = CONTROL. */
" mov r4, lr \n" /* r4 = LR/EXC_RETURN. */
" stmia r1!, {r0, r2-r4} \n" /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
" subs r1, r1, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r1!, {r0, r2-r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r2, [r3] \n" /* Read pxCurrentTCB. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vstmdbeq r1!, {s16-s31} \n" /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" subs r1, r1, #48 \n" /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" adds r1, r1, #16 \n" /* r1 = r1 + 16. */
" stm r1, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mrs r3, control \n" /* r3 = CONTROL. */
" mov r4, lr \n" /* r4 = LR/EXC_RETURN. */
" subs r1, r1, #16 \n" /* r1 = r1 - 16. */
" stm r1, {r0, r2-r4} \n" /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
" subs r1, r1, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" adds r1, r1, #12 \n" /* r1 = r1 + 12. */
" stm r1, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r1, r1, #12 \n" /* r1 = r1 - 12. */
" stmia r1!, {r0, r2-r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
" \n"
" select_next_task: \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
" ldr r4, [r3] \n" /* r4 = *r3 i.e. r4 = MAIR0. */
" ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r4, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r4, #4 \n" /* r4 = 4. */
" str r4, [r2] \n" /* Program RNR = 4. */
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
" ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r3!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldmia r1!, {r0, r2-r4} \n" /* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr control, r3 \n" /* Restore the CONTROL register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r2] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" push {r1,r4} \n"
" bl SecureContext_LoadContext \n" /* Restore the secure context. */
" pop {r1,r4} \n"
" mov lr, r4 \n" /* LR = r4. */
" lsls r2, r4, #25 \n" /* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
#else /* configENABLE_MPU */
" ldmia r1!, {r0, r2-r3} \n" /* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r3 \n" /* LR = r3. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r2] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" push {r1,r3} \n"
" bl SecureContext_LoadContext \n" /* Restore the secure context. */
" pop {r1,r3} \n"
" mov lr, r3 \n" /* LR = r3. */
" lsls r2, r3, #25 \n" /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
#endif /* configENABLE_MPU */
" \n"
" restore_ns_context: \n"
" ldmia r1!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vldmiaeq r1!, {s16-s31} \n" /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const: .word 0xe000edc0 \n"
"xRNRConst: .word 0xe000ed98 \n"
"xRBARConst: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, svchandler_address_const \n"
" bx r1 \n"
" \n"
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
:: "i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
/*-----------------------------------------------------------*/
void vPortFreeSecureContext( uint32_t *pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r1, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r0, [r1] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r0, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
:: "i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
/*-----------------------------------------------------------*/
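For context, a non-secure task is expected to request its secure-side context once, before its first call into the secure firmware. The sketch below is illustrative only; it assumes the portALLOCATE_SECURE_CONTEXT() macro wraps vPortAllocateSecureContext() as in the public FreeRTOS ARMv8-M ports, and NSCExampleFunction() stands in for an application-defined non-secure-callable entry point.

/* Illustrative sketch: assumes portALLOCATE_SECURE_CONTEXT() maps onto vPortAllocateSecureContext(). */
#include "FreeRTOS.h"
#include "task.h"

extern void NSCExampleFunction( void );   /* Hypothetical veneer exported by the secure image. */

static void prvSecureCallingTask( void *pvParameters )
{
    ( void ) pvParameters;

    /* Allocate this task's secure-side stack before the first call into the secure side. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ;; )
    {
        NSCExampleFunction();               /* Call into the secure side. */
        vTaskDelay( pdMS_TO_TICKS( 100 ) ); /* Run roughly every 100 ms. */
    }
}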

View file

@@ -33,253 +33,253 @@
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst2 \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" msr control, r2 \n" /* Set this task's CONTROL value. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const2: .word 0xe000edc0 \n"
"xRNRConst2: .word 0xe000ed98 \n"
"xRBARConst2: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
:::"r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
"xVTORConst: .word 0xe000ed08 \n"
:: "i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
/*-----------------------------------------------------------*/
uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, PRIMASK \n"
" cpsid i \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* To avoid compiler warnings. The return statement will never be reached,
* but some compilers warn if it is not included, while others won't compile
* if it is. */
return 0;
#endif
}
/*-----------------------------------------------------------*/
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" msr PRIMASK, r0 \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* Just to avoid compiler warning. ulMask is used from the asm code but
* the compiler can't see that. Some compilers generate warnings without
* the following line, while others generate warnings if the line is
* included. */
( void ) ulMask;
#endif
}
/*-----------------------------------------------------------*/
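ulSetInterruptMaskFromISR() and vClearInterruptMaskFromISR() are normally reached through the port's interrupt-safe macros rather than called directly. A minimal usage sketch follows, assuming portSET_INTERRUPT_MASK_FROM_ISR()/portCLEAR_INTERRUPT_MASK_FROM_ISR() map onto these two functions as in other FreeRTOS ports; the handler name and shared counter are invented for illustration.

/* Illustrative only: assumes the portSET/CLEAR_INTERRUPT_MASK_FROM_ISR() macros wrap the functions above. */
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCount = 0; /* Hypothetical data shared with tasks. */

void ExampleDevice_IRQHandler( void )
{
    uint32_t ulSavedInterruptMask;

    ulSavedInterruptMask = portSET_INTERRUPT_MASK_FROM_ISR();  /* Mask interrupts, remember the previous PRIMASK value. */
    ulSharedCount++;                                           /* Shared data can be touched safely here. */
    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulSavedInterruptMask ); /* Restore the previous mask on the way out. */
}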
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n" /* Read PSP in r0. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n" /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mrs r2, control \n" /* r2 = CONTROL. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r1-r11} \n" /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldmia r0!, {r1-r11} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_FPU == 1 )
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n" /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
" \n"
#if( configENABLE_MPU == 1 )
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" msr control, r2 \n" /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xMAIR0Const: .word 0xe000edc0 \n"
"xRNRConst: .word 0xe000ed98 \n"
"xRBARConst: .word 0xe000ed9c \n"
);
}
/*-----------------------------------------------------------*/
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, svchandler_address_const \n"
" bx r1 \n"
" \n"
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
/*-----------------------------------------------------------*/

View file

@@ -25,278 +25,278 @@
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vPortAllocateSecureContext
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
PUBLIC vPortFreeSecureContext
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
ite ne
movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
    moveq r0, #1                         /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
orr r0, r0, #1 /* r0 = r0 | 1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
    ldmia r3!, {r4-r11}                  /* Read 4 sets of RBAR/RLAR registers from TCB. */
    stmia r2!, {r4-r11}                  /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
ldr r5, =xSecureContext
str r1, [r5] /* Set xSecureContext to this task's value for the same. */
msr psplim, r2 /* Set this task's PSPLIM value. */
msr control, r3 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r4 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
msr psplim, r2 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
bic r0, r0, #1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
PendSV_Handler:
mrs r1, psp /* Read PSP in r1. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r2] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
cbz r0, save_ns_context /* No secure context to save. */
push {r0-r2, r14}
bl SecureContext_SaveContext
pop {r0-r3} /* LR is now in r3. */
mov lr, r3 /* LR = r3. */
lsls r2, r3, #25 /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl save_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r2, [r3] /* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
subs r1, r1, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
mrs r3, control /* r3 = CONTROL. */
mov r4, lr /* r4 = LR/EXC_RETURN. */
stmia r1!, {r0, r2-r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
subs r1, r1, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r1!, {r0, r2-r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r2, [r3] /* Read pxCurrentTCB. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vstmdbeq r1!, {s16-s31} /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
subs r1, r1, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
adds r1, r1, #16 /* r1 = r1 + 16. */
stm r1, {r4-r11} /* Store the registers that are not saved automatically. */
mrs r2, psplim /* r2 = PSPLIM. */
mrs r3, control /* r3 = CONTROL. */
mov r4, lr /* r4 = LR/EXC_RETURN. */
subs r1, r1, #16 /* r1 = r1 - 16. */
stm r1, {r0, r2-r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
subs r1, r1, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
adds r1, r1, #12 /* r1 = r1 + 12. */
stm r1, {r4-r11} /* Store the registers that are not saved automatically. */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r1, r1, #12 /* r1 = r1 - 12. */
stmia r1!, {r0, r2-r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
select_next_task:
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r1, [r3] /* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r3!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
    stmia r2!, {r4-r11}                  /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldmia r1!, {r0, r2-r4} /* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
msr control, r3 /* Restore the CONTROL register value for the task. */
mov lr, r4 /* LR = r4. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
str r0, [r2] /* Restore the task's xSecureContext. */
cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
push {r1,r4}
bl SecureContext_LoadContext /* Restore the secure context. */
pop {r1,r4}
mov lr, r4 /* LR = r4. */
lsls r2, r4, #25 /* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl restore_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
#else /* configENABLE_MPU */
ldmia r1!, {r0, r2-r3} /* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
mov lr, r3 /* LR = r3. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
str r0, [r2] /* Restore the task's xSecureContext. */
cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
push {r1,r3}
bl SecureContext_LoadContext /* Restore the secure context. */
pop {r1,r3}
mov lr, r3 /* LR = r3. */
lsls r2, r3, #25 /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl restore_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
#endif /* configENABLE_MPU */
restore_ns_context:
ldmia r1!, {r4-r11} /* Restore the registers that are not automatically restored. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vldmiaeq r1!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
/*-----------------------------------------------------------*/
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
/* r0 = uint32_t *pulTCB. */
ldr r1, [r0] /* The first item in the TCB is the top of the stack. */
ldr r0, [r1] /* The first item on the stack is the task's xSecureContext. */
cmp r0, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
END

View file

@@ -25,194 +25,194 @@
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
ite ne
movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
    moveq r0, #1                         /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
orr r0, r0, #1 /* r0 = r0 | 1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
    stmia r2!, {r4-r11}                  /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
msr control, r2 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
bic r0, r0, #1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vstmdbeq r0!, {s16-s31} /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
mrs r1, psplim /* r1 = PSPLIM. */
mrs r2, control /* r2 = CONTROL. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
    stmdb r0!, {r2-r11}                  /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
str r0, [r1] /* Save the new top of stack in TCB. */
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
    stmia r2!, {r4-r11}                  /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
#if ( configENABLE_FPU == 1 )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vldmiaeq r0!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
msr control, r2 /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
/*-----------------------------------------------------------*/
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
END

View file

@@ -42,46 +42,59 @@ extern "C" {
*------------------------------------------------------------------------------
*/
#ifndef configENABLE_FPU
#error configENABLE_FPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_FPU */
#ifndef configENABLE_MPU
#error configENABLE_MPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_MPU */
#ifndef configENABLE_TRUSTZONE
#error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP()
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#endif
#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
@@ -96,13 +109,13 @@ extern uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PR
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
#if( configENABLE_TRUSTZONE == 1 )
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
#endif /* configENABLE_TRUSTZONE */
#if( configENABLE_MPU == 1 )
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -110,58 +123,58 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
* @brief MPU specific constants.
*/
#if( configENABLE_MPU == 1 )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#else
#define portPRIVILEGE_BIT ( 0x0UL )
#endif /* configENABLE_MPU */
/* MPU regions. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Devices Region. */
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
/**
@ -169,8 +182,8 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
*/
typedef struct MPURegionSettings
{
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
/**
@ -178,99 +191,99 @@ typedef struct MPURegionSettings
*/
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
} xMPU_SETTINGS;
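/* Illustrative sketch (not part of the original header): one way a single
 * configurable region entry could be encoded with the attribute macros defined
 * above. The base/limit addresses and the MAIR attribute index are made-up
 * example values, and the bit layout assumed here is the standard ARMv8-M one
 * (RBAR = BASE | SH | AP | XN, RLAR = LIMIT | AttrIndx | EN). */
static inline void vExampleFillRegionEntry( MPURegionSettings_t * pxRegion )
{
    pxRegion->ulRBAR = ( 0x20020000UL & 0xFFFFFFE0UL ) |   /* Region base address, 32-byte aligned. */
                       portMPU_REGION_NON_SHAREABLE |
                       portMPU_REGION_READ_WRITE |
                       portMPU_REGION_EXECUTE_NEVER;
    pxRegion->ulRLAR = ( 0x2002FFFFUL & 0xFFFFFFE0UL ) |   /* Region limit address. */
                       ( 1UL << 1UL ) |                    /* MAIR0 attribute index 1 (example only). */
                       ( 1UL );                            /* Region enable bit. */
}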
/*-----------------------------------------------------------*/
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
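/* Illustrative sketch (not part of the original header): the usual pattern for
 * requesting a context switch from an interrupt handler with the macros above.
 * vExampleIRQHandler is a made-up name; xHigherPriorityTaskWoken would normally
 * be set by a ...FromISR() API call. */
static inline void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = 0;

    /* ... call a FromISR API here, passing &xHigherPriorityTaskWoken ... */

    /* Pend PendSV so the switch happens on exception return, but only if a
     * higher priority task was unblocked. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}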
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
#if( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
* if it has one.
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
#define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
#else
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
#define portCLEAN_UP_TCB( pxTCB )
#endif /* configENABLE_TRUSTZONE */
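/* Illustrative sketch (not part of the original header): a task that intends to
 * call secure (non-secure callable) functions first allocates itself a secure
 * context. vExampleSecureCallingTask is a made-up name, and
 * configMINIMAL_SECURE_STACK_SIZE comes from the application's FreeRTOSConfig.h.
 * The context is freed automatically via portCLEAN_UP_TCB() when the task is
 * deleted. */
static void vExampleSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Must be called before the first call into the secure side. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ;; )
    {
        /* ... call non-secure callable (veneer) functions exported by the
         * secure side here ... */
    }
}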
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
#define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
*
* The SVC handler checks that the SVC was raised from a system call, and only
* then raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
#define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" :: "i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting bit 0 of the CONTROL
* register.
*/
#define portRESET_PRIVILEGE() vResetPrivilege()
#else
#define portIS_PRIVILEGED()
#define portRAISE_PRIVILEGE()
#define portRESET_PRIVILEGE()
#endif /* configENABLE_MPU */
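/* Illustrative sketch (not part of the original header): the save/raise/restore
 * pattern that an MPU system-call wrapper could use with the macros above.
 * prvExamplePrivilegedOperation is a made-up name. */
#if( configENABLE_MPU == 1 )
    static inline void prvExamplePrivilegedOperation( void )
    {
        BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

        if( xRunningPrivileged == 0 )
        {
            portRAISE_PRIVILEGE(); /* SVC - only honoured when raised from a system call. */
        }

        /* ... perform the operation that needs privileged access ... */

        if( xRunningPrivileged == 0 )
        {
            portRESET_PRIVILEGE(); /* Drop back to unprivileged Thread Mode. */
        }
    }
#endif /* configENABLE_MPU */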
/*-----------------------------------------------------------*/


@ -33,56 +33,56 @@
secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle )
{
/* xSecureContextHandle value is in r0. */
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r1, ipsr \n" /* r1 = IPSR. */
" cbz r1, load_ctx_thread_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
" ldmia r0!, {r1, r2} \n" /* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if( configENABLE_MPU == 1 )
" ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */
" msr control, r3 \n" /* CONTROL = r3. */
#endif /* configENABLE_MPU */
" msr psplim, r2 \n" /* PSPLIM = r2. */
" msr psp, r1 \n" /* PSP = r1. */
" \n"
" load_ctx_thread_mode: \n"
" nop \n"
" \n"
:::"r0", "r1", "r2"
);
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle )
{
/* xSecureContextHandle value is in r0. */
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r1, ipsr \n" /* r1 = IPSR. */
" cbz r1, save_ctx_thread_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
" mrs r1, psp \n" /* r1 = PSP. */
#if( configENABLE_FPU == 1 )
" vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */
" vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" mrs r2, control \n" /* r2 = CONTROL. */
" stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */
#endif /* configENABLE_MPU */
" str r1, [r0] \n" /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
" movs r1, %0 \n" /* r1 = securecontextNO_STACK. */
" msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */
" msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
" \n"
" save_ctx_thread_mode: \n"
" nop \n"
" \n"
:: "i" ( securecontextNO_STACK ) : "r1", "memory"
);
}
/*-----------------------------------------------------------*/


@ -37,12 +37,12 @@ extern void SecureContext_SaveContextAsm( SecureContextHandle_t xSecureContextHa
secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle )
{
SecureContext_LoadContextAsm( xSecureContextHandle );
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle )
{
SecureContext_SaveContextAsm( xSecureContextHandle );
}
/*-----------------------------------------------------------*/


@ -25,49 +25,49 @@
* 1 tab == 4 spaces!
*/
SECTION .text:CODE:NOROOT(2)
THUMB
PUBLIC SecureContext_LoadContextAsm
PUBLIC SecureContext_SaveContextAsm
/*-----------------------------------------------------------*/
SecureContext_LoadContextAsm:
/* xSecureContextHandle value is in r0. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, load_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
ldmia r0!, {r1, r2} /* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if ( configENABLE_MPU == 1 )
ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */
msr control, r3 /* CONTROL = r3. */
#endif /* configENABLE_MPU */
msr psplim, r2 /* PSPLIM = r2. */
msr psp, r1 /* PSP = r1. */
load_ctx_thread_mode:
bx lr
/*-----------------------------------------------------------*/
SecureContext_SaveContextAsm:
/* xSecureContextHandle value is in r0. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, save_ctx_thread_mode /* Do nothing if the processor is running in the Thread Mode. */
mrs r1, psp /* r1 = PSP. */
#if ( configENABLE_FPU == 1 )
vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */
vldmia r1!, {s0} /* Nullify the effect of the previous statement. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
mrs r2, control /* r2 = CONTROL. */
stmdb r1!, {r2} /* Store CONTROL value on the stack. */
#endif /* configENABLE_MPU */
str r1, [r0] /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
movs r1, #0 /* r1 = securecontextNO_STACK. */
msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */
msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
save_ctx_thread_mode:
bx lr
/*-----------------------------------------------------------*/
END


@ -40,7 +40,7 @@
* Bit[0] - 0 --> Thread mode is privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
/**
* @brief CONTROL value for un-privileged tasks.
@ -48,7 +48,7 @@
* Bit[0] - 1 --> Thread mode is un-privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
/*-----------------------------------------------------------*/
/**
@ -59,146 +59,146 @@
*/
typedef struct SecureContext
{
uint8_t *pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
uint8_t *pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
uint8_t *pucStackStart; /**< First location of the stack memory. */
} SecureContext_t;
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* No stack for thread mode until a task's context is loaded. */
secureportSET_PSPLIM( securecontextNO_STACK );
secureportSET_PSP( securecontextNO_STACK );
#if( configENABLE_MPU == 1 )
{
/* Configure thread mode to use PSP and to be unprivileged. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
}
#else /* configENABLE_MPU */
{
/* Configure thread mode to use PSP and to be privileged.. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
}
#endif /* configENABLE_MPU */
}
}
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged )
#else /* configENABLE_MPU */
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize )
#endif /* configENABLE_MPU */
{
uint8_t *pucStackMemory = NULL;
uint32_t ulIPSR;
SecureContextHandle_t xSecureContextHandle = NULL;
#if( configENABLE_MPU == 1 )
uint32_t *pulCurrentStackPointer = NULL;
#endif /* configENABLE_MPU */
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Allocate the context structure. */
xSecureContextHandle = ( SecureContextHandle_t ) pvPortMalloc( sizeof( SecureContext_t ) );
if( xSecureContextHandle != NULL )
{
/* Allocate the stack space. */
pucStackMemory = pvPortMalloc( ulSecureStackSize );
if( pucStackMemory != NULL )
{
/* Since stack grows down, the starting point will be the last
* location. Note that this location is next to the last
* allocated byte because the hardware decrements the stack
* pointer before writing i.e. if stack pointer is 0x2, a push
* operation will decrement the stack pointer to 0x1 and then
* write at 0x1. */
xSecureContextHandle->pucStackStart = pucStackMemory + ulSecureStackSize;
/* The stack cannot go beyond this location. This value is
* programmed in the PSPLIM register on context switch.*/
xSecureContextHandle->pucStackLimit = pucStackMemory;
#if( configENABLE_MPU == 1 )
{
/* Store the correct CONTROL value for the task on the stack.
* This value is programmed in the CONTROL register on
* context switch. */
pulCurrentStackPointer = ( uint32_t * ) xSecureContextHandle->pucStackStart;
pulCurrentStackPointer--;
if( ulIsTaskPrivileged )
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
}
else
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
}
/* Store the current stack pointer. This value is programmed in
* the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
}
#else /* configENABLE_MPU */
{
/* Current SP is set to the start of the stack. This
* value is programmed in the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = xSecureContextHandle->pucStackStart;
}
#endif /* configENABLE_MPU */
}
else
{
/* Free the context to avoid memory leak and make sure to return
* NULL to indicate failure. */
vPortFree( xSecureContextHandle );
xSecureContextHandle = NULL;
}
}
}
return xSecureContextHandle;
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Ensure that valid parameters are passed. */
secureportASSERT( xSecureContextHandle != NULL );
/* Free the stack space. */
vPortFree( xSecureContextHandle->pucStackLimit );
/* Free the context itself. */
vPortFree( xSecureContextHandle );
}
}
/*-----------------------------------------------------------*/
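/* Illustrative sketch (not part of the original file): the order in which the
 * non-secure side is expected to drive the entry points above. In the real
 * port these calls arrive through the NSC veneers from non-secure handler mode
 * (for example the SVC and PendSV handlers) - the IPSR checks above turn
 * direct Thread Mode calls into no-ops. prvExampleContextLifecycle is a
 * made-up name and the stack size is an example value. */
#if( configENABLE_MPU == 1 )
    static void prvExampleContextLifecycle( void )
    {
        SecureContextHandle_t xHandle;

        /* One-time initialisation of PSP/PSPLIM/CONTROL for the secure side. */
        SecureContext_Init();

        /* Per task, when the task requests a secure context. */
        xHandle = SecureContext_AllocateContext( 1024, 0 /* unprivileged. */ );

        if( xHandle != NULL )
        {
            /* On every context switch the non-secure port calls
             * SecureContext_SaveContext( xHandle ) for the task being switched
             * out and SecureContext_LoadContext( xHandle ) for the task being
             * switched in. */

            /* When the task is deleted. */
            SecureContext_FreeContext( xHandle );
        }
    }
#endif /* configENABLE_MPU */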


@ -37,13 +37,13 @@
/**
* @brief PSP value when no task's context is loaded.
*/
#define securecontextNO_STACK 0x0
/**
* @brief Opaque handle.
*/
struct SecureContext;
typedef struct SecureContext* SecureContextHandle_t;
/*-----------------------------------------------------------*/
/**
@ -70,9 +70,9 @@ void SecureContext_Init( void );
* otherwise.
*/
#if( configENABLE_MPU == 1 )
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged );
#else /* configENABLE_MPU */
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize );
#endif /* configENABLE_MPU */
/**


@ -37,37 +37,37 @@
/**
* @brief Total heap size.
*/
#define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
/* No test marker by default. */
#ifndef mtCOVERAGE_TEST_MARKER
#define mtCOVERAGE_TEST_MARKER()
#endif
/* No tracing by default. */
#ifndef traceMALLOC
#define traceMALLOC( pvReturn, xWantedSize )
#endif
/* No tracing by default. */
#ifndef traceFREE
#define traceFREE( pv, xBlockSize )
#endif
/* Block sizes must not get too small. */
#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
/* Assumes 8bit bytes! */
#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
/*-----------------------------------------------------------*/
/* Allocate the memory for the heap. */
#if( configAPPLICATION_ALLOCATED_HEAP == 1 )
/* The application writer has already defined the array used for the RTOS
* heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#else /* configAPPLICATION_ALLOCATED_HEAP */
static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#endif /* configAPPLICATION_ALLOCATED_HEAP */
/**
@ -77,8 +77,8 @@
*/
typedef struct A_BLOCK_LINK
{
struct A_BLOCK_LINK *pxNextFreeBlock; /**< The next free block in the list. */
size_t xBlockSize; /**< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
@ -135,44 +135,44 @@ uint8_t *pucAlignedHeap;
size_t uxAddress;
size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = ( size_t ) ucHeap;
if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
{
uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
}
pucAlignedHeap = ( uint8_t * ) uxAddress;
/* xStart is used to hold a pointer to the first item in the list of free
* blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
xStart.xBlockSize = ( size_t ) 0;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
* at the end of the heap space. */
uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
pxEnd = ( void * ) uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* To start with there is a single free block that is sized to take up the
* entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = ( void * ) pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
}
/*-----------------------------------------------------------*/
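/* Worked example of the alignment arithmetic above (descriptive note, not part
 * of the original file), assuming secureportBYTE_ALIGNMENT is 8: if ucHeap
 * happened to start at address 0x20001002 then
 *     uxAddress = 0x20001002 + ( 8 - 1 )    = 0x20001009
 *     uxAddress &= ~( ( size_t ) 0x0007 )   -> 0x20001008
 * so pucAlignedHeap is 0x20001008 and xTotalHeapSize shrinks by the 6 bytes
 * lost to alignment. pxEnd is then placed at the highest 8-byte aligned
 * address that still leaves room for one BlockLink_t below the top of the
 * heap, and the single initial free block spans everything in between. */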
@ -181,59 +181,59 @@ static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert )
BlockLink_t *pxIterator;
uint8_t *puc;
/* Iterate through the list until a block is found that has a higher address
* than the block being inserted. */
for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
{
/* Nothing to do here, just iterate to the right position. */
}
/* Do the block being inserted, and the block it is being inserted after
* make a contiguous block of memory? */
puc = ( uint8_t * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted, and the block it is being inserted before
* make a contiguous block of memory? */
puc = ( uint8_t * ) pxBlockToInsert;
if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
{
if( pxIterator->pxNextFreeBlock != pxEnd )
{
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gap, and so was merged with the block
* before and the block after, then its pxNextFreeBlock pointer will have
* already been set, and should not be set here as that would make it point
* to itself. */
if( pxIterator != pxBlockToInsert )
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/*-----------------------------------------------------------*/
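/* Descriptive note (not part of the original file): the invariant maintained by
 * prvInsertBlockIntoFreeList() is that free blocks sit on a single list ordered
 * by start address, bounded by xStart and pxEnd, and that a block being
 * inserted is merged with its predecessor and/or successor whenever the memory
 * regions are contiguous - so two adjacent free blocks never coexist in the
 * list. */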
@ -242,144 +242,144 @@ void *pvPortMalloc( size_t xWantedSize )
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void *pvReturn = NULL;
/* If this is the first call to malloc then the heap will require
* initialisation to setup the list of free blocks. */
if( pxEnd == NULL )
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Check the requested block size is not so large that the top bit is set.
* The top bit of the block size member of the BlockLink_t structure is used
* to determine who owns the block - the application or the kernel, so it
* must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
{
/* The wanted size is increased so it can contain a BlockLink_t
* structure in addition to the requested amount of bytes. */
if( xWantedSize > 0 )
{
xWantedSize += xHeapStructSize;
/* Ensure that blocks are always aligned to the required number of
* bytes. */
if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
* one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size was
* not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
* BlockLink_t structure at its start. */
pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
/* This block is being returned for use so must be taken out
* of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
* two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
* block following the number of bytes requested. The void
* cast is used to prevent byte alignment warnings from the
* compiler. */
pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
/* Calculate the sizes of two blocks split from the single
* block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned by
* the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC( pvReturn, xWantedSize );
#if( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
/*-----------------------------------------------------------*/
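/* Illustrative sketch (not part of the original file): typical use of the
 * secure-side heap by other secure code. prvExampleAllocation is a made-up
 * name. */
static void prvExampleAllocation( void )
{
    uint8_t * pucBuffer;

    /* The request is rounded up internally to make room for a BlockLink_t
     * header and to keep 8-byte alignment. */
    pucBuffer = pvPortMalloc( 100 );

    if( pucBuffer != NULL )
    {
        /* ... use the buffer ... */
        vPortFree( pucBuffer );
    }
}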
@ -388,63 +388,63 @@ void vPortFree( void *pv )
uint8_t *puc = ( uint8_t * ) pv;
BlockLink_t *pxLink;
if( pv != NULL )
{
/* The memory being freed will have a BlockLink_t structure immediately
* before it. */
puc -= xHeapStructSize;
/* This casting is to keep the compiler from issuing warnings. */
pxLink = ( void * ) puc;
/* Check the block is actually allocated. */
secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
secureportASSERT( pxLink->pxNextFreeBlock == NULL );
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
* allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
secureportDISABLE_NON_SECURE_INTERRUPTS();
{
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
secureportENABLE_NON_SECURE_INTERRUPTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
/*-----------------------------------------------------------*/
size_t xPortGetFreeHeapSize( void )
{
return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
size_t xPortGetMinimumEverFreeHeapSize( void )
{
return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
void vPortInitialiseBlocks( void )
{
/* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/


@ -37,69 +37,69 @@
/**
* @brief Constants required to manipulate the SCB.
*/
#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
/**
* @brief Constants required to manipulate the FPU.
*/
#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
#define secureinitFPCCR_LSPENS_POS ( 29UL )
#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
#define secureinitFPCCR_TS_POS ( 26UL )
#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
#define secureinitNSACR_CP10_POS ( 10UL )
#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
#define secureinitNSACR_CP11_POS ( 11UL )
#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
*( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
}
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
* permitted. CP11 should be programmed to the same value as CP10. */
*( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
/* LSPENS = 0 ==> LSPEN is writable from the non-secure state. This ensures
* that we can enable/disable lazy stacking in the port.c file. */
*( secureinitFPCCR ) &= ~ ( secureinitFPCCR_LSPENS_MASK );
/* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
* registers (S16-S31) are also pushed to stack on exception entry and
* restored on exception return. */
*( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
}
}
/*-----------------------------------------------------------*/
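/* Illustrative sketch (not part of the original file): the two entry points
 * above are intended to be called from the non-secure side (through their NSC
 * veneers) while it is running in Handler Mode - the IPSR checks make direct
 * Thread Mode calls no-ops. For example, the non-secure port could invoke them
 * from its SVC handler when the scheduler is started. prvExampleStartupCalls
 * is a made-up name. */
static void prvExampleStartupCalls( void )
{
    /* Make all non-secure exceptions lower priority than secure ones by
     * setting AIRCR.PRIS. */
    SecureInit_DePrioritizeNSExceptions();

    /* Let non-secure code use the FPU, and treat callee-saved FP registers as
     * secure across exceptions. */
    SecureInit_EnableNSFPUAccess();
}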


@ -31,103 +31,103 @@
/**
* @brief Byte alignment requirements.
*/
#define secureportBYTE_ALIGNMENT 8
#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
/**
* @brief Macro to declare a function as non-secure callable.
*/
#if defined( __IAR_SYSTEMS_ICC__ )
#define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry
#else
#define secureportNON_SECURE_CALLABLE __attribute__((cmse_nonsecure_entry))
#endif
/**
* @brief Set the secure PRIMASK value.
*/
#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Set the non-secure PRIMASK value.
*/
#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Read the PSP value in the given variable.
*/
#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
/**
* @brief Set the PSP to the given value.
*/
#define secureportSET_PSP( pucCurrentStackPointer ) \
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
/**
* @brief Set the PSPLIM to the given value.
*/
#define secureportSET_PSPLIM( pucStackLimit ) \
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
/**
* @brief Set the NonSecure MSP to the given value.
*/
#define secureportSET_MSP_NS( pucMainStackPointer ) \
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
/**
* @brief Set the CONTROL register to the given value.
*/
#define secureportSET_CONTROL( ulControl ) \
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
/**
* @brief Read the Interrupt Program Status Register (IPSR) value in the given
* variable.
*/
#define secureportREAD_IPSR( ulIPSR ) \
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
/**
* @brief PRIMASK value to enable interrupts.
*/
#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
/**
* @brief PRIMASK value to disable interrupts.
*/
#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
/**
* @brief Disable secure interrupts.
*/
#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Disable non-secure interrupts.
*
* This effectively disables context switches.
*/
#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Enable non-secure interrupts.
*/
#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
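/* Illustrative sketch (not part of the original header): bracketing a short
 * secure-side critical section with the macros above, in the same way the
 * secure heap protects its free list. Masking non-secure interrupts also
 * prevents non-secure context switches. prvExampleCriticalSection is a made-up
 * name. */
static inline void prvExampleCriticalSection( void )
{
    secureportDISABLE_NON_SECURE_INTERRUPTS();
    {
        /* ... touch state that is shared between the secure contexts of
         * different tasks ... */
    }
    secureportENABLE_NON_SECURE_INTERRUPTS();
}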
/**
* @brief Assert definition.
*/
#define secureportASSERT( x ) \
if( ( x ) == 0 ) \
{ \
secureportDISABLE_SECURE_INTERRUPTS(); \
secureportDISABLE_NON_SECURE_INTERRUPTS(); \
for( ;; ); \
}
#endif /* __SECURE_PORT_MACROS_H__ */


View file

@@ -33,349 +33,349 @@
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
" ldr r4, [r3] \n" /* r4 = *r3 i.e. r4 = MAIR0. */
" ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r4, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst2 \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r4, #4 \n" /* r4 = 4. */
" str r4, [r2] \n" /* Program RNR = 4. */
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
" ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r3!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldm r0!, {r1-r4} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
" ldr r5, xSecureContextConst2 \n"
" str r1, [r5] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" msr control, r3 \n" /* Set this task's CONTROL value. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r4 \n" /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldr r4, xSecureContextConst2 \n"
" str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const2: .word 0xe000edc0 \n"
"xRNRConst2: .word 0xe000ed98 \n"
"xRBARConst2: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
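For illustration only, the CONTROL[0] (nPRIV) test performed by xIsPrivileged() above can be written as the following C sketch; the helper name is hypothetical and the kernel uses the naked function above, not this:

#include <stdint.h>

static inline int32_t lSketchIsPrivileged( void )
{
	uint32_t ulControl;

	/* Read CONTROL and return 1 when Bit[0] (nPRIV) is clear, i.e. when thread
	 * mode is privileged - mirroring the ite/movne/moveq sequence above. */
	__asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

	return ( ( ulControl & 1UL ) == 0UL ) ? 1 : 0;
}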
void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
:::"r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
"xVTORConst: .word 0xe000ed08 \n"
:: "i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
/*-----------------------------------------------------------*/
uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, PRIMASK \n"
" cpsid i \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* To avoid compiler warnings. The return statement will never be reached,
* but some compilers warn if it is not included, while others won't compile
* if it is. */
return 0;
#endif
}
/*-----------------------------------------------------------*/
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" msr PRIMASK, r0 \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* Just to avoid compiler warning. ulMask is used from the asm code but
* the compiler can't see that. Some compilers generate warnings without
* the following line, while others generate warnings if the line is
* included. */
( void ) ulMask;
#endif
}
/*-----------------------------------------------------------*/
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" mrs r1, psp \n" /* Read PSP in r1. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r2] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" \n"
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r2, r3, #25 \n" /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r2, [r3] \n" /* Read pxCurrentTCB. */
#if( configENABLE_MPU == 1 )
" subs r1, r1, #16 \n" /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mrs r3, control \n" /* r3 = CONTROL. */
" mov r4, lr \n" /* r4 = LR/EXC_RETURN. */
" stmia r1!, {r0, r2-r4} \n" /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
" subs r1, r1, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r1!, {r0, r2-r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r2, [r3] \n" /* Read pxCurrentTCB. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vstmdbeq r1!, {s16-s31} \n" /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" subs r1, r1, #48 \n" /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" adds r1, r1, #16 \n" /* r1 = r1 + 16. */
" stm r1, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mrs r3, control \n" /* r3 = CONTROL. */
" mov r4, lr \n" /* r4 = LR/EXC_RETURN. */
" subs r1, r1, #16 \n" /* r1 = r1 - 16. */
" stm r1, {r0, r2-r4} \n" /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
" subs r1, r1, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r1, [r2] \n" /* Save the new top of stack in TCB. */
" adds r1, r1, #12 \n" /* r1 = r1 + 12. */
" stm r1, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r1, r1, #12 \n" /* r1 = r1 - 12. */
" stmia r1!, {r0, r2-r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
" \n"
" select_next_task: \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
" ldr r4, [r3] \n" /* r4 = *r3 i.e. r4 = MAIR0. */
" ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r4, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r4, #4 \n" /* r4 = 4. */
" str r4, [r2] \n" /* Program RNR = 4. */
" adds r3, #4 \n" /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
" ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r3!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldmia r1!, {r0, r2-r4} \n" /* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr control, r3 \n" /* Restore the CONTROL register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r2] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" push {r1,r4} \n"
" bl SecureContext_LoadContext \n" /* Restore the secure context. */
" pop {r1,r4} \n"
" mov lr, r4 \n" /* LR = r4. */
" lsls r2, r4, #25 \n" /* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
#else /* configENABLE_MPU */
" ldmia r1!, {r0, r2-r3} \n" /* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r3 \n" /* LR = r3. */
" ldr r2, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r2] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" push {r1,r3} \n"
" bl SecureContext_LoadContext \n" /* Restore the secure context. */
" pop {r1,r3} \n"
" mov lr, r3 \n" /* LR = r3. */
" lsls r2, r3, #25 \n" /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
#endif /* configENABLE_MPU */
" \n"
" restore_ns_context: \n"
" ldmia r1!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vldmiaeq r1!, {s16-s31} \n" /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
" msr psp, r1 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const: .word 0xe000edc0 \n"
"xRNRConst: .word 0xe000ed98 \n"
"xRBARConst: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
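The lsls #25 / bpl pair used twice in PendSV_Handler() above tests Bit[6] of EXC_RETURN by shifting it into the sign bit. A C sketch of the same test, purely for illustration (the helper name is hypothetical):

#include <stdint.h>

/* Returns 1 when Bit[6] of EXC_RETURN is set, i.e. the exception stack frame
 * was pushed to the secure stack. Shifting left by 25 moves Bit[6] into
 * Bit[31], so the handler's "bpl" (branch if positive or zero) is taken
 * exactly when the frame is on the non-secure stack. */
static inline int32_t lFrameOnSecureStack( uint32_t ulExcReturn )
{
	return ( ( ( int32_t ) ( ulExcReturn << 25UL ) ) < 0 ) ? 1 : 0;
}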
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, svchandler_address_const \n"
" bx r1 \n"
" \n"
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
/*-----------------------------------------------------------*/
void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
:: "i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
/*-----------------------------------------------------------*/
void vPortFreeSecureContext( uint32_t *pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r1, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r0, [r1] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r0, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
:: "i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
/*-----------------------------------------------------------*/

View file

@@ -42,46 +42,59 @@ extern "C" {
*------------------------------------------------------------------------------
*/
#ifndef configENABLE_FPU
#error configENABLE_FPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_FPU */
#ifndef configENABLE_MPU
#error configENABLE_MPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_MPU */
#ifndef configENABLE_TRUSTZONE
#error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP()
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#endif
#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
@@ -96,13 +109,13 @@ extern uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PR
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
#if( configENABLE_TRUSTZONE == 1 )
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
#endif /* configENABLE_TRUSTZONE */
#if( configENABLE_MPU == 1 )
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -110,58 +123,58 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
* @brief MPU specific constants.
*/
#if( configENABLE_MPU == 1 )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#else
#define portPRIVILEGE_BIT ( 0x0UL )
#endif /* configENABLE_MPU */
/* MPU regions. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Devices Region. */
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
/**
@@ -169,8 +182,8 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
*/
typedef struct MPURegionSettings
{
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
/**
@@ -178,99 +191,99 @@ typedef struct MPURegionSettings
*/
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
} xMPU_SETTINGS;
/*-----------------------------------------------------------*/
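A hedged sketch of how the MAIR/RBAR macros above could be combined into one entry of a task's xMPU_SETTINGS. In the kernel this structure is normally filled from the application's MemoryRegion_t definitions rather than by hand, and the two RLAR helpers below (enable in Bit[0], attribute index in Bits[3:1], per the ARMv8-M MPU definition) are assumptions, as the port's own RLAR constants are not part of this excerpt:

#include "FreeRTOS.h"

/* Hypothetical RLAR helpers - not defined by the port in this excerpt. */
#define sketchMPU_RLAR_ATTR_INDEX( x )	( ( ( uint32_t ) ( x ) ) << 1UL )
#define sketchMPU_RLAR_REGION_ENABLE	( 1UL )

static void vSketchConfigureRamRegion( xMPU_SETTINGS * pxSettings,
                                       uint32_t ulRegionStart,
                                       uint32_t ulRegionEnd )
{
	/* Attribute slot 0 in MAIR0: normal, write-back, read/write-allocate memory. */
	pxSettings->ulMAIR0 = portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE;

	/* Entry 0 is used purely for illustration: read/write, non-shareable and
	 * never executable. RBAR/RLAR addresses are 32-byte aligned. */
	pxSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStart & ~0x1FUL ) |
	                                           portMPU_REGION_NON_SHAREABLE |
	                                           portMPU_REGION_READ_WRITE |
	                                           portMPU_REGION_EXECUTE_NEVER;

	pxSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEnd & ~0x1FUL ) |
	                                           sketchMPU_RLAR_ATTR_INDEX( 0 ) |
	                                           sketchMPU_RLAR_REGION_ENABLE;
}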
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
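A minimal sketch of the intended use of portYIELD_FROM_ISR(): an interrupt handler pends PendSV through portNVIC_INT_CTRL_REG only when a higher priority task was woken. The peripheral handler and semaphore are hypothetical and not part of this port:

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xExampleSemaphore;	/* Hypothetical, created elsewhere. */

void ExampleDevice_IRQHandler( void )
{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	/* Unblock the task that is waiting for this interrupt. */
	( void ) xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

	/* Sets the PendSV bit in the ICSR register only if a context switch is
	 * required, so the switch happens on exception return. */
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}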
/**
* @brief Critical section management.
*/
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
#if( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
* if it has one.
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
#define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
#else
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
#define portCLEAN_UP_TCB( pxTCB )
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
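A minimal sketch of the rule described above: a task allocates its secure context before its first call into the secure side. The task, the non-secure callable function and the stack size are hypothetical; only portALLOCATE_SECURE_CONTEXT() comes from this port:

#include "FreeRTOS.h"
#include "task.h"

extern uint32_t ulExampleNSCFunction( uint32_t ulInput );	/* Hypothetical non-secure callable. */

static void prvSecureCallingTask( void * pvParameters )
{
	( void ) pvParameters;

	/* Tasks are created without a secure context. Allocate one before the
	 * first secure function call - the size is forwarded to the secure side
	 * allocator and 256 is only an example value. */
	portALLOCATE_SECURE_CONTEXT( 256 );

	for( ;; )
	{
		( void ) ulExampleNSCFunction( 42UL );
		vTaskDelay( pdMS_TO_TICKS( 1000 ) );
	}
}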
#if( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
#define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
*
* The SVC handler checks that the SVC was raised from a system call and only
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
#define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" :: "i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
#define portRESET_PRIVILEGE() vResetPrivilege()
#else
#define portIS_PRIVILEGED()
#define portRAISE_PRIVILEGE()
#define portRESET_PRIVILEGE()
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
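A hedged sketch of the raise/reset pattern these macros support, loosely modelled on the FreeRTOS MPU wrappers; the wrapped service is hypothetical and this is not the kernel's own implementation:

#include "FreeRTOS.h"

extern BaseType_t xExampleServiceImpl( void );	/* Hypothetical privileged-only work. */

BaseType_t MPU_xExampleService( void )
{
	BaseType_t xRunningPrivileged = portIS_PRIVILEGED();
	BaseType_t xReturn;

	if( xRunningPrivileged == pdFALSE )
	{
		portRAISE_PRIVILEGE();
	}

	xReturn = xExampleServiceImpl();

	if( xRunningPrivileged == pdFALSE )
	{
		portRESET_PRIVILEGE();
	}

	return xReturn;
}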

View file

@@ -40,7 +40,7 @@
* Bit[0] - 0 --> Thread mode is privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
/**
* @brief CONTROL value for un-privileged tasks.
@@ -48,7 +48,7 @@
* Bit[0] - 1 --> Thread mode is un-privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
/*-----------------------------------------------------------*/
/**
@@ -59,146 +59,146 @@
*/
typedef struct SecureContext
{
uint8_t *pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
uint8_t *pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
uint8_t *pucStackStart; /**< First location of the stack memory. */
} SecureContext_t;
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* No stack for thread mode until a task's context is loaded. */
secureportSET_PSPLIM( securecontextNO_STACK );
secureportSET_PSP( securecontextNO_STACK );
#if( configENABLE_MPU == 1 )
{
/* Configure thread mode to use PSP and to be unprivileged. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
}
#else /* configENABLE_MPU */
{
/* Configure thread mode to use PSP and to be privileged. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
}
#endif /* configENABLE_MPU */
}
}
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged )
#else /* configENABLE_MPU */
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize )
#endif /* configENABLE_MPU */
{
uint8_t *pucStackMemory = NULL;
uint32_t ulIPSR;
SecureContextHandle_t xSecureContextHandle = NULL;
#if( configENABLE_MPU == 1 )
uint32_t *pulCurrentStackPointer = NULL;
#endif /* configENABLE_MPU */
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Allocate the context structure. */
xSecureContextHandle = ( SecureContextHandle_t ) pvPortMalloc( sizeof( SecureContext_t ) );
if( xSecureContextHandle != NULL )
{
/* Allocate the stack space. */
pucStackMemory = pvPortMalloc( ulSecureStackSize );
if( pucStackMemory != NULL )
{
/* Since stack grows down, the starting point will be the last
* location. Note that this location is next to the last
* allocated byte because the hardware decrements the stack
* pointer before writing i.e. if stack pointer is 0x2, a push
* operation will decrement the stack pointer to 0x1 and then
* write at 0x1. */
xSecureContextHandle->pucStackStart = pucStackMemory + ulSecureStackSize;
/* The stack cannot go beyond this location. This value is
* programmed in the PSPLIM register on context switch.*/
xSecureContextHandle->pucStackLimit = pucStackMemory;
#if( configENABLE_MPU == 1 )
{
/* Store the correct CONTROL value for the task on the stack.
* This value is programmed in the CONTROL register on
* context switch. */
pulCurrentStackPointer = ( uint32_t * ) xSecureContextHandle->pucStackStart;
pulCurrentStackPointer--;
if( ulIsTaskPrivileged )
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
}
else
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
}
/* Store the current stack pointer. This value is programmed in
* the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
}
#else /* configENABLE_MPU */
{
/* Current SP is set to the start of the stack. This value is
 * programmed in the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = xSecureContextHandle->pucStackStart;
}
#endif /* configENABLE_MPU */
}
else
{
/* Free the context to avoid memory leak and make sure to return
* NULL to indicate failure. */
vPortFree( xSecureContextHandle );
xSecureContextHandle = NULL;
}
}
}
return xSecureContextHandle;
}
/*-----------------------------------------------------------*/
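For reference, the layout produced by SecureContext_AllocateContext() above for a hypothetical 1024 byte request with configENABLE_MPU set to 1 (the offsets follow directly from the code; the size is only an example):

/*
 * pucStackLimit          = pucStackMemory          <- lowest address, later programmed into PSPLIM.
 * pucCurrentStackPointer = pucStackStart - 4       <- one word below the top, holding the task's CONTROL value.
 * pucStackStart          = pucStackMemory + 1024   <- one byte past the allocated block; the first push
 *                                                     decrements the stack pointer before writing.
 *
 * SecureContext_LoadContext() later pops that CONTROL word and programs PSP and
 * PSPLIM from the same structure; with configENABLE_MPU set to 0 there is no
 * CONTROL word and pucCurrentStackPointer starts equal to pucStackStart.
 */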
secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Ensure that valid parameters are passed. */
secureportASSERT( xSecureContextHandle != NULL );
/* Free the stack space. */
vPortFree( xSecureContextHandle->pucStackLimit );
/* Free the context itself. */
vPortFree( xSecureContextHandle );
}
}
/*-----------------------------------------------------------*/

View file

@@ -37,13 +37,13 @@
/**
* @brief PSP value when no task's context is loaded.
*/
#define securecontextNO_STACK 0x0
/**
* @brief Opaque handle.
*/
struct SecureContext;
typedef struct SecureContext* SecureContextHandle_t;
/*-----------------------------------------------------------*/
/**
@@ -70,9 +70,9 @@ void SecureContext_Init( void );
* otherwise.
*/
#if( configENABLE_MPU == 1 )
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged );
#else /* configENABLE_MPU */
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize );
#endif /* configENABLE_MPU */
/**

View file

@@ -33,56 +33,56 @@
secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle )
{
/* xSecureContextHandle value is in r0. */
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r1, ipsr \n" /* r1 = IPSR. */
" cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
" ldmia r0!, {r1, r2} \n" /* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if( configENABLE_MPU == 1 )
" ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */
" msr control, r3 \n" /* CONTROL = r3. */
#endif /* configENABLE_MPU */
" msr psplim, r2 \n" /* PSPLIM = r2. */
" msr psp, r1 \n" /* PSP = r1. */
" \n"
" load_ctx_therad_mode: \n"
" nop \n"
" \n"
:::"r0", "r1", "r2"
);
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle )
{
/* xSecureContextHandle value is in r0. */
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r1, ipsr \n" /* r1 = IPSR. */
" cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */
" mrs r1, psp \n" /* r1 = PSP. */
#if( configENABLE_FPU == 1 )
" vstmdb r1!, {s0} \n" /* Trigger the defferred stacking of FPU registers. */
" vldmia r1!, {s0} \n" /* Nullify the effect of the pervious statement. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" mrs r2, control \n" /* r2 = CONTROL. */
" stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */
#endif /* configENABLE_MPU */
" str r1, [r0] \n" /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
" movs r1, %0 \n" /* r1 = securecontextNO_STACK. */
" msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */
" msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
" \n"
" save_ctx_therad_mode: \n"
" nop \n"
" \n"
:: "i" ( securecontextNO_STACK ) : "r1", "memory"
);
}
/*-----------------------------------------------------------*/

View file

@@ -37,37 +37,37 @@
/**
* @brief Total heap size.
*/
#define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
/* No test marker by default. */
#ifndef mtCOVERAGE_TEST_MARKER
#define mtCOVERAGE_TEST_MARKER()
#endif
/* No tracing by default. */
#ifndef traceMALLOC
#define traceMALLOC( pvReturn, xWantedSize )
#endif
/* No tracing by default. */
#ifndef traceFREE
#define traceFREE( pv, xBlockSize )
#endif
/* Block sizes must not get too small. */
#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
/* Assumes 8bit bytes! */
#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
/*-----------------------------------------------------------*/
/* Allocate the memory for the heap. */
#if( configAPPLICATION_ALLOCATED_HEAP == 1 )
/* The application writer has already defined the array used for the RTOS
* heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
/* The application writer has already defined the array used for the RTOS
* heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#else /* configAPPLICATION_ALLOCATED_HEAP */
static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#endif /* configAPPLICATION_ALLOCATED_HEAP */
/**
@ -77,8 +77,8 @@
*/
typedef struct A_BLOCK_LINK
{
struct A_BLOCK_LINK *pxNextFreeBlock; /**< The next free block in the list. */
size_t xBlockSize; /**< The size of the free block. */
struct A_BLOCK_LINK *pxNextFreeBlock; /**< The next free block in the list. */
size_t xBlockSize; /**< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
@ -135,44 +135,44 @@ uint8_t *pucAlignedHeap;
size_t uxAddress;
size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = ( size_t ) ucHeap;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = ( size_t ) ucHeap;
if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
{
uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
}
if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
{
uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
}
pucAlignedHeap = ( uint8_t * ) uxAddress;
pucAlignedHeap = ( uint8_t * ) uxAddress;
/* xStart is used to hold a pointer to the first item in the list of free
* blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
xStart.xBlockSize = ( size_t ) 0;
/* xStart is used to hold a pointer to the first item in the list of free
* blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
xStart.xBlockSize = ( size_t ) 0;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
* at the end of the heap space. */
uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
pxEnd = ( void * ) uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
* at the end of the heap space. */
uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
pxEnd = ( void * ) uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* To start with there is a single free block that is sized to take up the
* entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = ( void * ) pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* To start with there is a single free block that is sized to take up the
* entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = ( void * ) pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
}
/*-----------------------------------------------------------*/
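The alignment fix-up at the top of prvHeapInit is plain pointer arithmetic: round the heap start up to the next 8-byte boundary (secureportBYTE_ALIGNMENT is 8) and shrink the usable size by however many bytes were skipped. A small host-side sketch of the same arithmetic, with an assumed unaligned start address:

#include <stdint.h>
#include <stdio.h>

#define BYTE_ALIGNMENT        8
#define BYTE_ALIGNMENT_MASK   ( 0x0007 )

int main( void )
{
    /* Assume the linker placed the heap 3 bytes past an 8-byte boundary. */
    size_t uxAddress = 0x20001003;
    size_t xTotalHeapSize = 10 * 1024;

    if( ( uxAddress & BYTE_ALIGNMENT_MASK ) != 0 )
    {
        uxAddress += ( BYTE_ALIGNMENT - 1 );                 /* 0x2000100A. */
        uxAddress &= ~( ( size_t ) BYTE_ALIGNMENT_MASK );    /* 0x20001008. */
        xTotalHeapSize -= uxAddress - ( size_t ) 0x20001003; /* 5 bytes lost to alignment. */
    }

    printf( "aligned start = %#zx, usable bytes = %zu\n", uxAddress, xTotalHeapSize );
    return 0;
}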
@ -181,59 +181,59 @@ static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert )
BlockLink_t *pxIterator;
uint8_t *puc;
/* Iterate through the list until a block is found that has a higher address
* than the block being inserted. */
for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
{
/* Nothing to do here, just iterate to the right position. */
}
/* Iterate through the list until a block is found that has a higher address
* than the block being inserted. */
for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
{
/* Nothing to do here, just iterate to the right position. */
}
/* Do the block being inserted and the block it is being inserted after
 * form one contiguous block of memory? */
puc = ( uint8_t * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted and the block it is being inserted after
 * form one contiguous block of memory? */
puc = ( uint8_t * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted and the block it is being inserted before
 * form one contiguous block of memory? */
puc = ( uint8_t * ) pxBlockToInsert;
if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
{
if( pxIterator->pxNextFreeBlock != pxEnd )
{
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* Do the block being inserted and the block it is being inserted before
 * form one contiguous block of memory? */
puc = ( uint8_t * ) pxBlockToInsert;
if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
{
if( pxIterator->pxNextFreeBlock != pxEnd )
{
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gap, and so was merged with both the
 * block before and the block after, then its pxNextFreeBlock pointer will
 * have already been set, and should not be set here as that would make it
 * point to itself. */
if( pxIterator != pxBlockToInsert )
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* If the block being inserted plugged a gap, and so was merged with both the
 * block before and the block after, then its pxNextFreeBlock pointer will
 * have already been set, and should not be set here as that would make it
 * point to itself. */
if( pxIterator != pxBlockToInsert )
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/*-----------------------------------------------------------*/
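The insert routine keeps the free list sorted by address precisely so that the merge test stays cheap: two free blocks can be coalesced exactly when the first block's start address plus its size lands on the second block's start address. A minimal host-side illustration of that test (the toy arena and sizes are assumptions):

#include <stdint.h>
#include <stdio.h>

typedef struct A_BLOCK_LINK
{
    struct A_BLOCK_LINK *pxNextFreeBlock;
    size_t xBlockSize;
} BlockLink_t;

/* 8-byte aligned arena standing in for ucHeap. */
static uint64_t ullArena[ 32 ];

int main( void )
{
    uint8_t *pucArena = ( uint8_t * ) ullArena;
    BlockLink_t *pxFirst = ( BlockLink_t * ) &pucArena[ 0 ];
    BlockLink_t *pxSecond = ( BlockLink_t * ) &pucArena[ 64 ];

    pxFirst->xBlockSize = 64;   /* First free block spans bytes 0..63. */
    pxSecond->xBlockSize = 32;  /* Second free block starts exactly where the first ends. */

    /* Same contiguity test prvInsertBlockIntoFreeList performs before merging. */
    if( ( ( ( uint8_t * ) pxFirst ) + pxFirst->xBlockSize ) == ( uint8_t * ) pxSecond )
    {
        pxFirst->xBlockSize += pxSecond->xBlockSize; /* Coalesce into one 96-byte block. */
    }

    printf( "merged block size = %u bytes\n", ( unsigned ) pxFirst->xBlockSize );
    return 0;
}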
@ -242,144 +242,144 @@ void *pvPortMalloc( size_t xWantedSize )
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void *pvReturn = NULL;
/* If this is the first call to malloc then the heap will require
 * initialisation to set up the list of free blocks. */
if( pxEnd == NULL )
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* If this is the first call to malloc then the heap will require
 * initialisation to set up the list of free blocks. */
if( pxEnd == NULL )
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Check the requested block size is not so large that the top bit is set.
* The top bit of the block size member of the BlockLink_t structure is used
* to determine who owns the block - the application or the kernel, so it
* must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
{
/* The wanted size is increased so it can contain a BlockLink_t
* structure in addition to the requested amount of bytes. */
if( xWantedSize > 0 )
{
xWantedSize += xHeapStructSize;
/* Check the requested block size is not so large that the top bit is set.
* The top bit of the block size member of the BlockLink_t structure is used
* to determine who owns the block - the application or the kernel, so it
* must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
{
/* The wanted size is increased so it can contain a BlockLink_t
* structure in addition to the requested amount of bytes. */
if( xWantedSize > 0 )
{
xWantedSize += xHeapStructSize;
/* Ensure that blocks are always aligned to the required number of
* bytes. */
if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Ensure that blocks are always aligned to the required number of
* bytes. */
if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
* one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
* one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size was
* not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
* BlockLink_t structure at its start. */
pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
/* If the end marker was reached then a block of adequate size was
* not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
* BlockLink_t structure at its start. */
pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
/* This block is being returned for use so must be taken out
* of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* This block is being returned for use so must be taken out
* of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
* two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
* block following the number of bytes requested. The void
* cast is used to prevent byte alignment warnings from the
* compiler. */
pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
/* If the block is larger than required it can be split into
* two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
* block following the number of bytes requested. The void
* cast is used to prevent byte alignment warnings from the
* compiler. */
pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
/* Calculate the sizes of two blocks split from the single
* block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Calculate the sizes of two blocks split from the single
* block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
xFreeBytesRemaining -= pxBlock->xBlockSize;
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned by
* the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned by
* the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC( pvReturn, xWantedSize );
traceMALLOC( pvReturn, xWantedSize );
#if( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
#if( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
/*-----------------------------------------------------------*/
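Before searching the free list, pvPortMalloc grows the request by the size of the bookkeeping header and then rounds it up to the 8-byte alignment boundary; the top bit of the size word is reserved as the "allocated" flag and so must not be set by the request. The arithmetic reproduced standalone (the 8-byte header size is an assumption for a 32-bit build):

#include <stdint.h>
#include <stdio.h>

#define BYTE_ALIGNMENT        8
#define BYTE_ALIGNMENT_MASK   ( 0x0007 )
#define HEAP_STRUCT_SIZE      8   /* Assumed size of an aligned BlockLink_t on a 32-bit target. */

int main( void )
{
    size_t xWantedSize = 13;   /* Application asks for 13 bytes. */
    size_t xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * 8 ) - 1 );

    if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) /* Top (ownership) bit must be free. */
    {
        xWantedSize += HEAP_STRUCT_SIZE;            /* 13 + 8 = 21 bytes. */

        if( ( xWantedSize & BYTE_ALIGNMENT_MASK ) != 0 )
        {
            /* Round 21 up to the next multiple of 8, i.e. 24 bytes. */
            xWantedSize += ( BYTE_ALIGNMENT - ( xWantedSize & BYTE_ALIGNMENT_MASK ) );
        }
    }

    printf( "block size actually taken from the heap = %zu bytes\n", xWantedSize );
    return 0;
}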
@ -388,63 +388,63 @@ void vPortFree( void *pv )
uint8_t *puc = ( uint8_t * ) pv;
BlockLink_t *pxLink;
if( pv != NULL )
{
/* The memory being freed will have a BlockLink_t structure immediately
* before it. */
puc -= xHeapStructSize;
if( pv != NULL )
{
/* The memory being freed will have a BlockLink_t structure immediately
* before it. */
puc -= xHeapStructSize;
/* This casting is to keep the compiler from issuing warnings. */
pxLink = ( void * ) puc;
/* This casting is to keep the compiler from issuing warnings. */
pxLink = ( void * ) puc;
/* Check the block is actually allocated. */
secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
secureportASSERT( pxLink->pxNextFreeBlock == NULL );
/* Check the block is actually allocated. */
secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
secureportASSERT( pxLink->pxNextFreeBlock == NULL );
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
* allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
* allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
secureportDISABLE_NON_SECURE_INTERRUPTS();
{
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
secureportENABLE_NON_SECURE_INTERRUPTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
secureportDISABLE_NON_SECURE_INTERRUPTS();
{
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
secureportENABLE_NON_SECURE_INTERRUPTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
/*-----------------------------------------------------------*/
size_t xPortGetFreeHeapSize( void )
{
return xFreeBytesRemaining;
return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
size_t xPortGetMinimumEverFreeHeapSize( void )
{
return xMinimumEverFreeBytesRemaining;
return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
void vPortInitialiseBlocks( void )
{
/* This just exists to keep the linker quiet. */
/* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/
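Taken together, this file gives the secure side its own self-contained heap_4-style allocator, guarded against non-secure preemption while the free list is updated. A minimal usage sketch from secure-side code; the header name secure_heap.h and the buffer size are assumptions, and as with the non-secure heap a NULL return must be handled:

/* Sketch only: allocating and releasing a buffer on the secure-side heap. */
#include <stdint.h>
#include <string.h>
#include "secure_heap.h"   /* Assumed header declaring the secure pvPortMalloc()/vPortFree(). */

void vSketchUseSecureHeap( void )
{
    uint8_t *pucBuffer = pvPortMalloc( 128 );

    if( pucBuffer != NULL )
    {
        memset( pucBuffer, 0, 128 );
        /* ... use the buffer for secure-side state ... */
        vPortFree( pucBuffer );
    }
}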

View file

@ -37,69 +37,69 @@
/**
* @brief Constants required to manipulate the SCB.
*/
#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
/**
* @brief Constants required to manipulate the FPU.
*/
#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
#define secureinitFPCCR_LSPENS_POS ( 29UL )
#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
#define secureinitFPCCR_TS_POS ( 26UL )
#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
#define secureinitFPCCR_LSPENS_POS ( 29UL )
#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
#define secureinitFPCCR_TS_POS ( 26UL )
#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
#define secureinitNSACR_CP10_POS ( 10UL )
#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
#define secureinitNSACR_CP11_POS ( 11UL )
#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
#define secureinitNSACR_CP10_POS ( 10UL )
#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
#define secureinitNSACR_CP11_POS ( 11UL )
#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
{
uint32_t ulIPSR;
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
*( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
}
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
*( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
}
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
{
uint32_t ulIPSR;
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
* permitted. CP11 should be programmed to the same value as CP10. */
*( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
* permitted. CP11 should be programmed to the same value as CP10. */
*( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
/* LSPENS = 0 ==> LSPEN is writable from the non-secure state. This ensures
 * that we can enable/disable lazy stacking in the port.c file. */
*( secureinitFPCCR ) &= ~ ( secureinitFPCCR_LSPENS_MASK );
/* LSPENS = 0 ==> LSPEN is writable from the non-secure state. This ensures
 * that we can enable/disable lazy stacking in the port.c file. */
*( secureinitFPCCR ) &= ~ ( secureinitFPCCR_LSPENS_MASK );
/* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
* registers (S16-S31) are also pushed to stack on exception entry and
* restored on exception return. */
*( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
}
/* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
* registers (S16-S31) are also pushed to stack on exception entry and
* restored on exception return. */
*( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
}
}
/*-----------------------------------------------------------*/
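SecureInit_DePrioritizeNSExceptions performs a single read-modify-write of AIRCR: the VECTKEY field must carry 0x05FA for the store to take effect, and setting PRIS pushes all Non-secure exception priorities below Secure ones. The bit arithmetic on its own, runnable on a host (the initial register value is an assumption):

#include <stdint.h>
#include <stdio.h>

#define AIRCR_VECTKEY_POS   ( 16UL )
#define AIRCR_VECTKEY_MASK  ( 0xFFFFUL << AIRCR_VECTKEY_POS )
#define AIRCR_PRIS_POS      ( 14UL )
#define AIRCR_PRIS_MASK     ( 1UL << AIRCR_PRIS_POS )

int main( void )
{
    uint32_t ulAIRCR = 0xFA050200UL;   /* Assumed current register value. */

    ulAIRCR = ( ulAIRCR & ~( AIRCR_VECTKEY_MASK | AIRCR_PRIS_MASK ) ) |
              ( ( 0x05FAUL << AIRCR_VECTKEY_POS ) & AIRCR_VECTKEY_MASK ) |
              ( ( 0x1UL << AIRCR_PRIS_POS ) & AIRCR_PRIS_MASK );

    /* Prints 0x05fa4200: VECTKEY = 0x05FA, PRIS = 1, remaining bits preserved. */
    printf( "AIRCR write value = 0x%08x\n", ( unsigned ) ulAIRCR );
    return 0;
}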

View file

@ -31,103 +31,103 @@
/**
* @brief Byte alignment requirements.
*/
#define secureportBYTE_ALIGNMENT 8
#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
#define secureportBYTE_ALIGNMENT 8
#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
/**
* @brief Macro to declare a function as non-secure callable.
*/
#if defined( __IAR_SYSTEMS_ICC__ )
#define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry
#define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry
#else
#define secureportNON_SECURE_CALLABLE __attribute__((cmse_nonsecure_entry))
#define secureportNON_SECURE_CALLABLE __attribute__((cmse_nonsecure_entry))
#endif
/**
* @brief Set the secure PRIMASK value.
*/
#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Set the non-secure PRIMASK value.
*/
#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Read the PSP value in the given variable.
*/
#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
/**
* @brief Set the PSP to the given value.
*/
#define secureportSET_PSP( pucCurrentStackPointer ) \
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
/**
* @brief Set the PSPLIM to the given value.
*/
#define secureportSET_PSPLIM( pucStackLimit ) \
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
/**
* @brief Set the NonSecure MSP to the given value.
*/
#define secureportSET_MSP_NS( pucMainStackPointer ) \
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
/**
* @brief Set the CONTROL register to the given value.
*/
#define secureportSET_CONTROL( ulControl ) \
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
/**
* @brief Read the Interrupt Program Status Register (IPSR) value in the given
* variable.
*/
#define secureportREAD_IPSR( ulIPSR ) \
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
/**
* @brief PRIMASK value to enable interrupts.
*/
#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
/**
* @brief PRIMASK value to disable interrupts.
*/
#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
/**
* @brief Disable secure interrupts.
*/
#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Disable non-secure interrupts.
*
* This effectively disables context switches.
*/
#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Enable non-secure interrupts.
*/
#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
/**
* @brief Assert definition.
*/
#define secureportASSERT( x ) \
if( ( x ) == 0 ) \
{ \
secureportDISABLE_SECURE_INTERRUPTS(); \
secureportDISABLE_NON_SECURE_INTERRUPTS(); \
for( ;; ); \
}
#define secureportASSERT( x ) \
if( ( x ) == 0 ) \
{ \
secureportDISABLE_SECURE_INTERRUPTS(); \
secureportDISABLE_NON_SECURE_INTERRUPTS(); \
for( ;; ); \
}
#endif /* __SECURE_PORT_MACROS_H__ */
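secureportNON_SECURE_CALLABLE resolves to the compiler-specific CMSE attribute, so a secure function that must be reachable from the non-secure world is simply tagged with it and built with CMSE support enabled (for example -mcmse on GCC/Clang); the toolchain then emits the secure gateway veneer. A hedged sketch of a user-defined entry point; the function itself is hypothetical and only the macros from this header (secure_port_macros.h, per the include guard) are assumed:

/* Hypothetical non-secure callable service. */
#include <stdint.h>
#include "secure_port_macros.h"

secureportNON_SECURE_CALLABLE uint32_t ulSecureGetCounter( void )
{
    static uint32_t ulCounter = 0;
    uint32_t ulValue;

    /* Mask non-secure interrupts so a non-secure context switch cannot occur
     * while the shared counter is being updated. */
    secureportDISABLE_NON_SECURE_INTERRUPTS();
    {
        ulCounter++;
        ulValue = ulCounter;
    }
    secureportENABLE_NON_SECURE_INTERRUPTS();

    return ulValue;
}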

File diff suppressed because it is too large Load diff

View file

@ -33,253 +33,253 @@
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst2 \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" msr control, r2 \n" /* Set this task's CONTROL value. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const2: .word 0xe000edc0 \n"
"xRNRConst2: .word 0xe000ed98 \n"
"xRBARConst2: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst2 \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" msr control, r2 \n" /* Set this task's CONTROL value. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
#if( configENABLE_MPU == 1 )
"xMAIR0Const2: .word 0xe000edc0 \n"
"xRNRConst2: .word 0xe000ed98 \n"
"xRBARConst2: .word 0xe000ed9c \n"
#endif /* configENABLE_MPU */
);
}
/*-----------------------------------------------------------*/
BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
);
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
__asm volatile
(
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
:::"r0", "memory"
);
__asm volatile
(
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
:::"r0", "memory"
);
}
/*-----------------------------------------------------------*/
void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
"xVTORConst: .word 0xe000ed08 \n"
:: "i" ( portSVC_START_SCHEDULER ) : "memory"
);
__asm volatile
(
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
"xVTORConst: .word 0xe000ed08 \n"
:: "i" ( portSVC_START_SCHEDULER ) : "memory"
);
}
/*-----------------------------------------------------------*/
uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r0, PRIMASK \n"
" cpsid i \n"
" bx lr \n"
::: "memory"
);
__asm volatile
(
" mrs r0, PRIMASK \n"
" cpsid i \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* To avoid compiler warnings. The return statement will never be reached,
* but some compilers warn if it is not included, while others won't compile
* if it is. */
return 0;
/* To avoid compiler warnings. The return statement will never be reached,
* but some compilers warn if it is not included, while others won't compile
* if it is. */
return 0;
#endif
}
/*-----------------------------------------------------------*/
void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" msr PRIMASK, r0 \n"
" bx lr \n"
::: "memory"
);
__asm volatile
(
" msr PRIMASK, r0 \n"
" bx lr \n"
::: "memory"
);
#if !defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
/* Just to avoid compiler warning. ulMask is used from the asm code but
* the compiler can't see that. Some compilers generate warnings without
* the following line, while others generate warnings if the line is
* included. */
( void ) ulMask;
/* Just to avoid compiler warning. ulMask is used from the asm code but
* the compiler can't see that. Some compilers generate warnings without
* the following line, while others generate warnings if the line is
* included. */
( void ) ulMask;
#endif
}
/*-----------------------------------------------------------*/
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n" /* Read PSP in r0. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n" /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mrs r2, control \n" /* r2 = CONTROL. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r1-r11} \n" /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldmia r0!, {r1-r11} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_FPU == 1 )
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n" /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
" \n"
#if( configENABLE_MPU == 1 )
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" msr control, r2 \n" /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xMAIR0Const: .word 0xe000edc0 \n"
"xRNRConst: .word 0xe000ed98 \n"
"xRBARConst: .word 0xe000ed9c \n"
);
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n" /* Read PSP in r0. */
#if( configENABLE_FPU == 1 )
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n" /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if( configENABLE_MPU == 1 )
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mrs r2, control \n" /* r2 = CONTROL. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r1-r11} \n" /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
#if( configENABLE_MPU == 1 )
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
" ldr r3, [r1] \n" /* r3 = *r1 i.e. r3 = MAIR0. */
" ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */
" str r3, [r2] \n" /* Program MAIR0. */
" ldr r2, xRNRConst \n" /* r2 = 0xe000ed98 [Location of RNR]. */
" movs r3, #4 \n" /* r3 = 4. */
" str r3, [r2] \n" /* Program RNR = 4. */
" adds r1, #4 \n" /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
" ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */
" ldmia r1!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */
" stmia r2!, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_MPU == 1 )
" ldmia r0!, {r1-r11} \n" /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
" \n"
#if( configENABLE_FPU == 1 )
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n" /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
" \n"
#if( configENABLE_MPU == 1 )
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" msr control, r2 \n" /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xMAIR0Const: .word 0xe000edc0 \n"
"xRNRConst: .word 0xe000ed98 \n"
"xRBARConst: .word 0xe000ed9c \n"
);
}
/*-----------------------------------------------------------*/
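PendSV_Handler stores the software-stacked part of the context immediately below the hardware-stacked exception frame, so the value written into the TCB points at PSPLIM first. The struct below is purely illustrative (the port never declares such a type) and shows the layout for the configENABLE_MPU == 1 case with the FPU context not in use; when the FPU context is active, s16-s31 and the extended hardware frame sit between r4-r11 and the basic frame:

#include <stdint.h>

/* Illustrative only: memory at the saved top-of-stack pointer after
 * PendSV_Handler has run, lowest address first. */
typedef struct
{
    uint32_t ulPSPLIM;        /* Stacked by software: stmdb r0!, {r1-r11}. */
    uint32_t ulCONTROL;
    uint32_t ulEXC_RETURN;    /* LR value used to return from the exception. */
    uint32_t ulR4ToR11[ 8 ];  /* Callee-saved registers r4-r11. */
    uint32_t ulR0ToR3[ 4 ];   /* Stacked by hardware on exception entry. */
    uint32_t ulR12;
    uint32_t ulLR;
    uint32_t ulPC;
    uint32_t ulXPSR;
} SketchTaskFrame_t;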
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, svchandler_address_const \n"
" bx r1 \n"
" \n"
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
__asm volatile
(
" tst lr, #4 \n"
" ite eq \n"
" mrseq r0, msp \n"
" mrsne r0, psp \n"
" ldr r1, svchandler_address_const \n"
" bx r1 \n"
" \n"
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
/*-----------------------------------------------------------*/

View file

@ -42,46 +42,59 @@ extern "C" {
*------------------------------------------------------------------------------
*/
#ifndef configENABLE_FPU
#error configENABLE_FPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_FPU */
#ifndef configENABLE_MPU
#error configENABLE_MPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_MPU */
#ifndef configENABLE_TRUSTZONE
#error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP()
#define portINLINE __inline
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#endif
#define portHAS_STACK_OVERFLOW_CHECKING 1
#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
@ -96,13 +109,13 @@ extern uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PR
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
#if( configENABLE_TRUSTZONE == 1 )
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
#endif /* configENABLE_TRUSTZONE */
#if( configENABLE_MPU == 1 )
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -110,58 +123,58 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
* @brief MPU specific constants.
*/
#if( configENABLE_MPU == 1 )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#else
#define portPRIVILEGE_BIT ( 0x0UL )
#define portPRIVILEGE_BIT ( 0x0UL )
#endif /* configENABLE_MPU */
/* MPU regions. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Devices Region. */
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
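MAIR0 holds four 8-bit attribute encodings (one per attribute index), and RBAR packs a region's base address with the shareability, access-permission and execute-never fields defined above. A hedged sketch of how a region could be described with these macros; the base/limit addresses and the choice of attribute index are assumptions rather than the port's policy, and the RLAR attribute-index/enable bit positions are taken from the ARMv8-M architecture, not from this header:

#include "FreeRTOS.h"   /* Pulls in portmacro.h and the macros used below. */

#define SKETCH_REGION_BASE   ( 0x20010000UL )   /* Assumed SRAM region base. */
#define SKETCH_REGION_LIMIT  ( 0x20017FFFUL )   /* Assumed last address of the region. */

void vSketchDescribeRegion( uint32_t *pulRBAR, uint32_t *pulRLAR, uint32_t *pulMAIR0 )
{
    /* Attribute index 0 = normal write-back memory, index 1 = Device-nGnRE. */
    *pulMAIR0 = ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << 0 ) |
                ( portMPU_DEVICE_MEMORY_nGnRE << 8 );

    /* RBAR: base address (32-byte granularity) plus shareability/AP/XN fields. */
    *pulRBAR = ( SKETCH_REGION_BASE & 0xFFFFFFE0UL ) |
               portMPU_REGION_NON_SHAREABLE |
               portMPU_REGION_READ_WRITE |
               portMPU_REGION_EXECUTE_NEVER;

    /* RLAR: region limit plus attribute index 0 and the region enable bit. */
    *pulRLAR = ( SKETCH_REGION_LIMIT & 0xFFFFFFE0UL ) | ( 0UL << 1UL ) | 1UL;
}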
/**
@ -169,8 +182,8 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
*/
typedef struct MPURegionSettings
{
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
/**
@ -178,99 +191,99 @@ typedef struct MPURegionSettings
*/
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
} xMPU_SETTINGS;
/*-----------------------------------------------------------*/
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
#if( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
* if it has one.
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
#define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
#else
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
#define portCLEAN_UP_TCB( pxTCB )
#endif /* configENABLE_TRUSTZONE */
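As a hedged illustration of the comment above, a non-secure task might allocate its secure context once, before its first secure call; the task body, the secure stack size and the non-secure callable function are invented for this sketch:

#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical non-secure callable function exported by the secure side. */
extern uint32_t ulExampleSecureFunction( uint32_t ulParam );

static void prvExampleSecureCallingTask( void *pvParameters )
{
    ( void ) pvParameters;

    /* Allocate a 256-byte secure stack before the first secure call; without
     * this the secure side has no context to run the call on. */
    portALLOCATE_SECURE_CONTEXT( 256 );

    for( ;; )
    {
        ( void ) ulExampleSecureFunction( 42 );
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}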
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
#define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
*
* The SVC handler checks that the SVC was raised from a system call and only
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
#define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" :: "i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
#define portRESET_PRIVILEGE() vResetPrivilege()
#else
#define portIS_PRIVILEGED()
#define portRAISE_PRIVILEGE()
#define portRESET_PRIVILEGE()
#endif /* configENABLE_MPU */
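A hedged sketch of how the three MPU macros above are typically paired inside a privileged API wrapper; the wrapper name and body are illustrative, not taken from this port:

#include "FreeRTOS.h"

void vExamplePrivilegedWrapper( void )
{
    BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

    if( xRunningPrivileged == pdFALSE )
    {
        portRAISE_PRIVILEGE();
    }

    /* ... work that needs privileged access goes here ... */

    if( xRunningPrivileged == pdFALSE )
    {
        portRESET_PRIVILEGE();
    }
}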
/*-----------------------------------------------------------*/

File diff suppressed because it is too large

View file

@ -25,278 +25,278 @@
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN xSecureContext
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN SecureContext_SaveContext
EXTERN SecureContext_LoadContext
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vPortAllocateSecureContext
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
PUBLIC vPortFreeSecureContext
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the condition flags. */
ite ne
movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
orr r0, r0, #1 /* r0 = r0 | 1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r3!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */
ldr r5, =xSecureContext
str r1, [r5] /* Set xSecureContext to this task's value for the same. */
msr psplim, r2 /* Set this task's PSPLIM value. */
msr control, r3 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r4 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
ldr r4, =xSecureContext
str r1, [r4] /* Set xSecureContext to this task's value for the same. */
msr psplim, r2 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
bic r0, r0, #1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
PendSV_Handler:
mrs r1, psp /* Read PSP in r1. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
ldr r0, [r2] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
cbz r0, save_ns_context /* No secure context to save. */
push {r0-r2, r14}
bl SecureContext_SaveContext
pop {r0-r3} /* LR is now in r3. */
mov lr, r3 /* LR = r3. */
lsls r2, r3, #25 /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl save_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r2, [r3] /* Read pxCurrentTCB. */
#if ( configENABLE_MPU == 1 )
subs r1, r1, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
mrs r3, control /* r3 = CONTROL. */
mov r4, lr /* r4 = LR/EXC_RETURN. */
stmia r1!, {r0, r2-r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
subs r1, r1, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmia r1!, {r0, r2-r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
b select_next_task
save_ns_context:
ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r2, [r3] /* Read pxCurrentTCB. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vstmdbeq r1!, {s16-s31} /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
subs r1, r1, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
adds r1, r1, #16 /* r1 = r1 + 16. */
stm r1, {r4-r11} /* Store the registers that are not saved automatically. */
mrs r2, psplim /* r2 = PSPLIM. */
mrs r3, control /* r3 = CONTROL. */
mov r4, lr /* r4 = LR/EXC_RETURN. */
subs r1, r1, #16 /* r1 = r1 - 16. */
stm r1, {r0, r2-r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */
#else /* configENABLE_MPU */
subs r1, r1, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
str r1, [r2] /* Save the new top of stack in TCB. */
adds r1, r1, #12 /* r1 = r1 + 12. */
stm r1, {r4-r11} /* Store the registers that are not saved automatically. */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
subs r1, r1, #12 /* r1 = r1 - 12. */
stmia r1!, {r0, r2-r3} /* Store xSecureContext, PSPLIM and LR on the stack. */
#endif /* configENABLE_MPU */
select_next_task:
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r3, [r2] /* Read pxCurrentTCB. */
ldr r1, [r3] /* The first item in pxCurrentTCB is the task top of stack. r1 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */
ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r4, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r4, #4 /* r4 = 4. */
str r4, [r2] /* Program RNR = 4. */
adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r3!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldmia r1!, {r0, r2-r4} /* Read from stack - r0 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = LR. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
msr control, r3 /* Restore the CONTROL register value for the task. */
mov lr, r4 /* LR = r4. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
str r0, [r2] /* Restore the task's xSecureContext. */
cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
push {r1,r4}
bl SecureContext_LoadContext /* Restore the secure context. */
pop {r1,r4}
mov lr, r4 /* LR = r4. */
lsls r2, r4, #25 /* r2 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl restore_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
#else /* configENABLE_MPU */
ldmia r1!, {r0, r2-r3} /* Read from stack - r0 = xSecureContext, r2 = PSPLIM and r3 = LR. */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
mov lr, r3 /* LR = r3. */
ldr r2, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */
str r0, [r2] /* Restore the task's xSecureContext. */
cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */
push {r1,r3}
bl SecureContext_LoadContext /* Restore the secure context. */
pop {r1,r3}
mov lr, r3 /* LR = r3. */
lsls r2, r3, #25 /* r2 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
bpl restore_ns_context /* bpl - branch if positive or zero. If r2 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
#endif /* configENABLE_MPU */
restore_ns_context:
ldmia r1!, {r4-r11} /* Restore the registers that are not automatically restored. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vldmiaeq r1!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
msr psp, r1 /* Remember the new top of stack for the task. */
bx lr
/*-----------------------------------------------------------*/
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
vPortFreeSecureContext:
/* r0 = uint32_t *pulTCB. */
ldr r1, [r0] /* The first item in the TCB is the top of the stack. */
ldr r0, [r1] /* The first item on the stack is the task's xSecureContext. */
cmp r0, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
END
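The lsls/bpl pairs in PendSV_Handler above test EXC_RETURN Bit[6] by shifting it into the sign bit. A small host-side illustration of that arithmetic, with invented values chosen only so that Bit[6] is set in one and clear in the other:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    uint32_t ulWithBit6    = 0x00000040UL; /* Illustrative value, Bit[6] set. */
    uint32_t ulWithoutBit6 = 0x00000000UL; /* Illustrative value, Bit[6] clear. */

    /* Shifting left by 25 moves Bit[6] into Bit[31], so the top bit (the sign
     * bit tested by bpl) is 1 exactly when Bit[6] was set. */
    printf( "%u\n", ( unsigned ) ( ( ulWithBit6 << 25 ) >> 31 ) );    /* Prints 1. */
    printf( "%u\n", ( unsigned ) ( ( ulWithoutBit6 << 25 ) >> 31 ) ); /* Prints 0. */

    return 0;
}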

View file

@ -42,46 +42,59 @@ extern "C" {
*------------------------------------------------------------------------------
*/
#ifndef configENABLE_FPU
#error configENABLE_FPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_FPU */
#ifndef configENABLE_MPU
#error configENABLE_MPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_MPU */
#ifndef configENABLE_TRUSTZONE
#error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP()
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#endif
#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
@ -96,13 +109,13 @@ extern uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PR
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
#if( configENABLE_TRUSTZONE == 1 )
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
#endif /* configENABLE_TRUSTZONE */
#if( configENABLE_MPU == 1 )
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -110,58 +123,58 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
* @brief MPU specific constants.
*/
#if( configENABLE_MPU == 1 )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#else
#define portPRIVILEGE_BIT ( 0x0UL )
#endif /* configENABLE_MPU */
/* MPU regions. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
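A quick compile-time check of the arithmetic above (illustrative only; assumes a C11 translation unit that includes this header):

_Static_assert( portNUM_CONFIGURABLE_REGIONS == 3, "( 7 - 5 ) + 1 configurable regions" );
_Static_assert( portTOTAL_NUM_REGIONS == 4, "configurable regions plus the stack region" );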
/* Devices Region. */
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
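MAIR0 packs four of these 8-bit attribute encodings, one per attribute index (Attr0 in bits 7:0, Attr1 in bits 15:8, and so on). A hedged sketch of composing a task's ulMAIR0 from the constants above; the index assignment here is only an example, not this port's actual policy:

#include <stdint.h>

/* Example only: Attr0 = normal write-back memory, Attr1 = Device-nGnRE. */
uint32_t ulExampleMAIR0 = ( ( uint32_t ) portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << 0 ) |
                          ( ( uint32_t ) portMPU_DEVICE_MEMORY_nGnRE << 8 );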
/* Attributes used in MPU_RBAR registers. */
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
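These fields occupy the low bits of MPU_RBAR next to the region base address. A hedged example of packing one unprivileged, read-only, execute-never region descriptor; the base address is invented and would need to be 32-byte aligned on real hardware:

#include <stdint.h>

uint32_t ulExampleRBAR = 0x20010000UL                 /* Illustrative region base address. */
                         | portMPU_REGION_NON_SHAREABLE
                         | portMPU_REGION_READ_ONLY
                         | portMPU_REGION_EXECUTE_NEVER;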
/*-----------------------------------------------------------*/
/**
@ -169,8 +182,8 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
*/
typedef struct MPURegionSettings
{
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
/**
@ -178,99 +191,99 @@ typedef struct MPURegionSettings
*/
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
} xMPU_SETTINGS;
/*-----------------------------------------------------------*/
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
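A short, hedged example of the intended use of portYIELD_FROM_ISR(); the interrupt handler and semaphore names are hypothetical:

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xExampleSemaphore; /* Assumed to be created elsewhere. */

void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Request a context switch only if giving the semaphore unblocked a
     * higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}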
/*-----------------------------------------------------------*/
/**
* @brief Critical section management.
*/
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
#if( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
* if it has one.
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
#define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
#else
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
#define portCLEAN_UP_TCB( pxTCB )
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
#define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
*
* The SVC handler checks that the SVC was raised from a system call and only
* then it raises the privilege. If this is called from any other place,
* the privilege is not raised.
*/
#define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" :: "i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
#define portRESET_PRIVILEGE() vResetPrivilege()
#else
#define portIS_PRIVILEGED()
#define portRAISE_PRIVILEGE()
#define portRESET_PRIVILEGE()
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/

View file

@ -40,7 +40,7 @@
* Bit[0] - 0 --> Thread mode is privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_PRIVILEGED 0x02
/**
* @brief CONTROL value for un-privileged tasks.
@ -48,7 +48,7 @@
* Bit[0] - 1 --> Thread mode is un-privileged.
* Bit[1] - 1 --> Thread mode uses PSP.
*/
#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03
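As the comments above describe, the two values differ only in CONTROL.nPRIV (bit 0); both keep CONTROL.SPSEL (bit 1) set so thread mode runs on the PSP. A compile-time restatement of that, for illustration only:

_Static_assert( ( securecontextCONTROL_VALUE_UNPRIVILEGED & ~securecontextCONTROL_VALUE_PRIVILEGED ) == 0x01,
                "the two CONTROL values differ only in the nPRIV bit" );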
/*-----------------------------------------------------------*/
/**
@ -59,146 +59,146 @@
*/
typedef struct SecureContext
{
uint8_t *pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
uint8_t *pucStackLimit; /**< Last location of the stack memory (PSPLIM). */
uint8_t *pucStackStart; /**< First location of the stack memory. */
} SecureContext_t;
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_Init( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* No stack for thread mode until a task's context is loaded. */
secureportSET_PSPLIM( securecontextNO_STACK );
secureportSET_PSP( securecontextNO_STACK );
#if( configENABLE_MPU == 1 )
{
/* Configure thread mode to use PSP and to be unprivileged. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED );
}
#else /* configENABLE_MPU */
{
/* Configure thread mode to use PSP and to be privileged. */
secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED );
}
#endif /* configENABLE_MPU */
}
}
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged )
#else /* configENABLE_MPU */
secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize )
#endif /* configENABLE_MPU */
{
uint8_t *pucStackMemory = NULL;
uint32_t ulIPSR;
SecureContextHandle_t xSecureContextHandle = NULL;
#if( configENABLE_MPU == 1 )
uint32_t *pulCurrentStackPointer = NULL;
#endif /* configENABLE_MPU */
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Allocate the context structure. */
xSecureContextHandle = ( SecureContextHandle_t ) pvPortMalloc( sizeof( SecureContext_t ) );
if( xSecureContextHandle != NULL )
{
/* Allocate the stack space. */
pucStackMemory = pvPortMalloc( ulSecureStackSize );
if( pucStackMemory != NULL )
{
/* Since stack grows down, the starting point will be the last
* location. Note that this location is next to the last
* allocated byte because the hardware decrements the stack
* pointer before writing i.e. if stack pointer is 0x2, a push
* operation will decrement the stack pointer to 0x1 and then
* write at 0x1. */
xSecureContextHandle->pucStackStart = pucStackMemory + ulSecureStackSize;
/* The stack cannot go beyond this location. This value is
* programmed in the PSPLIM register on context switch.*/
xSecureContextHandle->pucStackLimit = pucStackMemory;
#if( configENABLE_MPU == 1 )
{
/* Store the correct CONTROL value for the task on the stack.
* This value is programmed in the CONTROL register on
* context switch. */
pulCurrentStackPointer = ( uint32_t * ) xSecureContextHandle->pucStackStart;
pulCurrentStackPointer--;
if( ulIsTaskPrivileged )
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
}
else
{
*( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
}
/* Store the current stack pointer. This value is programmed in
* the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
}
#else /* configENABLE_MPU */
{
/* Current SP is set to the start of the stack. This value is
* programmed in the PSP register on context switch. */
xSecureContextHandle->pucCurrentStackPointer = xSecureContextHandle->pucStackStart;
}
#endif /* configENABLE_MPU */
}
else
{
/* Free the context to avoid memory leak and make sure to return
* NULL to indicate failure. */
vPortFree( xSecureContextHandle );
xSecureContextHandle = NULL;
}
}
}
return xSecureContextHandle;
}
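To make the pointer bookkeeping above concrete, a hedged sketch of the layout produced for a hypothetical 256-byte allocation whose stack buffer happens to start at address 0x30001000 (the addresses are invented purely for illustration):

/*  pucStackLimit          = 0x30001000   lowest address, later programmed into PSPLIM.
 *  pucStackStart          = 0x30001100   one past the highest address; the stack grows down.
 *  pucCurrentStackPointer = 0x30001100   when configENABLE_MPU is 0, or
 *                           0x300010FC   when configENABLE_MPU is 1, after the 4-byte
 *                                        CONTROL word has been placed on the stack.
 */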
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* Ensure that valid parameters are passed. */
secureportASSERT( xSecureContextHandle != NULL );
/* Free the stack space. */
vPortFree( xSecureContextHandle->pucStackLimit );
/* Free the context itself. */
vPortFree( xSecureContextHandle );
}
}
/*-----------------------------------------------------------*/

View file

@ -37,13 +37,13 @@
/**
* @brief PSP value when no task's context is loaded.
*/
#define securecontextNO_STACK 0x0
/**
* @brief Opaque handle.
*/
struct SecureContext;
typedef struct SecureContext* SecureContextHandle_t;
/*-----------------------------------------------------------*/
/**
@ -70,9 +70,9 @@ void SecureContext_Init( void );
* otherwise.
*/
#if( configENABLE_MPU == 1 )
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, uint32_t ulIsTaskPrivileged );
#else /* configENABLE_MPU */
SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize );
#endif /* configENABLE_MPU */
/**

View file

@ -37,12 +37,12 @@ extern void SecureContext_SaveContextAsm( SecureContextHandle_t xSecureContextHa
secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle )
{
SecureContext_LoadContextAsm( xSecureContextHandle );
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle )
{
SecureContext_SaveContextAsm( xSecureContextHandle );
}
/*-----------------------------------------------------------*/

View file

@ -25,49 +25,49 @@
* 1 tab == 4 spaces!
*/
SECTION .text:CODE:NOROOT(2)
THUMB
PUBLIC SecureContext_LoadContextAsm
PUBLIC SecureContext_SaveContextAsm
/*-----------------------------------------------------------*/
SecureContext_LoadContextAsm:
/* xSecureContextHandle value is in r0. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */
ldmia r0!, {r1, r2} /* r1 = xSecureContextHandle->pucCurrentStackPointer, r2 = xSecureContextHandle->pucStackLimit. */
#if ( configENABLE_MPU == 1 )
ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */
msr control, r3 /* CONTROL = r3. */
#endif /* configENABLE_MPU */
msr psplim, r2 /* PSPLIM = r2. */
msr psp, r1 /* PSP = r1. */
load_ctx_therad_mode:
bx lr
/*-----------------------------------------------------------*/
SecureContext_SaveContextAsm:
/* xSecureContextHandle value is in r0. */
mrs r1, ipsr /* r1 = IPSR. */
cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */
mrs r1, psp /* r1 = PSP. */
#if ( configENABLE_FPU == 1 )
vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */
vldmia r1!, {s0} /* Nullify the effect of the previous statement. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
mrs r2, control /* r2 = CONTROL. */
stmdb r1!, {r2} /* Store CONTROL value on the stack. */
#endif /* configENABLE_MPU */
str r1, [r0] /* Save the top of stack in context. xSecureContextHandle->pucCurrentStackPointer = r1. */
movs r1, #0 /* r1 = securecontextNO_STACK. */
msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */
msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */
save_ctx_therad_mode:
bx lr
/*-----------------------------------------------------------*/
END

View file

@ -37,37 +37,37 @@
/**
* @brief Total heap size.
*/
#define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) )
/* No test marker by default. */
#ifndef mtCOVERAGE_TEST_MARKER
#define mtCOVERAGE_TEST_MARKER()
#endif
/* No tracing by default. */
#ifndef traceMALLOC
#define traceMALLOC( pvReturn, xWantedSize )
#endif
/* No tracing by default. */
#ifndef traceFREE
#define traceFREE( pv, xBlockSize )
#endif
/* Block sizes must not get too small. */
#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
/* Assumes 8bit bytes! */
#define secureheapBITS_PER_BYTE ( ( size_t ) 8 )
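As a worked illustration of how these two constants are used later in this file (assuming a 32-bit size_t, which is only an assumption here):

#include <stddef.h>

/* Top-bit marker used to tag allocated blocks: 1 << ( ( 4 * 8 ) - 1 ) = 0x80000000. */
size_t xExampleAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );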
/*-----------------------------------------------------------*/
/* Allocate the memory for the heap. */
#if( configAPPLICATION_ALLOCATED_HEAP == 1 )
/* The application writer has already defined the array used for the RTOS
* heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#else /* configAPPLICATION_ALLOCATED_HEAP */
static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
#endif /* configAPPLICATION_ALLOCATED_HEAP */
/**
@ -77,8 +77,8 @@
*/
typedef struct A_BLOCK_LINK
{
struct A_BLOCK_LINK *pxNextFreeBlock; /**< The next free block in the list. */
size_t xBlockSize; /**< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
@ -135,44 +135,44 @@ uint8_t *pucAlignedHeap;
size_t uxAddress;
size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = ( size_t ) ucHeap;
if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
{
uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
}
pucAlignedHeap = ( uint8_t * ) uxAddress;
/* xStart is used to hold a pointer to the first item in the list of free
* blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
xStart.xBlockSize = ( size_t ) 0;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
* at the end of the heap space. */
uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
pxEnd = ( void * ) uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* To start with there is a single free block that is sized to take up the
* entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = ( void * ) pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
}
/*-----------------------------------------------------------*/
@@ -181,59 +181,59 @@ static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert )
BlockLink_t *pxIterator;
uint8_t *puc;
/* Iterate through the list until a block is found that has a higher address
* than the block being inserted. */
for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
{
/* Nothing to do here, just iterate to the right position. */
}
/* Do the block being inserted, and the block it is being inserted after
* make a contiguous block of memory? */
puc = ( uint8_t * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted, and the block it is being inserted before
* make a contiguous block of memory? */
puc = ( uint8_t * ) pxBlockToInsert;
if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
{
if( pxIterator->pxNextFreeBlock != pxEnd )
{
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gap, and so was merged with both the
* block before and the block after, then its pxNextFreeBlock pointer will have
* already been set, and should not be set here as that would make it point
* to itself. */
if( pxIterator != pxBlockToInsert )
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/*-----------------------------------------------------------*/
@@ -242,144 +242,144 @@ void *pvPortMalloc( size_t xWantedSize )
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void *pvReturn = NULL;
/* If this is the first call to malloc then the heap will require
* initialisation to setup the list of free blocks. */
if( pxEnd == NULL )
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Check the requested block size is not so large that the top bit is set.
* The top bit of the block size member of the BlockLink_t structure is used
* to determine who owns the block - the application or the kernel, so it
* must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
{
/* The wanted size is increased so it can contain a BlockLink_t
* structure in addition to the requested amount of bytes. */
if( xWantedSize > 0 )
{
xWantedSize += xHeapStructSize;
/* Ensure that blocks are always aligned to the required number of
* bytes. */
if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
* one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size was
* not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
* BlockLink_t structure at its start. */
pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
/* This block is being returned for use so must be taken out
* of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
* two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
* block following the number of bytes requested. The void
* cast is used to prevent byte alignment warnings from the
* compiler. */
pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
/* Calculate the sizes of two blocks split from the single
* block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned by
* the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC( pvReturn, xWantedSize );
#if( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
/*-----------------------------------------------------------*/
@@ -388,63 +388,63 @@ void vPortFree( void *pv )
uint8_t *puc = ( uint8_t * ) pv;
BlockLink_t *pxLink;
if( pv != NULL )
{
/* The memory being freed will have a BlockLink_t structure immediately
* before it. */
puc -= xHeapStructSize;
/* This casting is to keep the compiler from issuing warnings. */
pxLink = ( void * ) puc;
/* Check the block is actually allocated. */
secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
secureportASSERT( pxLink->pxNextFreeBlock == NULL );
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
* allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
secureportDISABLE_NON_SECURE_INTERRUPTS();
{
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
secureportENABLE_NON_SECURE_INTERRUPTS();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
/*-----------------------------------------------------------*/
size_t xPortGetFreeHeapSize( void )
{
return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
size_t xPortGetMinimumEverFreeHeapSize( void )
{
return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
void vPortInitialiseBlocks( void )
{
/* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/
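Editor's note: the short sketch below is not part of the port; it only illustrates how secure-side code could exercise the allocator and the two query functions above. The buffer size is arbitrary and the allocator prototypes (declared in the accompanying secure heap header) are assumed to be in scope.

void vSecureHeapUsageSketch( void )
{
uint8_t *pucBuffer;

/* Allocate 128 bytes from the secure-side heap (size chosen arbitrarily). */
pucBuffer = ( uint8_t * ) pvPortMalloc( 128 );

if( pucBuffer != NULL )
{
/* ...use the buffer... */

/* Return the block; prvInsertBlockIntoFreeList() coalesces it with any
* adjacent free blocks. */
vPortFree( pucBuffer );
}

/* Both counters are maintained by pvPortMalloc()/vPortFree(). */
( void ) xPortGetFreeHeapSize();
( void ) xPortGetMinimumEverFreeHeapSize();
}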


@@ -37,69 +37,69 @@
/**
* @brief Constants required to manipulate the SCB.
*/
#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */
#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL )
#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
#define secureinitSCB_AIRCR_PRIS_POS ( 14UL )
#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
/**
* @brief Constants required to manipulate the FPU.
*/
#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */
#define secureinitFPCCR_LSPENS_POS ( 29UL )
#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS )
#define secureinitFPCCR_TS_POS ( 26UL )
#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS )
#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */
#define secureinitNSACR_CP10_POS ( 10UL )
#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS )
#define secureinitNSACR_CP11_POS ( 11UL )
#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS )
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
*( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
}
}
/*-----------------------------------------------------------*/
secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
{
uint32_t ulIPSR;
/* Read the Interrupt Program Status Register (IPSR) value. */
secureportREAD_IPSR( ulIPSR );
/* Do nothing if the processor is running in the Thread Mode. IPSR is zero
* when the processor is running in the Thread Mode. */
if( ulIPSR != 0 )
{
/* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
* permitted. CP11 should be programmed to the same value as CP10. */
*( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
/* LSPENS = 0 ==> LSPEN is writable from the non-secure state. This ensures
* that we can enable/disable lazy stacking in port.c file. */
*( secureinitFPCCR ) &= ~ ( secureinitFPCCR_LSPENS_MASK );
/* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP
* registers (S16-S31) are also pushed to stack on exception entry and
* restored on exception return. */
*( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK );
}
}
/*-----------------------------------------------------------*/
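Editor's note: a hedged sketch of the intended call site for the two entry functions above. Both read the IPSR and only act in handler mode, so the expectation is that they are invoked from a non-secure exception handler while the scheduler is being started; the wrapper name below is an assumption, not part of the port, and the prototypes are assumed to come from the secure-side veneer header.

/* Non-secure side, running in handler mode (IPSR != 0), for example from the
 * SVC that starts the scheduler. */
static void prvConfigureSecureSideSketch( void )
{
/* De-prioritise non-secure exceptions relative to secure ones
* (programs AIRCR.PRIS). */
SecureInit_DePrioritizeNSExceptions();

/* Allow the non-secure side to use the FPU and treat the callee-saved
* FP registers as part of the secure context (NSACR and FPCCR). */
SecureInit_EnableNSFPUAccess();
}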


@@ -31,103 +31,103 @@
/**
* @brief Byte alignment requirements.
*/
#define secureportBYTE_ALIGNMENT 8
#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 )
/**
* @brief Macro to declare a function as non-secure callable.
*/
#if defined( __IAR_SYSTEMS_ICC__ )
#define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry
#else
#define secureportNON_SECURE_CALLABLE __attribute__((cmse_nonsecure_entry))
#endif
/**
* @brief Set the secure PRIMASK value.
*/
#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Set the non-secure PRIMASK value.
*/
#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
__asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" )
/**
* @brief Read the PSP value in the given variable.
*/
#define secureportREAD_PSP( pucOutCurrentStackPointer ) \
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
__asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) )
/**
* @brief Set the PSP to the given value.
*/
#define secureportSET_PSP( pucCurrentStackPointer ) \
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
__asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) )
/**
* @brief Set the PSPLIM to the given value.
*/
#define secureportSET_PSPLIM( pucStackLimit ) \
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
__asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) )
/**
* @brief Set the NonSecure MSP to the given value.
*/
#define secureportSET_MSP_NS( pucMainStackPointer ) \
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
__asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) )
/**
* @brief Set the CONTROL register to the given value.
*/
#define secureportSET_CONTROL( ulControl ) \
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
__asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" )
/**
* @brief Read the Interrupt Program Status Register (IPSR) value in the given
* variable.
*/
#define secureportREAD_IPSR( ulIPSR ) \
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
__asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) )
/**
* @brief PRIMASK value to enable interrupts.
*/
#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0
/**
* @brief PRIMASK value to disable interrupts.
*/
#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1
/**
* @brief Disable secure interrupts.
*/
#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Disable non-secure interrupts.
*
* This effectively disables context switches.
*/
#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL )
/**
* @brief Enable non-secure interrupts.
*/
#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL )
/**
* @brief Assert definition.
*/
#define secureportASSERT( x ) \
if( ( x ) == 0 ) \
{ \
secureportDISABLE_SECURE_INTERRUPTS(); \
secureportDISABLE_NON_SECURE_INTERRUPTS(); \
for( ;; ); \
}
#endif /* __SECURE_PORT_MACROS_H__ */
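Editor's note: a minimal sketch (not from the port) showing the intended pairing of the interrupt-mask macros and secureportASSERT() around secure-side shared state; the counter and function name are illustrative. The same disable/enable pattern is used by vPortFree() in the secure heap above.

static uint32_t ulSharedSecureCounter = 0; /* Illustrative shared state. */

void vUpdateSharedSecureCounter( uint32_t ulIncrement )
{
secureportASSERT( ulIncrement > 0 );

/* Mask non-secure interrupts so that a context switch cannot occur
* while the shared state is being updated. */
secureportDISABLE_NON_SECURE_INTERRUPTS();
{
ulSharedSecureCounter += ulIncrement;
}
secureportENABLE_NON_SECURE_INTERRUPTS();
}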

File diff suppressed because it is too large.


@@ -25,194 +25,194 @@
* 1 tab == 4 spaces!
*/
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
PUBLIC xIsPrivileged
PUBLIC vResetPrivilege
PUBLIC vRestoreContextOfFirstTask
PUBLIC vRaisePrivilege
PUBLIC vStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
PUBLIC PendSV_Handler
PUBLIC SVC_Handler
/*-----------------------------------------------------------*/
/*---------------- Unprivileged Functions -------------------*/
/*-----------------------------------------------------------*/
SECTION .text:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
xIsPrivileged:
mrs r0, control /* r0 = CONTROL. */
tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the condition flags. */
ite ne
movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
vResetPrivilege:
mrs r0, control /* r0 = CONTROL. */
orr r0, r0, #1 /* r0 = r0 | 1. */
msr control, r0 /* CONTROL = r0. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
/*----------------- Privileged Functions --------------------*/
/*-----------------------------------------------------------*/
SECTION privileged_functions:CODE:NOROOT(2)
THUMB
/*-----------------------------------------------------------*/
vRestoreContextOfFirstTask:
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
#if ( configENABLE_MPU == 1 )
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
msr control, r2 /* Set this task's CONTROL value. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r3 /* Finally, branch to EXC_RETURN. */
#else /* configENABLE_MPU */
ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
msr psplim, r1 /* Set this task's PSPLIM value. */
movs r1, #2 /* r1 = 2. */
msr CONTROL, r1 /* Switch to use PSP in the thread mode. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
isb
bx r2 /* Finally, branch to EXC_RETURN. */
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
vRaisePrivilege:
mrs r0, control /* Read the CONTROL register. */
bic r0, r0, #1 /* Clear the bit 0. */
msr control, r0 /* Write back the new CONTROL value. */
bx lr /* Return to the caller. */
/*-----------------------------------------------------------*/
vStartFirstTask:
ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */
ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */
ldr r0, [r0] /* The first entry in vector table is stack pointer. */
msr msp, r0 /* Set the MSP back to the start of the stack. */
cpsie i /* Globally enable interrupts. */
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
/*-----------------------------------------------------------*/
ulSetInterruptMaskFromISR:
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
vClearInterruptMaskFromISR:
msr PRIMASK, r0
bx lr
/*-----------------------------------------------------------*/
PendSV_Handler:
mrs r0, psp /* Read PSP in r0. */
#if ( configENABLE_FPU == 1 )
tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vstmdbeq r0!, {s16-s31} /* Store the FPU registers which are not saved automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
mrs r1, psplim /* r1 = PSPLIM. */
mrs r2, control /* r2 = CONTROL. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */
#else /* configENABLE_MPU */
mrs r2, psplim /* r2 = PSPLIM. */
mov r3, lr /* r3 = LR/EXC_RETURN. */
stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
#endif /* configENABLE_MPU */
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
str r0, [r1] /* Save the new top of stack in TCB. */
cpsid i
bl vTaskSwitchContext
cpsie i
ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
ldr r1, [r2] /* Read pxCurrentTCB. */
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
#if ( configENABLE_MPU == 1 )
adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */
ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */
ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */
str r3, [r2] /* Program MAIR0. */
ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */
movs r3, #4 /* r3 = 4. */
str r3, [r2] /* Program RNR = 4. */
adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */
ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */
ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */
stmia r2!, {r4-r11} /* Write 4 sets of RBAR/RLAR registers using alias registers. */
#endif /* configENABLE_MPU */
#if ( configENABLE_MPU == 1 )
ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */
#else /* configENABLE_MPU */
ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
#endif /* configENABLE_MPU */
#if ( configENABLE_FPU == 1 )
tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the FPU is in use. */
it eq
vldmiaeq r0!, {s16-s31} /* Restore the FPU registers which are not restored automatically. */
#endif /* configENABLE_FPU */
#if ( configENABLE_MPU == 1 )
msr psplim, r1 /* Restore the PSPLIM register value for the task. */
msr control, r2 /* Restore the CONTROL register value for the task. */
#else /* configENABLE_MPU */
msr psplim, r2 /* Restore the PSPLIM register value for the task. */
#endif /* configENABLE_MPU */
msr psp, r0 /* Remember the new top of stack for the task. */
bx r3
/*-----------------------------------------------------------*/
SVC_Handler:
tst lr, #4
ite eq
mrseq r0, msp
mrsne r0, psp
b vPortSVCHandler_C
/*-----------------------------------------------------------*/
END


@@ -42,46 +42,59 @@ extern "C" {
*------------------------------------------------------------------------------
*/
#ifndef configENABLE_FPU
#error configENABLE_FPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_FPU */
#ifndef configENABLE_MPU
#error configENABLE_MPU must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_MPU */
#ifndef configENABLE_TRUSTZONE
#error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h.
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
/**
* @brief Type definitions.
*/
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/**
* Architecture specifics.
*/
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
#define portNOP()
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline ))
#endif
#define portHAS_STACK_OVERFLOW_CHECKING 1
/*-----------------------------------------------------------*/
/**
@@ -96,13 +109,13 @@ extern uint32_t ulSetInterruptMaskFromISR( void ) /* __attribute__(( naked )) PR
extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */;
#if( configENABLE_TRUSTZONE == 1 )
extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize );
extern void vPortFreeSecureContext( uint32_t *pulTCB ) /* PRIVILEGED_FUNCTION */;
#endif /* configENABLE_TRUSTZONE */
#if( configENABLE_MPU == 1 )
extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */;
extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */;
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@@ -110,58 +123,58 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
* @brief MPU specific constants.
*/
#if( configENABLE_MPU == 1 )
#define portUSING_MPU_WRAPPERS 1
#define portPRIVILEGE_BIT ( 0x80000000UL )
#else
#define portPRIVILEGE_BIT ( 0x0UL )
#endif /* configENABLE_MPU */
/* MPU regions. */
#define portPRIVILEGED_FLASH_REGION ( 0UL )
#define portUNPRIVILEGED_FLASH_REGION ( 1UL )
#define portPRIVILEGED_RAM_REGION ( 2UL )
#define portUNPRIVILEGED_DEVICE_REGION ( 3UL )
#define portSTACK_REGION ( 4UL )
#define portFIRST_CONFIGURABLE_REGION ( 5UL )
#define portLAST_CONFIGURABLE_REGION ( 7UL )
#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */
/* Devices Region. */
#define portDEVICE_REGION_START_ADDRESS ( 0x50000000 )
#define portDEVICE_REGION_END_ADDRESS ( 0x5FFFFFFF )
/* Device memory attributes used in MPU_MAIR registers.
*
* 8-bit values encoded as follows:
* Bit[7:4] - 0000 - Device Memory
* Bit[3:2] - 00 --> Device-nGnRnE
* 01 --> Device-nGnRE
* 10 --> Device-nGRE
* 11 --> Device-GRE
* Bit[1:0] - 00, Reserved.
*/
#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */
#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */
#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */
#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */
/* Normal memory attributes used in MPU_MAIR registers. */
#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */
#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */
/* Attributes used in MPU_RBAR registers. */
#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL )
#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL )
#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL )
#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL )
#define portMPU_REGION_READ_WRITE ( 1UL << 1UL )
#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL )
#define portMPU_REGION_READ_ONLY ( 3UL << 1UL )
#define portMPU_REGION_EXECUTE_NEVER ( 1UL )
/*-----------------------------------------------------------*/
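Editor's note: an illustrative composition of the attribute macros above into the RBAR/RLAR/MAIR0 values a region needs; it is not code from the port. The addresses and the use of MAIR attribute index 0 are assumptions, and the RLAR bit layout described in the comments (attribute index in bits [3:1], enable in bit 0) is stated here only for the sketch.

static void prvExampleRegionSettingsSketch( uint32_t *pulRBAR, uint32_t *pulRLAR, uint32_t *pulMAIR0 )
{
/* Attribute index 0 in MAIR0 describes normal, write-back cacheable memory. */
*pulMAIR0 = portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE;

/* RBAR = 32-byte aligned base address | shareability | access permission | XN. */
*pulRBAR = ( 0x20020000UL & 0xFFFFFFE0UL ) | /* Base address is an assumption. */
portMPU_REGION_NON_SHAREABLE |
portMPU_REGION_READ_WRITE |
portMPU_REGION_EXECUTE_NEVER;

/* RLAR = 32-byte aligned limit address | attribute index 0 | region enable. */
*pulRLAR = ( 0x2002FFFFUL & 0xFFFFFFE0UL ) | ( 0UL << 1UL ) | 1UL;
}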
/**
@@ -169,8 +182,8 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) /* __attribute__(( nak
*/
typedef struct MPURegionSettings
{
uint32_t ulRBAR; /**< RBAR for the region. */
uint32_t ulRLAR; /**< RLAR for the region. */
} MPURegionSettings_t;
/**
@@ -178,99 +191,99 @@ typedef struct MPURegionSettings
*/
typedef struct MPU_SETTINGS
{
uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */
MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */
} xMPU_SETTINGS;
/*-----------------------------------------------------------*/
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
/*-----------------------------------------------------------*/
/**
* @brief Scheduler utilities.
*/
#define portYIELD() vPortYield()
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
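Editor's note: the deferred-interrupt pattern below is the standard FreeRTOS usage of portYIELD_FROM_ISR() and is only a sketch; the handler name and the semaphore (assumed to have been created elsewhere, with FreeRTOS.h and semphr.h included) are illustrative.

extern SemaphoreHandle_t xExampleSemaphore; /* Assumed to be created at start-up. */

void vExampleIRQHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

/* Unblock the task that performs the interrupt's deferred processing. */
xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

/* Pends PendSV via portNVIC_INT_CTRL_REG if a higher priority task was woken. */
portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}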
/**
* @brief Critical section management.
*/
#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vClearInterruptMaskFromISR( x )
#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" )
#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" )
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
/*-----------------------------------------------------------*/
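Editor's note: a minimal sketch of the critical section macros defined above guarding a shared counter; the variable and function names are assumptions.

static volatile uint32_t ulSharedRunCount = 0; /* Illustrative shared data. */

void vIncrementSharedRunCount( void )
{
/* Prevent interrupts (and therefore context switches) while the shared
* value is updated. */
portENTER_CRITICAL();
{
ulSharedRunCount++;
}
portEXIT_CRITICAL();
}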
/**
* @brief Task function macros as described on the FreeRTOS.org WEB site.
*/
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
#if( configENABLE_TRUSTZONE == 1 )
/**
* @brief Allocate a secure context for the task.
*
* Tasks are not created with a secure context. Any task that is going to call
* secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a
* secure context before it calls any secure function.
*
* @param[in] ulSecureStackSize The size of the secure stack to be allocated.
*/
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize )
/**
* @brief Called when a task is deleted to delete the task's secure context,
* if it has one.
*
* @param[in] pxTCB The TCB of the task being deleted.
*/
#define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB )
#else
#define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize )
#define portCLEAN_UP_TCB( pxTCB )
#endif /* configENABLE_TRUSTZONE */
/*-----------------------------------------------------------*/
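Editor's note: a sketch of the allocation rule described in the comment above. The task function and the secure entry point NSCFunction() are assumptions, and configMINIMAL_SECURE_STACK_SIZE is simply one plausible size to request.

extern uint32_t NSCFunction( void ); /* Hypothetical non-secure callable entry point. */

static void prvSecureCallingTask( void *pvParameters )
{
( void ) pvParameters;

/* Give this task a secure-side context before its first secure call. */
portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

for( ;; )
{
/* Every call into the secure side now runs on the allocated secure stack. */
( void ) NSCFunction();
}
}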
#if( configENABLE_MPU == 1 )
/**
* @brief Checks whether or not the processor is privileged.
*
* @return 1 if the processor is already privileged, 0 otherwise.
*/
#define portIS_PRIVILEGED() xIsPrivileged()
/**
* @brief Raise an SVC request to raise privilege.
*
* The SVC handler checks that the SVC was raised from a system call and only
* then raises the privilege. If this macro is invoked from anywhere else,
* the privilege is not raised.
*/
#define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" :: "i" ( portSVC_RAISE_PRIVILEGE ) : "memory" );
/**
* @brief Lowers the privilege level by setting the bit 0 of the CONTROL
* register.
*/
#define portRESET_PRIVILEGE() vResetPrivilege()
#else
#define portIS_PRIVILEGED()
#define portRAISE_PRIVILEGE()
#define portRESET_PRIVILEGE()
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
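Editor's note: a minimal sketch of the raise/restore pattern these macros are designed for. It is meaningful only when configENABLE_MPU is 1, and only when invoked from code the SVC handler recognises as a system call; the wrapped operation is a placeholder.

void vPrivilegedOperationSketch( void )
{
BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

if( xRunningPrivileged == pdFALSE )
{
portRAISE_PRIVILEGE(); /* SVC into the kernel to gain privilege. */
}

/* ...access a privileged resource here... */

if( xRunningPrivileged == pdFALSE )
{
portRESET_PRIVILEGE(); /* Drop back to unprivileged execution. */
}
}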