Update system call entry mechanism (#896)

Earlier, the system call entry from an unprivileged task looked like this:

1. SVC to enter the system call.
2. System call implementation.
3. SVC to exit the system call.

Now the system call entry raises only one SVC; switching to the system call
stack, raising privilege, running the implementation, and raising the exit
SVC are all handled internally, as sketched below.
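
As an illustration, here is a minimal sketch of a single-SVC entry point.
Only the unprivileged path is shown; the real wrappers live in the port's
mpu_wrappers_v2_asm.c and also handle the already-privileged case, so treat
the body below as an assumption rather than the shipped implementation. The
SYSTEM_CALL_vTaskDelay constant comes from the new mpu_syscall_numbers.h
header added by this PR.

/* Sketch of a system call entry point under the new mechanism. */
#include "FreeRTOS.h"
#include "task.h"
#include "mpu_syscall_numbers.h" /* SYSTEM_CALL_* numbers added by this PR. */

void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL;

void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL */
{
    __asm volatile
    (
        " svc %0 \n" /* The only SVC the task raises; the immediate is the system call number. */
        ::"i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
    );

    /* Nothing follows the SVC: vSystemCallEnter redirects execution to the
     * kernel implementation and points its LR at vRequestSystemCallExit, so
     * the exit SVC is raised internally and vSystemCallExit returns control
     * directly to the caller of this wrapper. */
}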

This PR also makes the following small changes:

1. Pack the arguments of system calls that take 5 parameters into a single
   struct parameter. This removes the need for special handling of system
   calls with 5 parameters (see the sketch after this list).
2. Remove the raise-privilege SVC when MPU wrappers v2 are used.
3. Add additional run-time parameter checks to the MPU wrappers for the
   xTaskGenericNotify and xQueueTakeMutexRecursive APIs.
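
For example, the new xTaskGenericNotifyParams_t struct (added to
mpu_prototypes.h in this diff) packs the five arguments so that a single
pointer crosses the SVC boundary. The wrapper body below is a sketch of how
the packing can be done; only the struct and the MPU_xTaskGenericNotifyEntry
prototype are taken from this change.

#include "FreeRTOS.h"
#include "task.h"
#include "mpu_prototypes.h" /* xTaskGenericNotifyParams_t and MPU_xTaskGenericNotifyEntry. */

/* Sketch: pack the 5 arguments into one struct on the task's stack and pass
 * only its address to the single-parameter entry point, so the SVC handler
 * no longer needs to copy a 5th parameter from the task stack. */
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
{
    xTaskGenericNotifyParams_t xParams;

    xParams.xTaskToNotify = xTaskToNotify;
    xParams.uxIndexToNotify = uxIndexToNotify;
    xParams.ulValue = ulValue;
    xParams.eAction = eAction;
    xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;

    return MPU_xTaskGenericNotifyEntry( &( xParams ) );
}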

These changes were tested on the following platforms:
1. STM32H743ZI (Cortex-M7)
2. STM32L152RE (Cortex-M3)
3. Nuvoton M2351 (Cortex-M23)
4. NXP LPC55S69 (Cortex-M33)
Authored by Gaurav-Aggarwal-AWS on 2023-11-21 18:42:23 +05:30; committed by GitHub.
parent 52c1c6e578
commit 9bfd85a253
115 changed files with 46168 additions and 54704 deletions


@ -8,7 +8,7 @@ jobs:
- name: Checkout Parent Repository
uses: actions/checkout@v2
with:
ref: main
ref: 80db00d98bfac8b22289a2668d9e6b0265946d24
repository: FreeRTOS/FreeRTOS
submodules: 'recursive'
fetch-depth: 1


@ -38,6 +38,42 @@
#ifndef MPU_PROTOTYPES_H
#define MPU_PROTOTYPES_H
typedef struct xTaskGenericNotifyParams
{
TaskHandle_t xTaskToNotify;
UBaseType_t uxIndexToNotify;
uint32_t ulValue;
eNotifyAction eAction;
uint32_t * pulPreviousNotificationValue;
} xTaskGenericNotifyParams_t;
typedef struct xTaskGenericNotifyWaitParams
{
UBaseType_t uxIndexToWaitOn;
uint32_t ulBitsToClearOnEntry;
uint32_t ulBitsToClearOnExit;
uint32_t * pulNotificationValue;
TickType_t xTicksToWait;
} xTaskGenericNotifyWaitParams_t;
typedef struct xTimerGenericCommandParams
{
TimerHandle_t xTimer;
BaseType_t xCommandID;
TickType_t xOptionalValue;
BaseType_t * pxHigherPriorityTaskWoken;
TickType_t xTicksToWait;
} xTimerGenericCommandParams_t;
typedef struct xEventGroupWaitBitsParams
{
EventGroupHandle_t xEventGroup;
EventBits_t uxBitsToWaitFor;
BaseType_t xClearOnExit;
BaseType_t xWaitForAllBits;
TickType_t xTicksToWait;
} xEventGroupWaitBitsParams_t;
/* MPU versions of task.h API functions. */
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
@ -77,11 +113,13 @@ BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
uint32_t ulValue,
eNotifyAction eAction,
uint32_t * pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
uint32_t ulBitsToClearOnEntry,
uint32_t ulBitsToClearOnExit,
uint32_t * pulNotificationValue,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
BaseType_t xClearCountOnExit,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
@ -228,9 +266,10 @@ BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
const TickType_t xOptionalValue,
BaseType_t * const pxHigherPriorityTaskWoken,
const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerGenericCommandEntry( const xTimerGenericCommandParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
void MPU_vTimerSetReloadMode( TimerHandle_t xTimer,
const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
@ -259,6 +298,7 @@ EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
const BaseType_t xClearOnExit,
const BaseType_t xWaitForAllBits,
TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,


@ -0,0 +1,106 @@
/*
* FreeRTOS Kernel V10.6.1
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
#ifndef MPU_SYSCALL_NUMBERS_H
#define MPU_SYSCALL_NUMBERS_H
/* Numbers assigned to various system calls. */
#define SYSTEM_CALL_xTaskGenericNotify 0
#define SYSTEM_CALL_xTaskGenericNotifyWait 1
#define SYSTEM_CALL_xTimerGenericCommand 2
#define SYSTEM_CALL_xEventGroupWaitBits 3
#define SYSTEM_CALL_xTaskDelayUntil 4
#define SYSTEM_CALL_xTaskAbortDelay 5
#define SYSTEM_CALL_vTaskDelay 6
#define SYSTEM_CALL_uxTaskPriorityGet 7
#define SYSTEM_CALL_eTaskGetState 8
#define SYSTEM_CALL_vTaskGetInfo 9
#define SYSTEM_CALL_xTaskGetIdleTaskHandle 10
#define SYSTEM_CALL_vTaskSuspend 11
#define SYSTEM_CALL_vTaskResume 12
#define SYSTEM_CALL_xTaskGetTickCount 13
#define SYSTEM_CALL_uxTaskGetNumberOfTasks 14
#define SYSTEM_CALL_pcTaskGetName 15
#define SYSTEM_CALL_ulTaskGetRunTimeCounter 16
#define SYSTEM_CALL_ulTaskGetRunTimePercent 17
#define SYSTEM_CALL_ulTaskGetIdleRunTimePercent 18
#define SYSTEM_CALL_ulTaskGetIdleRunTimeCounter 19
#define SYSTEM_CALL_vTaskSetApplicationTaskTag 20
#define SYSTEM_CALL_xTaskGetApplicationTaskTag 21
#define SYSTEM_CALL_vTaskSetThreadLocalStoragePointer 22
#define SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer 23
#define SYSTEM_CALL_uxTaskGetSystemState 24
#define SYSTEM_CALL_uxTaskGetStackHighWaterMark 25
#define SYSTEM_CALL_uxTaskGetStackHighWaterMark2 26
#define SYSTEM_CALL_xTaskGetCurrentTaskHandle 27
#define SYSTEM_CALL_xTaskGetSchedulerState 28
#define SYSTEM_CALL_vTaskSetTimeOutState 29
#define SYSTEM_CALL_xTaskCheckForTimeOut 30
#define SYSTEM_CALL_ulTaskGenericNotifyTake 31
#define SYSTEM_CALL_xTaskGenericNotifyStateClear 32
#define SYSTEM_CALL_ulTaskGenericNotifyValueClear 33
#define SYSTEM_CALL_xQueueGenericSend 34
#define SYSTEM_CALL_uxQueueMessagesWaiting 35
#define SYSTEM_CALL_uxQueueSpacesAvailable 36
#define SYSTEM_CALL_xQueueReceive 37
#define SYSTEM_CALL_xQueuePeek 38
#define SYSTEM_CALL_xQueueSemaphoreTake 39
#define SYSTEM_CALL_xQueueGetMutexHolder 40
#define SYSTEM_CALL_xQueueTakeMutexRecursive 41
#define SYSTEM_CALL_xQueueGiveMutexRecursive 42
#define SYSTEM_CALL_xQueueSelectFromSet 43
#define SYSTEM_CALL_xQueueAddToSet 44
#define SYSTEM_CALL_vQueueAddToRegistry 45
#define SYSTEM_CALL_vQueueUnregisterQueue 46
#define SYSTEM_CALL_pcQueueGetName 47
#define SYSTEM_CALL_pvTimerGetTimerID 48
#define SYSTEM_CALL_vTimerSetTimerID 49
#define SYSTEM_CALL_xTimerIsTimerActive 50
#define SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle 51
#define SYSTEM_CALL_pcTimerGetName 52
#define SYSTEM_CALL_vTimerSetReloadMode 53
#define SYSTEM_CALL_xTimerGetReloadMode 54
#define SYSTEM_CALL_uxTimerGetReloadMode 55
#define SYSTEM_CALL_xTimerGetPeriod 56
#define SYSTEM_CALL_xTimerGetExpiryTime 57
#define SYSTEM_CALL_xEventGroupClearBits 58
#define SYSTEM_CALL_xEventGroupSetBits 59
#define SYSTEM_CALL_xEventGroupSync 60
#define SYSTEM_CALL_uxEventGroupGetNumber 61
#define SYSTEM_CALL_vEventGroupSetNumber 62
#define SYSTEM_CALL_xStreamBufferSend 63
#define SYSTEM_CALL_xStreamBufferReceive 64
#define SYSTEM_CALL_xStreamBufferIsFull 65
#define SYSTEM_CALL_xStreamBufferIsEmpty 66
#define SYSTEM_CALL_xStreamBufferSpacesAvailable 67
#define SYSTEM_CALL_xStreamBufferBytesAvailable 68
#define SYSTEM_CALL_xStreamBufferSetTriggerLevel 69
#define SYSTEM_CALL_xStreamBufferNextMessageLengthBytes 70
#define NUM_SYSTEM_CALLS 71 /* Total number of system calls. */
#endif /* MPU_SYSCALL_NUMBERS_H */


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;


@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -46,8 +49,8 @@
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -143,36 +146,36 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldr r4, xSecureContextConst2 \n"
" str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n"/* Finally, branch to EXC_RETURN. */
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -183,15 +186,15 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n"/* Return. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n" /* Return. */
" running_privileged: \n"
" movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "r1", "memory"
@ -205,11 +208,11 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" movs r1, #1 \n"/* r1 = 1. */
" bics r0, r1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -221,11 +224,11 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" orrs r0, r1 \n"/* r0 = r0 | r1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* r0 = r0 | r1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -237,14 +240,14 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -283,8 +286,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -438,111 +441,110 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRNRConst: .word 0xe000ed98 \n"
" xRBARConst: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
" mov r4, r8 \n"/* r4 = r8. */
" mov r5, r9 \n"/* r5 = r9. */
" mov r6, r10 \n"/* r6 = r10. */
" mov r7, r11 \n"/* r7 = r11. */
" stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3-r7} \n" /* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
" mov r4, r8 \n" /* r4 = r8. */
" mov r5, r9 \n" /* r5 = r9. */
" mov r6, r10 \n" /* r6 = r10. */
" mov r7, r11 \n" /* r7 = r11. */
" stmia r2!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
" \n"
" select_next_task: \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" adds r2, r2, #16 \n"/* Move to the high registers. */
" ldmia r2!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n"/* r8 = r4. */
" mov r9, r5 \n"/* r9 = r5. */
" mov r10, r6 \n"/* r10 = r6. */
" mov r11, r7 \n"/* r11 = r7. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" subs r2, r2, #32 \n"/* Go back to the low registers. */
" ldmia r2!, {r4-r7} \n"/* Restore the low registers that are not automatically restored. */
" adds r2, r2, #16 \n" /* Move to the high registers. */
" ldmia r2!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n" /* r8 = r4. */
" mov r9, r5 \n" /* r9 = r5. */
" mov r10, r6 \n" /* r10 = r6. */
" mov r11, r7 \n" /* r11 = r7. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" subs r2, r2, #32 \n" /* Go back to the low registers. */
" ldmia r2!, {r4-r7} \n" /* Restore the low registers that are not automatically restored. */
" bx lr \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"movs r0, #4 \n"
@ -557,34 +559,30 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" b route_svc \n"
" \n"
"route_svc: \n"
" ldr r2, [r0, #24] \n"
" subs r2, #2 \n"
" ldrb r3, [r2, #0] \n"
" cmp r3, %0 \n"
" beq system_call_enter \n"
" cmp r3, %1 \n"
" beq system_call_enter_1 \n"
" cmp r3, %2 \n"
" ldr r3, [r0, #24] \n"
" subs r3, #2 \n"
" ldrb r2, [r3, #0] \n"
" cmp r2, %0 \n"
" blt system_call_enter \n"
" cmp r2, %1 \n"
" beq system_call_exit \n"
" b vPortSVCHandler_C \n"
" \n"
"system_call_enter: \n"
" b vSystemCallEnter \n"
"system_call_enter_1: \n"
" b vSystemCallEnter_1 \n"
"system_call_exit: \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "r3", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -604,7 +602,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -615,8 +613,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -628,14 +626,14 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" bne free_secure_context \n"/* Branch if r1 != 0. */
" bx lr \n"/* There is no secure context (xSecureContext is NULL). */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" bne free_secure_context \n" /* Branch if r1 != 0. */
" bx lr \n" /* There is no secure context (xSecureContext is NULL). */
" free_secure_context: \n"
" svc %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}


@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -46,8 +49,8 @@
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -140,33 +143,33 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -177,15 +180,15 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n"/* Return. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n" /* Return. */
" running_privileged: \n"
" movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "r1", "memory"
@ -199,11 +202,11 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" movs r1, #1 \n"/* r1 = 1. */
" bics r0, r1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -215,11 +218,11 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" orrs r0, r1 \n"/* r0 = r0 | r1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* r0 = r0 | r1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -231,14 +234,14 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -277,8 +280,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -401,68 +404,67 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRNRConst: .word 0xe000ed98 \n"
" xRBARConst: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
" mov r4, r8 \n"/* r4 = r8. */
" mov r5, r9 \n"/* r5 = r9. */
" mov r6, r10 \n"/* r6 = r10. */
" mov r7, r11 \n"/* r7 = r11. */
" stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" mrs r0, psp \n" /* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" subs r0, r0, #40 \n" /* Make space for PSPLIM, LR and the remaining registers on the stack. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r0!, {r2-r7} \n" /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
" mov r4, r8 \n" /* r4 = r8. */
" mov r5, r9 \n" /* r5 = r9. */
" mov r6, r10 \n" /* r6 = r10. */
" mov r7, r11 \n" /* r7 = r11. */
" stmia r0!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" adds r0, r0, #24 \n"/* Move to the high registers. */
" ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n"/* r8 = r4. */
" mov r9, r5 \n"/* r9 = r5. */
" mov r10, r6 \n"/* r10 = r6. */
" mov r11, r7 \n"/* r11 = r7. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
" ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" adds r0, r0, #24 \n" /* Move to the high registers. */
" ldmia r0!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n" /* r8 = r4. */
" mov r9, r5 \n" /* r9 = r5. */
" mov r10, r6 \n" /* r10 = r6. */
" mov r11, r7 \n" /* r11 = r7. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" subs r0, r0, #40 \n" /* Move to the starting of the saved context. */
" ldmia r0!, {r2-r7} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"movs r0, #4 \n"
@ -477,34 +479,30 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" b route_svc \n"
" \n"
"route_svc: \n"
" ldr r2, [r0, #24] \n"
" subs r2, #2 \n"
" ldrb r3, [r2, #0] \n"
" cmp r3, %0 \n"
" beq system_call_enter \n"
" cmp r3, %1 \n"
" beq system_call_enter_1 \n"
" cmp r3, %2 \n"
" ldr r3, [r0, #24] \n"
" subs r3, #2 \n"
" ldrb r2, [r3, #0] \n"
" cmp r2, %0 \n"
" blt system_call_enter \n"
" cmp r2, %1 \n"
" beq system_call_exit \n"
" b vPortSVCHandler_C \n"
" \n"
"system_call_enter: \n"
" b vSystemCallEnter \n"
"system_call_enter_1: \n"
" b vSystemCallEnter_1 \n"
"system_call_exit: \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "r3", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -524,7 +522,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
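A minimal C model of the routing performed by the rewritten SVC_Handler above, for readers who prefer C to assembly. The SVC immediate is assumed to have already been read from the instruction that raised the exception; the two constants are illustrative stand-ins for NUM_SYSTEM_CALLS and portSVC_SYSTEM_CALL_EXIT, which the real handler takes as immediate operands.

#include <stdint.h>

/* Illustrative stand-ins only; the handler uses NUM_SYSTEM_CALLS and
 * portSVC_SYSTEM_CALL_EXIT as immediate operands. */
#define EXAMPLE_NUM_SYSTEM_CALLS        ( 100U )
#define EXAMPLE_SVC_SYSTEM_CALL_EXIT    ( 103U )

typedef enum
{
    eRouteSystemCallEnter, /* Branch to vSystemCallEnter. */
    eRouteSystemCallExit,  /* Branch to vSystemCallExit. */
    eRouteCHandler         /* Branch to vPortSVCHandler_C. */
} eSvcRoute_t;

/* Mirrors the cmp/blt/beq sequence in route_svc: any SVC number below the
 * number of system calls enters a system call, the dedicated exit number
 * leaves one, and everything else goes to the generic C handler. */
static eSvcRoute_t eRouteSvc( uint8_t ucSvcNumber )
{
    eSvcRoute_t eRoute;

    if( ucSvcNumber < EXAMPLE_NUM_SYSTEM_CALLS )
    {
        eRoute = eRouteSystemCallEnter;
    }
    else if( ucSvcNumber == EXAMPLE_SVC_SYSTEM_CALL_EXIT )
    {
        eRoute = eRouteSystemCallExit;
    }
    else
    {
        eRoute = eRouteCHandler;
    }

    return eRoute;
}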
View file
@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE needs to be defined only for the
 * header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -121,12 +124,12 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -151,7 +154,7 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -162,12 +165,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -181,10 +184,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -196,10 +199,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -211,15 +214,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -235,12 +238,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -252,10 +255,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -263,8 +266,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -411,96 +414,96 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n"/* r2 = r2 + 12. */
" stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n"/* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n" /* r2 = r2 + 12. */
" stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n" /* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
" ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
@ -508,21 +511,20 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"xSecureContextConst: .word xSecureContext \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -533,10 +535,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -544,24 +544,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -576,7 +572,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
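As a reference for the ldr/ldrb pair in the handler above, here is a small C sketch of how the SVC immediate is recovered from the caller's exception frame. The offsets follow the standard Cortex-M stacking order (r0, r1, r2, r3, r12, lr, pc, xpsr); prvGetSvcNumber is a hypothetical name used only for this sketch.

#include <stdint.h>

/* pulExceptionFrame points at the stacked r0 of a standard Cortex-M exception
 * frame. The stacked return address is the seventh word (offset 24), and the
 * 8-bit SVC immediate is the low byte of the 16-bit SVC instruction that sits
 * two bytes before that return address. */
static uint8_t prvGetSvcNumber( const uint32_t * pulExceptionFrame )
{
    const uint8_t * pucReturnAddress = ( const uint8_t * ) pulExceptionFrame[ 6 ];

    return pucReturnAddress[ -2 ];
}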
@ -587,8 +583,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -600,12 +596,12 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
View file
@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE needs to be defined only for the
 * header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -118,35 +121,35 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -157,12 +160,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -176,10 +179,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -191,10 +194,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -206,15 +209,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -230,12 +233,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -247,10 +250,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -258,8 +261,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -375,75 +378,74 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" mrs r0, psp \n" /* Read PSP in r0. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -454,10 +456,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -465,24 +465,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -497,7 +493,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
View file
@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -57,10 +54,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
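The hunk above shows the pattern that repeats for every wrapper in this file: the privileged path still branches to the MPU_*Impl function, while the unprivileged path now raises exactly one SVC whose immediate is the system call number (SYSTEM_CALL_xTaskDelayUntil here), replacing the old enter-SVC / bl / exit-SVC sequence. A rough C model of that shape, using hypothetical helpers (ulReadControl, vRaiseSvc) and a hypothetical call purely for illustration:

#include <stdint.h>

extern uint32_t ulReadControl( void );        /* Hypothetical: returns the CONTROL register. */
extern void vRaiseSvc( uint8_t ucNumber );    /* Hypothetical: executes 'svc #ucNumber'. */
extern void MPU_ExampleImpl( void );          /* Hypothetical kernel-side implementation. */

#define EXAMPLE_SYSTEM_CALL_NUMBER    ( 1U )  /* Stand-in for a SYSTEM_CALL_* constant. */

void MPU_ExampleWrapper( void )
{
    if( ( ulReadControl() & 1U ) == 0U )
    {
        /* CONTROL[0] == 0: the caller is already privileged, so call the
         * implementation directly. */
        MPU_ExampleImpl();
    }
    else
    {
        /* Unprivileged caller: a single SVC enters the kernel. The wrapper no
         * longer raises the exit SVC itself. */
        vRaiseSvc( EXAMPLE_SYSTEM_CALL_NUMBER );
    }
}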
PUBLIC MPU_xTaskAbortDelay
@ -75,10 +69,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -93,10 +84,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -111,10 +99,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -129,10 +114,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -147,10 +129,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -165,10 +144,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -183,10 +159,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -201,10 +174,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -219,10 +189,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -237,10 +204,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -255,10 +219,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -273,10 +234,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -291,10 +249,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -309,10 +264,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -327,10 +279,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -345,10 +294,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -363,10 +309,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -381,10 +324,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -399,10 +339,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -417,10 +354,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -435,10 +369,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -453,10 +384,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -471,10 +399,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -489,10 +414,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -507,10 +429,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -525,14 +444,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -543,14 +459,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -561,10 +474,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -579,10 +489,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -597,10 +504,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -615,10 +519,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -633,10 +534,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -651,10 +549,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -669,10 +564,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -687,10 +579,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -705,10 +594,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -723,10 +609,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -741,10 +624,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -759,10 +639,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -777,10 +654,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -795,10 +669,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -813,10 +684,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -831,10 +699,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -849,10 +714,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -867,10 +729,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -885,10 +744,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -903,10 +759,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -921,10 +774,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -939,14 +789,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0, r1}
/* This function can also be called from an ISR and, therefore, we need a check
 * to take the privileged path when called from an ISR. */
@ -959,13 +806,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0, r1}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
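MPU_xTimerGenericCommandEntry above is the one wrapper with an extra decision: because the API can also be called from an ISR, it takes the privileged path in that case, and that path now branches to MPU_xTimerGenericCommandPrivImpl. The exact registers checked are partly elided in this hunk; the sketch below assumes the usual Cortex-M approach of treating a non-zero IPSR as ISR context, with ulReadIpsr and ulReadControl as hypothetical helpers.

#include <stdint.h>

extern uint32_t ulReadIpsr( void );      /* Hypothetical: returns the IPSR register. */
extern uint32_t ulReadControl( void );   /* Hypothetical: returns the CONTROL register. */

/* Take the privileged path when running inside an exception handler
 * (IPSR != 0) or when the caller is already privileged (CONTROL[0] == 0). */
static uint32_t ulUsePrivilegedPath( void )
{
    uint32_t ulResult = 0U;

    if( ( ulReadIpsr() != 0U ) || ( ( ulReadControl() & 1U ) == 0U ) )
    {
        ulResult = 1U;
    }

    return ulResult;
}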
@ -981,10 +825,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -999,10 +840,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -1017,10 +855,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -1035,10 +870,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -1053,10 +885,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1071,14 +900,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -1089,10 +915,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1107,10 +930,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1125,10 +945,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1143,10 +960,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1161,10 +975,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1179,10 +990,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1197,10 +1005,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1215,10 +1020,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1233,10 +1035,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1251,10 +1050,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1269,10 +1065,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1287,10 +1080,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1305,10 +1095,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1323,10 +1110,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1532,9 +1316,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -33,6 +33,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -45,7 +48,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -95,7 +97,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -230,7 +232,7 @@ vStartFirstTask:
cpsie i /* Globally enable interrupts. */
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -482,21 +484,17 @@ SVC_Handler:
b route_svc
route_svc:
ldr r2, [r0, #24]
subs r2, #2
ldrb r3, [r2, #0]
cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq system_call_enter
cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq system_call_enter_1
cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
ldr r3, [r0, #24]
subs r3, #2
ldrb r2, [r3, #0]
cmp r2, #NUM_SYSTEM_CALLS
blt system_call_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq system_call_exit
b vPortSVCHandler_C
system_call_enter:
b vSystemCallEnter
system_call_enter_1:
b vSystemCallEnter_1
system_call_exit:
b vSystemCallExit
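The routing in route_svc now keys off the SVC immediate itself: the handler loads the stacked return address (byte offset 24, word 6 of the exception frame), reads the byte two bytes before it (the imm8 field of the SVC instruction), and dispatches on that value. Numbers below NUM_SYSTEM_CALLS go to vSystemCallEnter, 104 (portSVC_SYSTEM_CALL_EXIT) goes to vSystemCallExit, and anything else falls through to vPortSVCHandler_C. Below is a stand-alone C rendering of that decision; the handler bodies, the NUM_SYSTEM_CALLS value, and the pointer-width frame words are placeholders chosen so the sketch compiles on a host, not the port's definitions.

#include <stdint.h>
#include <stdio.h>

#define NUM_SYSTEM_CALLS            5u     /* Placeholder: the real value comes from mpu_syscall_numbers.h. */
#define portSVC_SYSTEM_CALL_EXIT    104u   /* Matches the "cmp r2, #104" check in the handler above. */

static void vSystemCallEnter( uint8_t ucSvcNumber )  { printf( "enter system call %u\n", ( unsigned ) ucSvcNumber ); }
static void vSystemCallExit( void )                  { puts( "exit system call" ); }
static void vPortSVCHandler_C( uint8_t ucSvcNumber ) { printf( "other port SVC %u\n", ( unsigned ) ucSvcNumber ); }

/* pxStackFrame models the stacked exception frame: word 6 holds the return
 * address, and the SVC immediate is the byte two bytes before that address. */
static void prvRouteSVC( const uintptr_t * pxStackFrame )
{
    const uint8_t * pucSvcInstruction = ( const uint8_t * ) ( pxStackFrame[ 6 ] - 2u );
    uint8_t ucSvcNumber = pucSvcInstruction[ 0 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( ucSvcNumber );
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit();
    }
    else
    {
        vPortSVCHandler_C( ucSvcNumber );
    }
}

int main( void )
{
    /* Thumb encoding of "svc #3" is 0xDF03, stored little-endian. The stacked
     * return address points just past the SVC instruction. */
    static const uint8_t ucSvcInstruction[ 2 ] = { 0x03u, 0xDFu };
    uintptr_t pxFrame[ 8 ] = { 0 };

    pxFrame[ 6 ] = ( uintptr_t ) &ucSvcInstruction[ 2 ];
    prvRouteSVC( pxFrame );                /* Prints "enter system call 3". */
    return 0;
}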
@ -523,7 +521,7 @@ vPortFreeSecureContext:
bne free_secure_context /* Branch if r1 != 0. */
bx lr /* There is no secure context (xSecureContext is NULL). */
free_secure_context:
svc 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svc 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
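With the wrappers issuing the system call number directly as the SVC immediate, the port-internal SVC numbers move up to 100 and above so the two ranges cannot collide. The subset of values visible in this diff is summarised below; the authoritative definitions live in portmacro.h and mpu_syscall_numbers.h.

/* System call numbers occupy 0 .. ( NUM_SYSTEM_CALLS - 1 ) and are used
 * directly by the wrappers, e.g. "svc #SYSTEM_CALL_xTimerGenericCommand".
 * Port-internal SVC numbers seen in this diff (previous values in comments): */
#define portSVC_ALLOCATE_SECURE_CONTEXT    100    /* Was 0. */
#define portSVC_FREE_SECURE_CONTEXT        101    /* Was 1. */
#define portSVC_START_SCHEDULER            102    /* Was 2. */
#define portSVC_SYSTEM_CALL_EXIT           104    /* Was 6; compared against in route_svc. */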

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -57,10 +54,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -75,10 +69,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -93,10 +84,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -111,10 +99,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -129,10 +114,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -147,10 +129,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -165,10 +144,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -183,10 +159,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -201,10 +174,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -219,10 +189,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -237,10 +204,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -255,10 +219,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -273,10 +234,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -291,10 +249,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -309,10 +264,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -327,10 +279,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -345,10 +294,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -363,10 +309,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -381,10 +324,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -399,10 +339,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -417,10 +354,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -435,10 +369,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -453,10 +384,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -471,10 +399,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -489,10 +414,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -507,10 +429,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -525,14 +444,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -543,14 +459,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -561,10 +474,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -579,10 +489,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -597,10 +504,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -615,10 +519,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -633,10 +534,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -651,10 +549,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -669,10 +564,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -687,10 +579,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -705,10 +594,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -723,10 +609,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -741,10 +624,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -759,10 +639,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -777,10 +654,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -795,10 +669,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -813,10 +684,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -831,10 +699,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -849,10 +714,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -867,10 +729,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -885,10 +744,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -903,10 +759,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -921,10 +774,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -939,14 +789,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0, r1}
/* This function can also be called from an ISR and therefore needs a check
* to take the privileged path when called from an ISR. */
@ -959,13 +806,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0, r1}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -981,10 +825,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -999,10 +840,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -1017,10 +855,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -1035,10 +870,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -1053,10 +885,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1071,14 +900,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -1089,10 +915,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1107,10 +930,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1125,10 +945,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1143,10 +960,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1161,10 +975,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1179,10 +990,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1197,10 +1005,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1215,10 +1020,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1233,10 +1035,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1251,10 +1050,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1269,10 +1065,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1287,10 +1080,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1305,10 +1095,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1323,10 +1110,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1532,9 +1316,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -41,7 +44,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vPortSVCHandler_C
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -216,7 +218,7 @@ vStartFirstTask:
cpsie i /* Globally enable interrupts. */
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
nop
/*-----------------------------------------------------------*/
@ -401,21 +403,17 @@ SVC_Handler:
b route_svc
route_svc:
ldr r2, [r0, #24]
subs r2, #2
ldrb r3, [r2, #0]
cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq system_call_enter
cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq system_call_enter_1
cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
ldr r3, [r0, #24]
subs r3, #2
ldrb r2, [r3, #0]
cmp r2, #NUM_SYSTEM_CALLS
blt system_call_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq system_call_exit
b vPortSVCHandler_C
system_call_enter:
b vSystemCallEnter
system_call_enter_1:
b vSystemCallEnter_1
system_call_exit:
b vSystemCallExit

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore needs a check
* to take the privileged path when called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -44,7 +47,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -86,7 +88,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -205,7 +207,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -455,11 +457,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -467,10 +467,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
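The hunk above replaces the two dedicated ENTER/ENTER_1 comparisons with a single range check. A C rendering of the resulting dispatch is sketched below; the prototypes are inferred from the registers the assembly passes (r0 = stacked frame, r1 = EXC_RETURN, r2 = SVC number) and should be treated as assumptions rather than the port's exact signatures.

/* Hypothetical C rendering of the SVC dispatch above. */
#include <stdint.h>
#include "mpu_syscall_numbers.h"   /* NUM_SYSTEM_CALLS. */

#define portSVC_SYSTEM_CALL_EXIT    104

extern void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR, uint8_t ucSystemCallNumber );
extern void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR );
extern void vPortSVCHandler_C( uint32_t * pulCallerStackAddress );

void vSvcDispatchSketch( uint32_t * pulStackFrame, uint32_t ulLR )
{
    /* The stacked return address sits at offset 24 in the exception frame;
     * the SVC immediate is encoded in the two bytes just before it. */
    uint8_t ucSvcNumber = ( ( uint8_t * ) pulStackFrame[ 6 ] )[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulStackFrame, ulLR, ucSvcNumber );  /* 0 .. NUM_SYSTEM_CALLS - 1. */
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStackFrame, ulLR );                /* 104. */
    }
    else
    {
        vPortSVCHandler_C( pulStackFrame );                    /* Remaining kernel SVCs (100 and above). */
    }
}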
@ -493,7 +489,7 @@ vPortFreeSecureContext:
ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore needs a check
* to take the privileged path when called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -41,7 +44,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vPortSVCHandler_C
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -191,7 +193,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -371,11 +373,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -383,10 +383,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
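With the kernel SVCs moved up to 100..105, the numbers 0 .. NUM_SYSTEM_CALLS - 1 are left free for the system call identifiers in mpu_syscall_numbers.h. A port that wants to make that non-overlap assumption explicit could add a compile-time guard such as the one below; it is illustrative only and not part of this change.

/* Illustrative guard only - not part of this change. NUM_SYSTEM_CALLS comes
 * from mpu_syscall_numbers.h; the kernel SVC numbers start at 100. */
#include "mpu_syscall_numbers.h"

#if ( NUM_SYSTEM_CALLS > 100 )
    #error "System call numbers must not overlap the kernel SVC numbers (100 and above)."
#endif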
/**

View file

@ -44,6 +44,7 @@
#include "event_groups.h"
#include "stream_buffer.h"
#include "mpu_prototypes.h"
#include "mpu_syscall_numbers.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/*-----------------------------------------------------------*/
@ -1138,35 +1139,63 @@
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify,
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
UBaseType_t uxIndexToNotify,
uint32_t ulValue,
eNotifyAction eAction,
uint32_t * pulPreviousNotificationValue ) PRIVILEGED_FUNCTION;
uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
{
BaseType_t xReturn = pdFAIL;
xTaskGenericNotifyParams_t xParams;
BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify,
UBaseType_t uxIndexToNotify,
uint32_t ulValue,
eNotifyAction eAction,
uint32_t * pulPreviousNotificationValue ) /* PRIVILEGED_FUNCTION */
xParams.xTaskToNotify = xTaskToNotify;
xParams.uxIndexToNotify = uxIndexToNotify;
xParams.ulValue = ulValue;
xParams.eAction = eAction;
xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;
xReturn = MPU_xTaskGenericNotifyEntry( &( xParams ) );
return xReturn;
}
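For application code nothing changes: the parameter packing above happens inside the wrapper, so the notification API is used exactly as before. A minimal usage sketch follows; the task handle and notification value are illustrative.

#include "FreeRTOS.h"
#include "task.h"

/* Minimal usage sketch - xWorkerTask and the value 0x01 are illustrative.
 * xTaskNotify() expands to xTaskGenericNotify(), which the MPU port maps
 * onto MPU_xTaskGenericNotify() shown above. */
void vNotifyWorker( TaskHandle_t xWorkerTask )
{
    ( void ) xTaskNotify( xWorkerTask, 0x01UL, eSetBits );
}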
BaseType_t MPU_xTaskGenericNotifyImpl( const xTaskGenericNotifyParams_t * pxParams ) PRIVILEGED_FUNCTION;
BaseType_t MPU_xTaskGenericNotifyImpl( const xTaskGenericNotifyParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xReturn = pdFAIL;
int32_t lIndex;
TaskHandle_t xInternalTaskHandle = NULL;
BaseType_t xIsPreviousNotificationValueWriteable = pdFALSE;
BaseType_t xAreParamsReadable = pdFALSE;
if( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES )
if( pxParams != NULL )
{
if( pulPreviousNotificationValue != NULL )
xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
sizeof( xTaskGenericNotifyParams_t ),
tskMPU_READ_PERMISSION );
}
if( xAreParamsReadable == pdTRUE )
{
xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pulPreviousNotificationValue,
if( ( pxParams->uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ) &&
( ( pxParams->eAction == eNoAction ) ||
( pxParams->eAction == eSetBits ) ||
( pxParams->eAction == eIncrement ) ||
( pxParams->eAction == eSetValueWithOverwrite ) ||
( pxParams->eAction == eSetValueWithoutOverwrite ) ) )
{
if( pxParams->pulPreviousNotificationValue != NULL )
{
xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pxParams->pulPreviousNotificationValue,
sizeof( uint32_t ),
tskMPU_WRITE_PERMISSION );
}
if( ( pulPreviousNotificationValue == NULL ) || ( xIsPreviousNotificationValueWriteable == pdTRUE ) )
if( ( pxParams->pulPreviousNotificationValue == NULL ) ||
( xIsPreviousNotificationValueWriteable == pdTRUE ) )
{
lIndex = ( int32_t ) xTaskToNotify;
lIndex = ( int32_t ) ( pxParams->xTaskToNotify );
if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
{
@ -1174,7 +1203,12 @@
if( xInternalTaskHandle != NULL )
{
xReturn = xTaskGenericNotify( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );
xReturn = xTaskGenericNotify( xInternalTaskHandle,
pxParams->uxIndexToNotify,
pxParams->ulValue,
pxParams->eAction,
pxParams->pulPreviousNotificationValue );
}
}
}
}
@ -1188,33 +1222,61 @@
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn,
BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
uint32_t ulBitsToClearOnEntry,
uint32_t ulBitsToClearOnExit,
uint32_t * pulNotificationValue,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
TickType_t xTicksToWait )
{
BaseType_t xReturn = pdFAIL;
xTaskGenericNotifyWaitParams_t xParams;
BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn,
uint32_t ulBitsToClearOnEntry,
uint32_t ulBitsToClearOnExit,
uint32_t * pulNotificationValue,
TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
xParams.uxIndexToWaitOn = uxIndexToWaitOn;
xParams.ulBitsToClearOnEntry = ulBitsToClearOnEntry;
xParams.ulBitsToClearOnExit = ulBitsToClearOnExit;
xParams.pulNotificationValue = pulNotificationValue;
xParams.xTicksToWait = xTicksToWait;
xReturn = MPU_xTaskGenericNotifyWaitEntry( &( xParams ) );
return xReturn;
}
BaseType_t MPU_xTaskGenericNotifyWaitImpl( const xTaskGenericNotifyWaitParams_t * pxParams ) PRIVILEGED_FUNCTION;
BaseType_t MPU_xTaskGenericNotifyWaitImpl( const xTaskGenericNotifyWaitParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xReturn = pdFAIL;
BaseType_t xIsNotificationValueWritable = pdFALSE;
BaseType_t xAreParamsReadable = pdFALSE;
if( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES )
if( pxParams != NULL )
{
if( pulNotificationValue != NULL )
xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
sizeof( xTaskGenericNotifyWaitParams_t ),
tskMPU_READ_PERMISSION );
}
if( xAreParamsReadable == pdTRUE )
{
xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pulNotificationValue,
if( pxParams->uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES )
{
if( pxParams->pulNotificationValue != NULL )
{
xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pxParams->pulNotificationValue,
sizeof( uint32_t ),
tskMPU_WRITE_PERMISSION );
}
if( ( pulNotificationValue == NULL ) || ( xIsNotificationValueWritable == pdTRUE ) )
if( ( pxParams->pulNotificationValue == NULL ) ||
( xIsNotificationValueWritable == pdTRUE ) )
{
xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
xReturn = xTaskGenericNotifyWait( pxParams->uxIndexToWaitOn,
pxParams->ulBitsToClearOnEntry,
pxParams->ulBitsToClearOnExit,
pxParams->pulNotificationValue,
pxParams->xTicksToWait );
}
}
}
@ -2113,6 +2175,7 @@
BaseType_t xReturn = pdFAIL;
int32_t lIndex;
QueueHandle_t xInternalQueueHandle = NULL;
UBaseType_t uxQueueItemSize;
lIndex = ( int32_t ) xMutex;
@ -2121,10 +2184,15 @@
xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
if( xInternalQueueHandle != NULL )
{
uxQueueItemSize = uxQueueGetQueueItemSize( xInternalQueueHandle );
if( uxQueueItemSize == 0 )
{
xReturn = xQueueTakeMutexRecursive( xInternalQueueHandle, xBlockTime );
}
}
}
return xReturn;
}
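The uxQueueItemSize check above relies on the fact that mutexes are created as queues with an item size of zero, so a handle that refers to an ordinary queue is now rejected at run time instead of being handed to xQueueTakeMutexRecursive(). A hedged illustration follows, assuming configUSE_RECURSIVE_MUTEXES is enabled; the handles and timeout are made up.

#include "FreeRTOS.h"
#include "queue.h"
#include "semphr.h"

/* Illustration only - the handles and timeout are made up. */
void vRecursiveMutexCheckExample( SemaphoreHandle_t xRecursiveMutex,
                                  QueueHandle_t xPlainQueue )
{
    /* Accepted: a recursive mutex is a queue whose item size is zero. */
    if( xSemaphoreTakeRecursive( xRecursiveMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
    {
        ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
    }

    /* Rejected by the wrapper above: an ordinary queue has a non-zero item
     * size, so the call now returns pdFAIL instead of operating on a handle
     * that is not a mutex. */
    if( xQueueTakeMutexRecursive( xPlainQueue, 0 ) == pdFAIL )
    {
        /* Expected in an unprivileged task under MPU wrappers v2. */
    }
}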
@ -2995,33 +3063,56 @@
#if ( configUSE_TIMERS == 1 )
BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer,
BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer,
const BaseType_t xCommandID,
const TickType_t xOptionalValue,
BaseType_t * const pxHigherPriorityTaskWoken,
const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
{
BaseType_t xReturn = pdFALSE;
xTimerGenericCommandParams_t xParams;
BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer,
const BaseType_t xCommandID,
const TickType_t xOptionalValue,
BaseType_t * const pxHigherPriorityTaskWoken,
const TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
xParams.xTimer = xTimer;
xParams.xCommandID = xCommandID;
xParams.xOptionalValue = xOptionalValue;
xParams.pxHigherPriorityTaskWoken = pxHigherPriorityTaskWoken;
xParams.xTicksToWait = xTicksToWait;
xReturn = MPU_xTimerGenericCommandEntry( &( xParams ) );
return xReturn;
}
BaseType_t MPU_xTimerGenericCommandImpl( const xTimerGenericCommandParams_t * pxParams ) PRIVILEGED_FUNCTION;
BaseType_t MPU_xTimerGenericCommandImpl( const xTimerGenericCommandParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xReturn = pdFALSE;
TimerHandle_t xInternalTimerHandle = NULL;
int32_t lIndex;
BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE;
BaseType_t xAreParamsReadable = pdFALSE;
if( pxHigherPriorityTaskWoken != NULL )
if( pxParams != NULL )
{
xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxHigherPriorityTaskWoken,
xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
sizeof( xTimerGenericCommandParams_t ),
tskMPU_READ_PERMISSION );
}
if( xAreParamsReadable == pdTRUE )
{
if( pxParams->pxHigherPriorityTaskWoken != NULL )
{
xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxParams->pxHigherPriorityTaskWoken,
sizeof( BaseType_t ),
tskMPU_WRITE_PERMISSION );
}
if( ( pxHigherPriorityTaskWoken == NULL ) || ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) )
if( ( pxParams->pxHigherPriorityTaskWoken == NULL ) ||
( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) )
{
lIndex = ( int32_t ) xTimer;
lIndex = ( int32_t ) ( pxParams->xTimer );
if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
{
@ -3029,7 +3120,42 @@
if( xInternalTimerHandle != NULL )
{
xReturn = xTimerGenericCommand( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait );
xReturn = xTimerGenericCommand( xInternalTimerHandle,
pxParams->xCommandID,
pxParams->xOptionalValue,
pxParams->pxHigherPriorityTaskWoken,
pxParams->xTicksToWait );
}
}
}
}
return xReturn;
}
BaseType_t MPU_xTimerGenericCommandPrivImpl( const xTimerGenericCommandParams_t * pxParams ) PRIVILEGED_FUNCTION;
BaseType_t MPU_xTimerGenericCommandPrivImpl( const xTimerGenericCommandParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xReturn = pdFALSE;
TimerHandle_t xInternalTimerHandle = NULL;
int32_t lIndex;
if( pxParams != NULL )
{
lIndex = ( int32_t ) ( pxParams->xTimer );
if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
{
xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) );
if( xInternalTimerHandle != NULL )
{
xReturn = xTimerGenericCommand( xInternalTimerHandle,
pxParams->xCommandID,
pxParams->xOptionalValue,
pxParams->pxHigherPriorityTaskWoken,
pxParams->xTicksToWait );
}
}
}
@ -3314,30 +3440,52 @@
/* MPU wrappers for event group APIs. */
/*-----------------------------------------------------------*/
EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup,
EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToWaitFor,
const BaseType_t xClearOnExit,
const BaseType_t xWaitForAllBits,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */
{
EventBits_t xReturn = 0;
xEventGroupWaitBitsParams_t xParams;
EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToWaitFor,
const BaseType_t xClearOnExit,
const BaseType_t xWaitForAllBits,
TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */
xParams.xEventGroup = xEventGroup;
xParams.uxBitsToWaitFor = uxBitsToWaitFor;
xParams.xClearOnExit = xClearOnExit;
xParams.xWaitForAllBits = xWaitForAllBits;
xParams.xTicksToWait = xTicksToWait;
xReturn = MPU_xEventGroupWaitBitsEntry( &( xParams ) );
return xReturn;
}
EventBits_t MPU_xEventGroupWaitBitsImpl( const xEventGroupWaitBitsParams_t * pxParams ) PRIVILEGED_FUNCTION;
EventBits_t MPU_xEventGroupWaitBitsImpl( const xEventGroupWaitBitsParams_t * pxParams ) /* PRIVILEGED_FUNCTION */
{
EventBits_t xReturn = 0;
EventGroupHandle_t xInternalEventGroupHandle = NULL;
int32_t lIndex;
BaseType_t xAreParamsReadable = pdFALSE;
if( ( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
( uxBitsToWaitFor != 0 )
if( pxParams != NULL )
{
xAreParamsReadable = xPortIsAuthorizedToAccessBuffer( pxParams,
sizeof( xEventGroupWaitBitsParams_t ),
tskMPU_READ_PERMISSION );
}
if( xAreParamsReadable == pdTRUE )
{
if( ( ( pxParams->uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ) &&
( pxParams->uxBitsToWaitFor != 0 )
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
&& ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) )
&& ( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( pxParams->xTicksToWait != 0 ) ) )
#endif
)
{
lIndex = ( int32_t ) xEventGroup;
lIndex = ( int32_t ) ( pxParams->xEventGroup );
if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE )
{
@ -3345,7 +3493,12 @@
if( xInternalEventGroupHandle != NULL )
{
xReturn = xEventGroupWaitBits( xInternalEventGroupHandle, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait );
xReturn = xEventGroupWaitBits( xInternalEventGroupHandle,
pxParams->uxBitsToWaitFor,
pxParams->xClearOnExit,
pxParams->xWaitForAllBits,
pxParams->xTicksToWait );
}
}
}
}
@ -4223,5 +4376,246 @@
#endif
/*-----------------------------------------------------------*/
/**
* @brief Array of system call implementation functions.
*
* The index in the array MUST match the corresponding system call number
* defined in mpu_syscall_numbers.h.
*/
PRIVILEGED_DATA UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ] =
{
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
( UBaseType_t ) MPU_xTaskGenericNotifyImpl, /* SYSTEM_CALL_xTaskGenericNotify. */
( UBaseType_t ) MPU_xTaskGenericNotifyWaitImpl, /* SYSTEM_CALL_xTaskGenericNotifyWait. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotify. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotifyWait. */
#endif
#if ( configUSE_TIMERS == 1 )
( UBaseType_t ) MPU_xTimerGenericCommandImpl, /* SYSTEM_CALL_xTimerGenericCommand. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGenericCommand. */
#endif
( UBaseType_t ) MPU_xEventGroupWaitBitsImpl, /* SYSTEM_CALL_xEventGroupWaitBits. */
/* The system calls above this line take 5 parameters. */
#if ( INCLUDE_xTaskDelayUntil == 1 )
( UBaseType_t ) MPU_xTaskDelayUntilImpl, /* SYSTEM_CALL_xTaskDelayUntil. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskDelayUntil. */
#endif
#if ( INCLUDE_xTaskAbortDelay == 1 )
( UBaseType_t ) MPU_xTaskAbortDelayImpl, /* SYSTEM_CALL_xTaskAbortDelay. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskAbortDelay. */
#endif
#if ( INCLUDE_vTaskDelay == 1 )
( UBaseType_t ) MPU_vTaskDelayImpl, /* SYSTEM_CALL_vTaskDelay. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskDelay. */
#endif
#if ( INCLUDE_uxTaskPriorityGet == 1 )
( UBaseType_t ) MPU_uxTaskPriorityGetImpl, /* SYSTEM_CALL_uxTaskPriorityGet. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskPriorityGet. */
#endif
#if ( INCLUDE_eTaskGetState == 1 )
( UBaseType_t ) MPU_eTaskGetStateImpl, /* SYSTEM_CALL_eTaskGetState. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_eTaskGetState. */
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
( UBaseType_t ) MPU_vTaskGetInfoImpl, /* SYSTEM_CALL_vTaskGetInfo. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskGetInfo. */
#endif
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
( UBaseType_t ) MPU_xTaskGetIdleTaskHandleImpl, /* SYSTEM_CALL_xTaskGetIdleTaskHandle. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetIdleTaskHandle. */
#endif
#if ( INCLUDE_vTaskSuspend == 1 )
( UBaseType_t ) MPU_vTaskSuspendImpl, /* SYSTEM_CALL_vTaskSuspend. */
( UBaseType_t ) MPU_vTaskResumeImpl, /* SYSTEM_CALL_vTaskResume. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSuspend. */
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskResume. */
#endif
( UBaseType_t ) MPU_xTaskGetTickCountImpl, /* SYSTEM_CALL_xTaskGetTickCount. */
( UBaseType_t ) MPU_uxTaskGetNumberOfTasksImpl, /* SYSTEM_CALL_uxTaskGetNumberOfTasks. */
( UBaseType_t ) MPU_pcTaskGetNameImpl, /* SYSTEM_CALL_pcTaskGetName. */
#if ( configGENERATE_RUN_TIME_STATS == 1 )
( UBaseType_t ) MPU_ulTaskGetRunTimeCounterImpl, /* SYSTEM_CALL_ulTaskGetRunTimeCounter. */
( UBaseType_t ) MPU_ulTaskGetRunTimePercentImpl, /* SYSTEM_CALL_ulTaskGetRunTimePercent. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetRunTimeCounter. */
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetRunTimePercent. */
#endif
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
( UBaseType_t ) MPU_ulTaskGetIdleRunTimePercentImpl, /* SYSTEM_CALL_ulTaskGetIdleRunTimePercent. */
( UBaseType_t ) MPU_ulTaskGetIdleRunTimeCounterImpl, /* SYSTEM_CALL_ulTaskGetIdleRunTimeCounter. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetIdleRunTimePercent. */
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGetIdleRunTimeCounter. */
#endif
#if ( configUSE_APPLICATION_TASK_TAG == 1 )
( UBaseType_t ) MPU_vTaskSetApplicationTaskTagImpl, /* SYSTEM_CALL_vTaskSetApplicationTaskTag. */
( UBaseType_t ) MPU_xTaskGetApplicationTaskTagImpl, /* SYSTEM_CALL_xTaskGetApplicationTaskTag. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSetApplicationTaskTag. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetApplicationTaskTag. */
#endif
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
( UBaseType_t ) MPU_vTaskSetThreadLocalStoragePointerImpl, /* SYSTEM_CALL_vTaskSetThreadLocalStoragePointer. */
( UBaseType_t ) MPU_pvTaskGetThreadLocalStoragePointerImpl, /* SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vTaskSetThreadLocalStoragePointer. */
( UBaseType_t ) 0, /* SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer. */
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
( UBaseType_t ) MPU_uxTaskGetSystemStateImpl, /* SYSTEM_CALL_uxTaskGetSystemState. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetSystemState. */
#endif
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
( UBaseType_t ) MPU_uxTaskGetStackHighWaterMarkImpl, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark. */
#endif
#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
( UBaseType_t ) MPU_uxTaskGetStackHighWaterMark2Impl, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark2. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_uxTaskGetStackHighWaterMark2. */
#endif
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
( UBaseType_t ) MPU_xTaskGetCurrentTaskHandleImpl, /* SYSTEM_CALL_xTaskGetCurrentTaskHandle. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetCurrentTaskHandle. */
#endif
#if ( INCLUDE_xTaskGetSchedulerState == 1 )
( UBaseType_t ) MPU_xTaskGetSchedulerStateImpl, /* SYSTEM_CALL_xTaskGetSchedulerState. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGetSchedulerState. */
#endif
( UBaseType_t ) MPU_vTaskSetTimeOutStateImpl, /* SYSTEM_CALL_vTaskSetTimeOutState. */
( UBaseType_t ) MPU_xTaskCheckForTimeOutImpl, /* SYSTEM_CALL_xTaskCheckForTimeOut. */
#if ( configUSE_TASK_NOTIFICATIONS == 1 )
( UBaseType_t ) MPU_ulTaskGenericNotifyTakeImpl, /* SYSTEM_CALL_ulTaskGenericNotifyTake. */
( UBaseType_t ) MPU_xTaskGenericNotifyStateClearImpl, /* SYSTEM_CALL_xTaskGenericNotifyStateClear. */
( UBaseType_t ) MPU_ulTaskGenericNotifyValueClearImpl, /* SYSTEM_CALL_ulTaskGenericNotifyValueClear. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGenericNotifyTake. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTaskGenericNotifyStateClear. */
( UBaseType_t ) 0, /* SYSTEM_CALL_ulTaskGenericNotifyValueClear. */
#endif
( UBaseType_t ) MPU_xQueueGenericSendImpl, /* SYSTEM_CALL_xQueueGenericSend. */
( UBaseType_t ) MPU_uxQueueMessagesWaitingImpl, /* SYSTEM_CALL_uxQueueMessagesWaiting. */
( UBaseType_t ) MPU_uxQueueSpacesAvailableImpl, /* SYSTEM_CALL_uxQueueSpacesAvailable. */
( UBaseType_t ) MPU_xQueueReceiveImpl, /* SYSTEM_CALL_xQueueReceive. */
( UBaseType_t ) MPU_xQueuePeekImpl, /* SYSTEM_CALL_xQueuePeek. */
( UBaseType_t ) MPU_xQueueSemaphoreTakeImpl, /* SYSTEM_CALL_xQueueSemaphoreTake. */
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
( UBaseType_t ) MPU_xQueueGetMutexHolderImpl, /* SYSTEM_CALL_xQueueGetMutexHolder. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueGetMutexHolder. */
#endif
#if ( configUSE_RECURSIVE_MUTEXES == 1 )
( UBaseType_t ) MPU_xQueueTakeMutexRecursiveImpl, /* SYSTEM_CALL_xQueueTakeMutexRecursive. */
( UBaseType_t ) MPU_xQueueGiveMutexRecursiveImpl, /* SYSTEM_CALL_xQueueGiveMutexRecursive. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueTakeMutexRecursive. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueGiveMutexRecursive. */
#endif
#if ( configUSE_QUEUE_SETS == 1 )
( UBaseType_t ) MPU_xQueueSelectFromSetImpl, /* SYSTEM_CALL_xQueueSelectFromSet. */
( UBaseType_t ) MPU_xQueueAddToSetImpl, /* SYSTEM_CALL_xQueueAddToSet. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueSelectFromSet. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xQueueAddToSet. */
#endif
#if configQUEUE_REGISTRY_SIZE > 0
( UBaseType_t ) MPU_vQueueAddToRegistryImpl, /* SYSTEM_CALL_vQueueAddToRegistry. */
( UBaseType_t ) MPU_vQueueUnregisterQueueImpl, /* SYSTEM_CALL_vQueueUnregisterQueue. */
( UBaseType_t ) MPU_pcQueueGetNameImpl, /* SYSTEM_CALL_pcQueueGetName. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_vQueueAddToRegistry. */
( UBaseType_t ) 0, /* SYSTEM_CALL_vQueueUnregisterQueue. */
( UBaseType_t ) 0, /* SYSTEM_CALL_pcQueueGetName. */
#endif
#if ( configUSE_TIMERS == 1 )
( UBaseType_t ) MPU_pvTimerGetTimerIDImpl, /* SYSTEM_CALL_pvTimerGetTimerID. */
( UBaseType_t ) MPU_vTimerSetTimerIDImpl, /* SYSTEM_CALL_vTimerSetTimerID. */
( UBaseType_t ) MPU_xTimerIsTimerActiveImpl, /* SYSTEM_CALL_xTimerIsTimerActive. */
( UBaseType_t ) MPU_xTimerGetTimerDaemonTaskHandleImpl, /* SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle. */
( UBaseType_t ) MPU_pcTimerGetNameImpl, /* SYSTEM_CALL_pcTimerGetName. */
( UBaseType_t ) MPU_vTimerSetReloadModeImpl, /* SYSTEM_CALL_vTimerSetReloadMode. */
( UBaseType_t ) MPU_xTimerGetReloadModeImpl, /* SYSTEM_CALL_xTimerGetReloadMode. */
( UBaseType_t ) MPU_uxTimerGetReloadModeImpl, /* SYSTEM_CALL_uxTimerGetReloadMode. */
( UBaseType_t ) MPU_xTimerGetPeriodImpl, /* SYSTEM_CALL_xTimerGetPeriod. */
( UBaseType_t ) MPU_xTimerGetExpiryTimeImpl, /* SYSTEM_CALL_xTimerGetExpiryTime. */
#else /* if ( configUSE_TIMERS == 1 ) */
( UBaseType_t ) 0, /* SYSTEM_CALL_pvTimerGetTimerID. */
( UBaseType_t ) 0, /* SYSTEM_CALL_vTimerSetTimerID. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerIsTimerActive. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle. */
( UBaseType_t ) 0, /* SYSTEM_CALL_pcTimerGetName. */
( UBaseType_t ) 0, /* SYSTEM_CALL_vTimerSetReloadMode. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetReloadMode. */
( UBaseType_t ) 0, /* SYSTEM_CALL_uxTimerGetReloadMode. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetPeriod. */
( UBaseType_t ) 0, /* SYSTEM_CALL_xTimerGetExpiryTime. */
#endif /* if ( configUSE_TIMERS == 1 ) */
( UBaseType_t ) MPU_xEventGroupClearBitsImpl, /* SYSTEM_CALL_xEventGroupClearBits. */
( UBaseType_t ) MPU_xEventGroupSetBitsImpl, /* SYSTEM_CALL_xEventGroupSetBits. */
( UBaseType_t ) MPU_xEventGroupSyncImpl, /* SYSTEM_CALL_xEventGroupSync. */
#if ( configUSE_TRACE_FACILITY == 1 )
( UBaseType_t ) MPU_uxEventGroupGetNumberImpl, /* SYSTEM_CALL_uxEventGroupGetNumber. */
( UBaseType_t ) MPU_vEventGroupSetNumberImpl, /* SYSTEM_CALL_vEventGroupSetNumber. */
#else
( UBaseType_t ) 0, /* SYSTEM_CALL_uxEventGroupGetNumber. */
( UBaseType_t ) 0, /* SYSTEM_CALL_vEventGroupSetNumber. */
#endif
( UBaseType_t ) MPU_xStreamBufferSendImpl, /* SYSTEM_CALL_xStreamBufferSend. */
( UBaseType_t ) MPU_xStreamBufferReceiveImpl, /* SYSTEM_CALL_xStreamBufferReceive. */
( UBaseType_t ) MPU_xStreamBufferIsFullImpl, /* SYSTEM_CALL_xStreamBufferIsFull. */
( UBaseType_t ) MPU_xStreamBufferIsEmptyImpl, /* SYSTEM_CALL_xStreamBufferIsEmpty. */
( UBaseType_t ) MPU_xStreamBufferSpacesAvailableImpl, /* SYSTEM_CALL_xStreamBufferSpacesAvailable. */
( UBaseType_t ) MPU_xStreamBufferBytesAvailableImpl, /* SYSTEM_CALL_xStreamBufferBytesAvailable. */
( UBaseType_t ) MPU_xStreamBufferSetTriggerLevelImpl, /* SYSTEM_CALL_xStreamBufferSetTriggerLevel. */
( UBaseType_t ) MPU_xStreamBufferNextMessageLengthBytesImpl /* SYSTEM_CALL_xStreamBufferNextMessageLengthBytes. */
};
/*-----------------------------------------------------------*/
#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
/*-----------------------------------------------------------*/
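For orientation, the table above (uxSystemCallImplementations, indexed by system call number) is what the single-SVC entry path consults: vSystemCallEnter writes the selected MPU_<API>Impl address into the stacked PC so the implementation runs on exception return, and a zero entry means the corresponding API is disabled in FreeRTOSConfig.h. The sketch below is only an illustrative model of that lookup, with hypothetical names and a made-up table size; the kernel itself stores the addresses as UBaseType_t, as shown above, and never calls them through a C function pointer.

/* Minimal sketch of the dispatch idea, with hypothetical names. The real
 * kernel keeps the addresses in uxSystemCallImplementations[] as UBaseType_t
 * and installs the chosen one into the stacked PC inside vSystemCallEnter. */

#include <stddef.h>
#include <stdint.h>

typedef uint32_t ( * SystemCallImpl_t )( void );  /* Hypothetical entry type. */

#define EXAMPLE_NUM_SYSTEM_CALLS    4U            /* Hypothetical table size. */

static uint32_t prvExampleImpl( void )            /* Stands in for an MPU_<API>Impl. */
{
    return 1U;
}

/* Dispatch table indexed by system call number; disabled APIs hold NULL,
 * mirroring the zero entries in the table above. */
static const SystemCallImpl_t xExampleImplementations[ EXAMPLE_NUM_SYSTEM_CALLS ] =
{
    NULL,           /* API compiled out by the configuration. */
    prvExampleImpl, /* Enabled API. */
    NULL,
    NULL
};

/* Model of the enter-path decision: run the implementation only if the
 * number maps to an enabled API. */
static uint32_t prvDispatchExample( uint8_t ucSystemCallNumber )
{
    uint32_t ulResult = 0U;

    if( ( ucSystemCallNumber < EXAMPLE_NUM_SYSTEM_CALLS ) &&
        ( xExampleImplementations[ ucSystemCallNumber ] != NULL ) )
    {
        ulResult = xExampleImplementations[ ucSystemCallNumber ]();
    }

    return ulResult;
}

In the real port, the lookup result is not called directly; it is written into pulSystemCallStack[ portOFFSET_TO_PC ] so the processor branches to it on exception return, with the stacked LR pointing at vRequestSystemCallExit so that the exit SVC is raised automatically when the implementation returns.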

File diff suppressed because it is too large.

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -46,8 +49,8 @@
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -143,36 +146,36 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r3, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */
" ldr r4, xSecureContextConst2 \n"
" str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */
" msr psplim, r2 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r3 \n"/* Finally, branch to EXC_RETURN. */
" bx r3 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -183,15 +186,15 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n"/* Return. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n" /* Return. */
" running_privileged: \n"
" movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "r1", "memory"
@ -205,11 +208,11 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" movs r1, #1 \n"/* r1 = 1. */
" bics r0, r1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -221,11 +224,11 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" orrs r0, r1 \n"/* r0 = r0 | r1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* r0 = r0 | r1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -237,14 +240,14 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -283,8 +286,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -438,111 +441,110 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRNRConst: .word 0xe000ed98 \n"
" xRBARConst: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
" mov r4, r8 \n"/* r4 = r8. */
" mov r5, r9 \n"/* r5 = r9. */
" mov r6, r10 \n"/* r6 = r10. */
" mov r7, r11 \n"/* r7 = r11. */
" stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3-r7} \n" /* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */
" mov r4, r8 \n" /* r4 = r8. */
" mov r5, r9 \n" /* r5 = r9. */
" mov r6, r10 \n" /* r6 = r10. */
" mov r7, r11 \n" /* r7 = r11. */
" stmia r2!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
" \n"
" select_next_task: \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" adds r2, r2, #16 \n"/* Move to the high registers. */
" ldmia r2!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n"/* r8 = r4. */
" mov r9, r5 \n"/* r9 = r5. */
" mov r10, r6 \n"/* r10 = r6. */
" mov r11, r7 \n"/* r11 = r7. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" subs r2, r2, #32 \n"/* Go back to the low registers. */
" ldmia r2!, {r4-r7} \n"/* Restore the low registers that are not automatically restored. */
" adds r2, r2, #16 \n" /* Move to the high registers. */
" ldmia r2!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n" /* r8 = r4. */
" mov r9, r5 \n" /* r9 = r5. */
" mov r10, r6 \n" /* r10 = r6. */
" mov r11, r7 \n" /* r11 = r7. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" subs r2, r2, #32 \n" /* Go back to the low registers. */
" ldmia r2!, {r4-r7} \n" /* Restore the low registers that are not automatically restored. */
" bx lr \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
"xSecureContextConst: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"movs r0, #4 \n"
@ -557,34 +559,30 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" b route_svc \n"
" \n"
"route_svc: \n"
" ldr r2, [r0, #24] \n"
" subs r2, #2 \n"
" ldrb r3, [r2, #0] \n"
" cmp r3, %0 \n"
" beq system_call_enter \n"
" cmp r3, %1 \n"
" beq system_call_enter_1 \n"
" cmp r3, %2 \n"
" ldr r3, [r0, #24] \n"
" subs r3, #2 \n"
" ldrb r2, [r3, #0] \n"
" cmp r2, %0 \n"
" blt system_call_enter \n"
" cmp r2, %1 \n"
" beq system_call_exit \n"
" b vPortSVCHandler_C \n"
" \n"
"system_call_enter: \n"
" b vSystemCallEnter \n"
"system_call_enter_1: \n"
" b vSystemCallEnter_1 \n"
"system_call_exit: \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "r3", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -604,7 +602,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -615,8 +613,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -628,14 +626,14 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" bne free_secure_context \n"/* Branch if r1 != 0. */
" bx lr \n"/* There is no secure context (xSecureContext is NULL). */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" bne free_secure_context \n" /* Branch if r1 != 0. */
" bx lr \n" /* There is no secure context (xSecureContext is NULL). */
" free_secure_context: \n"
" svc %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
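A note on the routing in SVC_Handler above: with the single-SVC scheme, the handler no longer matches separate enter SVC numbers. It reads the 8-bit immediate from the SVC instruction itself (the stacked PC, at byte offset 24 of the exception frame, points just past the 2-byte opcode) and branches to vSystemCallEnter for any number below NUM_SYSTEM_CALLS, to vSystemCallExit for portSVC_SYSTEM_CALL_EXIT, and to vPortSVCHandler_C otherwise. The following is a hedged C model of that decision only; the constant values are placeholders, and the authoritative logic is the naked assembly above.

/* Illustrative C model of the SVC routing performed by the assembly in
 * SVC_Handler above. Constant values are placeholders for the port macros. */

#include <stdint.h>

#define EXAMPLE_NUM_SYSTEM_CALLS        64U   /* Placeholder for NUM_SYSTEM_CALLS. */
#define EXAMPLE_SVC_SYSTEM_CALL_EXIT    104U  /* portSVC_SYSTEM_CALL_EXIT per portmacro.h. */

typedef enum
{
    eRouteSystemCallEnter, /* -> vSystemCallEnter( pulTaskStack, ulLR, ucSvcNumber ). */
    eRouteSystemCallExit,  /* -> vSystemCallExit( pulSystemCallStack, ulLR ). */
    eRouteKernelSvc        /* -> vPortSVCHandler_C( pulCallerStackAddress ). */
} ExampleSvcRoute_t;

static ExampleSvcRoute_t prvRouteSvc( const uint32_t * pulStackedFrame )
{
    ExampleSvcRoute_t eRoute;

    /* The stacked PC is the 7th word of the basic exception frame (byte
     * offset 24) and points to the instruction after the 2-byte SVC. */
    uintptr_t uxReturnAddress = ( uintptr_t ) pulStackedFrame[ 6 ];
    uint8_t ucSvcNumber = *( ( const uint8_t * ) ( uxReturnAddress - 2U ) );

    if( ucSvcNumber < EXAMPLE_NUM_SYSTEM_CALLS )
    {
        eRoute = eRouteSystemCallEnter;
    }
    else if( ucSvcNumber == EXAMPLE_SVC_SYSTEM_CALL_EXIT )
    {
        eRoute = eRouteSystemCallExit;
    }
    else
    {
        eRoute = eRouteKernelSvc;
    }

    return eRoute;
}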

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
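The renumbering above frees the low SVC numbers: the kernel's own SVCs now start at 100, so every value below NUM_SYSTEM_CALLS can be encoded directly in the SVC immediate as a system call number, and the separate portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_ENTER_1 numbers are no longer needed. As a hedged illustration only, an unprivileged entry point built on this scheme could look like the sketch below; the wrapper name and call number are hypothetical, and the kernel's real MPU_<API> entry points include privilege checks and linker-section attributes that are omitted here.

/* Hypothetical single-SVC entry point. The system call number is encoded
 * in the SVC immediate, so one SVC both identifies and enters the call;
 * the exit SVC ( portSVC_SYSTEM_CALL_EXIT ) is raised internally by
 * vRequestSystemCallExit, and control then returns to this wrapper's
 * caller. Result, if any, is returned in r0 by the Impl; a naked
 * function needs no C return statement. */

#include <stdint.h>

#define SYSTEM_CALL_xExampleApi    7 /* Hypothetical system call number. */

uint32_t MPU_xExampleApi( void ) __attribute__( ( naked ) );

uint32_t MPU_xExampleApi( void ) /* __attribute__( ( naked ) ) */
{
    __asm volatile
    (
        " svc %0 \n" /* Enter the system call; the handler reads the number from the immediate. */
        ::"i" ( SYSTEM_CALL_xExampleApi ) : "memory"
    );
}

This is also why vSystemCallEnter records the stacked LR: it is the only state needed to return to the wrapper's caller once the Impl finishes and vRequestSystemCallExit raises portSVC_SYSTEM_CALL_EXIT.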
/**

File diff suppressed because it is too large.

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
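With the separate five-parameter entry (vSystemCallEnter_1) removed, an API that takes five arguments no longer passes its fifth argument on the task stack; the caller-side wrapper packs everything into a single struct and the system call receives one pointer in r0. A minimal sketch of that packing, using the xTaskGenericNotifyParams_t struct and the MPU_xTaskGenericNotifyEntry system call declared in mpu_prototypes.h; the helper name prvNotifyPackingExample is invented for illustration, and the real packing lives in the MPU wrappers, not in this port file:

    static BaseType_t prvNotifyPackingExample( TaskHandle_t xTaskToNotify,
                                               UBaseType_t uxIndexToNotify,
                                               uint32_t ulValue,
                                               eNotifyAction eAction,
                                               uint32_t * pulPreviousNotificationValue )
    {
        xTaskGenericNotifyParams_t xParams;

        /* Pack the five parameters into one struct... */
        xParams.xTaskToNotify = xTaskToNotify;
        xParams.uxIndexToNotify = uxIndexToNotify;
        xParams.ulValue = ulValue;
        xParams.eAction = eAction;
        xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;

        /* ...so the system call entry needs only one register-passed argument. */
        return MPU_xTaskGenericNotifyEntry( &xParams );
    }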
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;


@ -36,6 +36,9 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE needs to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -46,8 +49,8 @@
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -140,33 +143,33 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -177,15 +180,15 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" tst r0, r1 \n"/* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n"/* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n"/* Return. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" tst r0, r1 \n" /* Perform r0 & r1 (bitwise AND) and update the conditions flag. */
" beq running_privileged \n" /* If the result of previous AND operation was 0, branch. */
" movs r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" bx lr \n" /* Return. */
" running_privileged: \n"
" movs r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movs r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "r1", "memory"
@ -199,11 +202,11 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" movs r1, #1 \n"/* r1 = 1. */
" bics r0, r1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -215,11 +218,11 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" movs r1, #1 \n"/* r1 = 1. */
" orrs r0, r1 \n"/* r0 = r0 | r1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* r0 = r0 | r1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "r1", "memory"
);
}
@ -231,14 +234,14 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -277,8 +280,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -401,68 +404,67 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRNRConst: .word 0xe000ed98 \n"
" xRBARConst: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
" mov r4, r8 \n"/* r4 = r8. */
" mov r5, r9 \n"/* r5 = r9. */
" mov r6, r10 \n"/* r6 = r10. */
" mov r7, r11 \n"/* r7 = r11. */
" stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */
" mrs r0, psp \n" /* Read PSP in r0. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" subs r0, r0, #40 \n" /* Make space for PSPLIM, LR and the remaining registers on the stack. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r0!, {r2-r7} \n" /* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */
" mov r4, r8 \n" /* r4 = r8. */
" mov r5, r9 \n" /* r5 = r9. */
" mov r6, r10 \n" /* r6 = r10. */
" mov r7, r11 \n" /* r7 = r11. */
" stmia r0!, {r4-r7} \n" /* Store the high registers that are not saved automatically. */
" \n"
" cpsid i \n"
" bl vTaskSwitchContext \n"
" cpsie i \n"
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" adds r0, r0, #24 \n"/* Move to the high registers. */
" ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n"/* r8 = r4. */
" mov r9, r5 \n"/* r9 = r5. */
" mov r10, r6 \n"/* r10 = r6. */
" mov r11, r7 \n"/* r11 = r7. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" subs r0, r0, #40 \n"/* Move to the starting of the saved context. */
" ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" adds r0, r0, #24 \n" /* Move to the high registers. */
" ldmia r0!, {r4-r7} \n" /* Restore the high registers that are not automatically restored. */
" mov r8, r4 \n" /* r8 = r4. */
" mov r9, r5 \n" /* r9 = r5. */
" mov r10, r6 \n" /* r10 = r6. */
" mov r11, r7 \n" /* r11 = r7. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" subs r0, r0, #40 \n" /* Move to the starting of the saved context. */
" ldmia r0!, {r2-r7} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"movs r0, #4 \n"
@ -477,34 +479,30 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" b route_svc \n"
" \n"
"route_svc: \n"
" ldr r2, [r0, #24] \n"
" subs r2, #2 \n"
" ldrb r3, [r2, #0] \n"
" cmp r3, %0 \n"
" beq system_call_enter \n"
" cmp r3, %1 \n"
" beq system_call_enter_1 \n"
" cmp r3, %2 \n"
" ldr r3, [r0, #24] \n"
" subs r3, #2 \n"
" ldrb r2, [r3, #0] \n"
" cmp r2, %0 \n"
" blt system_call_enter \n"
" cmp r2, %1 \n"
" beq system_call_exit \n"
" b vPortSVCHandler_C \n"
" \n"
"system_call_enter: \n"
" b vSystemCallEnter \n"
"system_call_enter_1: \n"
" b vSystemCallEnter_1 \n"
"system_call_exit: \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "r3", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -524,7 +522,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
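For readability, here is a C sketch of the routing that the SVC_Handler above now performs; the helper name prvRouteSvcExample and its parameters are illustrative only, since the real dispatch has to stay in assembly:

    static void prvRouteSvcExample( uint32_t * pulTaskStack, uint32_t ulLR )
    {
        /* The stacked PC (byte offset 24, as in route_svc above) points just past
         * the SVC instruction; the SVC number is the low byte of that 16-bit
         * instruction, two bytes back. */
        uint8_t ucSvcNumber = ( ( uint8_t * ) pulTaskStack[ 6 ] )[ -2 ];

        if( ucSvcNumber < NUM_SYSTEM_CALLS )
        {
            /* System call numbers occupy 0 to NUM_SYSTEM_CALLS - 1. */
            vSystemCallEnter( pulTaskStack, ulLR, ucSvcNumber );
        }
        else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
        {
            vSystemCallExit( pulTaskStack, ulLR );
        }
        else
        {
            /* All other kernel SVCs. */
            vPortSVCHandler_C( pulTaskStack );
        }
    }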


@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
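The kernel's own SVC numbers now start at 100, which keeps them clear of the system call numbers 0 to NUM_SYSTEM_CALLS - 1 that the SVC handler shown earlier routes to vSystemCallEnter. Where mpu_syscall_numbers.h is in scope, that assumption could be guarded with a compile-time check along these lines (a sketch, not present in the source):

    #if ( portSVC_ALLOCATE_SECURE_CONTEXT < NUM_SYSTEM_CALLS )
        #error "Kernel SVC numbers must not overlap the system call numbers."
    #endif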
/**

File diff suppressed because it is too large.


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* pulTaskStack is non-NULL only for the duration of a system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* pulTaskStack is non-NULL only for the duration of a system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
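Taken together with vRequestSystemCallExit above, the unprivileged side of a system call now raises exactly one SVC. A rough sketch of the shape of an MPU_<API> entry point, assuming the macro SYSTEM_CALL_vTaskDelay from mpu_syscall_numbers.h; the real entry points live outside this port file and also handle the case where the caller is already privileged, so this is only an illustration of the single-SVC shape:

    void MPU_vTaskDelayExample( TickType_t xTicksToDelay ) __attribute__( ( naked ) );
    void MPU_vTaskDelayExample( TickType_t xTicksToDelay ) /* Parameter stays in r0 across the SVC. */
    {
        __asm volatile
        (
            " svc %0 \n" /* vSystemCallEnter redirects execution to the kernel implementation... */
            " nop    \n" /* ...and vSystemCallExit later returns straight to this function's caller. */
            : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
        );
    }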
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
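As a worked note on the padding handling inside vSystemCallEnter and vSystemCallExit above: the hardware may push one extra alignment word before the exception frame and records that in bit 9 of the stacked xPSR, so undoing the frame means skipping the frame words plus, possibly, that one padding word. A sketch of the arithmetic, with the helper name invented for illustration:

    static uint32_t * prvFrameEndExample( uint32_t * pulStackedFrame,
                                          uint32_t ulFrameWords )
    {
        uint32_t ulPaddingWords = 0UL;

        /* Bit 9 of the stacked xPSR records whether the hardware inserted one
         * alignment word before stacking the frame. */
        if( ( pulStackedFrame[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
        {
            ulPaddingWords = 1UL;
        }

        /* ulFrameWords is 8 for a basic frame and 26 when the FPU context is
         * stacked; the result is where the stack pointer was before the
         * exception stacked the frame. */
        return pulStackedFrame + ulFrameWords + ulPaddingWords;
    }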
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;


@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE needs to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -121,12 +124,12 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -151,7 +154,7 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -162,12 +165,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -181,10 +184,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -196,10 +199,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -211,15 +214,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -235,12 +238,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -252,10 +255,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -263,8 +266,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -411,96 +414,96 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n"/* r2 = r2 + 12. */
" stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n"/* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n" /* r2 = r2 + 12. */
" stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n" /* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
" ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
@ -508,21 +511,20 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"xSecureContextConst: .word xSecureContext \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -533,10 +535,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -544,24 +544,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -576,7 +572,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
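A note on the dispatch change above: the SVC immediate is now the system call number itself whenever it is below NUM_SYSTEM_CALLS (from mpu_syscall_numbers.h), so a single blt replaces the old portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_ENTER_1 comparisons. The following C fragment is only a sketch of what the handler assembly does (the stacked PC sits at byte offset 24 of the exception frame, and the SVC number is encoded in the two bytes preceding the return address); it is illustrative, not code from this commit.

void prvDispatchSVC( uint32_t * pulCallerStack, uint32_t ulLR ) /* Sketch only. */
{
    uint32_t ulStackedPC = pulCallerStack[ 6 ];                /* Same as "ldr r1, [r0, #24]". */
    uint8_t ucSvcNumber = ( ( uint8_t * ) ulStackedPC )[ -2 ]; /* Same as "ldrb r2, [r1, #-2]". */

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulCallerStack, ulLR, ucSvcNumber ); /* Enter a kernel API on behalf of an unprivileged task. */
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulCallerStack, ulLR );               /* Raised by vRequestSystemCallExit when the API returns. */
    }
    else
    {
        vPortSVCHandler_C( pulCallerStack );                   /* All remaining kernel SVCs (start scheduler, yield, ...). */
    }
}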
@ -587,8 +583,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -600,12 +596,12 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}


@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

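The renumbering above follows from the same change: system call numbers occupy 0 to NUM_SYSTEM_CALLS - 1 and are placed directly in the SVC immediate, so the kernel's own SVCs move up to 100 and above, and a dedicated portSVC_YIELD replaces the two removed enter numbers. A compile-time guard expressing that the two ranges must not overlap could look like the sketch below (an illustration, not part of this diff).

#include "mpu_syscall_numbers.h" /* Provides NUM_SYSTEM_CALLS. */

/* Sketch only: every kernel SVC number must sit above the system call number
 * range, otherwise the SVC handler would route it to vSystemCallEnter. */
#if ( portSVC_ALLOCATE_SECURE_CONTEXT < NUM_SYSTEM_CALLS )
    #error "Kernel SVC numbers must not overlap the system call number range."
#endif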
File diff suppressed because it is too large.


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
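Taken together, the three prototypes above implement the single-SVC round trip described in the commit message. The outline below uses vTaskDelay as an example; MPU_vTaskDelayImpl and SYSTEM_CALL_vTaskDelay are assumed names standing in for the generated implementation symbol and its entry in mpu_syscall_numbers.h, and the real unprivileged entry points are assembly functions elsewhere in this change. This is a C-flavoured sketch of the control flow only.

/* 1. Unprivileged entry point: one SVC whose immediate is the system call number. */
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL - sketch. */
{
    __asm volatile ( "svc %0" : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory" );
}

/* 2. SVC_Handler tail-calls vSystemCallEnter, which switches to the per-task
 *    system call stack, raises privilege, points the stacked PC at
 *    uxSystemCallImplementations[ SYSTEM_CALL_vTaskDelay ] and the stacked LR
 *    at vRequestSystemCallExit.
 * 3. The privileged implementation (MPU_vTaskDelayImpl) runs and returns,
 *    which lands in vRequestSystemCallExit.
 * 4. vRequestSystemCallExit raises portSVC_SYSTEM_CALL_EXIT; vSystemCallExit
 *    restores the task stack and PSPLIM, drops privilege, and returns to the
 *    original caller of MPU_vTaskDelay. */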
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
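For reference while reading vSystemCallEnter above and vSystemCallExit below, this is the basic hardware-stacked exception frame that both functions copy and rewrite; the portOFFSET_TO_LR/PC/PSR macros index into it (word indices shown, and an extended FP frame appends s0-s15 and FPSCR after xPSR). The struct is purely illustrative; the port indexes raw words instead.

typedef struct /* Illustrative only - not a type used by the port. */
{
    uint32_t ulR0;   /* [ 0 ] First argument / return value of the wrapped API. */
    uint32_t ulR1;   /* [ 1 ] */
    uint32_t ulR2;   /* [ 2 ] */
    uint32_t ulR3;   /* [ 3 ] */
    uint32_t ulR12;  /* [ 4 ] */
    uint32_t ulLR;   /* [ 5 ] portOFFSET_TO_LR - rewritten to vRequestSystemCallExit on entry. */
    uint32_t ulPC;   /* [ 6 ] portOFFSET_TO_PC - rewritten to uxSystemCallImplementations[ n ] on entry. */
    uint32_t ulXPSR; /* [ 7 ] portOFFSET_TO_PSR - bit 9 records any aligner word the hardware inserted. */
} ExceptionFrame_t;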
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
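The effect of the two exit-path checks above can be seen from the application side: an unprivileged task that raises the exit SVC directly gains nothing, because the stacked PC is not inside the privileged functions section and no system call is in progress. A hypothetical demonstration task (not part of this change) would simply resume after the instruction, still unprivileged:

static void prvRogueTask( void * pvParameters ) /* Hypothetical, for illustration only. */
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Taken by SVC_Handler and routed to vSystemCallExit, which ignores it
         * because the checks above fail. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );

        /* Execution continues here without any privilege escalation. */
    }
}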
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;


@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -118,35 +121,35 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -157,12 +160,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -176,10 +179,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -191,10 +194,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -206,15 +209,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -230,12 +233,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -247,10 +250,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
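ulSetInterruptMask and vClearInterruptMask above are typically consumed through the port's interrupt-masking macros; the pattern is save the mask, do the work, restore the mask. A minimal usage sketch is shown below (direct calls are shown only for illustration; application code should go through the port macros).

void vExampleProtectedRegion( void ) /* Sketch only. */
{
    uint32_t ulPreviousMask;

    ulPreviousMask = ulSetInterruptMask(); /* basepri = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
    {
        /* Interrupts that are allowed to call FreeRTOS API functions are now
         * masked, so this region cannot be preempted by them. */
    }
    vClearInterruptMask( ulPreviousMask ); /* Restore the previous basepri value. */
}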
@ -258,8 +261,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -375,75 +378,74 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" mrs r0, psp \n" /* Read PSP in r0. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
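As an aid to reading the non-MPU PendSV_Handler above, the layout it leaves on a task's stack (top of stack first) can be written out as a struct. This is inferred from the stmdb/vstmdb sequence and is illustrative only, since the s16-s31 block is present only when that particular frame was extended.

typedef struct /* Illustrative only - not a type used by the port. */
{
    uint32_t ulPSPLIM;             /* Saved stack limit (stacked from r2). */
    uint32_t ulExcReturn;          /* Saved EXC_RETURN (stacked from r3). */
    uint32_t ulCalleeSaved[ 8 ];   /* r4 - r11, not stacked by hardware. */
    /* s16 - s31 follow here only when the saved frame is extended. */
    uint32_t ulHardwareFrame[ 8 ]; /* r0-r3, r12, LR, PC, xPSR stacked by hardware
                                    * (8 words for a basic frame; an extended frame
                                    * stacks 26 words including s0-s15 and FPSCR). */
} TaskSavedContext_t;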
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -454,10 +456,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -465,24 +465,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -497,7 +493,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/


@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

File diff suppressed because it is too large.


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -121,12 +124,12 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -151,7 +154,7 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -162,12 +165,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -181,10 +184,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -196,10 +199,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -211,15 +214,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -235,12 +238,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -252,10 +255,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -263,8 +266,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -411,96 +414,96 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n"/* r2 = r2 + 12. */
" stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n"/* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n" /* r2 = r2 + 12. */
" stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n" /* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
" ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
@ -508,21 +511,20 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"xSecureContextConst: .word xSecureContext \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -533,10 +535,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -544,24 +544,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -576,7 +572,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
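The hunk above collapses the three-way SVC comparison (separate enter paths for 4- and 5-parameter calls, plus exit) into a simpler dispatch: any SVC number below NUM_SYSTEM_CALLS is treated as the index of a kernel system call and forwarded to vSystemCallEnter together with the EXC_RETURN value and the number itself, portSVC_SYSTEM_CALL_EXIT unwinds a system call, and everything else falls through to vPortSVCHandler_C. A minimal C sketch of that routing, for illustration only (prvRouteSVC is a hypothetical name; the real dispatch is the naked assembly above):

    static void prvRouteSVC( uint32_t * pulStack, uint32_t ulLR )
    {
        /* The SVC immediate is encoded in the instruction just before the stacked
         * PC (frame layout: r0, r1, r2, r3, r12, LR, PC, xPSR => PC at index 6). */
        uint8_t ucSvcNumber = ( ( uint8_t * ) pulStack[ 6 ] )[ -2 ];

        if( ucSvcNumber < NUM_SYSTEM_CALLS )
        {
            vSystemCallEnter( pulStack, ulLR, ucSvcNumber ); /* Kernel API system call. */
        }
        else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
        {
            vSystemCallExit( pulStack, ulLR );               /* Return path of a system call. */
        }
        else
        {
            vPortSVCHandler_C( pulStack );                   /* Scheduler start, secure context SVCs, etc. */
        }
    }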
@ -587,8 +583,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -600,12 +596,12 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
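Because the kernel system calls are now identified by their index into uxSystemCallImplementations (0 through NUM_SYSTEM_CALLS - 1), the port-specific SVC numbers move out of that range and start at 100, so the SVC handler can separate the two with a single comparison. A hedged sketch of how the two ranges are used (the function name and the call number 10 below are placeholders, not taken from this change):

    static void prvExampleSvcUsage( void )
    {
        /* A kernel system call: the SVC immediate is the call's index into
         * uxSystemCallImplementations[]. It is only honoured when raised from
         * the __syscalls_flash section, i.e. from an MPU_<API> wrapper. */
        __asm volatile ( "svc %0" : : "i" ( 10 ) : "memory" );

        /* Port services keep using the 100+ range, e.g. a yield request. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_YIELD ) : "memory" );
    }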
/**

File diff suppressed because it is too large.
@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
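With the PC and LR of the copied frame rewritten as above, a single SVC is enough to enter a system call: the MPU_<API> wrapper raises the SVC, the handler switches to the system call stack and raises privilege, the kernel implementation runs, and its return lands in vRequestSystemCallExit, which raises the exit SVC on the caller's behalf. A hypothetical sketch of what such a wrapper reduces to under this scheme (the name and call number are placeholders; the real MPU_<API> wrappers are defined elsewhere in the kernel and are not part of this excerpt):

    #define SYSTEM_CALL_EXAMPLE    10 /* Hypothetical index into uxSystemCallImplementations[]. */

    __attribute__( ( naked ) ) BaseType_t MPU_xExampleCall( void ) /* FREERTOS_SYSTEM_CALL */
    {
        __asm volatile
        (
            " svc %0 \n" /* One SVC enters the system call; privilege raise/drop and the exit SVC are handled internally. */
            " bx lr  \n" /* Not normally reached - vSystemCallExit resumes execution at this wrapper's caller. */
            ::"i" ( SYSTEM_CALL_EXAMPLE ) : "memory"
        );
    }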
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
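From the application's point of view the whole round trip is invisible: the wrapped API runs privileged on the system call stack, and once vSystemCallExit has copied the frame back, restored PSPLIM and set the nPRIV bit, the task resumes at the instruction after its original call, unprivileged and on its own stack again. A usage sketch under that assumption (vTaskDelay is used purely as an example of an API that maps onto an MPU_ wrapper in this configuration):

    #include "FreeRTOS.h"
    #include "task.h"

    static void prvExampleUnprivilegedTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            /* Maps to MPU_vTaskDelay: one SVC in, the kernel runs privileged on
             * the system call stack, one exit SVC out, and execution resumes
             * here unprivileged on the task stack. */
            vTaskDelay( pdMS_TO_TICKS( 10 ) );
        }
    }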
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;
@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -118,35 +121,35 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -157,12 +160,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -176,10 +179,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -191,10 +194,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -206,15 +209,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -230,12 +233,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -247,10 +250,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -258,8 +261,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -375,75 +378,74 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" mrs r0, psp \n" /* Read PSP in r0. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -454,10 +456,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -465,24 +465,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -497,7 +493,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

File diff suppressed because it is too large.

portable/GCC/ARM_CM3_MPU/port.c (294) Executable file → Normal file
@ -38,6 +38,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "mpu_syscall_numbers.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -188,7 +189,7 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
/**
* @brief Enter critical section.
*/
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@ -197,7 +198,7 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
/**
* @brief Exit from critical section.
*/
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
@ -205,29 +206,24 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -325,14 +321,13 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -340,26 +335,24 @@ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION *
"mrseq r0, msp \n"
"mrsne r0, psp \n"
" \n"
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq vSystemCallEnter \n"
"cmp r2, %1 \n"
"beq vSystemCallEnter_1 \n"
"cmp r2, %2 \n"
"ldr r2, [r0, #24] \n"
"ldrb r1, [r2, #-2] \n"
"cmp r1, %0 \n"
"blt vSystemCallEnter \n"
"cmp r1, %1 \n"
"beq vSystemCallExit \n"
"b vSVCHandler_C \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
/* Assumes psp was in use. */
__asm volatile
(
@ -374,7 +367,7 @@ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION *
" b %0 \n"
::"i" ( vSVCHandler_C ) : "r0", "memory"
);
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
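Read as plain C, the dispatch above classifies the SVC immediate into three cases; the constants below are assumed stand-ins (the real NUM_SYSTEM_CALLS must stay at or below 100 so the kernel service numbers above never collide with system call numbers):

#include <stdint.h>

#define EXAMPLE_NUM_SYSTEM_CALLS        100U /* Stand-in for NUM_SYSTEM_CALLS from mpu_syscall_numbers.h. */
#define EXAMPLE_SVC_SYSTEM_CALL_EXIT    103U /* Mirrors portSVC_SYSTEM_CALL_EXIT in this port. */

typedef enum ExampleSvcDispatch
{
    eExampleSystemCallEnter, /* The "blt vSystemCallEnter" path. */
    eExampleSystemCallExit,  /* The "beq vSystemCallExit" path. */
    eExampleKernelService    /* Falls through to vSVCHandler_C. */
} ExampleSvcDispatch_t;

static ExampleSvcDispatch_t xExampleClassifySvc( uint8_t ucSvcNumber )
{
    if( ucSvcNumber < EXAMPLE_NUM_SYSTEM_CALLS )
    {
        return eExampleSystemCallEnter;
    }

    if( ucSvcNumber == EXAMPLE_SVC_SYSTEM_CALL_EXIT )
    {
        return eExampleSystemCallExit;
    }

    return eExampleKernelService;
}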
@ -384,9 +377,8 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
uint8_t ucSVCNumber;
uint32_t ulPC;
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
#if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -396,7 +388,7 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@ -422,6 +414,7 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
break;
#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
* svc was raised from any of the
@ -432,9 +425,9 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r1, control \n"/* Obtain current control value. */
" bic r1, #1 \n"/* Set privilege bit. */
" msr control, r1 \n"/* Write back new control value. */
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Set privilege bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
}
@ -444,13 +437,14 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
case portSVC_RAISE_PRIVILEGE:
__asm volatile
(
" mrs r1, control \n"/* Obtain current control value. */
" bic r1, #1 \n"/* Set privilege bit. */
" msr control, r1 \n"/* Write back new control value. */
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Set privilege bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
break;
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default: /* Unknown SVC call. */
break;
@ -460,13 +454,16 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulSystemCallLocation, i;
const uint32_t ulStackFrameSize = 8;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -479,16 +476,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -503,7 +510,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
@ -514,10 +522,19 @@ void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
@ -534,127 +551,58 @@ void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter_1( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulSystemCallLocation, i;
const uint32_t ulStackFrameSize = 8;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulSystemCallLocation, i;
const uint32_t ulStackFrameSize = 8;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
/* Make space on the task stack for the stack frame. */
@ -670,15 +618,18 @@ void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" orr r1, #1 \n" /* Set nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Restore the stacked link register to what it was at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* If the hardware used padding to force the stack pointer
@ -696,7 +647,7 @@ void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */
/* This is not NULL only for the duration of the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
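In both handlers above, pxMpuSettings->xSystemCallStackInfo.pulTaskStack doubles as the "a system call is in progress" flag; the pairing can be modelled with the following sketch (assumed names, not code from this commit):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct ExampleSystemCallState
{
    uint32_t * pulTaskStack; /* NULL except between system call enter and exit. */
} ExampleSystemCallState_t;

/* Enter is only honoured when no system call is in progress. */
static bool xExampleEnterAllowed( const ExampleSystemCallState_t * pxState )
{
    return pxState->pulTaskStack == NULL;
}

/* Exit is only honoured when a matching enter happened earlier. */
static bool xExampleExitAllowed( const ExampleSystemCallState_t * pxState )
{
    return pxState->pulTaskStack != NULL;
}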
@ -874,19 +825,21 @@ BaseType_t xPortStartScheduler( void )
uxCriticalNesting = 0;
/* Start the first task. */
__asm volatile (
" ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
__asm volatile
(
" ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"
" ldr r0, [r0] \n"
" msr msp, r0 \n"/* Set the msp back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" msr msp, r0 \n" /* Set the msp back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start first task. */
" svc %0 \n" /* System call to start first task. */
" nop \n"
" .ltorg \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory" );
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
/* Should not get here! */
return 0;
@ -903,7 +856,7 @@ void vPortEndScheduler( void )
void vPortEnterCritical( void )
{
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
if( portIS_PRIVILEGED() == pdFALSE )
{
portRAISE_PRIVILEGE();
@ -921,16 +874,16 @@ void vPortEnterCritical( void )
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
}
#else
#else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
#endif
#endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
if( portIS_PRIVILEGED() == pdFALSE )
{
portRAISE_PRIVILEGE();
@ -943,6 +896,7 @@ void vPortExitCritical( void )
{
portENABLE_INTERRUPTS();
}
portMEMORY_BARRIER();
portRESET_PRIVILEGE();
@ -958,7 +912,7 @@ void vPortExitCritical( void )
portENABLE_INTERRUPTS();
}
}
#else
#else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@ -966,7 +920,7 @@ void vPortExitCritical( void )
{
portENABLE_INTERRUPTS();
}
#endif
#endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
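Both critical section functions follow the same raise-privilege, update, drop-privilege pattern when configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS is set, with portMEMORY_BARRIER() keeping the compiler from reordering across the privilege changes. A sketch of the enter side, using the port macros visible above and an example nesting counter, might look like this (illustration only, not a drop-in replacement for the port's function):

#include "FreeRTOS.h"

static UBaseType_t uxExampleCriticalNesting = 0; /* Stands in for the port's uxCriticalNesting. */

void vExampleEnterCriticalFromUnprivilegedTask( void )
{
    if( portIS_PRIVILEGED() == pdFALSE )
    {
        portRAISE_PRIVILEGE();
        portMEMORY_BARRIER();

        portDISABLE_INTERRUPTS();
        uxExampleCriticalNesting++;
        portMEMORY_BARRIER();

        portRESET_PRIVILEGE();
        portMEMORY_BARRIER();
    }
    else
    {
        portDISABLE_INTERRUPTS();
        uxExampleCriticalNesting++;
    }
}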
@ -1167,12 +1121,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -1184,10 +1138,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -1284,11 +1238,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
}
if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
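The permission bookkeeping above boils down to one mapping from the application's MPU region parameters to the task-level read/write bits; the following helper is an illustrative consolidation, not code from this commit:

#include "FreeRTOS.h"
#include "task.h"

/* tskMPU_READ_PERMISSION / tskMPU_WRITE_PERMISSION come from task.h,
 * the portMPU_REGION_* attributes from this port's header. */
static uint32_t ulExampleRegionPermissions( uint32_t ulParameters )
{
    uint32_t ulPermissions = 0UL;

    if( ( ( ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
        ( ( ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
    {
        ulPermissions = tskMPU_READ_PERMISSION; /* Unprivileged read access only. */
    }

    if( ( ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
    {
        ulPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); /* Unprivileged read and write. */
    }

    return ulPermissions;
}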


@ -153,12 +153,10 @@
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
#define portSVC_START_SCHEDULER 0
#define portSVC_YIELD 1
#define portSVC_RAISE_PRIVILEGE 2
#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 5
#define portSVC_START_SCHEDULER 100
#define portSVC_YIELD 101
#define portSVC_RAISE_PRIVILEGE 102
#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */

File diff suppressed because it is too large.

portable/GCC/ARM_CM4_MPU/port.c Executable file → Normal file

@ -38,6 +38,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "mpu_syscall_numbers.h"
#ifndef __VFP_FP__
#error This port can only be used when the project options are configured to enable hardware floating point support.
@ -209,7 +210,7 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
/**
* @brief Enter critical section.
*/
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
@ -218,7 +219,7 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
/**
* @brief Exit from critical section.
*/
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL;
#else
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
@ -226,31 +227,26 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -263,7 +259,8 @@ void vResetPrivilege( void ) __attribute__( ( naked ) );
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -350,14 +347,13 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -368,10 +364,8 @@ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION *
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vSVCHandler_C \n"
" \n"
@ -379,24 +373,20 @@ void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION *
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void vPortSVCHandler( void )
{
/* Assumes psp was in use. */
__asm volatile
(
@ -411,7 +401,7 @@ void vPortSVCHandler( void )
" b %0 \n"
::"i" ( vSVCHandler_C ) : "r0", "memory"
);
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -421,9 +411,8 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
uint8_t ucSVCNumber;
uint32_t ulPC;
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
#if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -433,7 +422,7 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@ -459,6 +448,7 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
break;
#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
* svc was raised from any of the
@ -469,9 +459,9 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
{
__asm volatile
(
" mrs r1, control \n"/* Obtain current control value. */
" bic r1, #1 \n"/* Set privilege bit. */
" msr control, r1 \n"/* Write back new control value. */
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Set privilege bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
}
@ -481,13 +471,14 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
case portSVC_RAISE_PRIVILEGE:
__asm volatile
(
" mrs r1, control \n"/* Obtain current control value. */
" bic r1, #1 \n"/* Set privilege bit. */
" msr control, r1 \n"/* Write back new control value. */
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Set privilege bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
break;
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default: /* Unknown SVC call. */
break;
@ -497,12 +488,16 @@ void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -515,22 +510,33 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -555,7 +561,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
@ -566,10 +573,19 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
@ -586,148 +602,66 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
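The vpush/vpop pair used in these handlers exists only to force the Cortex-M lazy floating-point state preservation to complete before the frame is copied; in isolation the trick looks like the helper below (illustrative, not part of the port):

static inline void vExampleTriggerLazyFpuStacking( void )
{
    __asm volatile
    (
        " vpush {s0} \n" /* Any FPU access forces the deferred stacking of s0-s15/FPSCR to complete. */
        " vpop {s0}  \n" /* Undo the push so the stack pointer is unchanged afterwards. */
        ::: "memory"
    );
}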
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -752,15 +686,18 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" orr r1, #1 \n" /* Set nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Restore the stacked link register to what it was at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* If the hardware used padding to force the stack pointer
@ -778,7 +715,7 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* This is not NULL only for the duration of the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
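The ulLR value threaded through these handlers is EXC_RETURN, and bit 4 (masked by portEXC_RETURN_STACK_FRAME_TYPE_MASK) selects between the 8-word standard frame and the 26-word extended FPU frame. A minimal helper expressing that choice, with an assumed mask value, is:

#include <stdint.h>

#define EXAMPLE_EXC_RETURN_FRAME_TYPE_MASK    ( 0x10UL ) /* Bit 4 of EXC_RETURN: 0 means the extended (FPU) frame was stacked. */

static uint32_t ulExampleStackFrameWords( uint32_t ulExcReturn )
{
    /* 26 words: r0-r3, r12, LR, PC, xPSR plus s0-s15, FPSCR and a reserved word.
     * 8 words: the standard frame only. */
    return ( ( ulExcReturn & EXAMPLE_EXC_RETURN_FRAME_TYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}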
@ -984,21 +921,23 @@ BaseType_t xPortStartScheduler( void )
* in use in case the FPU was used before the scheduler was started - which
* would otherwise result in the unnecessary leaving of space in the SVC stack
* for lazy saving of FPU registers. */
__asm volatile (
" ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */
__asm volatile
(
" ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"
" ldr r0, [r0] \n"
" msr msp, r0 \n"/* Set the msp back to the start of the stack. */
" mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */
" msr msp, r0 \n" /* Set the msp back to the start of the stack. */
" mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */
" msr control, r0 \n"
" cpsie i \n"/* Globally enable interrupts. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start first task. */
" svc %0 \n" /* System call to start first task. */
" nop \n"
" .ltorg \n"
::"i" ( portSVC_START_SCHEDULER ) : "memory" );
::"i" ( portSVC_START_SCHEDULER ) : "memory"
);
/* Should not get here! */
return 0;
@ -1015,7 +954,7 @@ void vPortEndScheduler( void )
void vPortEnterCritical( void )
{
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
if( portIS_PRIVILEGED() == pdFALSE )
{
portRAISE_PRIVILEGE();
@ -1033,16 +972,16 @@ void vPortEnterCritical( void )
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
}
#else
#else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
#endif
#endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 )
if( portIS_PRIVILEGED() == pdFALSE )
{
portRAISE_PRIVILEGE();
@ -1055,6 +994,7 @@ void vPortExitCritical( void )
{
portENABLE_INTERRUPTS();
}
portMEMORY_BARRIER();
portRESET_PRIVILEGE();
@ -1070,7 +1010,7 @@ void vPortExitCritical( void )
portENABLE_INTERRUPTS();
}
}
#else
#else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
@ -1078,7 +1018,7 @@ void vPortExitCritical( void )
{
portENABLE_INTERRUPTS();
}
#endif
#endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */
}
/*-----------------------------------------------------------*/
@ -1219,10 +1159,10 @@ static void vPortEnableVFP( void )
{
__asm volatile
(
" ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */
" ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */
" ldr r1, [r0] \n"
" \n"
" orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */
" orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */
" str r1, [r0] \n"
" bx r14 \n"
" .ltorg \n"
@ -1233,7 +1173,6 @@ static void vPortEnableVFP( void )
static void prvSetupMPU( void )
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -1341,12 +1280,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -1358,10 +1297,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
{
__asm volatile
(
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -1373,7 +1312,6 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
uint32_t ulStackDepth )
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __SRAM_segment_start__;
@ -1471,11 +1409,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
}
if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );


@ -242,12 +242,10 @@ typedef struct MPU_SETTINGS
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
#define portSVC_START_SCHEDULER 0
#define portSVC_YIELD 1
#define portSVC_RAISE_PRIVILEGE 2
#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 5
#define portSVC_START_SCHEDULER 100
#define portSVC_YIELD 101
#define portSVC_RAISE_PRIVILEGE 102
#define portSVC_SYSTEM_CALL_EXIT 103
/* Scheduler utilities. */

File diff suppressed because it is too large.


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
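On ARMv8-M the enter and exit paths additionally swap the PSPLIM stack-limit register alongside PSP, as the mrs/msr pairs above show. Isolated helpers for those two accesses (a sketch, not code from this commit) would be:

#include <stdint.h>

static inline uint32_t ulExampleGetPsplim( void )
{
    uint32_t ulValue;

    __asm volatile ( "mrs %0, psplim" : "=r" ( ulValue ) );

    return ulValue;
}

static inline void vExampleSetPsplim( uint32_t ulValue )
{
    __asm volatile ( "msr psplim, %0" : : "r" ( ulValue ) : "memory" );
}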
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
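The stack frame size used by all of these handlers is derived from bit[4] of EXC_RETURN alone; a compact sketch of that selection (the helper is illustrative, the mask is the port's portEXC_RETURN_STACK_FRAME_TYPE_MASK):

static uint32_t prvStackFrameSizeInWords( uint32_t ulLR )
{
    /* Bit[4] of EXC_RETURN is 0 when the extended (FP) frame was stacked:
     * r0-r3, r12, LR, PC, xPSR, s0-s15, FPSCR and a reserved word = 26 words.
     * Otherwise only the 8 word basic frame was stacked. */
    return ( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}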
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
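Because the copied frame's LR is pointed at vRequestSystemCallExit, an unprivileged entry point only has to raise the entry SVC itself. A sketch of what such an entry point can look like; the actual wrappers live in mpu_wrappers_v2_asm.c, and the privilege test, the labels and the SYSTEM_CALL_vTaskDelay constant below are assumptions of this sketch rather than quotes from that file:

void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
{
    __asm volatile
    (
        " .syntax unified                 \n"
        " .extern MPU_vTaskDelayImpl      \n"
        "                                 \n"
        " push {r0}                       \n" /* Preserve r0 while CONTROL is inspected. */
        " mrs r0, control                 \n" /* r0 = CONTROL. */
        " tst r0, #1                      \n" /* Is the caller running unprivileged (nPRIV set)? */
        " pop {r0}                        \n"
        " bne MPU_vTaskDelay_Unpriv       \n"
        " MPU_vTaskDelay_Priv:            \n"
        "     b MPU_vTaskDelayImpl        \n" /* Privileged caller - branch straight to the implementation. */
        " MPU_vTaskDelay_Unpriv:          \n"
        "     svc %0                      \n" /* Single SVC - the handlers above take care of entry and exit. */
        ::"i" ( SYSTEM_CALL_vTaskDelay ) : "memory"
    );
}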
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
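Stripped of the FPU handling, the PSPLIM restore and the privilege drop, the restore performed by vSystemCallExit above amounts to the following (the helper name and signature are illustrative):

static uint32_t * prvRestoreTaskFrame( uint32_t * pulTaskStack,
                                       const uint32_t * pulSystemCallStack,
                                       uint32_t ulStackFrameSize,
                                       xMPU_SETTINGS * pxMpuSettings )
{
    uint32_t i;

    pulTaskStack = pulTaskStack - ulStackFrameSize;

    for( i = 0; i < ulStackFrameSize; i++ )
    {
        pulTaskStack[ i ] = pulSystemCallStack[ i ]; /* r0 carries the system call's return value back to the caller. */
    }

    /* Both PC and LR take the LR saved at entry, so execution resumes at the
     * caller of the MPU_<API> entry point with a valid link register. */
    pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
    pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

    /* Re-assert the stacked xPSR padding bit if the original frame was padded. */
    if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
    {
        pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
    }

    return pulTaskStack;
}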
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -121,12 +124,12 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -151,7 +154,7 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -162,12 +165,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -181,10 +184,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -196,10 +199,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -211,15 +214,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -235,12 +238,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -252,10 +255,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -263,8 +266,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -411,96 +414,96 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n"/* r2 = r2 + 12. */
" stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n"/* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n" /* r2 = r2 + 12. */
" stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n" /* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
" ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
@ -508,21 +511,20 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"xSecureContextConst: .word xSecureContext \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
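For reference, when the non-secure stack was in use the non-MPU PendSV handler above leaves the following frame on the task stack (lowest address first; the optional s16-s31 block that sits between r4-r11 and the hardware frame is omitted). The struct is documentation only, the port never declares it:

typedef struct
{
    uint32_t ulSecureContext;   /* xSecureContext for the task. */
    uint32_t ulPSPLIM;          /* Stack limit at the time of the switch. */
    uint32_t ulExcReturn;       /* LR/EXC_RETURN. */
    uint32_t ulR4ToR11[ 8 ];    /* Callee-saved registers. */
    uint32_t ulHwFrame[ 8 ];    /* r0-r3, r12, LR, PC, xPSR stacked by hardware. */
} IllustrativeSwitchFrame_t;    /* The first 11 words match the "subs r2, r2, #44" above. */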
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -533,10 +535,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -544,24 +544,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -576,7 +572,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
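Once the stacked frame pointer has been selected from MSP or PSP (bit[2] of EXC_RETURN), the dispatch at the top of the MPU SVC_Handler above reduces to the following C, shown only to make the assembly easier to follow:

static void prvDispatchSVC( uint32_t * pulStackedFrame, uint32_t ulLR ) /* Illustration only. */
{
    uint32_t ulStackedPC = pulStackedFrame[ 6 ];                /* PC is the 7th word of the exception frame. */
    uint8_t ucSVCNumber = ( ( uint8_t * ) ulStackedPC )[ -2 ];  /* Immediate byte encoded in the SVC instruction. */

    if( ucSVCNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulStackedFrame, ulLR, ucSVCNumber ); /* "blt syscall_enter". */
    }
    else if( ucSVCNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStackedFrame, ulLR );               /* "beq syscall_exit". */
    }
    else
    {
        vPortSVCHandler_C( pulStackedFrame );                   /* All remaining kernel SVCs. */
    }
}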
@ -587,8 +583,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -600,12 +596,12 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
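With the kernel's own SVC numbers moved up to 100 and above, they no longer overlap the system call numbers generated in mpu_syscall_numbers.h (0 to NUM_SYSTEM_CALLS - 1), which is what makes the single "blt syscall_enter" comparison in SVC_Handler sufficient. An illustrative build-time guard for that assumption, not part of the port:

#include "mpu_syscall_numbers.h"

#if ( NUM_SYSTEM_CALLS > portSVC_ALLOCATE_SECURE_CONTEXT )
    #error "System call numbers must stay below the kernel SVC numbers."
#endif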
/**

File diff suppressed because it is too large.

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
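Stripped of the alignment, PSPLIM and privilege handling, the core of vSystemCallEnter above is the frame move plus the PC/LR rewrite (the helper name and signature are illustrative):

static uint32_t * prvMoveFrameToSystemCallStack( const uint32_t * pulTaskStack,
                                                 uint32_t * pulSystemCallStack,
                                                 uint32_t ulStackFrameSize,
                                                 uint8_t ucSystemCallNumber )
{
    uint32_t i;

    pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; /* Make space for the frame. */

    for( i = 0; i < ulStackFrameSize; i++ )
    {
        pulSystemCallStack[ i ] = pulTaskStack[ i ];            /* r0-r3, r12, LR, PC, xPSR (+ FP registers, if stacked). */
    }

    /* On exception return, execution starts at the requested implementation;
     * when that returns, it falls into vRequestSystemCallExit, which raises
     * the exit SVC. */
    pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
    pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;

    return pulSystemCallStack;
}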
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
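xPortIsTaskPrivileged() gives the MPU wrappers a cheap way to decide whether a caller's arguments need validating before they are used. A minimal usage sketch (pulNotificationValue is a hypothetical wrapper parameter):

BaseType_t xRunningPrivileged = xPortIsTaskPrivileged();

if( xRunningPrivileged == pdFALSE )
{
    /* The calling task was created without portPRIVILEGE_BIT, so treat its
     * parameters as untrusted before acting on them. */
    configASSERT( pulNotificationValue != NULL ); /* Hypothetical parameter, for illustration. */
}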
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -118,35 +121,35 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -157,12 +160,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -176,10 +179,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -191,10 +194,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -206,15 +209,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -230,12 +233,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -247,10 +250,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -258,8 +261,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -375,75 +378,74 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" mrs r0, psp \n" /* Read PSP in r0. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -454,10 +456,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -465,24 +465,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -497,7 +493,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
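The rewritten handler above treats every SVC number below NUM_SYSTEM_CALLS as a system call entry and reserves a single number, portSVC_SYSTEM_CALL_EXIT, for the exit path; anything else still falls through to vPortSVCHandler_C. A rough C equivalent of that dispatch, as a sketch only - the real handler is the naked assembly above and prvDispatchSVC is a hypothetical name:

/* Sketch of the dispatch performed by SVC_Handler once the active stack
 * pointer (MSP or PSP, selected from bit 2 of EXC_RETURN) is in pulStack. */
static void prvDispatchSVC( uint32_t * pulStack, uint32_t ulLR )
{
    /* The SVC number is the low byte of the SVC instruction, which sits two
     * bytes before the stacked return address. */
    uint8_t * pucReturnAddress = ( uint8_t * ) pulStack[ portOFFSET_TO_PC ];
    uint8_t ucSvcNumber = pucReturnAddress[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulStack, ulLR, ucSvcNumber ); /* Every system call enters here. */
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStack, ulLR ); /* Raised only by vRequestSystemCallExit. */
    }
    else
    {
        vPortSVCHandler_C( pulStack ); /* Scheduler start, raise privilege, etc. */
    }
}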

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
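With the renumbering above, every value below NUM_SYSTEM_CALLS is free to act as a system call number, so the port-specific SVCs move out of that range (to 100 and up) and a wrapper needs exactly one SVC whose immediate is the call's number. A simplified sketch of such an entry point; the real wrappers are generated naked-assembly functions that also handle already-privileged callers, and SYSTEM_CALL_vTaskDelay is assumed to come from mpu_syscall_numbers.h:

/* Sketch only - not the actual FreeRTOS wrapper. The SVC handler redirects
 * execution to the privileged implementation and, on exit, returns straight
 * to the caller of MPU_vTaskDelay(), so nothing after the SVC instruction
 * runs on the unprivileged path. xTicksToDelay stays in r0 and reaches the
 * implementation through the copied stack frame. */
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */
{
    __asm volatile ( " svc %0 \n" ::"i" ( SYSTEM_CALL_vTaskDelay ) : "memory" );
}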
/**

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
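Taken together, vSystemCallEnter, vRequestSystemCallExit and vSystemCallExit implement the single-SVC round trip this change introduces. An annotated trace of one call, with the wrapper and implementation names assumed for illustration:

/*
 * MPU_vTaskDelay()                         <- unprivileged, in the syscalls flash section
 *   svc #SYSTEM_CALL_vTaskDelay            <- single entry SVC, number below NUM_SYSTEM_CALLS
 *     SVC_Handler -> vSystemCallEnter()    <- switch to the system call stack, raise privilege,
 *                                             stacked PC := uxSystemCallImplementations[ n ],
 *                                             stacked LR := vRequestSystemCallExit
 *   MPU_vTaskDelayImpl()                   <- privileged implementation runs on the system call stack
 *   vRequestSystemCallExit()               <- reached when the implementation returns
 *     svc #portSVC_SYSTEM_CALL_EXIT
 *       SVC_Handler -> vSystemCallExit()   <- copy the frame back to the task stack, restore PSPLIM,
 *                                             point PC at the original caller, drop privilege
 *   execution resumes in the caller of MPU_vTaskDelay().
 */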
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
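xPortIsTaskPrivileged() reports whether the calling task was created as a privileged task. A hedged usage sketch; the function and pdTRUE are as declared by the kernel, everything else is illustrative:

/* Illustrative only: take a different path for restricted tasks. */
void vHandleRequest( void )
{
    if( xPortIsTaskPrivileged() == pdTRUE )
    {
        /* The calling task runs privileged. */
    }
    else
    {
        /* The calling task is restricted to the MPU regions granted to it. */
    }
}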
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -121,12 +124,12 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -151,7 +154,7 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
"xSecureContextConst2: .word xSecureContext \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -162,12 +165,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -181,10 +184,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -196,10 +199,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -211,15 +214,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -235,12 +238,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -252,10 +255,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
@ -263,8 +266,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -411,96 +414,96 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" .extern SecureContext_SaveContext \n"
" .extern SecureContext_LoadContext \n"
" \n"
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n"/* Read PSP in r2. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */
" mrs r2, psp \n" /* Read PSP in r2. */
" \n"
" cbz r0, save_ns_context \n"/* No secure context to save. */
" cbz r0, save_ns_context \n" /* No secure context to save. */
" push {r0-r2, r14} \n"
" bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n"/* LR is now in r3. */
" mov lr, r3 \n"/* LR = r3. */
" lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r0-r3} \n" /* LR is now in r3. */
" mov lr, r3 \n" /* LR = r3. */
" lsls r1, r3, #25 \n" /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl save_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB.*/
" subs r2, r2, #12 \n" /* Make space for xSecureContext, PSPLIM and LR on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" b select_next_task \n"
" \n"
" save_ns_context: \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r2!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n"/* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n"/* r2 = r2 + 12. */
" stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */
" mrs r1, psplim \n"/* r1 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n"/* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */
" subs r2, r2, #44 \n" /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */
" str r2, [r1] \n" /* Save the new top of stack in TCB. */
" adds r2, r2, #12 \n" /* r2 = r2 + 12. */
" stm r2, {r4-r11} \n" /* Store the registers that are not saved automatically. */
" mrs r1, psplim \n" /* r1 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" subs r2, r2, #12 \n" /* r2 = r2 - 12. */
" stmia r2!, {r0, r1, r3} \n" /* Store xSecureContext, PSPLIM and LR on the stack. */
" \n"
" select_next_task: \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" ldr r2, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */
" \n"
" ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n"/* LR = r4. */
" ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n"/* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n"/* Read pxCurrentTCB. */
" ldmia r2!, {r0, r1, r4} \n" /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */
" msr psplim, r1 \n" /* Restore the PSPLIM register value for the task. */
" mov lr, r4 \n" /* LR = r4. */
" ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */
" str r0, [r3] \n" /* Restore the task's xSecureContext. */
" cbz r0, restore_ns_context \n" /* If there is no secure context for the task, restore the non-secure context. */
" ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r3] \n" /* Read pxCurrentTCB. */
" push {r2, r4} \n"
" bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" bl SecureContext_LoadContext \n" /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */
" pop {r2, r4} \n"
" mov lr, r4 \n"/* LR = r4. */
" lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" mov lr, r4 \n" /* LR = r4. */
" lsls r1, r4, #25 \n" /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */
" bpl restore_ns_context \n" /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" restore_ns_context: \n"
" ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */
" ldmia r2!, {r4-r11} \n" /* Restore the registers that are not automatically restored. */
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r2!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" msr psp, r2 \n"/* Remember the new top of stack for the task. */
" msr psp, r2 \n" /* Remember the new top of stack for the task. */
" bx lr \n"
" \n"
" .align 4 \n"
@ -508,21 +511,20 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"xSecureContextConst: .word xSecureContext \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
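The non-MPU handler above saves xSecureContext, PSPLIM, LR/EXC_RETURN and r4-r11 below the hardware-stacked frame, which is where the "#44" (11 words) and "#12" (3 words) adjustments come from. Viewed as a struct, illustratively and without the optional s16-s31 block - the port itself never declares such a type:

/* Illustrative layout of the software-saved words pushed by PendSV_Handler
 * above, from lowest to highest address on the task stack. */
typedef struct IllustrativeSavedContext
{
    uint32_t ulSecureContext; /* xSecureContext of the task. */
    uint32_t ulPSPLIM;        /* Stack limit in force when the task was switched out. */
    uint32_t ulExcReturn;     /* LR/EXC_RETURN used to resume the task. */
    uint32_t ulR4ToR11[ 8 ];  /* Callee-saved core registers r4-r11. */
} IllustrativeSavedContext_t; /* 11 words, matching the "subs r2, r2, #44" above. */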
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -533,10 +535,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -544,24 +544,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -576,7 +572,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -587,8 +583,8 @@ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (
(
" .syntax unified \n"
" \n"
" svc %0 \n"/* Secure context is allocated in the supervisor call. */
" bx lr \n"/* Return. */
" svc %0 \n" /* Secure context is allocated in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory"
);
}
@ -600,12 +596,12 @@ void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PR
(
" .syntax unified \n"
" \n"
" ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */
" ldr r2, [r0] \n" /* The first item in the TCB is the top of the stack. */
" ldr r1, [r2] \n" /* The first item on the stack is the task's xSecureContext. */
" cmp r1, #0 \n" /* Raise svc if task's xSecureContext is not NULL. */
" it ne \n"
" svcne %0 \n"/* Secure context is freed in the supervisor call. */
" bx lr \n"/* Return. */
" svcne %0 \n" /* Secure context is freed in the supervisor call. */
" bx lr \n" /* Return. */
::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory"
);
}
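vPortAllocateSecureContext() raises portSVC_ALLOCATE_SECURE_CONTEXT so the secure side can set up a secure stack for the calling task before it makes any secure calls, and vPortFreeSecureContext() releases it when the task is deleted. A hedged usage sketch; portALLOCATE_SECURE_CONTEXT is the application-facing macro in these ports and the stack size constant is application defined:

/* Illustrative task that calls into the secure world: allocate a secure
 * context once, then make Non-Secure Callable (NSC) calls from the loop. */
void vSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ; ; )
    {
        /* ... call NSC functions exported by the secure image here ... */
    }
}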

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
 * We need to restore them when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Return to the caller of the System Call entry point (i.e. the
 * caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
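The enter/exit pair above keeps all of its bookkeeping in the calling task's MPU settings. A minimal sketch of the fields used by that code, assuming they live in a per-task system call stack info structure (the member names are taken from this diff; the structure name below and the full layout in mpu_wrappers.h may differ):
typedef struct SYSTEM_CALL_STACK_INFO_SKETCH
{
    uint32_t * pulTaskStack;                        /* Task SP saved at entry; non-NULL only while a system call is in progress. */
    uint32_t * pulSystemCallStack;                  /* Dedicated, double word aligned system call stack. */
    uint32_t * pulSystemCallStackLimit;             /* PSPLIM value to use while running on the system call stack. */
    uint32_t ulLinkRegisterAtSystemCallEntry;       /* Return address of the caller of the MPU_<API> entry point. */
    uint32_t ulStackLimitRegisterAtSystemCallEntry; /* Task PSPLIM value captured at entry and restored at exit. */
} xSystemCallStackInfoSketch_t;
vSystemCallExit() only trusts this state when the exit SVC was raised from the privileged code section and pulTaskStack is non-NULL, which is what stops an unprivileged task from faking a system call exit.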
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
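xPortIsTaskPrivileged() lets application code query, at run time, whether the calling task is a privileged task. A minimal usage sketch (the task function itself is hypothetical and only for illustration):
void vExampleTask( void * pvParameters ) /* Hypothetical task, not part of the port. */
{
    ( void ) pvParameters;

    for( ; ; )
    {
        if( xPortIsTaskPrivileged() == pdTRUE )
        {
            /* Privileged task - kernel APIs are reached by a direct branch to MPU_<API>Impl. */
        }
        else
        {
            /* Unprivileged task - kernel APIs go through the single-SVC entry path above. */
        }
    }
}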
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -36,14 +36,17 @@
/* Portasm includes. */
#include "portasm.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
* header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if ( configENABLE_MPU == 1 )
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -118,35 +121,35 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_
" xRNRConst2: .word 0xe000ed98 \n"
" xRBARConst2: .word 0xe000ed9c \n"
);
}
}
#else /* configENABLE_MPU */
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */
" \n"
" ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n"/* Set this task's PSPLIM value. */
" movs r1, #2 \n"/* r1 = 2. */
" msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */
" adds r0, #32 \n"/* Discard everything up to r0. */
" msr psp, r0 \n"/* This is now the new top of stack to use in the task. */
" ldm r0!, {r1-r2} \n" /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */
" msr psplim, r1 \n" /* Set this task's PSPLIM value. */
" movs r1, #2 \n" /* r1 = 2. */
" msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */
" adds r0, #32 \n" /* Discard everything up to r0. */
" msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n"/* Finally, branch to EXC_RETURN. */
" msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */
" bx r2 \n" /* Finally, branch to EXC_RETURN. */
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -157,12 +160,12 @@ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" mrs r0, control \n" /* r0 = CONTROL. */
" tst r0, #1 \n" /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */
" ite ne \n"
" movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n"/* Return. */
" movne r0, #0 \n" /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */
" moveq r0, #1 \n" /* CONTROL[0]==0. Return true to indicate that the processor is privileged. */
" bx lr \n" /* Return. */
" \n"
" .align 4 \n"
::: "r0", "memory"
@ -176,10 +179,10 @@ void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* Read the CONTROL register. */
" bic r0, #1 \n"/* Clear the bit 0. */
" msr control, r0 \n"/* Write back the new CONTROL value. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* Read the CONTROL register. */
" bic r0, #1 \n" /* Clear the bit 0. */
" msr control, r0 \n" /* Write back the new CONTROL value. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
@ -191,10 +194,10 @@ void vResetPrivilege( void ) /* __attribute__ (( naked )) */
(
" .syntax unified \n"
" \n"
" mrs r0, control \n"/* r0 = CONTROL. */
" orr r0, #1 \n"/* r0 = r0 | 1. */
" msr control, r0 \n"/* CONTROL = r0. */
" bx lr \n"/* Return to the caller. */
" mrs r0, control \n" /* r0 = CONTROL. */
" orr r0, #1 \n" /* r0 = r0 | 1. */
" msr control, r0 \n" /* CONTROL = r0. */
" bx lr \n" /* Return to the caller. */
::: "r0", "memory"
);
}
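All three helpers above operate on the nPRIV bit (bit 0) of the CONTROL register. The same check that xIsPrivileged() performs can be written in C with the CMSIS intrinsic, shown here only as an illustration; the port keeps these routines in naked assembly so they stay free of compiler-generated prologues:
#include "cmsis_compiler.h" /* Provides __get_CONTROL(); any CMSIS device header also works. */

static BaseType_t prvIsPrivilegedSketch( void ) /* Hypothetical helper, not part of the port. */
{
    /* CONTROL[0] (nPRIV) == 0 means thread mode is privileged. */
    return ( ( __get_CONTROL() & 0x1UL ) == 0UL ) ? pdTRUE : pdFALSE;
}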
@ -206,15 +209,15 @@ void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
(
" .syntax unified \n"
" \n"
" ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */
" msr msp, r0 \n"/* Set the MSP back to the start of the stack. */
" cpsie i \n"/* Globally enable interrupts. */
" ldr r0, xVTORConst \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n" /* Read the VTOR register which gives the address of vector table. */
" ldr r0, [r0] \n" /* The first entry in vector table is stack pointer. */
" msr msp, r0 \n" /* Set the MSP back to the start of the stack. */
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc %0 \n"/* System call to start the first task. */
" svc %0 \n" /* System call to start the first task. */
" nop \n"
" \n"
" .align 4 \n"
@ -230,12 +233,12 @@ uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCT
(
" .syntax unified \n"
" \n"
" mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mrs r0, basepri \n" /* r0 = basepri. Return original basepri value. */
" mov r1, %0 \n" /* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" msr basepri, r1 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
@ -247,10 +250,10 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
(
" .syntax unified \n"
" \n"
" msr basepri, r0 \n"/* basepri = ulMask. */
" msr basepri, r0 \n" /* basepri = ulMask. */
" dsb \n"
" isb \n"
" bx lr \n"/* Return. */
" bx lr \n" /* Return. */
::: "memory"
);
}
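ulSetInterruptMask() and vClearInterruptMask() are the BASEPRI-based pair behind the port's interrupt-masking macros. A typical pairing, shown as an illustrative caller:
void vExampleMaskedRegion( void ) /* Hypothetical caller, for illustration only. */
{
    uint32_t ulPreviousMask;

    ulPreviousMask = ulSetInterruptMask(); /* Mask interrupts at or below configMAX_SYSCALL_INTERRUPT_PRIORITY. */
    {
        /* Interrupts that use FreeRTOS APIs cannot preempt this region. */
    }
    vClearInterruptMask( ulPreviousMask ); /* Restore the previous BASEPRI value. */
}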
@ -258,8 +261,8 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att
#if ( configENABLE_MPU == 1 )
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -375,75 +378,74 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" xRBARConst: .word 0xe000ed9c \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#else /* configENABLE_MPU */
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
" \n"
" mrs r0, psp \n"/* Read PSP in r0. */
" mrs r0, psp \n" /* Read PSP in r0. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst lr, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */
" vstmdbeq r0!, {s16-s31} \n" /* Store the additional FP context registers which are not saved automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" mrs r2, psplim \n"/* r2 = PSPLIM. */
" mov r3, lr \n"/* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" mrs r2, psplim \n" /* r2 = PSPLIM. */
" mov r3, lr \n" /* r3 = LR/EXC_RETURN. */
" stmdb r0!, {r2-r11} \n" /* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" str r0, [r1] \n"/* Save the new top of stack in TCB. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" str r0, [r1] \n" /* Save the new top of stack in TCB. */
" \n"
" mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */
" msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */
" dsb \n"
" isb \n"
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"/* r0 = 0. */
" msr basepri, r0 \n"/* Enable interrupts. */
" mov r0, #0 \n" /* r0 = 0. */
" msr basepri, r0 \n" /* Enable interrupts. */
" \n"
" ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n"/* Read pxCurrentTCB. */
" ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */
" ldr r1, [r2] \n" /* Read pxCurrentTCB. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */
" \n"
" ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" ldmia r0!, {r2-r11} \n" /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */
" \n"
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
" tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" tst r3, #0x10 \n" /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */
" vldmiaeq r0!, {s16-s31} \n" /* Restore the additional FP context registers which are not restored automatically. */
#endif /* configENABLE_FPU || configENABLE_MVE */
" \n"
" msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n"/* Remember the new top of stack for the task. */
" msr psplim, r2 \n" /* Restore the PSPLIM register value for the task. */
" msr psp, r0 \n" /* Remember the new top of stack for the task. */
" bx r3 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY )
);
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
".syntax unified \n"
".extern vPortSVCHandler_C \n"
".extern vSystemCallEnter \n"
".extern vSystemCallEnter_1 \n"
".extern vSystemCallExit \n"
" \n"
"tst lr, #4 \n"
@ -454,10 +456,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
"ldr r1, [r0, #24] \n"
"ldrb r2, [r1, #-2] \n"
"cmp r2, %0 \n"
"beq syscall_enter \n"
"blt syscall_enter \n"
"cmp r2, %1 \n"
"beq syscall_enter_1 \n"
"cmp r2, %2 \n"
"beq syscall_exit \n"
"b vPortSVCHandler_C \n"
" \n"
@ -465,24 +465,20 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" mov r1, lr \n"
" b vSystemCallEnter \n"
" \n"
"syscall_enter_1: \n"
" mov r1, lr \n"
" b vSystemCallEnter_1 \n"
" \n"
"syscall_exit: \n"
" mov r1, lr \n"
" b vSystemCallExit \n"
" \n"
: /* No outputs. */
:"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "i" ( NUM_SYSTEM_CALLS ), "i" ( portSVC_SYSTEM_CALL_EXIT )
: "r0", "r1", "r2", "memory"
);
}
}
#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
{
__asm volatile
(
" .syntax unified \n"
@ -497,7 +493,7 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */
" .align 4 \n"
"svchandler_address_const: .word vPortSVCHandler_C \n"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
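The dispatch at the top of SVC_Handler reads the SVC immediate from the two bytes that precede the stacked return address and fans out on it: numbers below NUM_SYSTEM_CALLS are system call entries, portSVC_SYSTEM_CALL_EXIT ends a system call, and everything else falls through to vPortSVCHandler_C. The same logic, rendered as an illustrative C sketch (the real code stays in assembly so it can pick the correct stack pointer and tail-branch to the handlers):
void vSvcDispatchSketch( uint32_t * pulStackFrame, uint32_t ulLR ) /* Hypothetical C rendering of the assembly above. */
{
    /* The stacked PC sits at word offset portOFFSET_TO_PC of the exception
     * frame; the SVC instruction that raised the exception is two bytes
     * behind it and its low byte holds the SVC number. */
    uint8_t ucSvcNumber = ( ( uint8_t * ) pulStackFrame[ portOFFSET_TO_PC ] )[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulStackFrame, ulLR, ucSvcNumber ); /* The SVC number doubles as the system call number. */
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStackFrame, ulLR ); /* Raised by vRequestSystemCallExit when MPU_<API>Impl returns. */
    }
    else
    {
        vPortSVCHandler_C( pulStackFrame ); /* Remaining kernel SVCs - start scheduler, raise privilege, and so on. */
    }
}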

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
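The SYSTEM_CALL_* symbols used below come from mpu_syscall_numbers.h, which gives every MPU-wrapped kernel API a small integer that is used directly as the SVC immediate. An illustrative sketch of its shape (the macro names follow the wrappers in this file; the numeric values here are placeholders, the real ones are defined in that header):
/* Illustrative excerpt only - consult mpu_syscall_numbers.h for the real values. */
#define SYSTEM_CALL_xTaskGenericNotify        0
#define SYSTEM_CALL_xTaskGenericNotifyWait    1
#define SYSTEM_CALL_xTimerGenericCommand      2
#define SYSTEM_CALL_xEventGroupWaitBits       3
#define SYSTEM_CALL_xTaskDelayUntil           4
/* ... one number per wrapped API ... */
#define NUM_SYSTEM_CALLS                      5 /* Placeholder - the real header counts every wrapped API. */
Keeping these numbers below the kernel SVC numbers (which now start at 100 in portmacro.h) is what lets SVC_Handler treat any SVC immediate smaller than NUM_SYSTEM_CALLS as a system call.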
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -57,10 +54,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
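Each wrapper in this file now has the same shape: check nPRIV in CONTROL, branch straight to MPU_<API>Impl when the caller is already privileged, otherwise raise one SVC whose immediate names the API. Because the diff only shows the changed tails, here is the complete pattern for one wrapper, written as a GCC-style naked function for illustration (the IAR source in this file differs only in syntax):
void MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                          const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
{
    __asm volatile
    (
        " .syntax unified                   \n"
        " .extern MPU_xTaskDelayUntilImpl   \n"
        "                                   \n"
        " push {r0, r1}                     \n" /* Preserve the API parameters while CONTROL is examined. */
        " mrs r0, control                   \n" /* r0 = CONTROL. */
        " movs r1, #1                       \n" /* r1 = nPRIV mask. */
        " tst r0, r1                        \n" /* Z is set when the caller is privileged. */
        " bne MPU_xTaskDelayUntil_Unpriv    \n"
        "MPU_xTaskDelayUntil_Priv:          \n"
        "    pop {r0, r1}                   \n"
        "    b MPU_xTaskDelayUntilImpl      \n" /* Privileged caller - call the implementation directly. */
        "MPU_xTaskDelayUntil_Unpriv:        \n"
        "    pop {r0, r1}                   \n"
        "    svc %0                         \n" /* Unprivileged caller - one SVC performs enter, execute and exit. */
        ::"i" ( SYSTEM_CALL_xTaskDelayUntil ) : "memory"
    );
}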
PUBLIC MPU_xTaskAbortDelay
@ -75,10 +69,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -93,10 +84,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -111,10 +99,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -129,10 +114,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -147,10 +129,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -165,10 +144,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -183,10 +159,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -201,10 +174,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -219,10 +189,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -237,10 +204,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -255,10 +219,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -273,10 +234,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -291,10 +249,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -309,10 +264,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -327,10 +279,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -345,10 +294,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -363,10 +309,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -381,10 +324,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -399,10 +339,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -417,10 +354,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -435,10 +369,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -453,10 +384,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -471,10 +399,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -489,10 +414,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -507,10 +429,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -525,14 +444,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -543,14 +459,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
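MPU_xTaskGenericNotifyEntry above is the assembly half of the five-parameter handling: the C side packs the arguments into xTaskGenericNotifyParams_t so that only one pointer has to cross the SVC boundary. A sketch of that calling shim, assuming it lives in the MPU wrappers source file (which is not shown in this excerpt):
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
{
    xTaskGenericNotifyParams_t xParams;

    /* Pack all five parameters into one struct... */
    xParams.xTaskToNotify = xTaskToNotify;
    xParams.uxIndexToNotify = uxIndexToNotify;
    xParams.ulValue = ulValue;
    xParams.eAction = eAction;
    xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;

    /* ...so the Entry wrapper only forwards a single pointer through the SVC. */
    return MPU_xTaskGenericNotifyEntry( &xParams );
}
MPU_xTaskGenericNotifyWaitEntry, MPU_xTimerGenericCommandEntry and MPU_xEventGroupWaitBitsEntry follow the same scheme with their respective *Params_t structures.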
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -561,10 +474,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -579,10 +489,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -597,10 +504,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -615,10 +519,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -633,10 +534,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -651,10 +549,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -669,10 +564,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -687,10 +579,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -705,10 +594,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -723,10 +609,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -741,10 +624,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -759,10 +639,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -777,10 +654,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -795,10 +669,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -813,10 +684,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -831,10 +699,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -849,10 +714,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -867,10 +729,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -885,10 +744,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -903,10 +759,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -921,10 +774,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -939,14 +789,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0, r1}
/* This function can also be called from an ISR and therefore, a check
* is needed to take the privileged path when called from an ISR. */
@ -959,13 +806,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0, r1}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -981,10 +825,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -999,10 +840,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -1017,10 +855,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -1035,10 +870,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -1053,10 +885,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1071,14 +900,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -1089,10 +915,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1107,10 +930,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1125,10 +945,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1143,10 +960,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1161,10 +975,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1179,10 +990,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1197,10 +1005,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1215,10 +1020,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1233,10 +1035,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1251,10 +1050,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1269,10 +1065,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1287,10 +1080,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1305,10 +1095,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1323,10 +1110,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1532,9 +1316,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack,
                       uint32_t ulLR,
                       uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
 * @brief Raise SVC for exiting from a system call.
 */
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack,
                       uint32_t ulLR,
                       uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the Link Register before the SVC was raised.
 * It contains the address of the caller of the System Call entry
 * point (i.e. the caller of the MPU_<API>). We need to restore it
 * when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
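The dispatch above is easier to follow outside the exception context. The following is a minimal host-side sketch of the same idea, assuming a table indexed by system call number in which a zero entry marks an API that is compiled out; all names are placeholders, and the real table, uxSystemCallImplementations, holds the addresses of the MPU_<API>Impl functions rather than C function pointers.

/* Host-side model of the single-SVC dispatch (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define NUM_SYSTEM_CALLS_MODEL    3U   /* Assumed size, for the model only. */

typedef void ( * SystemCallImpl_t )( void );

static void prvDelayImpl( void )  { printf( "delay impl runs privileged\n" ); }
static void prvNotifyImpl( void ) { printf( "notify impl runs privileged\n" ); }

/* Index is the system call number; a zero entry means the API is compiled out. */
static const SystemCallImpl_t xImplementations[ NUM_SYSTEM_CALLS_MODEL ] =
{
    prvDelayImpl,   /* 0 */
    prvNotifyImpl,  /* 1 */
    NULL            /* 2 - disabled in the configuration. */
};

static void prvModelSystemCallExit( void )
{
    printf( "exit SVC: restore task stack, drop privilege\n" );
}

static void prvModelSystemCallEnter( uint8_t ucNumber )
{
    /* Mirrors the checks in vSystemCallEnter: valid number, enabled implementation. */
    if( ( ucNumber < NUM_SYSTEM_CALLS_MODEL ) && ( xImplementations[ ucNumber ] != NULL ) )
    {
        xImplementations[ ucNumber ]();   /* The stacked PC is redirected to the Impl. */
        prvModelSystemCallExit();         /* The stacked LR points at the exit request. */
    }
}

int main( void )
{
    prvModelSystemCallEnter( 1 );
    prvModelSystemCallEnter( 2 );   /* Ignored - disabled API. */
    return 0;
}

In the real port the redirect happens by rewriting the stacked PC to the Impl address and the stacked LR to vRequestSystemCallExit, so the privileged implementation runs on return from the SVC handler and its own return raises the exit SVC.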
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
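Both the enter and exit paths key off bit 9 of the stacked xPSR, which the hardware sets when it inserts a padding word to keep the exception stack frame double word aligned. A small host-side illustration of that test, with an assumed mask value matching the bit position used by portPSR_STACK_PADDING_MASK:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match portPSR_STACK_PADDING_MASK, i.e. bit 9 of xPSR. */
#define PSR_STACK_PADDING_MASK    ( 1UL << 9 )

int main( void )
{
    uint32_t ulStackedPsr = 0x01000200UL;   /* Example value with bit 9 set. */

    if( ( ulStackedPsr & PSR_STACK_PADDING_MASK ) == PSR_STACK_PADDING_MASK )
    {
        printf( "Hardware added a padding word - re-insert it when unwinding.\n" );
    }
    else
    {
        printf( "No alignment padding was used.\n" );
    }

    return 0;
}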
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
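xPortIsTaskPrivileged() is also callable from application code when a task needs to branch on its own privilege level. A short, hypothetical usage sketch, assuming the prototype is visible through the headers pulled in by FreeRTOS.h:

#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical task body showing a privilege-dependent branch. */
static void prvExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        if( xPortIsTaskPrivileged() == pdTRUE )
        {
            /* Work that only a privileged task should perform. */
        }

        vTaskDelay( pdMS_TO_TICKS( 100 ) );
    }
}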
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -33,6 +33,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -45,7 +48,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -95,7 +97,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -230,7 +232,7 @@ vStartFirstTask:
cpsie i /* Globally enable interrupts. */
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -482,21 +484,17 @@ SVC_Handler:
b route_svc
route_svc:
ldr r2, [r0, #24]
subs r2, #2
ldrb r3, [r2, #0]
cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq system_call_enter
cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq system_call_enter_1
cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
ldr r3, [r0, #24]
subs r3, #2
ldrb r2, [r3, #0]
cmp r2, #NUM_SYSTEM_CALLS
blt system_call_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq system_call_exit
b vPortSVCHandler_C
system_call_enter:
b vSystemCallEnter
system_call_enter_1:
b vSystemCallEnter_1
system_call_exit:
b vSystemCallExit
@ -523,7 +521,7 @@ vPortFreeSecureContext:
bne free_secure_context /* Branch if r1 != 0. */
bx lr /* There is no secure context (xSecureContext is NULL). */
free_secure_context:
svc 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svc 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
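The rewritten route_svc block above reads the byte at (stacked PC - 2) to recover the SVC immediate, treats any value below NUM_SYSTEM_CALLS as a system call entry, routes 104 to the exit handler, and falls through to vPortSVCHandler_C for everything else. The same decision expressed as a self-contained C model; the handler names and the NUM_SYSTEM_CALLS value are placeholders:

#include <stdint.h>
#include <stdio.h>

#define NUM_SYSTEM_CALLS_MODEL       50U     /* Assumed value, for illustration. */
#define SVC_SYSTEM_CALL_EXIT_MODEL   104U

static void prvEnter( uint8_t ucNumber )          { printf( "system call enter %u\n", ( unsigned ) ucNumber ); }
static void prvExit( void )                       { printf( "system call exit\n" ); }
static void prvOtherKernelSvc( uint8_t ucNumber ) { printf( "kernel SVC %u\n", ( unsigned ) ucNumber ); }

static void prvRouteSvc( uint8_t ucSvcNumber )
{
    if( ucSvcNumber < NUM_SYSTEM_CALLS_MODEL )
    {
        prvEnter( ucSvcNumber );
    }
    else if( ucSvcNumber == SVC_SYSTEM_CALL_EXIT_MODEL )
    {
        prvExit();
    }
    else
    {
        prvOtherKernelSvc( ucSvcNumber );   /* e.g. start scheduler, yield. */
    }
}

int main( void )
{
    prvRouteSvc( 3 );      /* A system call. */
    prvRouteSvc( 104 );    /* Exit request. */
    prvRouteSvc( 102 );    /* Routed to vPortSVCHandler_C in the real port. */
    return 0;
}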

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
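Moving the kernel SVC numbers up to 100 and above keeps them clear of the system call numbers, which now occupy 0 to NUM_SYSTEM_CALLS - 1 and are routed straight to vSystemCallEnter. A compile-time guard along these lines, with assumed values purely for illustration, makes the non-overlap explicit:

#include <assert.h>

/* Assumed values for illustration; the real ones come from
 * mpu_syscall_numbers.h and portmacro.h. */
#define NUM_SYSTEM_CALLS_MODEL              50
#define SVC_ALLOCATE_SECURE_CONTEXT_MODEL   100

/* Kernel SVC numbers must stay above every system call number so that
 * route_svc can separate the two ranges with a single comparison. */
static_assert( SVC_ALLOCATE_SECURE_CONTEXT_MODEL >= NUM_SYSTEM_CALLS_MODEL,
               "Kernel SVC numbers overlap the system call number range" );

int main( void )
{
    return 0;
}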
/**

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -57,10 +54,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
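Every unprivileged wrapper now collapses the old enter-SVC, bl Impl, exit-SVC, bx lr sequence into a single svc whose immediate is the API's system call number, while the privileged path still branches straight to the Impl. Application code does not change; the same call simply compiles down to one SVC instead of three. A trivial, hypothetical caller for reference (prvUnprivilegedTask is not part of the kernel):

#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical unprivileged task; the MPU wrapper behind xTaskDelayUntil
 * now raises a single SVC carrying SYSTEM_CALL_xTaskDelayUntil. */
static void prvUnprivilegedTask( void * pvParameters )
{
    TickType_t xLastWakeTime = xTaskGetTickCount();

    ( void ) pvParameters;

    for( ; ; )
    {
        xTaskDelayUntil( &xLastWakeTime, pdMS_TO_TICKS( 10 ) );
    }
}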
PUBLIC MPU_xTaskAbortDelay
@ -75,10 +69,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -93,10 +84,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -111,10 +99,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -129,10 +114,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -147,10 +129,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -165,10 +144,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -183,10 +159,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -201,10 +174,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -219,10 +189,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -237,10 +204,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -255,10 +219,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -273,10 +234,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -291,10 +249,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -309,10 +264,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -327,10 +279,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -345,10 +294,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -363,10 +309,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -381,10 +324,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -399,10 +339,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -417,10 +354,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -435,10 +369,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -453,10 +384,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -471,10 +399,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -489,10 +414,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -507,10 +429,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -525,14 +444,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -543,14 +459,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
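APIs that take five parameters no longer need a separate enter SVC: the C-side wrapper is expected to pack the arguments into a single structure and hand its address to the assembly Entry symbol, so only one register-sized value crosses the SVC boundary. The sketch below models that packing on the host; the type and function names are stand-ins, not the kernel's own definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types involved. */
typedef void * TaskHandleModel_t;

typedef struct NotifyParamsModel
{
    TaskHandleModel_t xTaskToNotify;
    unsigned long uxIndexToNotify;
    uint32_t ulValue;
    int eAction;
    uint32_t * pulPreviousValue;
} NotifyParamsModel_t;

/* Stands in for the assembly MPU_xTaskGenericNotifyEntry, which raises the
 * single SVC with the struct pointer in r0. */
static long prvNotifyEntryModel( const NotifyParamsModel_t * pxParams )
{
    printf( "svc with one pointer argument, value = %lu\n",
            ( unsigned long ) pxParams->ulValue );
    return 1;
}

/* Model of the C-side wrapper: pack five arguments, forward one pointer. */
static long prvNotifyModel( TaskHandleModel_t xTaskToNotify,
                            unsigned long uxIndexToNotify,
                            uint32_t ulValue,
                            int eAction,
                            uint32_t * pulPreviousValue )
{
    NotifyParamsModel_t xParams;

    xParams.xTaskToNotify = xTaskToNotify;
    xParams.uxIndexToNotify = uxIndexToNotify;
    xParams.ulValue = ulValue;
    xParams.eAction = eAction;
    xParams.pulPreviousValue = pulPreviousValue;

    return prvNotifyEntryModel( &xParams );
}

int main( void )
{
    uint32_t ulPrevious = 0;

    ( void ) prvNotifyModel( NULL, 0, 42, 0, &ulPrevious );
    return 0;
}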
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -561,10 +474,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -579,10 +489,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -597,10 +504,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -615,10 +519,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -633,10 +534,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -651,10 +549,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -669,10 +564,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -687,10 +579,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -705,10 +594,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -723,10 +609,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -741,10 +624,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -759,10 +639,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -777,10 +654,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -795,10 +669,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -813,10 +684,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -831,10 +699,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -849,10 +714,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -867,10 +729,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -885,10 +744,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -903,10 +759,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -921,10 +774,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -939,14 +789,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0, r1}
/* This function can also be called from an ISR and therefore, we need a
* check to take the privileged path if it is called from an ISR. */
@ -959,13 +806,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0, r1}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
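MPU_xTimerGenericCommandEntry is the one wrapper that must also work when invoked from an ISR, so it first checks whether the caller is already privileged or running in an interrupt context (the exact register tests are elided by the hunk above); in that case it branches directly to MPU_xTimerGenericCommandPrivImpl, and only an unprivileged thread-mode caller takes the SVC path. The same decision as a small host-side model:

#include <stdbool.h>
#include <stdio.h>

static void prvPrivImpl( void ) { printf( "direct call to the privileged implementation\n" ); }
static void prvRaiseSvc( void ) { printf( "svc #SYSTEM_CALL_xTimerGenericCommand\n" ); }

/* Model of the entry decision; xInsideIsr stands in for interrupt context and
 * xPrivileged for a caller that is already privileged. */
static void prvTimerCommandEntryModel( bool xInsideIsr, bool xPrivileged )
{
    if( xInsideIsr || xPrivileged )
    {
        prvPrivImpl();
    }
    else
    {
        prvRaiseSvc();
    }
}

int main( void )
{
    prvTimerCommandEntryModel( true, false );    /* From an ISR. */
    prvTimerCommandEntryModel( false, false );   /* Unprivileged task. */
    return 0;
}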
@ -981,10 +825,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -999,10 +840,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -1017,10 +855,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -1035,10 +870,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -1053,10 +885,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1071,14 +900,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0, r1}
mrs r0, control
movs r1, #1
@ -1089,10 +915,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1107,10 +930,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1125,10 +945,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1143,10 +960,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1161,10 +975,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1179,10 +990,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1197,10 +1005,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1215,10 +1020,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1233,10 +1035,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1251,10 +1050,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1269,10 +1065,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1287,10 +1080,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1305,10 +1095,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1323,10 +1110,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0, r1}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1532,9 +1316,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -41,7 +44,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vPortSVCHandler_C
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -216,7 +218,7 @@ vStartFirstTask:
cpsie i /* Globally enable interrupts. */
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
nop
/*-----------------------------------------------------------*/
@ -401,21 +403,17 @@ SVC_Handler:
b route_svc
route_svc:
ldr r2, [r0, #24]
subs r2, #2
ldrb r3, [r2, #0]
cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq system_call_enter
cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq system_call_enter_1
cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */
ldr r3, [r0, #24]
subs r3, #2
ldrb r2, [r3, #0]
cmp r2, #NUM_SYSTEM_CALLS
blt system_call_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq system_call_exit
b vPortSVCHandler_C
system_call_enter:
b vSystemCallEnter
system_call_enter_1:
b vSystemCallEnter_1
system_call_exit:
b vSystemCallExit
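The routing above reads more naturally in C. The sketch below is illustrative only (prvRouteSvcSketch is a hypothetical name, not a function in the port): the SVC immediate is the low byte of the two-byte SVC instruction, which sits two bytes before the stacked return address, and every number below NUM_SYSTEM_CALLS is treated as a system call number.
void prvRouteSvcSketch( uint32_t * pulCallerStackAddress, uint32_t ulLR )
{
    uint32_t ulStackedPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
    uint8_t ucSvcNumber = ( ( uint8_t * ) ulStackedPC )[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        /* Run the requested MPU_<API>Impl on the system call stack. */
        vSystemCallEnter( pulCallerStackAddress, ulLR, ucSvcNumber );
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        /* Return to the task after a completed system call. */
        vSystemCallExit( pulCallerStackAddress, ulLR );
    }
    else
    {
        /* portSVC_START_SCHEDULER, portSVC_RAISE_PRIVILEGE, etc. */
        vPortSVCHandler_C( pulCallerStackAddress );
    }
}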

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
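MPU_xTaskGenericNotifyEntry (and the other *Entry stubs) exist because these APIs take five arguments, one more than fits in r0-r3. Rather than copying a fifth, stacked parameter at SVC time, the C side of the wrapper packs all arguments into a single struct and passes its address. A hedged sketch of that packing wrapper follows; the struct and field names should be read as illustrative.
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
{
    xTaskGenericNotifyParams_t xParams; /* One register-sized argument instead of five. */

    xParams.xTaskToNotify = xTaskToNotify;
    xParams.uxIndexToNotify = uxIndexToNotify;
    xParams.ulValue = ulValue;
    xParams.eAction = eAction;
    xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;

    return MPU_xTaskGenericNotifyEntry( &xParams );
}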
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and, therefore, we need a
* check to take the privileged path when called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
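MPU_xTimerGenericCommandEntry is the one wrapper that may legitimately be reached from an ISR (for example via xTimerStartFromISR), so it cannot unconditionally raise an SVC. The decision it makes is sketched below in C; prvTakePrivilegedPath is a hypothetical helper, and the real check is the handful of instructions above.
static BaseType_t prvTakePrivilegedPath( void )
{
    uint32_t ulControl, ulIpsr;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsr ) );

    /* Handler mode (non-zero IPSR) or privileged thread mode (nPRIV clear)
     * branches to MPU_xTimerGenericCommandPrivImpl; everything else raises
     * svc #SYSTEM_CALL_xTimerGenericCommand. */
    return ( ( ulIpsr != 0UL ) || ( ( ulControl & 1UL ) == 0UL ) ) ? pdTRUE : pdFALSE;
}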
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
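Each unprivileged path above has collapsed from three steps (enter SVC, bl MPU_<API>Impl, exit SVC) into a single svc #SYSTEM_CALL_<API>: the kernel now runs the implementation on the system call stack and requests the exit itself. Purely as a conceptual C rendering of one such stub, and not a drop-in replacement (the real stubs are naked and keep the arguments untouched in r0-r3):
void MPU_vTaskDelaySketch( TickType_t xTicksToDelay )
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    if( ( ulControl & 1UL ) == 0UL )
    {
        /* Already privileged - call the implementation directly. */
        MPU_vTaskDelayImpl( xTicksToDelay );
    }
    else
    {
        /* One SVC replaces the old enter/call/exit sequence; the handler
         * switches stacks, raises privilege, runs MPU_vTaskDelayImpl and
         * drops privilege again on the way out. */
        __asm volatile ( "svc %0" : : "i" ( SYSTEM_CALL_vTaskDelay ) : "memory" );
    }
}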
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and, therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and, therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
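For reference, the per-task bookkeeping that vSystemCallEnter fills in and vSystemCallExit consumes looks roughly like the structure below. This is an approximate reconstruction from the fields used above; the authoritative definition, including the system call stack buffer itself, lives in the port's portmacrocommon.h.
typedef struct SYSTEM_CALL_STACK_INFO
{
    uint32_t * pulTaskStack;                        /* Non-NULL only while a system call is in progress; where the frame is copied back on exit. */
    uint32_t * pulSystemCallStack;                  /* Privileged-only stack used while the system call implementation runs. */
    uint32_t * pulSystemCallStackLimit;             /* Written to PSPLIM for the duration of the system call. */
    uint32_t ulLinkRegisterAtSystemCallEntry;       /* Address of the caller of MPU_<API>; becomes the return PC on exit. */
    uint32_t ulStackLimitRegisterAtSystemCallEntry; /* Task's PSPLIM at entry; restored on exit. */
} xSYSTEM_CALL_STACK_INFO;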
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
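A minimal usage sketch for the function above, assuming the caller is itself privileged code (xPortIsTaskPrivileged is a PRIVILEGED_FUNCTION); the helper name is hypothetical.
static void prvTakeFastPathIfPrivileged( void )
{
    if( xPortIsTaskPrivileged() == pdTRUE )
    {
        /* The calling task was created privileged (portPRIVILEGE_BIT set),
         * so a privileged-only shortcut can be taken here. */
    }
}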
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -44,7 +47,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -86,7 +88,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -205,7 +207,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -455,11 +457,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -467,10 +467,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
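The handler now reads the SVC immediate once (the byte at PC minus 2 in the stacked frame) and routes every number below NUM_SYSTEM_CALLS to vSystemCallEnter, so adding a system call no longer requires touching this dispatch. Below is a minimal C sketch of the same decision, for illustration only; prvSvcDispatchSketch and its parameter names are hypothetical, while the called functions and macros are the ones used above.

/* Illustration only - the real dispatch is the assembly above, as if placed in port.c. */
static void prvSvcDispatchSketch( uint32_t * pulStackedFrame, /* PSP at exception entry. */
                                  uint32_t ulExcReturn )      /* EXC_RETURN value held in LR. */
{
    /* The stacked PC points to the instruction after the SVC; the SVC number
     * is the low byte of the 16-bit SVC instruction just before it. */
    uint8_t ucSvcNumber = ( ( uint8_t * ) pulStackedFrame[ portOFFSET_TO_PC ] )[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        /* Any valid system call number enters through the common path. */
        vSystemCallEnter( pulStackedFrame, ulExcReturn, ucSvcNumber );
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStackedFrame, ulExcReturn );
    }
    else
    {
        /* Port-internal SVCs (100 and above) are handled as before. */
        vPortSVCHandler_C( pulStackedFrame );
    }
}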
@ -493,7 +489,7 @@ vPortFreeSecureContext:
ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
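Moving the port-internal SVC numbers to 100 and above keeps them clear of the range 0 to ( NUM_SYSTEM_CALLS - 1 ), which the single entry SVC now uses directly as the system call number. A hypothetical compile-time guard (not part of this change) that captures this assumption:

#if ( NUM_SYSTEM_CALLS >= portSVC_ALLOCATE_SECURE_CONTEXT )
    #error "System call numbers must stay below the port-internal SVC numbers."
#endif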
/*-----------------------------------------------------------*/
/**

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
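MPU_xTaskGenericNotifyEntry receives a single pointer in r0, so the five arguments of xTaskGenericNotify no longer need the separate portSVC_SYSTEM_CALL_ENTER_1 path. The sketch below shows how the C-side wrapper is assumed to pack the arguments into the xTaskGenericNotifyParams_t structure before trapping; the actual wrapper lives in mpu_wrappers_v2.c and may differ in detail. xTaskGenericNotifyWait, xTimerGenericCommand and xEventGroupWaitBits are assumed to follow the same pattern with their own parameter structures.

BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */
{
    xTaskGenericNotifyParams_t xParams;

    /* Pack the five arguments into the struct declared in mpu_prototypes.h. */
    xParams.xTaskToNotify = xTaskToNotify;
    xParams.uxIndexToNotify = uxIndexToNotify;
    xParams.ulValue = ulValue;
    xParams.eAction = eAction;
    xParams.pulPreviousNotificationValue = pulPreviousNotificationValue;

    /* One pointer argument fits in r0, so a single SVC is enough. */
    return MPU_xTaskGenericNotifyEntry( &( xParams ) );
}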
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore, we need a check
* to take the privileged path if called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is non-NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is non-NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
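The rewritten vSystemCallEnter points the return PC at uxSystemCallImplementations[ ucSystemCallNumber ] and plants vRequestSystemCallExit in the stacked LR, which is what replaces the old enter-call-exit SVC triple with a single SVC. Below is a rough sketch of how that dispatch table is assumed to look in mpu_wrappers_v2.c; the entries shown, their order and the Impl signatures are assumptions for illustration only.

/* Illustrative assumptions - only two entries shown. */
extern BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
                                           const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION;
extern BaseType_t MPU_xTaskGenericNotifyImpl( const xTaskGenericNotifyParams_t * pxParams ) PRIVILEGED_FUNCTION;

/* Indexed by the SYSTEM_CALL_* numbers from mpu_syscall_numbers.h; each entry
 * is the address of the privileged implementation that vSystemCallEnter jumps to. */
UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ] =
{
    [ SYSTEM_CALL_xTaskDelayUntil ]    = ( UBaseType_t ) MPU_xTaskDelayUntilImpl,
    [ SYSTEM_CALL_xTaskGenericNotify ] = ( UBaseType_t ) MPU_xTaskGenericNotifyImpl,
    /* ... one entry per SYSTEM_CALL_* number ... */
};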
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -41,7 +44,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vPortSVCHandler_C
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -191,7 +193,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -371,11 +373,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -383,10 +383,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
/**

View file

@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
    /* This function can also be called from an ISR and therefore, we need a
     * check to take the privileged path if it is called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
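The timer command entry above is the one wrapper that may legitimately be reached from an interrupt, which is why it inspects IPSR (and then CONTROL) before choosing between the single SVC and a direct branch to the privileged implementation. Below is a minimal C sketch of that decision, assuming GCC-style inline assembly; prvReadIpsr, prvReadControl, xTimerCommandPrivImplSketch and the SVC number 23 are illustrative stand-ins, the real wrapper is the IAR assembly above.

#include <stdint.h>

/* Stand-in for the weak MPU_xTimerGenericCommandPrivImpl above. */
extern long xTimerCommandPrivImplSketch( void );

static inline uint32_t prvReadIpsr( void ) /* Non-zero when executing in an ISR. */
{
    uint32_t ulIpsr;

    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsr ) );
    return ulIpsr;
}

static inline uint32_t prvReadControl( void ) /* Bit 0 (nPRIV) is set for an unprivileged task. */
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
    return ulControl;
}

long xTimerGenericCommandEntrySketch( void )
{
    if( ( prvReadIpsr() != 0U ) || ( ( prvReadControl() & 1U ) == 0U ) )
    {
        /* Called from an ISR or from a privileged task - no SVC needed,
         * branch straight to the privileged implementation. */
        return xTimerCommandPrivImplSketch();
    }
    else
    {
        /* Unprivileged task - raise the single system call SVC. 23 is a
         * placeholder; the real number is SYSTEM_CALL_xTimerGenericCommand
         * from mpu_syscall_numbers.h. */
        __asm volatile ( "svc %0" : : "i" ( 23 ) : "memory" );

        /* In the real wrapper, control returns straight to the caller with
         * the result already in r0, so nothing runs after the SVC. */
        return 0;
    }
}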
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
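The new third parameter is recovered by the assembly SVC handler from the caller's exception frame: the stacked PC is the return address just past the SVC instruction, and the low byte of that 16-bit Thumb instruction is the SVC immediate. A minimal sketch of that extraction, assuming the standard 8-word basic frame (r0-r3, r12, lr, pc, xpsr); prvGetSvcNumberSketch and SKETCH_OFFSET_TO_PC are illustrative names, the real code is the "ldrb r2, [r1, #-2]" sequence in SVC_Handler further down.

#include <stdint.h>

#define SKETCH_OFFSET_TO_PC    6U /* PC is the 7th word of the hardware-stacked basic frame. */

static uint8_t prvGetSvcNumberSketch( const uint32_t * pulTaskStack )
{
    /* The stacked PC points to the instruction after "svc #n". A Thumb SVC
     * is 2 bytes long and its immediate is the low byte of the encoding,
     * so the number lives at (return address - 2). */
    const uint8_t * pucReturnAddress = ( const uint8_t * ) pulTaskStack[ SKETCH_OFFSET_TO_PC ];

    return pucReturnAddress[ -2 ];
}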
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
        /* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
        /* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
        /* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
                    (
                        " vpush {s0} \n" /* Trigger lazy stacking. */
                        " vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
             * double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variable are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
             * double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
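What makes a single SVC sufficient is the frame patching shown above in vSystemCallEnter: the task's exception frame is copied onto the system call stack and two slots of the copy are overwritten, so that the exception return lands in the kernel-side implementation and the implementation's own return raises the exit SVC via vRequestSystemCallExit. A minimal sketch of that rewrite follows; BasicFrameSketch_t and the *Sketch symbols are illustrative stand-ins, not FreeRTOS APIs.

#include <stdint.h>

/* Sketch of the 8-word basic exception frame; slots 5-7 correspond to
 * portOFFSET_TO_LR, portOFFSET_TO_PC and portOFFSET_TO_PSR. */
typedef struct BasicFrameSketch
{
    uint32_t ulR0, ulR1, ulR2, ulR3, ulR12;
    uint32_t ulLR;
    uint32_t ulPC;
    uint32_t ulPSR;
} BasicFrameSketch_t;

extern const uint32_t ulSystemCallImplementationsSketch[]; /* Stand-in for uxSystemCallImplementations[]. */
extern void vRequestSystemCallExitSketch( void );          /* Stand-in for vRequestSystemCallExit. */

static void prvPatchSystemCallFrameSketch( BasicFrameSketch_t * pxFrame,
                                           uint8_t ucSystemCallNumber )
{
    /* Resume execution in the kernel-side implementation of the system call
     * when the SVC handler returns. */
    pxFrame->ulPC = ulSystemCallImplementationsSketch[ ucSystemCallNumber ];

    /* Make the implementation's return raise portSVC_SYSTEM_CALL_EXIT, so the
     * port can switch back to the task stack and drop privilege. */
    pxFrame->ulLR = ( uint32_t ) vRequestSystemCallExitSketch;
}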
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
        /* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variable are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
                    (
                        " vpush {s0} \n" /* Trigger lazy stacking. */
                        " vpop {s0} \n" /* Nullify the effect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value.*/
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
        /* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;


@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -44,7 +47,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -86,7 +88,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -205,7 +207,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -455,11 +457,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -467,10 +467,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
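With every MPU wrapper raising its own system call number, the handler's dispatch above reduces to a range check: anything below NUM_SYSTEM_CALLS enters a system call, the dedicated exit number takes the exit path, and every other SVC falls through to vPortSVCHandler_C. A C sketch of that dispatch follows; NUM_SYSTEM_CALLS_SKETCH is a placeholder for the real constant from mpu_syscall_numbers.h, and 104 matches portSVC_SYSTEM_CALL_EXIT in the renumbered portmacro.h values.

#include <stdint.h>

#define NUM_SYSTEM_CALLS_SKETCH        100U /* Placeholder for NUM_SYSTEM_CALLS. */
#define SKETCH_SVC_SYSTEM_CALL_EXIT    104U /* portSVC_SYSTEM_CALL_EXIT. */

extern void vSystemCallEnter( uint32_t * pulTaskStack,
                              uint32_t ulLR,
                              uint8_t ucSystemCallNumber );
extern void vSystemCallExit( uint32_t * pulSystemCallStack,
                             uint32_t ulLR );
extern void vPortSVCHandler_C( uint32_t * pulCallerStackAddress );

static void prvDispatchSvcSketch( uint32_t * pulStack,
                                  uint32_t ulLR,
                                  uint8_t ucSvcNumber )
{
    if( ucSvcNumber < NUM_SYSTEM_CALLS_SKETCH )
    {
        vSystemCallEnter( pulStack, ulLR, ucSvcNumber ); /* System call entry. */
    }
    else if( ucSvcNumber == SKETCH_SVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulStack, ulLR );               /* System call exit. */
    }
    else
    {
        vPortSVCHandler_C( pulStack );                   /* All other kernel SVCs. */
    }
}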
@ -493,7 +489,7 @@ vPortFreeSecureContext:
ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/


@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
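Moving the kernel SVC numbers up to 100 and above keeps them out of the range the SVC handler interprets as per-API system call numbers (everything below NUM_SYSTEM_CALLS). A compile-time guard that expresses this assumption is sketched below; it is illustrative only, not part of this change, and it assumes NUM_SYSTEM_CALLS from mpu_syscall_numbers.h expands to an integer constant.

#include "mpu_syscall_numbers.h"

#define SKETCH_FIRST_KERNEL_SVC    100 /* portSVC_ALLOCATE_SECURE_CONTEXT. */

/* The single-SVC entry scheme only works if no per-API system call number
 * can be mistaken for a kernel SVC number such as portSVC_SYSTEM_CALL_EXIT. */
_Static_assert( NUM_SYSTEM_CALLS <= SKETCH_FIRST_KERNEL_SVC,
                "System call numbers must stay below the kernel SVC number range." );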
/**


@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore needs a check
* to take the privileged path when it is called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
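MPU_xTimerGenericCommandEntry differs from the other stubs because the timer command API can also be called from an ISR, so ISR callers and privileged tasks branch to the renamed MPU_xTimerGenericCommandPrivImpl instead of going through the SVC. The exact register checks are elided by the hunk above; a sketch of the predicate they implement (assumed helper name, not a FreeRTOS API, assuming FreeRTOS.h is included) is:

/* Sketch of the decision made by MPU_xTimerGenericCommandEntry above. */
static BaseType_t prvTimerCommandUsesPrivilegedPath_Sketch( void )
{
    uint32_t ulIpsr, ulControl;

    /* IPSR is non-zero while executing inside an exception handler. */
    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsr ) );

    /* CONTROL bit 0 (nPRIV) is clear for privileged thread-mode code. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    /* ISR callers and privileged tasks branch to
     * MPU_xTimerGenericCommandPrivImpl; everyone else raises
     * SVC #SYSTEM_CALL_xTimerGenericCommand. */
    return ( ( ulIpsr != 0UL ) || ( ( ulControl & 1UL ) == 0UL ) ) ? pdTRUE : pdFALSE;
}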
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

View file

@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need of padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
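vSystemCallEnter above consumes uxSystemCallImplementations, the table that maps a system call number to the address of the corresponding MPU_<API>Impl function: the handler writes that address into the stacked PC and points the stacked LR at vRequestSystemCallExit, so the implementation runs privileged on the system call stack and automatically raises the exit SVC when it returns. Conceptually the table looks like the sketch below; the complete table is defined by the kernel's MPU wrapper sources, the entries shown are only an illustration, and disabled APIs hold 0 so that check 3 above rejects them.

/* Conceptual sketch of the table consumed by vSystemCallEnter() - not shipped code. */
UBaseType_t uxSystemCallImplementations_Sketch[ NUM_SYSTEM_CALLS ] =
{
    [ SYSTEM_CALL_vTaskDelay ]         = ( UBaseType_t ) MPU_vTaskDelayImpl,
    [ SYSTEM_CALL_xTaskGenericNotify ] = ( UBaseType_t ) MPU_xTaskGenericNotifyImpl,
    [ SYSTEM_CALL_xQueueGenericSend ]  = ( UBaseType_t ) MPU_xQueueGenericSendImpl,
    /* ... one entry per SYSTEM_CALL_* number; APIs disabled in
     * FreeRTOSConfig.h are left at 0, which vSystemCallEnter() rejects. */
};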
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -41,7 +44,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vPortSVCHandler_C
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -191,7 +193,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -371,11 +373,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -383,10 +383,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
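With the renumbering, the assembly dispatch above no longer compares against individual portSVC_SYSTEM_CALL_ENTER values: any SVC immediate below NUM_SYSTEM_CALLS is treated as a system call entry, 104 requests the exit, and everything else falls through to vPortSVCHandler_C. A C rendering of the same decision (illustration only; the shipped dispatch is the SVC_Handler assembly above):

/* Illustration of the SVC_Handler dispatch - not shipped code. */
void vSvcDispatch_Sketch( uint32_t * pulTaskStack, uint32_t ulLR )
{
    /* The SVC immediate is encoded in the low byte of the SVC instruction,
     * two bytes before the stacked return address. */
    uint8_t ucSvcNumber = ( ( uint8_t * ) pulTaskStack[ portOFFSET_TO_PC ] )[ -2 ];

    if( ucSvcNumber < NUM_SYSTEM_CALLS )
    {
        vSystemCallEnter( pulTaskStack, ulLR, ucSvcNumber );
    }
    else if( ucSvcNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulTaskStack, ulLR );
    }
    else
    {
        vPortSVCHandler_C( pulTaskStack );
    }
}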

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
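The port-internal SVC numbers move from 0..6 up to 100..105 so that the low immediates are free for the kernel's per-API system call numbers: the SVC dispatch treats any immediate below NUM_SYSTEM_CALLS as a system call entry, so the two ranges must never overlap. A hedged illustration of that invariant (NUM_SYSTEM_CALLS comes from mpu_syscall_numbers.h, which portmacro.h does not itself include, so this is a sketch of the constraint rather than shipped code):

/* Sketch of the invariant behind the renumbering - not shipped code. */
#include "mpu_syscall_numbers.h"

#if ( NUM_SYSTEM_CALLS > portSVC_ALLOCATE_SECURE_CONTEXT )
    #error "SYSTEM_CALL_* numbers would collide with the port-internal SVC numbers."
#endif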
/**

View file

@ -32,6 +32,7 @@
* the code is included in C files but excluded by the preprocessor in assembly
* files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
SECTION freertos_system_calls:CODE:NOROOT(2)
THUMB
@ -41,10 +42,6 @@
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 5
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
@ -60,10 +57,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -77,10 +71,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -94,10 +85,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -111,10 +99,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -128,10 +113,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -145,10 +127,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -162,10 +141,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -179,10 +155,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -196,10 +169,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -213,10 +183,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -230,10 +197,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -247,10 +211,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -264,10 +225,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -281,10 +239,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -298,10 +253,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -315,10 +267,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -332,10 +281,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -349,10 +295,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -366,10 +309,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -383,10 +323,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -400,10 +337,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -417,10 +351,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -434,10 +365,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -451,10 +379,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -468,10 +393,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -485,10 +407,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -502,14 +421,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -519,14 +435,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -536,10 +449,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -553,10 +463,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -570,10 +477,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -587,10 +491,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -604,10 +505,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -621,10 +519,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -638,10 +533,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -655,10 +547,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -672,10 +561,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -689,10 +575,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -706,10 +589,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -723,10 +603,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -740,10 +617,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -757,10 +631,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -774,10 +645,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -791,10 +659,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -808,10 +673,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -825,10 +687,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -842,10 +701,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -859,10 +715,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -876,10 +729,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -893,14 +743,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore, we need a check
* to take the privileged path if called from an ISR. */
@ -912,13 +759,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -933,10 +777,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -950,10 +791,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -967,10 +805,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -984,10 +819,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -1001,10 +833,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1018,14 +847,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1035,10 +861,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1052,10 +875,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1069,10 +889,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1086,10 +903,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1103,10 +917,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1120,10 +931,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1137,10 +945,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1154,10 +959,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1171,10 +973,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1188,10 +987,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1205,10 +1001,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1222,10 +1015,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1239,10 +1029,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1256,10 +1043,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1465,9 +1249,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:

portable/IAR/ARM_CM4F_MPU/port.c Executable file → Normal file

@ -41,6 +41,7 @@
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "mpu_syscall_numbers.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
@ -232,31 +233,26 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION;
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
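The exit half of the new flow is deliberately tiny: vSystemCallEnter() writes the address of this helper into the stacked LR, so simply returning from the privileged implementation raises the exit SVC. A minimal sketch of its shape, matching the definition that appears further down in this file's diff:

void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
    /* Nothing to do except request the exit; vSystemCallExit() then
     * restores the task stack and drops the privilege. */
    __asm volatile ( "svc %0 \n" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}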
@ -269,7 +265,8 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION;
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -359,10 +356,10 @@ void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
uint8_t ucSVCNumber;
uint32_t ulPC;
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
#if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) )
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( ( configUSE_MPU_WRAPPERS_V1 == 1 ) && ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) ) */
/* The stack contains: r0, r1, r2, r3, r12, LR, PC and xPSR. The first
* argument (r0) is pulParam[ 0 ]. */
@ -388,6 +385,7 @@ void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
break;
#if ( configUSE_MPU_WRAPPERS_V1 == 1 )
#if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 )
case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the
* svc was raised from any of the
@ -417,6 +415,7 @@ void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
);
break;
#endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
default: /* Unknown SVC call. */
break;
@ -426,12 +425,16 @@ void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -444,22 +447,33 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is non-NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -484,7 +498,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
@ -495,10 +510,20 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
@ -515,148 +540,66 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
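A host-side sketch of the table-driven dispatch the function above relies on. The two prvDemo* implementations, the three-entry table and ulDemoSystemCallEnter() are invented for illustration; on the target the table is the uxSystemCallImplementations[ NUM_SYSTEM_CALLS ] array declared extern above, and the "call" is made by rewriting the stacked PC rather than through a function pointer.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ( * DemoSyscallImpl_t )( uint32_t ulArg );

static uint32_t prvDemoDelay( uint32_t ulArg )   { return ulArg + 1UL; }
static uint32_t prvDemoSuspend( uint32_t ulArg ) { return ulArg + 2UL; }

/* A NULL slot models a kernel API that is compiled out by the
 * configuration - check 3 in the comment above rejects such a call. */
static const DemoSyscallImpl_t xDemoImplementations[] =
{
    prvDemoDelay,
    prvDemoSuspend,
    NULL
};

static uint32_t ulDemoSystemCallEnter( uint8_t ucSystemCallNumber,
                                       uint32_t ulArg )
{
    uint32_t ulResult = 0UL;

    if( xDemoImplementations[ ucSystemCallNumber ] != NULL )
    {
        ulResult = xDemoImplementations[ ucSystemCallNumber ]( ulArg );
    }

    return ulResult;
}

int main( void )
{
    printf( "%u\n", ( unsigned ) ulDemoSystemCallEnter( 0U, 10UL ) ); /* Prints 11. */
    return 0;
}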
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is non-NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r1, control \n" /* Obtain current control value. */
" bic r1, #1 \n" /* Clear nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Store the value of the Link Register before the SVC was raised. We need to
* restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( configUSE_MPU_WRAPPERS_V1 == 0 )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -681,15 +624,18 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r1, control \n" /* Obtain current control value. */
" orr r1, #1 \n" /* Set nPRIV bit. */
" msr control, r1 \n" /* Write back new control value. */
::: "r1", "memory"
);
/* Restore the stacked link register to what it was at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* If the hardware used padding to force the stack pointer
@ -707,7 +653,7 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* This is non-NULL only for the duration of the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
}
}
}
#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
@ -736,7 +682,6 @@ BaseType_t xPortStartScheduler( void )
#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 )
configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) );
#else
/* When using this port on a Cortex-M7 r0p0 or r0p1 core, define
* configENABLE_ERRATA_837070_WORKAROUND to 1 in your
* FreeRTOSConfig.h. */
@ -1163,7 +1108,6 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL );
xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION |
tskMPU_WRITE_PERMISSION );
}
lIndex = 0;
@ -1185,14 +1129,16 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
( xRegions[ lIndex ].ulParameters ) |
( portMPU_REGION_ENABLE );
xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t) xRegions[ lIndex ].pvBaseAddress;
xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress;
xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL );
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL;
if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) ||
( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION;
}
if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE )
{
xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );


@ -32,6 +32,7 @@ To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so
the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include <FreeRTOSConfig.h>
#include <mpu_syscall_numbers.h>
RSEG CODE:CODE(2)
thumb
@ -40,7 +41,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN vTaskSwitchContext
EXTERN vPortSVCHandler_C
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
PUBLIC xPortPendSVHandler
@ -58,9 +58,8 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 3
#define portSVC_SYSTEM_CALL_ENTER_1 4
#define portSVC_SYSTEM_CALL_EXIT 5
#define portSVC_START_SCHEDULER 100
#define portSVC_SYSTEM_CALL_EXIT 103
/*-----------------------------------------------------------*/
xPortPendSVHandler:
@ -164,10 +163,8 @@ vPortSVCHandler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #portSVC_SYSTEM_CALL_ENTER
beq syscall_enter
cmp r2, #portSVC_SYSTEM_CALL_ENTER_1
beq syscall_enter_1
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #portSVC_SYSTEM_CALL_EXIT
beq syscall_exit
b vPortSVCHandler_C
@ -176,10 +173,6 @@ vPortSVCHandler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
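For readability, an equivalent C rendering of the routing the updated handler performs; prvRouteSVC() is a hypothetical wrapper name, everything else uses the symbols referenced by this port.

static void prvRouteSVC( uint32_t * pulTaskStack, uint32_t ulLR )
{
    /* The SVC immediate is encoded two bytes before the stacked return PC. */
    uint8_t ucSVCNumber = ( ( uint8_t * ) pulTaskStack[ portOFFSET_TO_PC ] )[ -2 ];

    if( ucSVCNumber < NUM_SYSTEM_CALLS )
    {
        /* Every system call now needs exactly one SVC: enter the call. */
        vSystemCallEnter( pulTaskStack, ulLR, ucSVCNumber );
    }
    else if( ucSVCNumber == portSVC_SYSTEM_CALL_EXIT )
    {
        vSystemCallExit( pulTaskStack, ulLR );
    }
    else
    {
        /* Kernel services: portSVC_START_SCHEDULER, portSVC_YIELD, ... */
        vPortSVCHandler_C( pulTaskStack );
    }
}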
@ -218,7 +211,7 @@ vPortStartFirstTask:
cpsie f
dsb
isb
svc 0
svc #portSVC_START_SCHEDULER
/*-----------------------------------------------------------*/


@ -243,12 +243,10 @@ typedef struct MPU_SETTINGS
/*-----------------------------------------------------------*/
/* SVC numbers for various services. */
#define portSVC_START_SCHEDULER 0
#define portSVC_YIELD 1
#define portSVC_RAISE_PRIVILEGE 2
#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 5
#define portSVC_START_SCHEDULER 100
#define portSVC_YIELD 101
#define portSVC_RAISE_PRIVILEGE 102
#define portSVC_SYSTEM_CALL_EXIT 103
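The kernel SVC numbers move to 100 and above because the range [ 0, NUM_SYSTEM_CALLS ) is now owned by the system call numbers from mpu_syscall_numbers.h. A guard of the following shape could be placed wherever both sets of numbers are visible; it is illustrative only and not part of this change:

#include "mpu_syscall_numbers.h"

#if ( portSVC_START_SCHEDULER < NUM_SYSTEM_CALLS )
    #error Kernel SVC numbers must not overlap the system call number range.
#endif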
/* Scheduler utilities. */


@ -32,15 +32,12 @@
/*-----------------------------------------------------------*/
#include "FreeRTOSConfig.h"
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
/* These must be in sync with portmacro.h. */
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with up to 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
@ -56,10 +53,7 @@ MPU_xTaskDelayUntil:
b MPU_xTaskDelayUntilImpl
MPU_xTaskDelayUntil_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskDelayUntilImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskDelayUntil
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskAbortDelay
@ -73,10 +67,7 @@ MPU_xTaskAbortDelay:
b MPU_xTaskAbortDelayImpl
MPU_xTaskAbortDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskAbortDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskAbortDelay
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskDelay
@ -90,10 +81,7 @@ MPU_vTaskDelay:
b MPU_vTaskDelayImpl
MPU_vTaskDelay_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskDelayImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskDelay
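From the application's point of view nothing changes: assuming the usual mpu_wrappers.h mapping of the public API names onto their MPU_ stubs, an unprivileged task calling vTaskDelay() now executes the single "svc #SYSTEM_CALL_vTaskDelay" above instead of the old enter/exit SVC pair. prvUnprivilegedTask() is a made-up example task.

#include "FreeRTOS.h"
#include "task.h"

static void prvUnprivilegedTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Resolves to MPU_vTaskDelay(), which raises the single entry SVC. */
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
}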
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskPriorityGet
@ -107,10 +95,7 @@ MPU_uxTaskPriorityGet:
b MPU_uxTaskPriorityGetImpl
MPU_uxTaskPriorityGet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskPriorityGetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskPriorityGet
/*-----------------------------------------------------------*/
PUBLIC MPU_eTaskGetState
@ -124,10 +109,7 @@ MPU_eTaskGetState:
b MPU_eTaskGetStateImpl
MPU_eTaskGetState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_eTaskGetStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_eTaskGetState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskGetInfo
@ -141,10 +123,7 @@ MPU_vTaskGetInfo:
b MPU_vTaskGetInfoImpl
MPU_vTaskGetInfo_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskGetInfoImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskGetInfo
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetIdleTaskHandle
@ -158,10 +137,7 @@ MPU_xTaskGetIdleTaskHandle:
b MPU_xTaskGetIdleTaskHandleImpl
MPU_xTaskGetIdleTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetIdleTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetIdleTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSuspend
@ -175,10 +151,7 @@ MPU_vTaskSuspend:
b MPU_vTaskSuspendImpl
MPU_vTaskSuspend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSuspendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSuspend
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskResume
@ -192,10 +165,7 @@ MPU_vTaskResume:
b MPU_vTaskResumeImpl
MPU_vTaskResume_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskResumeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskResume
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetTickCount
@ -209,10 +179,7 @@ MPU_xTaskGetTickCount:
b MPU_xTaskGetTickCountImpl
MPU_xTaskGetTickCount_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetTickCountImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetTickCount
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetNumberOfTasks
@ -226,10 +193,7 @@ MPU_uxTaskGetNumberOfTasks:
b MPU_uxTaskGetNumberOfTasksImpl
MPU_uxTaskGetNumberOfTasks_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetNumberOfTasksImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetNumberOfTasks
/*-----------------------------------------------------------*/
PUBLIC MPU_pcTaskGetName
@ -243,10 +207,7 @@ MPU_pcTaskGetName:
b MPU_pcTaskGetNameImpl
MPU_pcTaskGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTaskGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTaskGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimeCounter
@ -260,10 +221,7 @@ MPU_ulTaskGetRunTimeCounter:
b MPU_ulTaskGetRunTimeCounterImpl
MPU_ulTaskGetRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetRunTimePercent
@ -277,10 +235,7 @@ MPU_ulTaskGetRunTimePercent:
b MPU_ulTaskGetRunTimePercentImpl
MPU_ulTaskGetRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimePercent
@ -294,10 +249,7 @@ MPU_ulTaskGetIdleRunTimePercent:
b MPU_ulTaskGetIdleRunTimePercentImpl
MPU_ulTaskGetIdleRunTimePercent_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimePercentImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimePercent
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGetIdleRunTimeCounter
@ -311,10 +263,7 @@ MPU_ulTaskGetIdleRunTimeCounter:
b MPU_ulTaskGetIdleRunTimeCounterImpl
MPU_ulTaskGetIdleRunTimeCounter_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGetIdleRunTimeCounterImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGetIdleRunTimeCounter
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetApplicationTaskTag
@ -328,10 +277,7 @@ MPU_vTaskSetApplicationTaskTag:
b MPU_vTaskSetApplicationTaskTagImpl
MPU_vTaskSetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetApplicationTaskTag
@ -345,10 +291,7 @@ MPU_xTaskGetApplicationTaskTag:
b MPU_xTaskGetApplicationTaskTagImpl
MPU_xTaskGetApplicationTaskTag_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetApplicationTaskTagImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetApplicationTaskTag
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetThreadLocalStoragePointer
@ -362,10 +305,7 @@ MPU_vTaskSetThreadLocalStoragePointer:
b MPU_vTaskSetThreadLocalStoragePointerImpl
MPU_vTaskSetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTaskGetThreadLocalStoragePointer
@ -379,10 +319,7 @@ MPU_pvTaskGetThreadLocalStoragePointer:
b MPU_pvTaskGetThreadLocalStoragePointerImpl
MPU_pvTaskGetThreadLocalStoragePointer_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTaskGetThreadLocalStoragePointerImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetSystemState
@ -396,10 +333,7 @@ MPU_uxTaskGetSystemState:
b MPU_uxTaskGetSystemStateImpl
MPU_uxTaskGetSystemState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetSystemStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetSystemState
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark
@ -413,10 +347,7 @@ MPU_uxTaskGetStackHighWaterMark:
b MPU_uxTaskGetStackHighWaterMarkImpl
MPU_uxTaskGetStackHighWaterMark_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMarkImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTaskGetStackHighWaterMark2
@ -430,10 +361,7 @@ MPU_uxTaskGetStackHighWaterMark2:
b MPU_uxTaskGetStackHighWaterMark2Impl
MPU_uxTaskGetStackHighWaterMark2_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTaskGetStackHighWaterMark2Impl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTaskGetStackHighWaterMark2
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetCurrentTaskHandle
@ -447,10 +375,7 @@ MPU_xTaskGetCurrentTaskHandle:
b MPU_xTaskGetCurrentTaskHandleImpl
MPU_xTaskGetCurrentTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetCurrentTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetCurrentTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGetSchedulerState
@ -464,10 +389,7 @@ MPU_xTaskGetSchedulerState:
b MPU_xTaskGetSchedulerStateImpl
MPU_xTaskGetSchedulerState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGetSchedulerStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGetSchedulerState
/*-----------------------------------------------------------*/
PUBLIC MPU_vTaskSetTimeOutState
@ -481,10 +403,7 @@ MPU_vTaskSetTimeOutState:
b MPU_vTaskSetTimeOutStateImpl
MPU_vTaskSetTimeOutState_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTaskSetTimeOutStateImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTaskSetTimeOutState
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskCheckForTimeOut
@ -498,14 +417,11 @@ MPU_xTaskCheckForTimeOut:
b MPU_xTaskCheckForTimeOutImpl
MPU_xTaskCheckForTimeOut_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskCheckForTimeOutImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskCheckForTimeOut
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotify
MPU_xTaskGenericNotify:
PUBLIC MPU_xTaskGenericNotifyEntry
MPU_xTaskGenericNotifyEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -515,14 +431,11 @@ MPU_xTaskGenericNotify:
b MPU_xTaskGenericNotifyImpl
MPU_xTaskGenericNotify_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotify
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyWait
MPU_xTaskGenericNotifyWait:
PUBLIC MPU_xTaskGenericNotifyWaitEntry
MPU_xTaskGenericNotifyWaitEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -532,10 +445,7 @@ MPU_xTaskGenericNotifyWait:
b MPU_xTaskGenericNotifyWaitImpl
MPU_xTaskGenericNotifyWait_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTaskGenericNotifyWaitImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyWait
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyTake
@ -549,10 +459,7 @@ MPU_ulTaskGenericNotifyTake:
b MPU_ulTaskGenericNotifyTakeImpl
MPU_ulTaskGenericNotifyTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xTaskGenericNotifyStateClear
@ -566,10 +473,7 @@ MPU_xTaskGenericNotifyStateClear:
b MPU_xTaskGenericNotifyStateClearImpl
MPU_xTaskGenericNotifyStateClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTaskGenericNotifyStateClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTaskGenericNotifyStateClear
/*-----------------------------------------------------------*/
PUBLIC MPU_ulTaskGenericNotifyValueClear
@ -583,10 +487,7 @@ MPU_ulTaskGenericNotifyValueClear:
b MPU_ulTaskGenericNotifyValueClearImpl
MPU_ulTaskGenericNotifyValueClear_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_ulTaskGenericNotifyValueClearImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_ulTaskGenericNotifyValueClear
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGenericSend
@ -600,10 +501,7 @@ MPU_xQueueGenericSend:
b MPU_xQueueGenericSendImpl
MPU_xQueueGenericSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGenericSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGenericSend
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueMessagesWaiting
@ -617,10 +515,7 @@ MPU_uxQueueMessagesWaiting:
b MPU_uxQueueMessagesWaitingImpl
MPU_uxQueueMessagesWaiting_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueMessagesWaitingImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueMessagesWaiting
/*-----------------------------------------------------------*/
PUBLIC MPU_uxQueueSpacesAvailable
@ -634,10 +529,7 @@ MPU_uxQueueSpacesAvailable:
b MPU_uxQueueSpacesAvailableImpl
MPU_uxQueueSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxQueueSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxQueueSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueReceive
@ -651,10 +543,7 @@ MPU_xQueueReceive:
b MPU_xQueueReceiveImpl
MPU_xQueueReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueuePeek
@ -668,10 +557,7 @@ MPU_xQueuePeek:
b MPU_xQueuePeekImpl
MPU_xQueuePeek_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueuePeekImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueuePeek
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSemaphoreTake
@ -685,10 +571,7 @@ MPU_xQueueSemaphoreTake:
b MPU_xQueueSemaphoreTakeImpl
MPU_xQueueSemaphoreTake_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSemaphoreTakeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSemaphoreTake
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGetMutexHolder
@ -702,10 +585,7 @@ MPU_xQueueGetMutexHolder:
b MPU_xQueueGetMutexHolderImpl
MPU_xQueueGetMutexHolder_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGetMutexHolderImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGetMutexHolder
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueTakeMutexRecursive
@ -719,10 +599,7 @@ MPU_xQueueTakeMutexRecursive:
b MPU_xQueueTakeMutexRecursiveImpl
MPU_xQueueTakeMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueTakeMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueTakeMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueGiveMutexRecursive
@ -736,10 +613,7 @@ MPU_xQueueGiveMutexRecursive:
b MPU_xQueueGiveMutexRecursiveImpl
MPU_xQueueGiveMutexRecursive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueGiveMutexRecursiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueGiveMutexRecursive
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueSelectFromSet
@ -753,10 +627,7 @@ MPU_xQueueSelectFromSet:
b MPU_xQueueSelectFromSetImpl
MPU_xQueueSelectFromSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueSelectFromSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueSelectFromSet
/*-----------------------------------------------------------*/
PUBLIC MPU_xQueueAddToSet
@ -770,10 +641,7 @@ MPU_xQueueAddToSet:
b MPU_xQueueAddToSetImpl
MPU_xQueueAddToSet_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xQueueAddToSetImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xQueueAddToSet
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueAddToRegistry
@ -787,10 +655,7 @@ MPU_vQueueAddToRegistry:
b MPU_vQueueAddToRegistryImpl
MPU_vQueueAddToRegistry_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueAddToRegistryImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueAddToRegistry
/*-----------------------------------------------------------*/
PUBLIC MPU_vQueueUnregisterQueue
@ -804,10 +669,7 @@ MPU_vQueueUnregisterQueue:
b MPU_vQueueUnregisterQueueImpl
MPU_vQueueUnregisterQueue_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vQueueUnregisterQueueImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vQueueUnregisterQueue
/*-----------------------------------------------------------*/
PUBLIC MPU_pcQueueGetName
@ -821,10 +683,7 @@ MPU_pcQueueGetName:
b MPU_pcQueueGetNameImpl
MPU_pcQueueGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcQueueGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcQueueGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_pvTimerGetTimerID
@ -838,10 +697,7 @@ MPU_pvTimerGetTimerID:
b MPU_pvTimerGetTimerIDImpl
MPU_pvTimerGetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pvTimerGetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pvTimerGetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetTimerID
@ -855,10 +711,7 @@ MPU_vTimerSetTimerID:
b MPU_vTimerSetTimerIDImpl
MPU_vTimerSetTimerID_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetTimerIDImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetTimerID
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerIsTimerActive
@ -872,10 +725,7 @@ MPU_xTimerIsTimerActive:
b MPU_xTimerIsTimerActiveImpl
MPU_xTimerIsTimerActive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerIsTimerActiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerIsTimerActive
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetTimerDaemonTaskHandle
@ -889,14 +739,11 @@ MPU_xTimerGetTimerDaemonTaskHandle:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
MPU_xTimerGetTimerDaemonTaskHandle_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetTimerDaemonTaskHandleImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGenericCommand
MPU_xTimerGenericCommand:
PUBLIC MPU_xTimerGenericCommandEntry
MPU_xTimerGenericCommandEntry:
push {r0}
/* This function can also be called from an ISR and therefore, we need a check
* to take the privileged path if called from an ISR. */
@ -908,13 +755,10 @@ MPU_xTimerGenericCommand:
beq MPU_xTimerGenericCommand_Priv
MPU_xTimerGenericCommand_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xTimerGenericCommandImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGenericCommand
MPU_xTimerGenericCommand_Priv:
pop {r0}
b MPU_xTimerGenericCommandImpl
b MPU_xTimerGenericCommandPrivImpl
/*-----------------------------------------------------------*/
@ -929,10 +773,7 @@ MPU_pcTimerGetName:
b MPU_pcTimerGetNameImpl
MPU_pcTimerGetName_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_pcTimerGetNameImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_pcTimerGetName
/*-----------------------------------------------------------*/
PUBLIC MPU_vTimerSetReloadMode
@ -946,10 +787,7 @@ MPU_vTimerSetReloadMode:
b MPU_vTimerSetReloadModeImpl
MPU_vTimerSetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vTimerSetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vTimerSetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetReloadMode
@ -963,10 +801,7 @@ MPU_xTimerGetReloadMode:
b MPU_xTimerGetReloadModeImpl
MPU_xTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_uxTimerGetReloadMode
@ -980,10 +815,7 @@ MPU_uxTimerGetReloadMode:
b MPU_uxTimerGetReloadModeImpl
MPU_uxTimerGetReloadMode_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxTimerGetReloadModeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxTimerGetReloadMode
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetPeriod
@ -997,10 +829,7 @@ MPU_xTimerGetPeriod:
b MPU_xTimerGetPeriodImpl
MPU_xTimerGetPeriod_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetPeriodImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetPeriod
/*-----------------------------------------------------------*/
PUBLIC MPU_xTimerGetExpiryTime
@ -1014,14 +843,11 @@ MPU_xTimerGetExpiryTime:
b MPU_xTimerGetExpiryTimeImpl
MPU_xTimerGetExpiryTime_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xTimerGetExpiryTimeImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xTimerGetExpiryTime
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupWaitBits
MPU_xEventGroupWaitBits:
PUBLIC MPU_xEventGroupWaitBitsEntry
MPU_xEventGroupWaitBitsEntry:
push {r0}
mrs r0, control
tst r0, #1
@ -1031,10 +857,7 @@ MPU_xEventGroupWaitBits:
b MPU_xEventGroupWaitBitsImpl
MPU_xEventGroupWaitBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER_1
bl MPU_xEventGroupWaitBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupWaitBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupClearBits
@ -1048,10 +871,7 @@ MPU_xEventGroupClearBits:
b MPU_xEventGroupClearBitsImpl
MPU_xEventGroupClearBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupClearBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupClearBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSetBits
@ -1065,10 +885,7 @@ MPU_xEventGroupSetBits:
b MPU_xEventGroupSetBitsImpl
MPU_xEventGroupSetBits_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSetBitsImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSetBits
/*-----------------------------------------------------------*/
PUBLIC MPU_xEventGroupSync
@ -1082,10 +899,7 @@ MPU_xEventGroupSync:
b MPU_xEventGroupSyncImpl
MPU_xEventGroupSync_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xEventGroupSyncImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xEventGroupSync
/*-----------------------------------------------------------*/
PUBLIC MPU_uxEventGroupGetNumber
@ -1099,10 +913,7 @@ MPU_uxEventGroupGetNumber:
b MPU_uxEventGroupGetNumberImpl
MPU_uxEventGroupGetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_uxEventGroupGetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_uxEventGroupGetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_vEventGroupSetNumber
@ -1116,10 +927,7 @@ MPU_vEventGroupSetNumber:
b MPU_vEventGroupSetNumberImpl
MPU_vEventGroupSetNumber_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_vEventGroupSetNumberImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_vEventGroupSetNumber
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSend
@ -1133,10 +941,7 @@ MPU_xStreamBufferSend:
b MPU_xStreamBufferSendImpl
MPU_xStreamBufferSend_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSendImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSend
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferReceive
@ -1150,10 +955,7 @@ MPU_xStreamBufferReceive:
b MPU_xStreamBufferReceiveImpl
MPU_xStreamBufferReceive_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferReceiveImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferReceive
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsFull
@ -1167,10 +969,7 @@ MPU_xStreamBufferIsFull:
b MPU_xStreamBufferIsFullImpl
MPU_xStreamBufferIsFull_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsFullImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsFull
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferIsEmpty
@ -1184,10 +983,7 @@ MPU_xStreamBufferIsEmpty:
b MPU_xStreamBufferIsEmptyImpl
MPU_xStreamBufferIsEmpty_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferIsEmptyImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferIsEmpty
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSpacesAvailable
@ -1201,10 +997,7 @@ MPU_xStreamBufferSpacesAvailable:
b MPU_xStreamBufferSpacesAvailableImpl
MPU_xStreamBufferSpacesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSpacesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSpacesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferBytesAvailable
@ -1218,10 +1011,7 @@ MPU_xStreamBufferBytesAvailable:
b MPU_xStreamBufferBytesAvailableImpl
MPU_xStreamBufferBytesAvailable_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferBytesAvailableImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferBytesAvailable
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferSetTriggerLevel
@ -1235,10 +1025,7 @@ MPU_xStreamBufferSetTriggerLevel:
b MPU_xStreamBufferSetTriggerLevelImpl
MPU_xStreamBufferSetTriggerLevel_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferSetTriggerLevelImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferSetTriggerLevel
/*-----------------------------------------------------------*/
PUBLIC MPU_xStreamBufferNextMessageLengthBytes
@ -1252,10 +1039,7 @@ MPU_xStreamBufferNextMessageLengthBytes:
b MPU_xStreamBufferNextMessageLengthBytesImpl
MPU_xStreamBufferNextMessageLengthBytes_Unpriv:
pop {r0}
svc #portSVC_SYSTEM_CALL_ENTER
bl MPU_xStreamBufferNextMessageLengthBytesImpl
svc #portSVC_SYSTEM_CALL_EXIT
bx lr
svc #SYSTEM_CALL_xStreamBufferNextMessageLengthBytes
/*-----------------------------------------------------------*/
/* Default weak implementations in case one is not available from
@ -1461,9 +1245,9 @@ MPU_xTimerIsTimerActiveImpl:
MPU_xTimerGetTimerDaemonTaskHandleImpl:
b MPU_xTimerGetTimerDaemonTaskHandleImpl
PUBWEAK MPU_xTimerGenericCommandImpl
MPU_xTimerGenericCommandImpl:
b MPU_xTimerGenericCommandImpl
PUBWEAK MPU_xTimerGenericCommandPrivImpl
MPU_xTimerGenericCommandPrivImpl:
b MPU_xTimerGenericCommandPrivImpl
PUBWEAK MPU_pcTimerGetNameImpl
MPU_pcTimerGetNameImpl:


@ -35,8 +35,9 @@
#include "FreeRTOS.h"
#include "task.h"
/* MPU wrappers includes. */
/* MPU includes. */
#include "mpu_wrappers.h"
#include "mpu_syscall_numbers.h"
/* Portasm includes. */
#include "portasm.h"
@ -422,31 +423,26 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with up to 4 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
* @param ucSystemCallNumber The system call number of the system call.
*/
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
/**
* @brief Sets up the system call stack so that upon returning from
* SVC, the system call stack is used.
*
* It is used for the system calls with 5 parameters.
*
* @param pulTaskStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
/**
* @brief Raise SVC for exiting from a system call.
*/
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -459,7 +455,8 @@ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIV
* @param pulSystemCallStack The current SP when the SVC was raised.
* @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
*/
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION;
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) PRIVILEGED_FUNCTION;
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
@ -813,7 +810,6 @@ static void prvTaskExitError( void )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_functions_start__;
@ -983,7 +979,6 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
{
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
@ -1101,12 +1096,16 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vSystemCallEnter( uint32_t * pulTaskStack,
uint32_t ulLR,
uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
@ -1119,16 +1118,26 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
/* Checks:
* 1. SVC is raised from the system call section (i.e. application is
* not raising SVC directly).
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
* it is non-NULL only during the execution of a system call (i.e.
* between system call enter and exit).
* 3. System call is not for a kernel API disabled by the configuration
* in FreeRTOSConfig.h.
* 4. We do not need to check that ucSystemCallNumber is within range
* because the assembly SVC handler checks that before calling
* this function.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
{
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
@ -1136,7 +1145,8 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1148,11 +1158,11 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the system call stack for the stack frame. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
@ -1163,152 +1173,50 @@ void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FU
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to
* restore it when we exit from the system call. */
/* Store the value of the Link Register before the SVC was raised.
* It contains the address of the caller of the System Call entry
* point (i.e. the caller of the MPU_<API>). We need to restore it
* when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
" msr control, r0 \n" /* Write back new control value. */
::: "r0", "r1", "memory"
);
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
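The 8-versus-26 word frame size chosen above comes straight from the EXC_RETURN value passed in as ulLR: bit 4, the FType bit (which portEXC_RETURN_STACK_FRAME_TYPE_MASK is assumed here to select), is clear when the hardware stacked an extended FPU frame and set when it stacked a basic frame. A small host-runnable restatement of that decision:

#include <stdint.h>
#include <stdio.h>

#define EXC_RETURN_FTYPE_MASK    0x00000010UL /* Stand-in for portEXC_RETURN_STACK_FRAME_TYPE_MASK. */

/* FType == 0: extended (FPU) frame of 26 words. FType == 1: basic 8 word frame. */
static uint32_t ulStackFrameSizeWords( uint32_t ulExcReturn )
{
    return ( ( ulExcReturn & EXC_RETURN_FTYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}

int main( void )
{
    printf( "0xFFFFFFED -> %lu words (extended)\n", ( unsigned long ) ulStackFrameSizeWords( 0xFFFFFFEDUL ) );
    printf( "0xFFFFFFFD -> %lu words (basic)\n", ( unsigned long ) ulStackFrameSizeWords( 0xFFFFFFFDUL ) );
    return 0;
}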
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulSystemCallStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
/* This is not NULL only for the duration of the system call. */
configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL );
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
{
if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
);
}
else
{
/* Standard frame i.e. FPU not in use. */
ulStackFrameSize = 8;
}
}
#else
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
/* Make space on the system call stack for the stack frame and
* the parameter passed on the stack. We only need to copy one
* parameter but we still reserve 2 spaces to keep the stack
* double word aligned. */
pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL;
/* Copy the stack frame. */
for( i = 0; i < ulStackFrameSize; i++ )
{
pulSystemCallStack[ i ] = pulTaskStack[ i ];
}
/* Copy the parameter which is passed on the stack. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* Store the value of the LR and PSPLIM registers before the SVC was raised.
/* Store the value of the PSPLIM register before the SVC was raised.
* We need to restore it when we exit from the system call. */
pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
__asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* Use the pulSystemCallStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
/* Start executing the system call upon returning from this handler. */
pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
/* Raise a request to exit from the system call upon finishing the
* system call. */
pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
/* Remember the location where we should copy the stack frame when we exit from
* the system call. */
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
/* Record if the hardware used padding to force the stack pointer
* to be double word aligned. */
if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
{
pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
}
else
{
pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
}
/* We ensure in pxPortInitialiseStack that the system call stack is
* double word aligned and therefore, there is no need for padding.
* Clear the bit[9] of stacked xPSR. */
pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
/* Raise the privilege for the duration of the system call. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" bics r0, r1 \n" /* Clear nPRIV bit. */
@ -1316,37 +1224,58 @@ void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
__asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
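Together, the stacked-PC/stacked-LR rewrite in vSystemCallEnter and vRequestSystemCallExit above form a return trampoline: exception return lands in the privileged MPU_<API>Impl picked from uxSystemCallImplementations, and when that implementation returns it falls into vRequestSystemCallExit, which raises the exit SVC. A hypothetical, host-side model of that control flow (all names below are invented for illustration):

#include <stdio.h>

/* Stands in for one entry of uxSystemCallImplementations[]. */
static long prvExampleImpl( long lParam )
{
    return lParam * 2;
}

/* Stands in for vRequestSystemCallExit, which on hardware is a naked function
 * that raises svc #portSVC_SYSTEM_CALL_EXIT. */
static void prvRequestExit( void )
{
    puts( "svc #portSVC_SYSTEM_CALL_EXIT" );
}

int main( void )
{
    /* vSystemCallEnter writes these two slots into the copied stack frame... */
    long ( * pc )( long ) = prvExampleImpl; /* Stacked PC -> implementation. */
    void ( * lr )( void ) = prvRequestExit; /* Stacked LR -> exit trampoline. */

    /* ...so that exception return behaves like this call sequence. */
    long lResult = pc( 21 ); /* Runs privileged on the system call stack. */
    lr();                    /* Implementation "returns" into the trampoline. */

    printf( "result = %ld\n", lResult );
    return 0;
}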
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
extern TaskHandle_t pxCurrentTCB;
xMPU_SETTINGS * pxMpuSettings;
uint32_t * pulTaskStack;
uint32_t ulStackFrameSize, ulSystemCallLocation, i;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __syscalls_flash_start__;
extern uint32_t * __syscalls_flash_end__;
extern uint32_t * __privileged_functions_start__;
extern uint32_t * __privileged_functions_end__;
#else
/* Declaration when these variables are exported from linker scripts. */
extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];
extern uint32_t __privileged_functions_start__[];
extern uint32_t __privileged_functions_end__[];
#endif /* #if defined( __ARMCC_VERSION ) */
ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
/* If the request did not come from the system call section, do nothing. */
if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) )
{
pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
/* Checks:
* 1. SVC is raised from the privileged code (i.e. application is not
* raising SVC directly). This SVC is only raised from
* vRequestSystemCallExit which is in the privileged code section.
* 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
* this means that we previously entered a system call and the
* application is not attempting to exit without entering a system
* call.
*/
if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
{
pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
@ -1355,7 +1284,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
{
/* Extended frame i.e. FPU in use. */
ulStackFrameSize = 26;
__asm volatile (
__asm volatile
(
" vpush {s0} \n" /* Trigger lazy stacking. */
" vpop {s0} \n" /* Nullify the affect of the above instruction. */
::: "memory"
@ -1367,11 +1297,11 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
ulStackFrameSize = 8;
}
}
#else
#else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
{
ulStackFrameSize = 8;
}
#endif /* configENABLE_FPU || configENABLE_MVE */
#endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
/* Make space on the task stack for the stack frame. */
pulTaskStack = pulTaskStack - ulStackFrameSize;
@ -1385,9 +1315,14 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
/* Use the pulTaskStack in thread mode. */
__asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
/* Restore the LR and PSPLIM to what they were at the time of
* system call entry. */
/* Return to the caller of the System Call entry point (i.e. the
* caller of the MPU_<API>). */
pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Ensure that LR has a valid value. */
pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
/* Restore the PSPLIM register to what it was at the time of
* system call entry. */
__asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
/* If the hardware used padding to force the stack pointer
@ -1406,7 +1341,8 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
/* Drop the privilege before returning to the thread mode. */
__asm volatile (
__asm volatile
(
" mrs r0, control \n" /* Obtain current control value. */
" movs r1, #1 \n" /* r1 = 1. */
" orrs r0, r1 \n" /* Set nPRIV bit. */
@ -1414,15 +1350,15 @@ void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEG
::: "r0", "r1", "memory"
);
}
}
}
#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
/*-----------------------------------------------------------*/
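One detail shared by both handlers above: on exception entry the hardware may push one aligner word to keep the stack 8-byte aligned and records that in bit 9 of the stacked xPSR. vSystemCallEnter remembers this in ulTaskFlags and clears the bit on the system call frame (that stack is already double word aligned), so vSystemCallExit can account for the padding when it rebuilds the task frame. A hedged, host-runnable sketch of the entry half, with ( 1 << 9 ) standing in for portPSR_STACK_PADDING_MASK:

#include <stdint.h>
#include <stdio.h>

#define PSR_STACK_PADDING_MASK    ( 1UL << 9 ) /* Stand-in for portPSR_STACK_PADDING_MASK. */
#define FRAME_HAS_PADDING_FLAG    ( 1UL << 0 ) /* Stand-in for portSTACK_FRAME_HAS_PADDING_FLAG. */

int main( void )
{
    uint32_t ulStackedTaskPsr = 0x01000200UL; /* Example: Thumb bit plus the bit 9 aligner flag. */
    uint32_t ulTaskFlags = 0UL;
    uint32_t ulSystemCallPsr = ulStackedTaskPsr;

    /* Entry: remember whether the task frame was padded... */
    if( ( ulStackedTaskPsr & PSR_STACK_PADDING_MASK ) != 0UL )
    {
        ulTaskFlags |= FRAME_HAS_PADDING_FLAG;
    }

    /* ...and clear bit[9] on the system call frame, which needs no padding. */
    ulSystemCallPsr &= ~PSR_STACK_PADDING_MASK;

    printf( "task flags = 0x%lX, system call xPSR = 0x%08lX\n",
            ( unsigned long ) ulTaskFlags, ( unsigned long ) ulSystemCallPsr );
    return 0;
}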
#if ( configENABLE_MPU == 1 )
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
{
BaseType_t xTaskIsPrivileged = pdFALSE;
const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
@ -1432,20 +1368,20 @@ BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
}
return xTaskIsPrivileged;
}
}
#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if( configENABLE_MPU == 1 )
#if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged,
xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
{
uint32_t ulIndex = 0;
xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
@ -1525,15 +1461,15 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
return &( xMPUSettings->ulContext[ ulIndex ] );
}
}
#else /* configENABLE_MPU */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
{
/* Simulate the stack frame as it would be created by a context switch
* interrupt. */
#if ( portPRELOAD_REGISTERS == 0 )
@ -1607,7 +1543,7 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
#endif /* portPRELOAD_REGISTERS */
return pxTopOfStack;
}
}
#endif /* configENABLE_MPU */
/*-----------------------------------------------------------*/
@ -1750,7 +1686,6 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
int32_t lIndex = 0;
#if defined( __ARMCC_VERSION )
/* Declaration when these variables are defined in code instead of being
* exported from linker scripts. */
extern uint32_t * __privileged_sram_start__;

View file

@ -32,6 +32,9 @@ the code is included in C files but excluded by the preprocessor in assembly
files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler). */
#include "FreeRTOSConfig.h"
/* System call numbers includes. */
#include "mpu_syscall_numbers.h"
#ifndef configUSE_MPU_WRAPPERS_V1
#define configUSE_MPU_WRAPPERS_V1 0
#endif
@ -44,7 +47,6 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler.
EXTERN SecureContext_LoadContext
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
EXTERN vSystemCallEnter
EXTERN vSystemCallEnter_1
EXTERN vSystemCallExit
#endif
@ -86,7 +88,7 @@ vResetPrivilege:
/*-----------------------------------------------------------*/
vPortAllocateSecureContext:
svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */
svc 100 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 100. */
bx lr /* Return. */
/*-----------------------------------------------------------*/
@ -205,7 +207,7 @@ vStartFirstTask:
cpsie f
dsb
isb
svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */
svc 102 /* System call to start the first task. portSVC_START_SCHEDULER = 102. */
/*-----------------------------------------------------------*/
ulSetInterruptMask:
@ -455,11 +457,9 @@ SVC_Handler:
ldr r1, [r0, #24]
ldrb r2, [r1, #-2]
cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */
beq syscall_enter
cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */
beq syscall_enter_1
cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */
cmp r2, #NUM_SYSTEM_CALLS
blt syscall_enter
cmp r2, #104 /* portSVC_SYSTEM_CALL_EXIT. */
beq syscall_exit
b vPortSVCHandler_C
@ -467,10 +467,6 @@ SVC_Handler:
mov r1, lr
b vSystemCallEnter
syscall_enter_1:
mov r1, lr
b vSystemCallEnter_1
syscall_exit:
mov r1, lr
b vSystemCallExit
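The dispatch above reads the SVC immediate out of the instruction stream: the stacked return address points just past the 2-byte Thumb SVC instruction, and the immediate is its low byte, which is what ldrb r2, [r1, #-2] fetches. A hedged C restatement of the extraction and the three-way branch (the NUM_SYSTEM_CALLS value below is an assumption; the real one comes from mpu_syscall_numbers.h):

#include <stdint.h>
#include <stdio.h>

#define NUM_SYSTEM_CALLS_STANDIN    90U  /* Assumption for illustration only. */
#define SVC_SYSTEM_CALL_EXIT        104U /* portSVC_SYSTEM_CALL_EXIT. */

/* The SVC immediate is the low byte of the 16-bit Thumb SVC encoding, two
 * bytes before the return address the hardware stacked. */
static uint8_t ucSvcNumber( const uint8_t * pucStackedReturnAddress )
{
    return pucStackedReturnAddress[ -2 ];
}

static const char * pcDispatch( uint8_t ucNumber )
{
    if( ucNumber < NUM_SYSTEM_CALLS_STANDIN )
    {
        return "syscall_enter -> vSystemCallEnter";
    }
    else if( ucNumber == SVC_SYSTEM_CALL_EXIT )
    {
        return "syscall_exit -> vSystemCallExit";
    }

    return "vPortSVCHandler_C";
}

int main( void )
{
    /* A Thumb "svc #7" encodes as 0xDF07, stored little endian as 07 DF. */
    uint8_t ucCode[] = { 0x07, 0xDF, 0x00, 0x00 };
    uint8_t ucNumber = ucSvcNumber( &ucCode[ 2 ] ); /* Stacked PC points past the SVC. */

    printf( "svc #%u  -> %s\n", ( unsigned ) ucNumber, pcDispatch( ucNumber ) );
    printf( "svc #104 -> %s\n", pcDispatch( 104U ) );
    printf( "svc #102 -> %s\n", pcDispatch( 102U ) );
    return 0;
}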
@ -493,7 +489,7 @@ vPortFreeSecureContext:
ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */
cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */
it ne
svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */
svcne 101 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 101. */
bx lr /* Return. */
/*-----------------------------------------------------------*/

View file

@ -316,13 +316,12 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P
/**
* @brief SVC numbers.
*/
#define portSVC_ALLOCATE_SECURE_CONTEXT 0
#define portSVC_FREE_SECURE_CONTEXT 1
#define portSVC_START_SCHEDULER 2
#define portSVC_RAISE_PRIVILEGE 3
#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */
#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */
#define portSVC_SYSTEM_CALL_EXIT 6
#define portSVC_ALLOCATE_SECURE_CONTEXT 100
#define portSVC_FREE_SECURE_CONTEXT 101
#define portSVC_START_SCHEDULER 102
#define portSVC_RAISE_PRIVILEGE 103
#define portSVC_SYSTEM_CALL_EXIT 104
#define portSVC_YIELD 105
/*-----------------------------------------------------------*/
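Moving the port's own SVC numbers from 0-6 up to 100 and above keeps them disjoint from the range 0 to NUM_SYSTEM_CALLS - 1, which the SVC immediate now uses to name kernel API system calls directly. A hedged compile-time guard one could add to express that invariant, assuming NUM_SYSTEM_CALLS is a plain integer macro that stays below the new base of 100:

#include "mpu_syscall_numbers.h"

/* System call numbers (0 .. NUM_SYSTEM_CALLS - 1) and port SVC numbers
 * (portSVC_ALLOCATE_SECURE_CONTEXT = 100 and up) must never overlap, or the
 * SVC handler's range check before syscall_enter would misroute kernel SVCs. */
#if ( NUM_SYSTEM_CALLS > 100 )
    #error NUM_SYSTEM_CALLS must stay below the port SVC number range starting at 100.
#endif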
/**

Some files were not shown because too many files have changed in this diff.