Update GCC compiler for:

* RX600v2
* RX600
* RX100

Signed-off-by: Dinh Van Nam <vannam.dinh.xt@renesas.com>
Author:  Dinh Van Nam
Date:    2020-07-16 16:39:48 +09:00
Commit:  d7c280593d (parent 226af680e1)
6 changed files with 1146 additions and 1136 deletions
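
Taken together, the diffs below make two opt-in additions to the GCC RX600v2, RX600 and RX100 ports: configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H selects the Renesas FIT BSP's platform.h over a bare iodefine.h, and configTICK_VECTOR lets the compiler place the tick and software-interrupt handlers directly in the BSP's interrupt vector table section (the R_BSP_SECNAME_INTVECTTBL / R_SECNAME_INTVECTTBL section names used for that come from the BSP, which is why the two options pair naturally). A minimal sketch of how an application might opt in from FreeRTOSConfig.h; the CMT0 vector name is an assumption for illustration, not something this commit mandates:

    /* FreeRTOSConfig.h - illustrative only. */
    #define configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H    1            /* Include the FIT BSP's platform.h. */
    #define configTICK_VECTOR                                 _CMT0_CMI0   /* Assumed tick source; port.c wraps this with _VECT(). */

Leaving both undefined keeps the previous behaviour: iodefine.h is included and the application places the handlers in its own vector table.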

Changed file 1 of 6 (file names were not captured in this view):

@@ -40,16 +40,20 @@
#include "string.h"
/* Hardware specifics. */
+#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
+#include "platform.h"
+#else
#include "iodefine.h"
+#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
/* The peripheral clock is divided by this value before being supplying the
CMT. */
#if ( configUSE_TICKLESS_IDLE == 0 )
/* If tickless idle is not used then the divisor can be fixed. */
#define portCLOCK_DIVISOR 8UL
@@ -64,15 +68,15 @@
#endif
/* These macros allow a critical section to be added around the call to
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
/* Keys required to lock and unlock access to certain system registers
respectively. */
#define portUNLOCK_KEY 0xA50B
#define portLOCK_KEY 0xA500
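
portUNLOCK_KEY and portLOCK_KEY are the values written to the RX register-protection register (SYSTEM.PRCR) before and after touching protected clock and low-power registers, as prvSetupTimerInterrupt() and the tickless code further down do around the CMT. A minimal sketch of the pattern, assuming the MSTP() macro and CMT0 module name from the Renesas iodefine/BSP headers (not part of this commit):

    SYSTEM.PRCR.WORD = portUNLOCK_KEY;   /* 0xA50B: allow writes to protected registers. */
    MSTP( CMT0 ) = 0;                    /* For example, release the tick timer from module-stop. */
    SYSTEM.PRCR.WORD = portLOCK_KEY;     /* 0xA500: re-lock. */
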
@@ -89,13 +93,21 @@ static void prvStartFirstTask( void ) __attribute__( ( naked ) );
* restoring of registers). Written in asm code as direct register access is
* required.
*/
+#if defined(configTICK_VECTOR)
+void vPortSoftwareInterruptISR( void ) __attribute__((naked, vector( R_BSP_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
+#else
void vPortSoftwareInterruptISR( void ) __attribute__((naked));
+#endif
/*
* The tick interrupt handler.
*/
-void vPortTickISR( void ) __attribute__( ( interrupt ) );
+#if defined(configTICK_VECTOR)
+void vPortTickISR( void ) __attribute__((interrupt( R_BSP_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
+#else
void vPortTickISR( void ) __attribute__((interrupt));
+#endif
/*
* Sets up the periodic ISR used for the RTOS tick using the CMT.
* The application writer can define configSETUP_TICK_INTERRUPT() (in
@@ -104,9 +116,8 @@ void vPortTickISR( void ) __attribute__( ( interrupt ) );
*/
static void prvSetupTimerInterrupt( void );
#ifndef configSETUP_TICK_INTERRUPT
/* The user has not provided their own tick interrupt configuration so use
the definition in this file (which uses the interval timer). */
#define configSETUP_TICK_INTERRUPT() prvSetupTimerInterrupt()
#endif /* configSETUP_TICK_INTERRUPT */
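
As the comment above says, an application can replace the default tick source by defining configSETUP_TICK_INTERRUPT() in FreeRTOSConfig.h; otherwise it maps onto prvSetupTimerInterrupt(), which programs CMT0. A hedged sketch of an override, where vApplicationSetupTickTimer() is a hypothetical application function:

    /* FreeRTOSConfig.h - use an application supplied timer instead of CMT0. */
    void vApplicationSetupTickTimer( void );
    #define configSETUP_TICK_INTERRUPT()    vApplicationSetupTickTimer()

Note the later comment in xPortStartScheduler(): when configUSE_TICKLESS_IDLE is 1 the default CMT0 implementation should not be overridden.
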
@@ -130,34 +141,32 @@ static const uint32_t ulMatchValueForOneTick = ( ( configPERIPHERAL_CLOCK_HZ / p
#if configUSE_TICKLESS_IDLE == 1
/* Holds the maximum number of ticks that can be suppressed - which is
basically how far into the future an interrupt can be generated. Set
during initialisation. This is the maximum possible value that the
compare match register can hold divided by ulMatchValueForOneTick. */
static const TickType_t xMaximumPossibleSuppressedTicks = USHRT_MAX / ( ( configPERIPHERAL_CLOCK_HZ / portCLOCK_DIVISOR ) / configTICK_RATE_HZ );
/* Flag set from the tick interrupt to allow the sleep processing to know if
sleep mode was exited because of a tick interrupt, or an interrupt
generated by something else. */
static volatile uint32_t ulTickFlag = pdFALSE;
/* The CMT counter is stopped temporarily each time it is re-programmed.
The following constant offsets the CMT counter match value by the number of
CMT counts that would typically be missed while the counter was stopped to
compensate for the lost time. The large difference between the divided CMT
clock and the CPU clock means it is likely ulStoppedTimerCompensation will
equal zero - and be optimised away. */
static const uint32_t ulStoppedTimerCompensation = 100UL / ( configCPU_CLOCK_HZ / ( configPERIPHERAL_CLOCK_HZ / portCLOCK_DIVISOR ) );
#endif
/*-----------------------------------------------------------*/
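
To make the constants above concrete, a worked example with assumed clock figures (illustrative only, not taken from this commit): configPERIPHERAL_CLOCK_HZ = 25 MHz, portCLOCK_DIVISOR = 8, configTICK_RATE_HZ = 1000 and configCPU_CLOCK_HZ = 100 MHz give:

    /* One tick:             ( 25000000 / 8 ) / 1000          = 3125 CMT counts (ulMatchValueForOneTick is this value, give or take the truncated expression above). */
    /* Max suppressed ticks:  USHRT_MAX / 3125 = 65535 / 3125  = 20 ticks, the most the 16-bit compare match register can cover.                                      */
    /* Stopped compensation:  100 / ( 100000000 / 3125000 )    = 100 / 32 = 3 CMT counts.                                                                             */

The divisor must be 8, 32, 128 or 512 because those are the only prescalers the CMT's CKS field offers (CKS = 0 to 3), which is why prvSetupTimerInterrupt() below ends its #if chain with an #error for any other portCLOCK_DIVISOR.
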
/*
* See header file for description.
*/
-StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
-TaskFunction_t pxCode,
-void * pvParameters )
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* Offset to end up on 8 byte boundary. */
pxTopOfStack--;
@@ -172,8 +181,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -206,13 +215,13 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else
{
/* Leave space for the registers that will get popped from the stack
when the task first starts executing. */
pxTopOfStack -= 15;
}
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -230,10 +239,10 @@ BaseType_t xPortStartScheduler( void )
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate
the tick interrupt. This way the application can decide which
peripheral to use. If tickless mode is used then the default
implementation defined in this file (which uses CMT0) should not be
overridden. */
configSETUP_TICK_INTERRUPT();
/* Enable the software interrupt. */
@@ -250,11 +259,11 @@ BaseType_t xPortStartScheduler( void )
}
/* Execution should not reach here as the tasks are now running!
prvSetupTimerInterrupt() is called here to prevent the compiler outputting
a warning about a statically declared function not being referenced in the
case that the application writer has provided their own tick interrupt
configuration routine (and defined configSETUP_TICK_INTERRUPT() such that
their own routine will be called in place of prvSetupTimerInterrupt()). */
prvSetupTimerInterrupt();
/* Should not get here. */
@@ -265,7 +274,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -274,22 +283,19 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
@@ -317,11 +323,10 @@ void vPortSoftwareInterruptISR( void )
/* Re-enable interrupts. */
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
@@ -361,9 +366,8 @@ void vPortSoftwareInterruptISR( void )
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
@@ -372,16 +376,14 @@ void vPortSoftwareInterruptISR( void )
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
PC will be popped by the RTE instruction. */
"POP R15 \n" \
"MVTACLO R15 \n" \
"POP R15 \n" \
@@ -401,7 +403,7 @@ void vPortTickISR( void )
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
necessitates. Ensure IPL is at the max syscall value first. */
portDISABLE_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@@ -417,7 +419,7 @@ void vPortTickISR( void )
ulTickFlag = pdTRUE;
/* If this is the first tick since exiting tickless mode then the CMT
compare match value needs resetting. */
CMT0.CMCOR = ( uint16_t ) ulMatchValueForOneTick;
}
#endif
@@ -488,11 +490,11 @@ static void prvSetupTimerInterrupt( void )
{
CMT0.CMCR.BIT.CKS = 0;
}
#else
{
#error Invalid portCLOCK_DIVISOR setting
}
#endif
/* Enable the interrupt... */
_IEN( _CMT0_CMI0 ) = 1;
@@ -513,8 +515,8 @@ static void prvSetupTimerInterrupt( void )
configPRE_SLEEP_PROCESSING( xExpectedIdleTime );
/* xExpectedIdleTime being set to 0 by configPRE_SLEEP_PROCESSING()
means the application defined code has already executed the WAIT
instruction. */
if( xExpectedIdleTime > 0 )
{
__asm volatile( "WAIT" );
@@ -543,41 +545,38 @@ static void prvSetupTimerInterrupt( void )
}
/* Calculate the reload value required to wait xExpectedIdleTime tick
periods. */
ulMatchValue = ulMatchValueForOneTick * xExpectedIdleTime;
if( ulMatchValue > ulStoppedTimerCompensation )
{
/* Compensate for the fact that the CMT is going to be stopped
momentarily. */
ulMatchValue -= ulStoppedTimerCompensation;
}
/* Stop the CMT momentarily. The time the CMT is stopped for is
accounted for as best it can be, but using the tickless mode will
inevitably result in some tiny drift of the time maintained by the
kernel with respect to calendar time. */
CMT.CMSTR0.BIT.STR0 = 0;
while( CMT.CMSTR0.BIT.STR0 == 1 )
{
/* Nothing to do here. */
}
/* Critical section using the global interrupt bit as the i bit is
automatically reset by the WAIT instruction. */
__asm volatile( "CLRPSW i" );
/* The tick flag is set to false before sleeping. If it is true when
sleep mode is exited then sleep mode was probably exited because the
tick was suppressed for the entire xExpectedIdleTime period. */
ulTickFlag = pdFALSE;
/* If a context switch is pending then abandon the low power entry as
the context switch might have been pended by an external interrupt that
requires processing. */
eSleepAction = eTaskConfirmSleepModeStatus();
if( eSleepAction == eAbortSleep )
{
/* Restart tick. */
@@ -596,7 +595,7 @@ static void prvSetupTimerInterrupt( void )
SYSTEM.PRCR.WORD = portLOCK_KEY;
/* Sleep until something happens. Calling prvSleep() will
automatically reset the i bit in the PSW. */
prvSleep( xExpectedIdleTime );
/* Restart the CMT. */
@@ -616,7 +615,7 @@ static void prvSetupTimerInterrupt( void )
SYSTEM.PRCR.WORD = portLOCK_KEY;
/* Adjust the match value to take into account that the current
time slice is already partially complete. */
ulMatchValue -= ( uint32_t ) CMT0.CMCNT;
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
@@ -625,15 +624,14 @@ static void prvSetupTimerInterrupt( void )
CMT.CMSTR0.BIT.STR0 = 1;
/* Sleep until something happens. Calling prvSleep() will
automatically reset the i bit in the PSW. */
prvSleep( xExpectedIdleTime );
/* Stop CMT. Again, the time the SysTick is stopped for is
accounted for as best it can be, but using the tickless mode will
inevitably result in some tiny drift of the time maintained by the
kernel with respect to calendar time. */
CMT.CMSTR0.BIT.STR0 = 0;
while( CMT.CMSTR0.BIT.STR0 == 1 )
{
/* Nothing to do here. */
@@ -644,42 +642,42 @@ static void prvSetupTimerInterrupt( void )
if( ulTickFlag != pdFALSE )
{
/* The tick interrupt has already executed, although because
this function is called with the scheduler suspended the actual
tick processing will not occur until after this function has
exited. Reset the match value with whatever remains of this
tick period. */
ulMatchValue = ulMatchValueForOneTick - ulCurrentCount;
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
/* The tick interrupt handler will already have pended the tick
processing in the kernel. As the pending tick will be
processed as soon as this function exits, the tick value
maintained by the tick is stepped forward by one less than the
time spent sleeping. The actual stepping of the tick appears
later in this function. */
ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
}
else
{
/* Something other than the tick interrupt ended the sleep.
How many complete tick periods passed while the processor was
sleeping? */
ulCompleteTickPeriods = ulCurrentCount / ulMatchValueForOneTick;
/* The match value is set to whatever fraction of a single tick
period remains. */
ulMatchValue = ulCurrentCount - ( ulCompleteTickPeriods * ulMatchValueForOneTick );
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
}
/* Restart the CMT so it runs up to the match value. The match value
will get set to the value required to generate exactly one tick period
the next time the CMT interrupt executes. */
CMT0.CMCNT = 0;
CMT.CMSTR0.BIT.STR0 = 1;
/* Wind the tick forward by the number of tick periods that the CPU
remained in a low power state. */
vTaskStepTick( ulCompleteTickPeriods );
}
}
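
A short worked example of the wake-up accounting above, reusing the illustrative 3125-counts-per-tick figure from earlier (assumed, not from the commit):

    /* Assumed: 3125 counts per tick, xExpectedIdleTime = 10, woken early by another interrupt at CMCNT == 8000. */
    /* ulCompleteTickPeriods = 8000 / 3125          = 2 whole tick periods slept through.                         */
    /* ulMatchValue          = 8000 - ( 2 * 3125 )  = 1750 counts, the fraction of the partially elapsed tick.    */
    /* vTaskStepTick( 2 );  the restarted CMT then finishes the partial period before normal ticking resumes.     */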

Changed file 2 of 6:

@@ -44,7 +44,7 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
@@ -65,7 +65,7 @@
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
@@ -77,8 +77,8 @@
#define portNOP() __asm volatile( "NOP" )
/* Save clobbered register, set ITU SWINR (at address 0x872E0), read the value
back to ensure it is set before continuing, then restore the clobbered
register. */
#define portYIELD() \
__asm volatile \
( \
@@ -91,14 +91,14 @@
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) { portYIELD(); }
/* These macros should not be called directly, but through the
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
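
The two comment blocks above give the rules interrupt code must follow on this port: an ISR may only call FromISR APIs if its priority is at or below configMAX_SYSCALL_INTERRUPT_PRIORITY, and it requests a context switch with portYIELD_FROM_ISR() rather than portYIELD(). A hedged sketch of a peripheral ISR that follows both rules; the handler name, queue handle and SCI register are assumptions for illustration only:

    #include "FreeRTOS.h"
    #include "queue.h"

    extern QueueHandle_t xAssumedRxQueue;

    void vAssumedUartRxISR( void ) __attribute__( ( interrupt ) );
    void vAssumedUartRxISR( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint8_t ucByte = SCI1.RDR;   /* Read the received byte (register name assumed). */

        /* Legal only because this interrupt's priority is <= configMAX_SYSCALL_INTERRUPT_PRIORITY. */
        xQueueSendFromISR( xAssumedRxQueue, &ucByte, &xHigherPriorityTaskWoken );

        /* Pends the ICU software interrupt so the woken task runs on exit if it has a higher priority. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }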

Changed file 3 of 6:

@@ -37,20 +37,24 @@
#include "string.h"
/* Hardware specifics. */
+#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
+#include "platform.h"
+#else
#include "iodefine.h"
+#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
#define portINITIAL_FPSW ( ( StackType_t ) 0x00000100 )
/* These macros allow a critical section to be added around the call to
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
@@ -67,12 +71,20 @@ static void prvStartFirstTask( void ) __attribute__( ( naked ) );
* restoring of registers). Written in asm code as direct register access is
* required.
*/
+#if defined(configTICK_VECTOR)
+void vSoftwareInterruptISR( void ) __attribute__((naked, vector( R_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
+#else
void vSoftwareInterruptISR( void ) __attribute__((naked));
+#endif
/*
* The tick interrupt handler.
*/
+#if defined(configTICK_VECTOR)
+void vTickISR( void ) __attribute__((interrupt( R_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
+#else
void vTickISR( void ) __attribute__((interrupt));
+#endif
/*-----------------------------------------------------------*/
@@ -83,9 +95,7 @@ extern void * pxCurrentTCB;
/*
* See header file for description.
*/
-StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
-TaskFunction_t pxCode,
-void * pvParameters )
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* R0 is not included as it is the stack pointer. */
@@ -96,8 +106,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -130,11 +140,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else
{
pxTopOfStack -= 15;
}
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -156,8 +166,8 @@ BaseType_t xPortStartScheduler( void )
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate the
tick interrupt. This way the application can decide which peripheral to
use. A demo application is provided to show a suitable example. */
vApplicationSetupTimerInterrupt();
/* Enable the software interrupt. */
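
Unlike the tickless-capable port earlier in this commit, this port never provides a default tick timer: the application must supply vApplicationSetupTimerInterrupt(). A hedged sketch modelled on the CMT0 setup used elsewhere in this commit and in the FreeRTOS RX demo projects; the register accesses and CMI0 interrupt macros come from the Renesas iodefine headers and are assumptions here:

    /* Illustrative only - application supplied tick source using CMT0. */
    void vApplicationSetupTimerInterrupt( void )
    {
        /* Unlock the protect register, enable the CMT0 module, re-lock. */
        SYSTEM.PRCR.WORD = 0xA50B;
        MSTP( CMT0 ) = 0;
        SYSTEM.PRCR.WORD = 0xA500;

        /* PCLK / 8, compare match interrupt enabled, one tick per match. */
        CMT0.CMCR.BIT.CKS = 0;
        CMT0.CMCR.BIT.CMIE = 1;
        CMT0.CMCOR = ( uint16_t ) ( ( configPERIPHERAL_CLOCK_HZ / 8 ) / configTICK_RATE_HZ );
        CMT0.CMCNT = 0;

        /* Route the interrupt at the kernel priority and start the counter. */
        _IPR( _CMT0_CMI0 ) = configKERNEL_INTERRUPT_PRIORITY;
        _IEN( _CMT0_CMI0 ) = 1;
        CMT.CMSTR0.BIT.STR0 = 1;
    }

The handler itself is vTickISR() above; with configTICK_VECTOR defined the compiler places it in the vector table, otherwise the application's vector table must point the chosen vector at it.
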
@@ -181,7 +191,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -190,22 +200,19 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
@@ -237,11 +244,10 @@ void vSoftwareInterruptISR( void )
/* Re-enable interrupts. */
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
@@ -283,9 +289,8 @@ void vSoftwareInterruptISR( void )
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
@@ -294,16 +299,14 @@ void vSoftwareInterruptISR( void )
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
PC will be popped by the RTE instruction. */
"POP R15 \n" \
"MVTACLO R15 \n" \
"POP R15 \n" \
@@ -325,7 +328,7 @@ void vTickISR( void )
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
necessitates. Ensure IPL is at the max syscall value first. */
portDISABLE_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@@ -353,6 +356,9 @@ uint32_t ulPortGetIPL( void )
void vPortSetIPL( uint32_t ulNewIPL )
{
+/* Avoid compiler warning about unreferenced parameter. */
+( void ) ulNewIPL;
__asm volatile
(
"PUSH R5 \n" \

Changed file 4 of 6:

@@ -44,7 +44,7 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
@@ -65,7 +65,7 @@
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
@@ -77,9 +77,9 @@
#define portNOP() __asm volatile( "NOP" )
/* Yield equivalent to "*portITU_SWINTR = 0x01; ( void ) *portITU_SWINTR;"
where portITU_SWINTR is the location of the software interrupt register
(0x000872E0). Don't rely on the assembler to select a register, so instead
save and restore clobbered registers manually. */
#define portYIELD() \
__asm volatile \
( \
@@ -93,14 +93,14 @@
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
/* These macros should not be called directly, but through the
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )

Changed file 5 of 6:

@@ -37,20 +37,24 @@
#include "string.h"
/* Hardware specifics. */
+#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
+#include "platform.h"
+#else
#include "iodefine.h"
+#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
#define portINITIAL_FPSW ( ( StackType_t ) 0x00000100 )
/* These macros allow a critical section to be added around the call to
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portUNMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
@@ -67,12 +71,20 @@ static void prvStartFirstTask( void ) __attribute__( ( naked ) );
* restoring of registers). Written in asm code as direct register access is
* required.
*/
+#if defined(configTICK_VECTOR)
+void vSoftwareInterruptISR( void ) __attribute__((naked, vector( R_BSP_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
+#else
void vSoftwareInterruptISR( void ) __attribute__((naked));
+#endif
/*
* The tick interrupt handler.
*/
+#if defined(configTICK_VECTOR)
+void vTickISR( void ) __attribute__((interrupt( R_BSP_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
+#else
void vTickISR( void ) __attribute__((interrupt));
+#endif
/*-----------------------------------------------------------*/
@@ -83,9 +95,7 @@ extern void * pxCurrentTCB;
/*
* See header file for description.
*/
-StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
-TaskFunction_t pxCode,
-void * pvParameters )
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* R0 is not included as it is the stack pointer. */
@@ -96,8 +106,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -130,11 +140,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else
{
pxTopOfStack -= 15;
}
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -164,8 +174,8 @@ BaseType_t xPortStartScheduler( void )
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate the
tick interrupt. This way the application can decide which peripheral to
use. A demo application is provided to show a suitable example. */
vApplicationSetupTimerInterrupt();
/* Enable the software interrupt. */
@@ -189,7 +199,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -198,22 +208,19 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
@@ -261,11 +268,10 @@ void vSoftwareInterruptISR( void )
/* Re-enable interrupts. */
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
@@ -312,9 +318,8 @@ void vSoftwareInterruptISR( void )
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
@@ -323,16 +328,14 @@ void vSoftwareInterruptISR( void )
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
PC will be popped by the RTE instruction. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
@@ -374,7 +377,7 @@ void vTickISR( void )
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
necessitates. Ensure IPL is at the max syscall value first. */
portMASK_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@@ -402,6 +405,9 @@ uint32_t ulPortGetIPL( void )
void vPortSetIPL( uint32_t ulNewIPL )
{
+/* Avoid compiler warning about unreferenced parameter. */
+( void ) ulNewIPL;
__asm volatile
(
"PUSH R5 \n" \

Changed file 6 of 6:

@@ -44,7 +44,7 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
@@ -65,7 +65,7 @@
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
@@ -77,9 +77,9 @@
#define portNOP() __asm volatile( "NOP" )
/* Yield equivalent to "*portITU_SWINTR = 0x01; ( void ) *portITU_SWINTR;"
where portITU_SWINTR is the location of the software interrupt register
(0x000872E0). Don't rely on the assembler to select a register, so instead
save and restore clobbered registers manually. */
#define portYIELD() \
__asm volatile \
( \
@@ -93,14 +93,14 @@
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
/* These macros should not be called directly, but through the
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
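
The MVTIPL based macros above only behave sensibly if configKERNEL_INTERRUPT_PRIORITY and configMAX_SYSCALL_INTERRUPT_PRIORITY are valid RX IPL values, with the kernel priority no higher than the syscall ceiling. Typical values, taken from the FreeRTOS RX demo conventions rather than from this commit:

    /* FreeRTOSConfig.h - illustrative RX interrupt priorities. */
    #define configKERNEL_INTERRUPT_PRIORITY         1   /* Tick and software interrupts run at the lowest usable IPL. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY    4   /* ISRs at or below 4 may use FromISR APIs; ISRs above 4 are  */
                                                        /* never masked by the kernel and must not call FreeRTOS.     */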