Update GCC compiler for:

* RX600v2
* RX600
* RX100

Signed-off-by: Dinh Van Nam <vannam.dinh.xt@renesas.com>
Dinh Van Nam 2020-07-16 16:39:48 +09:00
parent 226af680e1
commit d7c280593d
6 changed files with 1146 additions and 1136 deletions
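For reviewers who want to exercise the new conditionals, a minimal FreeRTOSConfig.h sketch is shown below. The macro names configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H and configTICK_VECTOR come from the diff itself; the values, in particular the _CMT0_CMI0 vector name, are illustrative assumptions and not part of this commit.

/* Hypothetical FreeRTOSConfig.h fragment - values are illustrative only. */

/* Include the FIT BSP's platform.h rather than a bare iodefine.h. */
#define configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H    1

/* Let the port place its tick and software interrupt handlers in the
 * BSP-managed vector table.  _CMT0_CMI0 is assumed here because the default
 * tick setup in this port uses the CMT0 compare match interrupt; check the
 * device's iodefine.h for the correct name. */
#define configTICK_VECTOR    _CMT0_CMI0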


@@ -40,16 +40,20 @@
#include "string.h"
/* Hardware specifics. */
#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
#include "platform.h"
#else
#include "iodefine.h"
#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
* PSW is set with U and I set, and PM and IPL clear. */
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
/* The peripheral clock is divided by this value before being supplying the
* CMT. */
CMT. */
#if ( configUSE_TICKLESS_IDLE == 0 )
/* If tickless idle is not used then the divisor can be fixed. */
#define portCLOCK_DIVISOR 8UL
@@ -64,15 +68,15 @@
#endif
/* These macros allow a critical section to be added around the call to
* xTaskIncrementTick(), which is only ever called from interrupts at the kernel
* priority - ie a known priority. Therefore these local macros are a slight
* optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
* which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0"::"i" ( configKERNEL_INTERRUPT_PRIORITY ) )
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
/* Keys required to lock and unlock access to certain system registers
* respectively. */
respectively. */
#define portUNLOCK_KEY 0xA50B
#define portLOCK_KEY 0xA500
@@ -82,20 +86,28 @@
* Function to start the first task executing - written in asm code as direct
* access to registers is required.
*/
static void prvStartFirstTask( void ) __attribute__( ( naked ) );
static void prvStartFirstTask( void ) __attribute__((naked));
/*
* Software interrupt handler. Performs the actual context switch (saving and
* restoring of registers). Written in asm code as direct register access is
* required.
*/
void vPortSoftwareInterruptISR( void ) __attribute__( ( naked ) );
#if defined(configTICK_VECTOR)
void vPortSoftwareInterruptISR( void ) __attribute__((naked, vector( R_BSP_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
#else
void vPortSoftwareInterruptISR( void ) __attribute__((naked));
#endif
/*
* The tick interrupt handler.
*/
void vPortTickISR( void ) __attribute__( ( interrupt ) );
#if defined(configTICK_VECTOR)
void vPortTickISR( void ) __attribute__((interrupt( R_BSP_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
#else
void vPortTickISR( void ) __attribute__((interrupt));
#endif
/*
* Sets up the periodic ISR used for the RTOS tick using the CMT.
* The application writer can define configSETUP_TICK_INTERRUPT() (in
@@ -104,9 +116,8 @@ void vPortTickISR( void ) __attribute__( ( interrupt ) );
*/
static void prvSetupTimerInterrupt( void );
#ifndef configSETUP_TICK_INTERRUPT
/* The user has not provided their own tick interrupt configuration so use
* the definition in this file (which uses the interval timer). */
/* The user has not provided their own tick interrupt configuration so use
the definition in this file (which uses the interval timer). */
#define configSETUP_TICK_INTERRUPT() prvSetupTimerInterrupt()
#endif /* configSETUP_TICK_INTERRUPT */
@@ -122,42 +133,40 @@ static void prvSetupTimerInterrupt( void );
/*-----------------------------------------------------------*/
/* Used in the context save and restore code. */
extern void * pxCurrentTCB;
extern void *pxCurrentTCB;
/* Calculate how many clock increments make up a single tick period. */
static const uint32_t ulMatchValueForOneTick = ( ( configPERIPHERAL_CLOCK_HZ / portCLOCK_DIVISOR ) / configTICK_RATE_HZ );
#if configUSE_TICKLESS_IDLE == 1
/* Holds the maximum number of ticks that can be suppressed - which is
* basically how far into the future an interrupt can be generated. Set
* during initialisation. This is the maximum possible value that the
* compare match register can hold divided by ulMatchValueForOneTick. */
/* Holds the maximum number of ticks that can be suppressed - which is
basically how far into the future an interrupt can be generated. Set
during initialisation. This is the maximum possible value that the
compare match register can hold divided by ulMatchValueForOneTick. */
static const TickType_t xMaximumPossibleSuppressedTicks = USHRT_MAX / ( ( configPERIPHERAL_CLOCK_HZ / portCLOCK_DIVISOR ) / configTICK_RATE_HZ );
/* Flag set from the tick interrupt to allow the sleep processing to know if
* sleep mode was exited because of a tick interrupt, or an interrupt
* generated by something else. */
/* Flag set from the tick interrupt to allow the sleep processing to know if
sleep mode was exited because of a tick interrupt, or an interrupt
generated by something else. */
static volatile uint32_t ulTickFlag = pdFALSE;
/* The CMT counter is stopped temporarily each time it is re-programmed.
* The following constant offsets the CMT counter match value by the number of
* CMT counts that would typically be missed while the counter was stopped to
* compensate for the lost time. The large difference between the divided CMT
* clock and the CPU clock means it is likely ulStoppedTimerCompensation will
* equal zero - and be optimised away. */
/* The CMT counter is stopped temporarily each time it is re-programmed.
The following constant offsets the CMT counter match value by the number of
CMT counts that would typically be missed while the counter was stopped to
compensate for the lost time. The large difference between the divided CMT
clock and the CPU clock means it is likely ulStoppedTimerCompensation will
equal zero - and be optimised away. */
static const uint32_t ulStoppedTimerCompensation = 100UL / ( configCPU_CLOCK_HZ / ( configPERIPHERAL_CLOCK_HZ / portCLOCK_DIVISOR ) );
#endif /* if configUSE_TICKLESS_IDLE == 1 */
#endif
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* Offset to end up on 8 byte boundary. */
pxTopOfStack--;
@@ -172,8 +181,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
* value. Otherwise code space can be saved by just setting the registers
* that need to be set. */
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -206,13 +215,13 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else /* ifdef USE_FULL_REGISTER_INITIALISATION */
#else
{
/* Leave space for the registers that will get popped from the stack
* when the task first starts executing. */
when the task first starts executing. */
pxTopOfStack -= 15;
}
#endif /* ifdef USE_FULL_REGISTER_INITIALISATION */
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -230,10 +239,10 @@ BaseType_t xPortStartScheduler( void )
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate
* the tick interrupt. This way the application can decide which
* peripheral to use. If tickless mode is used then the default
* implementation defined in this file (which uses CMT0) should not be
* overridden. */
the tick interrupt. This way the application can decide which
peripheral to use. If tickless mode is used then the default
implementation defined in this file (which uses CMT0) should not be
overridden. */
configSETUP_TICK_INTERRUPT();
/* Enable the software interrupt. */
@@ -250,11 +259,11 @@ BaseType_t xPortStartScheduler( void )
}
/* Execution should not reach here as the tasks are now running!
* prvSetupTimerInterrupt() is called here to prevent the compiler outputting
* a warning about a statically declared function not being referenced in the
* case that the application writer has provided their own tick interrupt
* configuration routine (and defined configSETUP_TICK_INTERRUPT() such that
* their own routine will be called in place of prvSetupTimerInterrupt()). */
prvSetupTimerInterrupt() is called here to prevent the compiler outputting
a warning about a statically declared function not being referenced in the
case that the application writer has provided their own tick interrupt
configuration routine (and defined configSETUP_TICK_INTERRUPT() such that
their own routine will be called in place of prvSetupTimerInterrupt()). */
prvSetupTimerInterrupt();
/* Should not get here. */
@@ -265,7 +274,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
* Artificially force an assert. */
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -274,37 +283,34 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
* interrupt stack because the function is not called from an interrupt.
* Just ensure the current stack is the user stack. */
"SETPSW U \n"\
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
* pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [R15], R15 \n"\
"MOV.L [R15], R0 \n"\
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
* pxCurrentTCB. */
"POP R15 \n"\
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15 \n"\
"POP R15 \n"\
"MVTACLO R15 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15 \n"\
"MVTACHI R15 \n" \
/* R1 to R15 - R0 is not included as it is the SP. */
"POPM R1-R15 \n"\
"POPM R1-R15 \n" \
/* This pops the remaining registers. */
"RTE \n"\
"NOP \n"\
"RTE \n" \
"NOP \n" \
"NOP \n"
);
}
@@ -315,82 +321,78 @@ void vPortSoftwareInterruptISR( void )
__asm volatile
(
/* Re-enable interrupts. */
"SETPSW I \n"\
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
* the interrupt occurred from the interrupt stack to the user stack.
*
* R15 is saved before it is clobbered. */
"PUSH.L R15 \n"\
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
"MVFC USP, R15 \n"\
"MVFC USP, R15 \n" \
/* Move the address down to the data being moved. */
"SUB #12, R15 \n"\
"MVTC R15, USP \n"\
"SUB #12, R15 \n" \
"MVTC R15, USP \n" \
/* Copy the data across, R15, then PC, then PSW. */
"MOV.L [ R0 ], [ R15 ] \n"\
"MOV.L 4[ R0 ], 4[ R15 ] \n"\
"MOV.L 8[ R0 ], 8[ R15 ] \n"\
"MOV.L [ R0 ], [ R15 ] \n" \
"MOV.L 4[ R0 ], 4[ R15 ] \n" \
"MOV.L 8[ R0 ], 8[ R15 ] \n" \
/* Move the interrupt stack pointer to its new correct position. */
"ADD #12, R0 \n"\
"ADD #12, R0 \n" \
/* All the rest of the registers are saved directly to the user stack. */
"SETPSW U \n"\
"SETPSW U \n" \
/* Save the rest of the general registers (R15 has been saved already). */
"PUSHM R1-R14 \n"\
"PUSHM R1-R14 \n" \
/* Save the accumulator. */
"MVFACHI R15 \n"\
"PUSH.L R15 \n"\
"MVFACHI R15 \n" \
"PUSH.L R15 \n" \
/* Middle word. */
"MVFACMI R15 \n"\
"MVFACMI R15 \n" \
/* Shifted left as it is restored to the low order word. */
"SHLL #16, R15 \n"\
"PUSH.L R15 \n"\
"SHLL #16, R15 \n" \
"PUSH.L R15 \n" \
/* Save the stack pointer to the TCB. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L R0, [ R15 ] \n"\
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
* structures are being accessed. */
"MVTIPL %0 \n"\
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
"BSR.A _vTaskSwitchContext \n"\
"BSR.A _vTaskSwitchContext \n" \
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n"\
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
* state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L [ R15 ], R0 \n"\
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
* PC will be popped by the RTE instruction. */
"POP R15 \n"\
"MVTACLO R15 \n"\
"POP R15 \n"\
"MVTACHI R15 \n"\
"POPM R1-R15 \n"\
"RTE \n"\
"NOP \n"\
PC will be popped by the RTE instruction. */
"POP R15 \n" \
"MVTACLO R15 \n" \
"POP R15 \n" \
"MVTACHI R15 \n" \
"POPM R1-R15 \n" \
"RTE \n" \
"NOP \n" \
"NOP "
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ), "i" ( configKERNEL_INTERRUPT_PRIORITY )
:: "i"(configMAX_SYSCALL_INTERRUPT_PRIORITY), "i"(configKERNEL_INTERRUPT_PRIORITY)
);
}
/*-----------------------------------------------------------*/
@@ -398,10 +400,10 @@ void vPortSoftwareInterruptISR( void )
void vPortTickISR( void )
{
/* Re-enabled interrupts. */
__asm volatile ( "SETPSW I");
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
* necessitates. Ensure IPL is at the max syscall value first. */
necessitates. Ensure IPL is at the max syscall value first. */
portDISABLE_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@@ -417,7 +419,7 @@ void vPortTickISR( void )
ulTickFlag = pdTRUE;
/* If this is the first tick since exiting tickless mode then the CMT
* compare match value needs resetting. */
compare match value needs resetting. */
CMT0.CMCOR = ( uint16_t ) ulMatchValueForOneTick;
}
#endif
@@ -428,8 +430,8 @@ uint32_t ulPortGetIPL( void )
{
__asm volatile
(
"MVFC PSW, R1 \n"\
"SHLR #24, R1 \n"\
"MVFC PSW, R1 \n" \
"SHLR #24, R1 \n" \
"RTS "
);
@@ -442,13 +444,13 @@ void vPortSetIPL( uint32_t ulNewIPL )
{
__asm volatile
(
"PUSH R5 \n"\
"MVFC PSW, R5 \n"\
"SHLL #24, R1 \n"\
"AND #-0F000001H, R5 \n"\
"OR R1, R5 \n"\
"MVTC R5, PSW \n"\
"POP R5 \n"\
"PUSH R5 \n" \
"MVFC PSW, R5 \n" \
"SHLL #24, R1 \n" \
"AND #-0F000001H, R5 \n" \
"OR R1, R5 \n" \
"MVTC R5, PSW \n" \
"POP R5 \n" \
"RTS "
);
}
@@ -488,11 +490,11 @@ static void prvSetupTimerInterrupt( void )
{
CMT0.CMCR.BIT.CKS = 0;
}
#else /* if portCLOCK_DIVISOR == 512 */
#else
{
#error Invalid portCLOCK_DIVISOR setting
}
#endif /* if portCLOCK_DIVISOR == 512 */
#endif
/* Enable the interrupt... */
_IEN( _CMT0_CMI0 ) = 1;
@@ -513,11 +515,11 @@ static void prvSetupTimerInterrupt( void )
configPRE_SLEEP_PROCESSING( xExpectedIdleTime );
/* xExpectedIdleTime being set to 0 by configPRE_SLEEP_PROCESSING()
* means the application defined code has already executed the WAIT
* instruction. */
means the application defined code has already executed the WAIT
instruction. */
if( xExpectedIdleTime > 0 )
{
__asm volatile ( "WAIT" );
__asm volatile( "WAIT" );
}
/* Allow the application to define some post sleep processing. */
@@ -543,46 +545,43 @@ static void prvSetupTimerInterrupt( void )
}
/* Calculate the reload value required to wait xExpectedIdleTime tick
* periods. */
periods. */
ulMatchValue = ulMatchValueForOneTick * xExpectedIdleTime;
if( ulMatchValue > ulStoppedTimerCompensation )
{
/* Compensate for the fact that the CMT is going to be stopped
* momentarily. */
momentarily. */
ulMatchValue -= ulStoppedTimerCompensation;
}
/* Stop the CMT momentarily. The time the CMT is stopped for is
* accounted for as best it can be, but using the tickless mode will
* inevitably result in some tiny drift of the time maintained by the
* kernel with respect to calendar time. */
accounted for as best it can be, but using the tickless mode will
inevitably result in some tiny drift of the time maintained by the
kernel with respect to calendar time. */
CMT.CMSTR0.BIT.STR0 = 0;
while( CMT.CMSTR0.BIT.STR0 == 1 )
{
/* Nothing to do here. */
}
/* Critical section using the global interrupt bit as the i bit is
* automatically reset by the WAIT instruction. */
__asm volatile ( "CLRPSW i" );
automatically reset by the WAIT instruction. */
__asm volatile( "CLRPSW i" );
/* The tick flag is set to false before sleeping. If it is true when
* sleep mode is exited then sleep mode was probably exited because the
* tick was suppressed for the entire xExpectedIdleTime period. */
sleep mode is exited then sleep mode was probably exited because the
tick was suppressed for the entire xExpectedIdleTime period. */
ulTickFlag = pdFALSE;
/* If a context switch is pending then abandon the low power entry as
* the context switch might have been pended by an external interrupt that
* requires processing. */
the context switch might have been pended by an external interrupt that
requires processing. */
eSleepAction = eTaskConfirmSleepModeStatus();
if( eSleepAction == eAbortSleep )
{
/* Restart tick. */
CMT.CMSTR0.BIT.STR0 = 1;
__asm volatile ( "SETPSW i" );
__asm volatile( "SETPSW i" );
}
else if( eSleepAction == eNoTasksWaitingTimeout )
{
@@ -596,7 +595,7 @@ static void prvSetupTimerInterrupt( void )
SYSTEM.PRCR.WORD = portLOCK_KEY;
/* Sleep until something happens. Calling prvSleep() will
* automatically reset the i bit in the PSW. */
automatically reset the i bit in the PSW. */
prvSleep( xExpectedIdleTime );
/* Restart the CMT. */
@@ -616,7 +615,7 @@ static void prvSetupTimerInterrupt( void )
SYSTEM.PRCR.WORD = portLOCK_KEY;
/* Adjust the match value to take into account that the current
* time slice is already partially complete. */
time slice is already partially complete. */
ulMatchValue -= ( uint32_t ) CMT0.CMCNT;
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
@@ -625,15 +624,14 @@ static void prvSetupTimerInterrupt( void )
CMT.CMSTR0.BIT.STR0 = 1;
/* Sleep until something happens. Calling prvSleep() will
* automatically reset the i bit in the PSW. */
automatically reset the i bit in the PSW. */
prvSleep( xExpectedIdleTime );
/* Stop CMT. Again, the time the SysTick is stopped for is
* accounted for as best it can be, but using the tickless mode will
* inevitably result in some tiny drift of the time maintained by the
* kernel with respect to calendar time. */
accounted for as best it can be, but using the tickless mode will
inevitably result in some tiny drift of the time maintained by the
kernel with respect to calendar time. */
CMT.CMSTR0.BIT.STR0 = 0;
while( CMT.CMSTR0.BIT.STR0 == 1 )
{
/* Nothing to do here. */
@@ -644,42 +642,42 @@ static void prvSetupTimerInterrupt( void )
if( ulTickFlag != pdFALSE )
{
/* The tick interrupt has already executed, although because
* this function is called with the scheduler suspended the actual
* tick processing will not occur until after this function has
* exited. Reset the match value with whatever remains of this
* tick period. */
this function is called with the scheduler suspended the actual
tick processing will not occur until after this function has
exited. Reset the match value with whatever remains of this
tick period. */
ulMatchValue = ulMatchValueForOneTick - ulCurrentCount;
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
/* The tick interrupt handler will already have pended the tick
* processing in the kernel. As the pending tick will be
* processed as soon as this function exits, the tick value
* maintained by the tick is stepped forward by one less than the
* time spent sleeping. The actual stepping of the tick appears
* later in this function. */
processing in the kernel. As the pending tick will be
processed as soon as this function exits, the tick value
maintained by the tick is stepped forward by one less than the
time spent sleeping. The actual stepping of the tick appears
later in this function. */
ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
}
else
{
/* Something other than the tick interrupt ended the sleep.
* How many complete tick periods passed while the processor was
* sleeping? */
How many complete tick periods passed while the processor was
sleeping? */
ulCompleteTickPeriods = ulCurrentCount / ulMatchValueForOneTick;
/* The match value is set to whatever fraction of a single tick
* period remains. */
period remains. */
ulMatchValue = ulCurrentCount - ( ulCompleteTickPeriods * ulMatchValueForOneTick );
CMT0.CMCOR = ( uint16_t ) ulMatchValue;
}
/* Restart the CMT so it runs up to the match value. The match value
* will get set to the value required to generate exactly one tick period
* the next time the CMT interrupt executes. */
will get set to the value required to generate exactly one tick period
the next time the CMT interrupt executes. */
CMT0.CMCNT = 0;
CMT.CMSTR0.BIT.STR0 = 1;
/* Wind the tick forward by the number of tick periods that the CPU
* remained in a low power state. */
remained in a low power state. */
vTaskStepTick( ulCompleteTickPeriods );
}
}
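To make the tick and tickless idle arithmetic above concrete, here is a worked example under assumed clock settings; the numbers are illustrative and do not come from this commit.

/* Worked example, assuming configCPU_CLOCK_HZ = 32000000,
 * configPERIPHERAL_CLOCK_HZ = 32000000, portCLOCK_DIVISOR = 8 and
 * configTICK_RATE_HZ = 1000 (illustrative values only):
 *
 *   ulMatchValueForOneTick          = ( 32000000 / 8 ) / 1000               = 4000 CMT counts per tick
 *   xMaximumPossibleSuppressedTicks = USHRT_MAX / 4000                      = 16 ticks (CMCOR is 16 bit)
 *   ulStoppedTimerCompensation      = 100 / ( 32000000 / ( 32000000 / 8 ) ) = 12 CMT counts
 */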


@@ -44,100 +44,100 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
* portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if ( configUSE_16_BIT_TICKS == 1 )
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
#endif
/*-----------------------------------------------------------*/
/* Hardware specifics. */
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile ( "NOP" )
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile( "NOP" )
/* Save clobbered register, set ITU SWINR (at address 0x872E0), read the value
* back to ensure it is set before continuing, then restore the clobbered
* register. */
#define portYIELD() \
back to ensure it is set before continuing, then restore the clobbered
register. */
#define portYIELD() \
__asm volatile \
( \
"MOV.L #0x872E0, r5 \n\t"\
"MOV.B #1, [r5] \n\t"\
"MOV.L [r5], r5 \n\t"\
"MOV.L #0x872E0, r5 \n\t" \
"MOV.B #1, [r5] \n\t" \
"MOV.L [r5], r5 \n\t" \
::: "r5" \
)
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) { portYIELD(); }
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) { portYIELD(); }
/* These macros should not be called directly, but through the
* taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
* performed if configASSERT() is defined to ensure an assertion handler does not
* inadvertently attempt to lower the IPL when the call to assert was triggered
* because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
* when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
* functions are those that end in FromISR. FreeRTOS maintains a separate
* interrupt API to ensure API function and interrupt entry is as fast and as
* simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0")
#ifdef configASSERT
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#endif
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#endif
/* Critical nesting counts are stored in the TCB. */
#define portCRITICAL_NESTING_IN_TCB ( 1 )
#define portCRITICAL_NESTING_IN_TCB ( 1 )
/* The critical nesting functions defined within tasks.c. */
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
/* As this port allows interrupt nesting... */
uint32_t ulPortGetIPL( void ) __attribute__( ( naked ) );
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__( ( naked ) );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
uint32_t ulPortGetIPL( void ) __attribute__((naked));
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked));
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
/* Tickless idle/low power functionality. */
#if configUSE_TICKLESS_IDLE == 1
#if configUSE_TICKLESS_IDLE == 1
#ifndef portSUPPRESS_TICKS_AND_SLEEP
extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
#endif
#endif
#endif
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* PORTMACRO_H */
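As a usage note for the yield macros defined above, the sketch below shows how an application interrupt handler might request a context switch on exit; the handler and semaphore names are hypothetical and not part of this commit.

/* Hypothetical application ISR - the names are illustrative only. */
#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xExampleSemaphore;

void vExampleInterruptHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Defer the work to a task that is blocked on the semaphore. */
    xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Only yields if giving the semaphore unblocked a higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}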


@@ -37,22 +37,26 @@
#include "string.h"
/* Hardware specifics. */
#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
#include "platform.h"
#else
#include "iodefine.h"
#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
* PSW is set with U and I set, and PM and IPL clear. */
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
#define portINITIAL_FPSW ( ( StackType_t ) 0x00000100 )
/* These macros allow a critical section to be added around the call to
* xTaskIncrementTick(), which is only ever called from interrupts at the kernel
* priority - ie a known priority. Therefore these local macros are a slight
* optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
* which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0"::"i" ( configKERNEL_INTERRUPT_PRIORITY ) )
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portDISABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portENABLE_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
/*-----------------------------------------------------------*/
@@ -60,32 +64,38 @@
* Function to start the first task executing - written in asm code as direct
* access to registers is required.
*/
static void prvStartFirstTask( void ) __attribute__( ( naked ) );
static void prvStartFirstTask( void ) __attribute__((naked));
/*
* Software interrupt handler. Performs the actual context switch (saving and
* restoring of registers). Written in asm code as direct register access is
* required.
*/
void vSoftwareInterruptISR( void ) __attribute__( ( naked ) );
#if defined(configTICK_VECTOR)
void vSoftwareInterruptISR( void ) __attribute__((naked, vector( R_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
#else
void vSoftwareInterruptISR( void ) __attribute__((naked));
#endif
/*
* The tick interrupt handler.
*/
void vTickISR( void ) __attribute__( ( interrupt ) );
#if defined(configTICK_VECTOR)
void vTickISR( void ) __attribute__((interrupt( R_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
#else
void vTickISR( void ) __attribute__((interrupt));
#endif
/*-----------------------------------------------------------*/
extern void * pxCurrentTCB;
extern void *pxCurrentTCB;
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* R0 is not included as it is the stack pointer. */
@@ -96,8 +106,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
* value. Otherwise code space can be saved by just setting the registers
* that need to be set. */
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -130,11 +140,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else /* ifdef USE_FULL_REGISTER_INITIALISATION */
#else
{
pxTopOfStack -= 15;
}
#endif /* ifdef USE_FULL_REGISTER_INITIALISATION */
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -150,14 +160,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
BaseType_t xPortStartScheduler( void )
{
extern void vApplicationSetupTimerInterrupt( void );
extern void vApplicationSetupTimerInterrupt( void );
/* Use pxCurrentTCB just so it does not get optimised away. */
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate the
* tick interrupt. This way the application can decide which peripheral to
* use. A demo application is provided to show a suitable example. */
tick interrupt. This way the application can decide which peripheral to
use. A demo application is provided to show a suitable example. */
vApplicationSetupTimerInterrupt();
/* Enable the software interrupt. */
@@ -181,7 +191,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
* Artificially force an assert. */
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -190,41 +200,38 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
* interrupt stack because the function is not called from an interrupt.
* Just ensure the current stack is the user stack. */
"SETPSW U \n"\
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
* pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [R15], R15 \n"\
"MOV.L [R15], R0 \n"\
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
* pxCurrentTCB. */
"POP R15 \n"\
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15 \n"\
"POP R15 \n"\
"MVTACLO R15 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15 \n"\
"POP R15 \n"\
"MVTACHI R15 \n" \
"POP R15 \n" \
/* Floating point status word. */
"MVTC R15, FPSW \n"\
"MVTC R15, FPSW \n" \
/* R1 to R15 - R0 is not included as it is the SP. */
"POPM R1-R15 \n"\
"POPM R1-R15 \n" \
/* This pops the remaining registers. */
"RTE \n"\
"NOP \n"\
"RTE \n" \
"NOP \n" \
"NOP \n"
);
}
@@ -235,86 +242,82 @@ void vSoftwareInterruptISR( void )
__asm volatile
(
/* Re-enable interrupts. */
"SETPSW I \n"\
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
* the interrupt occurred from the interrupt stack to the user stack.
*
* R15 is saved before it is clobbered. */
"PUSH.L R15 \n"\
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
"MVFC USP, R15 \n"\
"MVFC USP, R15 \n" \
/* Move the address down to the data being moved. */
"SUB #12, R15 \n"\
"MVTC R15, USP \n"\
"SUB #12, R15 \n" \
"MVTC R15, USP \n" \
/* Copy the data across, R15, then PC, then PSW. */
"MOV.L [ R0 ], [ R15 ] \n"\
"MOV.L 4[ R0 ], 4[ R15 ] \n"\
"MOV.L 8[ R0 ], 8[ R15 ] \n"\
"MOV.L [ R0 ], [ R15 ] \n" \
"MOV.L 4[ R0 ], 4[ R15 ] \n" \
"MOV.L 8[ R0 ], 8[ R15 ] \n" \
/* Move the interrupt stack pointer to its new correct position. */
"ADD #12, R0 \n"\
"ADD #12, R0 \n" \
/* All the rest of the registers are saved directly to the user stack. */
"SETPSW U \n"\
"SETPSW U \n" \
/* Save the rest of the general registers (R15 has been saved already). */
"PUSHM R1-R14 \n"\
"PUSHM R1-R14 \n" \
/* Save the FPSW and accumulator. */
"MVFC FPSW, R15 \n"\
"PUSH.L R15 \n"\
"MVFACHI R15 \n"\
"PUSH.L R15 \n"\
"MVFC FPSW, R15 \n" \
"PUSH.L R15 \n" \
"MVFACHI R15 \n" \
"PUSH.L R15 \n" \
/* Middle word. */
"MVFACMI R15 \n"\
"MVFACMI R15 \n" \
/* Shifted left as it is restored to the low order word. */
"SHLL #16, R15 \n"\
"PUSH.L R15 \n"\
"SHLL #16, R15 \n" \
"PUSH.L R15 \n" \
/* Save the stack pointer to the TCB. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L R0, [ R15 ] \n"\
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
* structures are being accessed. */
"MVTIPL %0 \n"\
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
"BSR.A _vTaskSwitchContext \n"\
"BSR.A _vTaskSwitchContext \n" \
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n"\
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
* state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L [ R15 ], R0 \n"\
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
* PC will be popped by the RTE instruction. */
"POP R15 \n"\
"MVTACLO R15 \n"\
"POP R15 \n"\
"MVTACHI R15 \n"\
"POP R15 \n"\
"MVTC R15, FPSW \n"\
"POPM R1-R15 \n"\
"RTE \n"\
"NOP \n"\
PC will be popped by the RTE instruction. */
"POP R15 \n" \
"MVTACLO R15 \n" \
"POP R15 \n" \
"MVTACHI R15 \n" \
"POP R15 \n" \
"MVTC R15, FPSW \n" \
"POPM R1-R15 \n" \
"RTE \n" \
"NOP \n" \
"NOP "
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ), "i" ( configKERNEL_INTERRUPT_PRIORITY )
:: "i"(configMAX_SYSCALL_INTERRUPT_PRIORITY), "i"(configKERNEL_INTERRUPT_PRIORITY)
);
}
/*-----------------------------------------------------------*/
@@ -322,10 +325,10 @@ void vSoftwareInterruptISR( void )
void vTickISR( void )
{
/* Re-enabled interrupts. */
__asm volatile ( "SETPSW I");
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
* necessitates. Ensure IPL is at the max syscall value first. */
necessitates. Ensure IPL is at the max syscall value first. */
portDISABLE_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@@ -341,8 +344,8 @@ uint32_t ulPortGetIPL( void )
{
__asm volatile
(
"MVFC PSW, R1 \n"\
"SHLR #24, R1 \n"\
"MVFC PSW, R1 \n" \
"SHLR #24, R1 \n" \
"RTS "
);
@@ -353,15 +356,18 @@ uint32_t ulPortGetIPL( void )
void vPortSetIPL( uint32_t ulNewIPL )
{
/* Avoid compiler warning about unreferenced parameter. */
( void ) ulNewIPL;
__asm volatile
(
"PUSH R5 \n"\
"MVFC PSW, R5 \n"\
"SHLL #24, R1 \n"\
"AND #-0F000001H, R5 \n"\
"OR R1, R5 \n"\
"MVTC R5, PSW \n"\
"POP R5 \n"\
"PUSH R5 \n" \
"MVFC PSW, R5 \n" \
"SHLL #24, R1 \n" \
"AND #-0F000001H, R5 \n" \
"OR R1, R5 \n" \
"MVTC R5, PSW \n" \
"POP R5 \n" \
"RTS "
);
}
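This port expects the application to provide vApplicationSetupTimerInterrupt(), so a minimal CMT0-based sketch follows for reference. It mirrors the default prvSetupTimerInterrupt() shown earlier in this commit; the register and macro names assume the Renesas iodefine definitions and the example is not part of the diff. When configTICK_VECTOR is not defined, the application must also route the CMT0 compare match interrupt to vTickISR().

/* Hypothetical application-provided tick source using CMT0. */
void vApplicationSetupTimerInterrupt( void )
{
    /* Unlock the protection register, enable the CMT0 module, lock again. */
    SYSTEM.PRCR.WORD = 0xA50B;
    MSTP( CMT0 ) = 0;
    SYSTEM.PRCR.WORD = 0xA500;

    /* Interrupt on compare match, count PCLK / 8, match after one tick period. */
    CMT0.CMCR.BIT.CMIE = 1;
    CMT0.CMCR.BIT.CKS = 0;
    CMT0.CMCOR = ( uint16_t ) ( ( configPERIPHERAL_CLOCK_HZ / 8 ) / configTICK_RATE_HZ );

    /* Enable the interrupt at the kernel priority and start the counter. */
    _IEN( _CMT0_CMI0 ) = 1;
    _IPR( _CMT0_CMI0 ) = configKERNEL_INTERRUPT_PRIORITY;
    CMT.CMSTR0.BIT.STR0 = 1;
}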


@@ -44,94 +44,94 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
* portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if ( configUSE_16_BIT_TICKS == 1 )
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
#endif
/*-----------------------------------------------------------*/
/* Hardware specifics. */
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile ( "NOP" )
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile( "NOP" )
/* Yield equivalent to "*portITU_SWINTR = 0x01; ( void ) *portITU_SWINTR;"
* where portITU_SWINTR is the location of the software interrupt register
* (0x000872E0). Don't rely on the assembler to select a register, so instead
* save and restore clobbered registers manually. */
#define portYIELD() \
where portITU_SWINTR is the location of the software interrupt register
(0x000872E0). Don't rely on the assembler to select a register, so instead
save and restore clobbered registers manually. */
#define portYIELD() \
__asm volatile \
( \
"PUSH.L R10 \n"\
"MOV.L #0x872E0, R10 \n"\
"MOV.B #0x1, [R10] \n"\
"MOV.L [R10], R10 \n"\
"POP R10 \n"\
"PUSH.L R10 \n" \
"MOV.L #0x872E0, R10 \n" \
"MOV.B #0x1, [R10] \n" \
"MOV.L [R10], R10 \n" \
"POP R10 \n" \
)
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
/* These macros should not be called directly, but through the
* taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
* performed if configASSERT() is defined to ensure an assertion handler does not
* inadvertently attempt to lower the IPL when the call to assert was triggered
* because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
* when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
* functions are those that end in FromISR. FreeRTOS maintains a separate
* interrupt API to ensure API function and interrupt entry is as fast and as
* simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0")
#ifdef configASSERT
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#endif
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#endif
/* Critical nesting counts are stored in the TCB. */
#define portCRITICAL_NESTING_IN_TCB ( 1 )
#define portCRITICAL_NESTING_IN_TCB ( 1 )
/* The critical nesting functions defined within tasks.c. */
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
/* As this port allows interrupt nesting... */
uint32_t ulPortGetIPL( void ) __attribute__( ( naked ) );
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__( ( naked ) );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
uint32_t ulPortGetIPL( void ) __attribute__((naked));
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked));
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* PORTMACRO_H */
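The interrupt mask macros above are normally reached through the kernel's FromISR critical section API; a short hypothetical sketch is shown below, with an illustrative handler name.

/* Hypothetical ISR-safe critical section. */
void vExampleHandler( void )
{
    UBaseType_t uxSavedInterruptStatus;

    /* Maps onto portSET_INTERRUPT_MASK_FROM_ISR() defined above. */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        /* Access data shared with tasks and other interrupts here. */
    }
    /* Maps onto portCLEAR_INTERRUPT_MASK_FROM_ISR(). */
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}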


@@ -37,22 +37,26 @@
#include "string.h"
/* Hardware specifics. */
#if defined(configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H) && (configINCLUDE_PLATFORM_H_INSTEAD_OF_IODEFINE_H == 1)
#include "platform.h"
#else
#include "iodefine.h"
#endif
/*-----------------------------------------------------------*/
/* Tasks should start with interrupts enabled and in Supervisor mode, therefore
* PSW is set with U and I set, and PM and IPL clear. */
PSW is set with U and I set, and PM and IPL clear. */
#define portINITIAL_PSW ( ( StackType_t ) 0x00030000 )
#define portINITIAL_FPSW ( ( StackType_t ) 0x00000100 )
/* These macros allow a critical section to be added around the call to
* xTaskIncrementTick(), which is only ever called from interrupts at the kernel
* priority - ie a known priority. Therefore these local macros are a slight
* optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
* which would require the old IPL to be read first and stored in a local variable. */
#define portMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portUNMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i" ( configKERNEL_INTERRUPT_PRIORITY ) )
xTaskIncrementTick(), which is only ever called from interrupts at the kernel
priority - ie a known priority. Therefore these local macros are a slight
optimisation compared to calling the global SET/CLEAR_INTERRUPT_MASK macros,
which would require the old IPL to be read first and stored in a local variable. */
#define portMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#define portUNMASK_INTERRUPTS_FROM_KERNEL_ISR() __asm volatile ( "MVTIPL %0" ::"i"(configKERNEL_INTERRUPT_PRIORITY) )
/*-----------------------------------------------------------*/
@@ -60,32 +64,38 @@
* Function to start the first task executing - written in asm code as direct
* access to registers is required.
*/
static void prvStartFirstTask( void ) __attribute__( ( naked ) );
static void prvStartFirstTask( void ) __attribute__((naked));
/*
* Software interrupt handler. Performs the actual context switch (saving and
* restoring of registers). Written in asm code as direct register access is
* required.
*/
void vSoftwareInterruptISR( void ) __attribute__( ( naked ) );
#if defined(configTICK_VECTOR)
void vSoftwareInterruptISR( void ) __attribute__((naked, vector( R_BSP_SECNAME_INTVECTTBL, VECT_ICU_SWINT )));
#else
void vSoftwareInterruptISR( void ) __attribute__((naked));
#endif
/*
* The tick interrupt handler.
*/
void vTickISR( void ) __attribute__( ( interrupt ) );
#if defined(configTICK_VECTOR)
void vTickISR( void ) __attribute__((interrupt( R_BSP_SECNAME_INTVECTTBL, _VECT( configTICK_VECTOR ) )));
#else
void vTickISR( void ) __attribute__((interrupt));
#endif
/*-----------------------------------------------------------*/
extern void * pxCurrentTCB;
extern void *pxCurrentTCB;
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters )
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* R0 is not included as it is the stack pointer. */
@@ -96,8 +106,8 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = ( StackType_t ) pxCode;
/* When debugging it can be useful if every register is set to a known
* value. Otherwise code space can be saved by just setting the registers
* that need to be set. */
value. Otherwise code space can be saved by just setting the registers
that need to be set. */
#ifdef USE_FULL_REGISTER_INITIALISATION
{
pxTopOfStack--;
@@ -130,11 +140,11 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
*pxTopOfStack = 0x22222222;
pxTopOfStack--;
}
#else /* ifdef USE_FULL_REGISTER_INITIALISATION */
#else
{
pxTopOfStack -= 15;
}
#endif /* ifdef USE_FULL_REGISTER_INITIALISATION */
#endif
*pxTopOfStack = ( StackType_t ) pvParameters; /* R1 */
pxTopOfStack--;
@@ -158,14 +168,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
BaseType_t xPortStartScheduler( void )
{
extern void vApplicationSetupTimerInterrupt( void );
extern void vApplicationSetupTimerInterrupt( void );
/* Use pxCurrentTCB just so it does not get optimised away. */
if( pxCurrentTCB != NULL )
{
/* Call an application function to set up the timer that will generate the
* tick interrupt. This way the application can decide which peripheral to
* use. A demo application is provided to show a suitable example. */
tick interrupt. This way the application can decide which peripheral to
use. A demo application is provided to show a suitable example. */
vApplicationSetupTimerInterrupt();
/* Enable the software interrupt. */
@@ -189,7 +199,7 @@ BaseType_t xPortStartScheduler( void )
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
* Artificially force an assert. */
Artificially force an assert. */
configASSERT( pxCurrentTCB == NULL );
}
/*-----------------------------------------------------------*/
@@ -198,57 +208,54 @@ static void prvStartFirstTask( void )
{
__asm volatile
(
/* When starting the scheduler there is nothing that needs moving to the
* interrupt stack because the function is not called from an interrupt.
* Just ensure the current stack is the user stack. */
"SETPSW U \n"\
interrupt stack because the function is not called from an interrupt.
Just ensure the current stack is the user stack. */
"SETPSW U \n" \
/* Obtain the location of the stack associated with which ever task
* pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [R15], R15 \n"\
"MOV.L [R15], R0 \n"\
pxCurrentTCB is currently pointing to. */
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [R15], R15 \n" \
"MOV.L [R15], R0 \n" \
/* Restore the registers from the stack of the task pointed to by
* pxCurrentTCB. */
"POP R15 \n"\
pxCurrentTCB. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15, A0 \n"\
"POP R15 \n"\
"MVTACLO R15, A0 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15, A0 \n"\
"POP R15 \n"\
"MVTACHI R15, A0 \n" \
"POP R15 \n" \
/* Accumulator guard. */
"MVTACGU R15, A0 \n"\
"POP R15 \n"\
"MVTACGU R15, A0 \n" \
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15, A1 \n"\
"POP R15 \n"\
"MVTACLO R15, A1 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15, A1 \n"\
"POP R15 \n"\
"MVTACHI R15, A1 \n" \
"POP R15 \n" \
/* Accumulator guard. */
"MVTACGU R15, A1 \n"\
"POP R15 \n"\
"MVTACGU R15, A1 \n" \
"POP R15 \n" \
/* Floating point status word. */
"MVTC R15, FPSW \n"\
"MVTC R15, FPSW \n" \
/* R1 to R15 - R0 is not included as it is the SP. */
"POPM R1-R15 \n"\
"POPM R1-R15 \n" \
/* This pops the remaining registers. */
"RTE \n"\
"NOP \n"\
"RTE \n" \
"NOP \n" \
"NOP \n"
);
}
@@ -259,111 +266,107 @@ void vSoftwareInterruptISR( void )
__asm volatile
(
/* Re-enable interrupts. */
"SETPSW I \n"\
"SETPSW I \n" \
/* Move the data that was automatically pushed onto the interrupt stack when
* the interrupt occurred from the interrupt stack to the user stack.
*
* R15 is saved before it is clobbered. */
"PUSH.L R15 \n"\
the interrupt occurred from the interrupt stack to the user stack.
R15 is saved before it is clobbered. */
"PUSH.L R15 \n" \
/* Read the user stack pointer. */
"MVFC USP, R15 \n"\
"MVFC USP, R15 \n" \
/* Move the address down to the data being moved. */
"SUB #12, R15 \n"\
"MVTC R15, USP \n"\
"SUB #12, R15 \n" \
"MVTC R15, USP \n" \
/* Copy the data across, R15, then PC, then PSW. */
"MOV.L [ R0 ], [ R15 ] \n"\
"MOV.L 4[ R0 ], 4[ R15 ] \n"\
"MOV.L 8[ R0 ], 8[ R15 ] \n"\
"MOV.L [ R0 ], [ R15 ] \n" \
"MOV.L 4[ R0 ], 4[ R15 ] \n" \
"MOV.L 8[ R0 ], 8[ R15 ] \n" \
/* Move the interrupt stack pointer to its new correct position. */
"ADD #12, R0 \n"\
"ADD #12, R0 \n" \
/* All the rest of the registers are saved directly to the user stack. */
"SETPSW U \n"\
"SETPSW U \n" \
/* Save the rest of the general registers (R15 has been saved already). */
"PUSHM R1-R14 \n"\
"PUSHM R1-R14 \n" \
/* Save the FPSW and accumulator. */
"MVFC FPSW, R15 \n"\
"PUSH.L R15 \n"\
"MVFACGU #0, A1, R15 \n"\
"PUSH.L R15 \n"\
"MVFACHI #0, A1, R15 \n"\
"PUSH.L R15 \n"\
"MVFC FPSW, R15 \n" \
"PUSH.L R15 \n" \
"MVFACGU #0, A1, R15 \n" \
"PUSH.L R15 \n" \
"MVFACHI #0, A1, R15 \n" \
"PUSH.L R15 \n" \
/* Low order word. */
"MVFACLO #0, A1, R15 \n"\
"PUSH.L R15 \n"\
"MVFACGU #0, A0, R15 \n"\
"PUSH.L R15 \n"\
"MVFACHI #0, A0, R15 \n"\
"PUSH.L R15 \n"\
"MVFACLO #0, A1, R15 \n" \
"PUSH.L R15 \n" \
"MVFACGU #0, A0, R15 \n" \
"PUSH.L R15 \n" \
"MVFACHI #0, A0, R15 \n" \
"PUSH.L R15 \n" \
/* Low order word. */
"MVFACLO #0, A0, R15 \n"\
"PUSH.L R15 \n"\
"MVFACLO #0, A0, R15 \n" \
"PUSH.L R15 \n" \
/* Save the stack pointer to the TCB. */
"MOV.L #_pxCurrentTCB, R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L R0, [ R15 ] \n"\
"MOV.L #_pxCurrentTCB, R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L R0, [ R15 ] \n" \
/* Ensure the interrupt mask is set to the syscall priority while the kernel
* structures are being accessed. */
"MVTIPL %0 \n"\
structures are being accessed. */
"MVTIPL %0 \n" \
/* Select the next task to run. */
"BSR.A _vTaskSwitchContext \n"\
"BSR.A _vTaskSwitchContext \n" \
/* Reset the interrupt mask as no more data structure access is required. */
"MVTIPL %1 \n"\
"MVTIPL %1 \n" \
/* Load the stack pointer of the task that is now selected as the Running
* state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n"\
"MOV.L [ R15 ], R15 \n"\
"MOV.L [ R15 ], R0 \n"\
state task from its TCB. */
"MOV.L #_pxCurrentTCB,R15 \n" \
"MOV.L [ R15 ], R15 \n" \
"MOV.L [ R15 ], R0 \n" \
/* Restore the context of the new task. The PSW (Program Status Word) and
* PC will be popped by the RTE instruction. */
"POP R15 \n"\
PC will be popped by the RTE instruction. */
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15, A0 \n"\
"POP R15 \n"\
"MVTACLO R15, A0 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15, A0 \n"\
"POP R15 \n"\
"MVTACHI R15, A0 \n" \
"POP R15 \n" \
/* Accumulator guard. */
"MVTACGU R15, A0 \n"\
"POP R15 \n"\
"MVTACGU R15, A0 \n" \
"POP R15 \n" \
/* Accumulator low 32 bits. */
"MVTACLO R15, A1 \n"\
"POP R15 \n"\
"MVTACLO R15, A1 \n" \
"POP R15 \n" \
/* Accumulator high 32 bits. */
"MVTACHI R15, A1 \n"\
"POP R15 \n"\
"MVTACHI R15, A1 \n" \
"POP R15 \n" \
/* Accumulator guard. */
"MVTACGU R15, A1 \n"\
"POP R15 \n"\
"MVTC R15, FPSW \n"\
"POPM R1-R15 \n"\
"RTE \n"\
"NOP \n"\
"MVTACGU R15, A1 \n" \
"POP R15 \n" \
"MVTC R15, FPSW \n" \
"POPM R1-R15 \n" \
"RTE \n" \
"NOP \n" \
"NOP "
::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ), "i" ( configKERNEL_INTERRUPT_PRIORITY )
:: "i"(configMAX_SYSCALL_INTERRUPT_PRIORITY), "i"(configKERNEL_INTERRUPT_PRIORITY)
);
}
/*-----------------------------------------------------------*/
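For orientation, the push order in the context switch code above means the stack pointer that ends up stored in the TCB points at a frame laid out roughly as sketched below, lowest address first. This is an illustrative sketch only; the port never declares such a type and the struct and member names are invented here.

#include <stdint.h>

/* Illustrative only: layout of the context frame left on a task's stack by
 * the context switch code above, starting at the saved stack pointer.  The
 * type and member names are invented for this sketch. */
typedef struct xSAVED_TASK_CONTEXT
{
    uint32_t ulAcc0Low;        /* MVFACLO #0, A0 - pushed last, so lowest address. */
    uint32_t ulAcc0High;       /* MVFACHI #0, A0 */
    uint32_t ulAcc0Guard;      /* MVFACGU #0, A0 */
    uint32_t ulAcc1Low;        /* MVFACLO #0, A1 */
    uint32_t ulAcc1High;       /* MVFACHI #0, A1 */
    uint32_t ulAcc1Guard;      /* MVFACGU #0, A1 */
    uint32_t ulFPSW;           /* Floating point status word. */
    uint32_t ulGeneral[ 14 ];  /* R1..R14, saved by PUSHM R1-R14. */
    uint32_t ulR15;
    uint32_t ulPC;             /* Return address, popped by RTE. */
    uint32_t ulPSW;            /* Program status word, popped by RTE. */
} SavedTaskContext_t;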
@ -371,10 +374,10 @@ void vSoftwareInterruptISR( void )
void vTickISR( void )
{
/* Re-enable interrupts. */
__asm volatile ( "SETPSW I");
__asm volatile( "SETPSW I" );
/* Increment the tick, and perform any processing the new tick value
* necessitates. Ensure IPL is at the max syscall value first. */
necessitates. Ensure IPL is at the max syscall value first. */
portMASK_INTERRUPTS_FROM_KERNEL_ISR();
{
if( xTaskIncrementTick() != pdFALSE )
@ -390,8 +393,8 @@ uint32_t ulPortGetIPL( void )
{
__asm volatile
(
"MVFC PSW, R1 \n"\
"SHLR #24, R1 \n"\
"MVFC PSW, R1 \n" \
"SHLR #24, R1 \n" \
"RTS "
);
@ -402,15 +405,18 @@ uint32_t ulPortGetIPL( void )
void vPortSetIPL( uint32_t ulNewIPL )
{
/* Avoid compiler warning about unreferenced parameter. */
( void ) ulNewIPL;
__asm volatile
(
"PUSH R5 \n"\
"MVFC PSW, R5 \n"\
"SHLL #24, R1 \n"\
"AND #-0F000001H, R5 \n"\
"OR R1, R5 \n"\
"MVTC R5, PSW \n"\
"POP R5 \n"\
"PUSH R5 \n" \
"MVFC PSW, R5 \n" \
"SHLL #24, R1 \n" \
"AND #-0F000001H, R5 \n" \
"OR R1, R5 \n" \
"MVTC R5, PSW \n" \
"POP R5 \n" \
"RTS "
);
}
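The AND operand -0F000001H is Renesas assembler notation for the 32-bit value 0xF0FFFFFF, so the sequence clears the four-bit IPL field (PSW bits 27:24) and ORs in the new level, which arrives in R1 and is shifted up by 24 bits. A minimal C model of that arithmetic, using an ordinary variable in place of the real PSW control register (which can only be reached with MVFC/MVTC), is:

#include <stdint.h>

/* Sketch only: models the bit manipulation performed by vPortSetIPL() above.
 * ulCurrentPSW stands in for the real PSW register, and ulNewIPL is expected
 * to be in the range 0..15, as on the real part. */
static uint32_t prvModelSetIPL( uint32_t ulCurrentPSW, uint32_t ulNewIPL )
{
    ulCurrentPSW &= 0xF0FFFFFFUL;        /* Clear the IPL field, PSW bits 27:24. */
    ulCurrentPSW |= ( ulNewIPL << 24 );  /* Insert the new interrupt priority level. */
    return ulCurrentPSW;
}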

View file

@ -44,94 +44,94 @@
*/
/* Type definitions - these are a bit legacy and not really used now, other than
* portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
portSTACK_TYPE and portBASE_TYPE. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if ( configUSE_16_BIT_TICKS == 1 )
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
* not need to be guarded with a critical section. */
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
#endif
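Because the tick type is 32 bits wide on this port, application code can read the tick count without a critical section. A sketch, with an invented helper name:

#include "FreeRTOS.h"
#include "task.h"

/* Sketch only: prvHasTimedOut() is an invented helper.  The direct read of
 * the tick count needs no critical section because a 32-bit read is atomic
 * here (portTICK_TYPE_IS_ATOMIC is 1). */
static BaseType_t prvHasTimedOut( TickType_t xStartTime, TickType_t xTimeout )
{
    return ( ( TickType_t ) ( xTaskGetTickCount() - xStartTime ) >= xTimeout ) ? pdTRUE : pdFALSE;
}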
/*-----------------------------------------------------------*/
/* Hardware specifics. */
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile ( "NOP" )
#define portBYTE_ALIGNMENT 8 /* Could make four, according to manual. */
#define portSTACK_GROWTH -1
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portNOP() __asm volatile( "NOP" )
/* Yield equivalent to "*portITU_SWINTR = 0x01; ( void ) *portITU_SWINTR;"
* where portITU_SWINTR is the location of the software interrupt register
* (0x000872E0). Don't rely on the assembler to select a register, so instead
* save and restore clobbered registers manually. */
#define portYIELD() \
where portITU_SWINTR is the location of the software interrupt register
(0x000872E0). Don't rely on the assembler to select a register, so instead
save and restore clobbered registers manually. */
#define portYIELD() \
__asm volatile \
( \
"PUSH.L R10 \n"\
"MOV.L #0x872E0, R10 \n"\
"MOV.B #0x1, [R10] \n"\
"MOV.L [R10], R10 \n"\
"POP R10 \n"\
"PUSH.L R10 \n" \
"MOV.L #0x872E0, R10 \n" \
"MOV.B #0x1, [R10] \n" \
"MOV.L [R10], R10 \n" \
"POP R10 \n" \
)
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
#define portYIELD_FROM_ISR( x ) if( x != pdFALSE ) portYIELD()
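In plain C the yield trigger described above looks like the sketch below; the real macro stays in assembly purely so the clobbered scratch register is saved and restored by hand rather than chosen by the compiler. The register definition here is for this sketch only, not part of the port:

#include <stdint.h>

/* Illustrative C equivalent of portYIELD().  portITU_SWINTR is the software
 * interrupt trigger register named in the comment above (0x000872E0). */
#define portITU_SWINTR    ( ( volatile uint8_t * ) 0x000872E0 )

static void prvModelYield( void )
{
    *portITU_SWINTR = 0x01;      /* Pend the yield (software) interrupt. */
    ( void ) *portITU_SWINTR;    /* Read back so the write reaches the ICU before continuing. */
}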
/* These macros should not be called directly, but through the
* taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
* performed if configASSERT() is defined to ensure an assertion handler does not
* inadvertently attempt to lower the IPL when the call to assert was triggered
* because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
* when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
* functions are those that end in FromISR. FreeRTOS maintains a separate
* interrupt API to ensure API function and interrupt entry is as fast and as
* simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0")
#ifdef configASSERT
taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros. An extra check is
performed if configASSERT() is defined to ensure an assertion handler does not
inadvertently attempt to lower the IPL when the call to assert was triggered
because the IPL value was found to be above configMAX_SYSCALL_INTERRUPT_PRIORITY
when an ISR safe FreeRTOS API function was executed. ISR safe FreeRTOS API
functions are those that end in FromISR. FreeRTOS maintains a separate
interrupt API to ensure API function and interrupt entry is as fast and as
simple as possible. */
#define portENABLE_INTERRUPTS() __asm volatile ( "MVTIPL #0" )
#ifdef configASSERT
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( ulPortGetIPL() <= configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0"::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) )
#endif
#define portDISABLE_INTERRUPTS() if( ulPortGetIPL() < configMAX_SYSCALL_INTERRUPT_PRIORITY ) __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#else
#define portDISABLE_INTERRUPTS() __asm volatile ( "MVTIPL %0" ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) )
#endif
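The macros above assume the application's FreeRTOSConfig.h supplies the two priority settings and, optionally, configASSERT(). The values below are examples only; every application chooses its own to suit its interrupt map:

/* Example FreeRTOSConfig.h fragment - the numbers are illustrative.  ISRs
 * running at or below configMAX_SYSCALL_INTERRUPT_PRIORITY may call the
 * ...FromISR() API; the kernel's own interrupts run at
 * configKERNEL_INTERRUPT_PRIORITY. */
#define configKERNEL_INTERRUPT_PRIORITY         1
#define configMAX_SYSCALL_INTERRUPT_PRIORITY    4

/* A common assertion handler: disable interrupts and spin so the failure can
 * be inspected with a debugger. */
#define configASSERT( x )    if( ( x ) == 0 ) { portDISABLE_INTERRUPTS(); for( ;; ) { /* Spin. */ } }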
/* Critical nesting counts are stored in the TCB. */
#define portCRITICAL_NESTING_IN_TCB ( 1 )
#define portCRITICAL_NESTING_IN_TCB ( 1 )
/* The critical nesting functions defined within tasks.c. */
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
#define portENTER_CRITICAL() vTaskEnterCritical()
#define portEXIT_CRITICAL() vTaskExitCritical()
/* As this port allows interrupt nesting... */
uint32_t ulPortGetIPL( void ) __attribute__( ( naked ) );
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__( ( naked ) );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
uint32_t ulPortGetIPL( void ) __attribute__((naked));
void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked));
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortGetIPL(); portDISABLE_INTERRUPTS()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ) vPortSetIPL( uxSavedInterruptStatus )
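These two macros are what taskENTER_CRITICAL_FROM_ISR() and taskEXIT_CRITICAL_FROM_ISR() resolve to on this port, so an interrupt handler that shares data with tasks or lower priority interrupts can use the familiar pattern sketched below (handler and variable names are invented):

#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedEventCount = 0;    /* Shared with task level code. */

void vExampleDeviceISR( void )    /* Invented handler name for this sketch. */
{
    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        ulSharedEventCount++;
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}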
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters )
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
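For completeness, a task written with these macros looks like the sketch below; the task name and delay period are invented:

#include "FreeRTOS.h"
#include "task.h"

/* Sketch only - vExampleTask and the 500 ms period are illustrative. */
portTASK_FUNCTION( vExampleTask, pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Do the task's work here, then block for half a second. */
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}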
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* PORTMACRO_H */