Clean unused stuff out of thread.h and config.h and reorganize thread-pp.c to simplify the preprocessor blocks.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26743 a1c6a512-1295-4272-9138-f99709370657

parent 863891ce9a
commit 05ca8978c4

3 changed files with 309 additions and 416 deletions
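In outline, the cleanup replaces the old CONFIG_CORELOCK value scheme (CORELOCK_NONE / SW_CORELOCK) and its IF_SWCL/IFN_SWCL helpers with a single feature macro, HAVE_CORELOCK_OBJECT, defined only for genuine dual-core builds. A condensed sketch of the resulting selection logic, assembled from the hunks below for illustration (not literal file contents):

    /* Sketch only: condensed from the config.h and thread.h hunks below. */
    #if defined(CPU_PP) && !defined(FORCE_SINGLE_CORE)
    #define NUM_CORES 2
    #define HAVE_CORELOCK_OBJECT   /* real lock object + out-of-line functions */
    #else
    #define NUM_CORES 1
    #endif

    #ifndef HAVE_CORELOCK_OBJECT
    /* Single core: the lock operations compile away entirely */
    #define corelock_lock(cl)
    #define corelock_unlock(cl)
    #endif

With one flag instead of a value comparison, each consumer only has to test whether a corelock object exists at all.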
config.h
@@ -793,11 +793,6 @@ Lyre prototype 1 */
 #define FORCE_SINGLE_CORE
 #endif
 
-/* Core locking types - specifies type of atomic operation */
-#define CORELOCK_NONE 0
-#define SW_CORELOCK   1 /* Mutual exclusion provided by a software algorithm
-                           and not a special semaphore instruction */
-
 #if defined(CPU_PP)
 #define IDLE_STACK_SIZE  0x80
 #define IDLE_STACK_WORDS 0x20
@@ -811,6 +806,7 @@ Lyre prototype 1 */
 #if !defined(FORCE_SINGLE_CORE)
 
 #define NUM_CORES 2
+#define HAVE_CORELOCK_OBJECT
 #define CURRENT_CORE current_core()
 /* Attributes for core-shared data in DRAM where IRAM is better used for other
  * purposes. */
@@ -821,9 +817,7 @@ Lyre prototype 1 */
 #define IF_COP_VOID(...) __VA_ARGS__
 #define IF_COP_CORE(core) core
 
-#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
-
-#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
+#endif /* !defined(FORCE_SINGLE_CORE) */
 
 #endif /* CPU_PP */
 
@@ -832,18 +826,6 @@ Lyre prototype 1 */
 #define NOCACHEDATA_ATTR __attribute__((section(".ncdata"),nocommon))
 #endif
 
-#ifndef CONFIG_CORELOCK
-#define CONFIG_CORELOCK CORELOCK_NONE
-#endif
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define IF_SWCL(...) __VA_ARGS__
-#define IFN_SWCL(...)
-#else
-#define IF_SWCL(...)
-#define IFN_SWCL(...) __VA_ARGS__
-#endif /* CONFIG_CORELOCK == */
-
 #ifndef NUM_CORES
 /* Default to single core */
 #define NUM_CORES 1
@@ -855,7 +837,6 @@ Lyre prototype 1 */
 #define NOCACHEBSS_ATTR
 #define NOCACHEDATA_ATTR
 #endif
-#define CONFIG_CORELOCK CORELOCK_NONE
 
 #define IF_COP(...)
 #define IF_COP_VOID(...) void
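For context, the IF_COP_VOID macro kept above is what lets one prototype serve both build types: it expands to its arguments on a dual-core build and to void on a single-core build. A sketch of the expansion for core_wake, whose prototype appears in the thread.h hunks below:

    /* As declared in thread.h: */
    void core_wake(IF_COP_VOID(unsigned int core));

    /* Dual-core build: IF_COP_VOID(...) => __VA_ARGS__ */
    void core_wake(unsigned int core);

    /* Single-core build: IF_COP_VOID(...) => void */
    void core_wake(void);

Both expansions match the core_wake definitions added to thread-pp.c in this commit.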
thread.h
@@ -109,6 +109,23 @@ struct regs
     uint32_t lr;    /* 36 - r14 (lr) */
     uint32_t start; /* 40 - Thread start address, or NULL when started */
 };
+
+#ifdef CPU_PP
+#ifdef HAVE_CORELOCK_OBJECT
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+    volatile unsigned char myl[NUM_CORES];
+    volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#endif /* HAVE_CORELOCK_OBJECT */
+#endif /* CPU_PP */
 #elif defined(CPU_MIPS)
 struct regs
 {
@@ -162,26 +179,13 @@ struct thread_list
     struct thread_entry *next; /* Next thread in a list */
 };
 
-/* Small objects for core-wise mutual exclusion */
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* No reliable atomic instruction available - use Peterson's algorithm */
-struct corelock
-{
-    volatile unsigned char myl[NUM_CORES];
-    volatile unsigned char turn;
-} __attribute__((packed));
-
-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
-#else
+#ifndef HAVE_CORELOCK_OBJECT
 /* No atomic corelock op needed or just none defined */
 #define corelock_init(cl)
 #define corelock_lock(cl)
 #define corelock_try_lock(cl)
 #define corelock_unlock(cl)
-#endif /* core locking selection */
+#endif /* HAVE_CORELOCK_OBJECT */
 
 #ifdef HAVE_PRIORITY_SCHEDULING
 struct blocker
@@ -341,98 +345,6 @@ struct core_entry
 #define IFN_PRIO(...) __VA_ARGS__
 #endif
 
-/* Macros generate better code than an inline function is this case */
-#if defined (CPU_ARM)
-/* atomic */
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define test_and_set(a, v, cl) \
-    xchg8((a), (v), (cl))
-/* atomic */
-#define xchg8(a, v, cl) \
-({ uint32_t o;            \
-   corelock_lock(cl);     \
-   o = *(uint8_t *)(a);   \
-   *(uint8_t *)(a) = (v); \
-   corelock_unlock(cl);   \
-   o; })
-#define xchg32(a, v, cl) \
-({ uint32_t o;             \
-   corelock_lock(cl);      \
-   o = *(uint32_t *)(a);   \
-   *(uint32_t *)(a) = (v); \
-   corelock_unlock(cl);    \
-   o; })
-#define xchgptr(a, v, cl) \
-({ typeof (*(a)) o;     \
-   corelock_lock(cl);   \
-   o = *(a);            \
-   *(a) = (v);          \
-   corelock_unlock(cl); \
-   o; })
-#endif /* locking selection */
-#elif defined (CPU_COLDFIRE)
-/* atomic */
-/* one branch will be optimized away if v is a constant expression */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = 0;                  \
-   if (v) {                         \
-       asm volatile (               \
-           "bset.b #0, (%0)"        \
-           : : "a"((uint8_t*)(a))   \
-           : "cc");                 \
-   } else {                         \
-       asm volatile (               \
-           "bclr.b #0, (%0)"        \
-           : : "a"((uint8_t*)(a))   \
-           : "cc");                 \
-   }                                \
-   asm volatile ("sne.b %0"         \
-                 : "+d"(o));        \
-   o; })
-#elif CONFIG_CPU == SH7034
-/* atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o;                   \
-   asm volatile (                \
-       "tas.b @%2     \n"        \
-       "mov #-1, %0   \n"        \
-       "negc %0, %0   \n"        \
-       : "=r"(o)                 \
-       : "M"((uint32_t)(v)), /* Value of_v must be 1 */ \
-         "r"((uint8_t *)(a)));   \
-   o; })
-#endif /* CONFIG_CPU == */
-
-/* defaults for no asm version */
-#ifndef test_and_set
-/* not atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* test_and_set */
-#ifndef xchg8
-/* not atomic */
-#define xchg8(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* xchg8 */
-#ifndef xchg32
-/* not atomic */
-#define xchg32(a, v, ...) \
-({ uint32_t o = *(uint32_t *)(a); \
-   *(uint32_t *)(a) = (v);        \
-   o; })
-#endif /* xchg32 */
-#ifndef xchgptr
-/* not atomic */
-#define xchgptr(a, v, ...) \
-({ typeof (*(a)) o = *(a); \
-   *(a) = (v);             \
-   o; })
-#endif /* xchgptr */
-
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));
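The "use Peterson's algorithm" comment above refers to the classic two-party mutual-exclusion protocol that the corelock functions implement in thread-pp.c. A textbook C sketch against the same structure, for illustration only (the real Rockbox implementation encodes the core IDs as the complementary bitmasks 0x55/0xaa and is written in ARM assembly):

    /* Textbook Peterson's algorithm for NUM_CORES == 2 (sketch). */
    #define NUM_CORES 2
    struct corelock
    {
        volatile unsigned char myl[NUM_CORES];
        volatile unsigned char turn;
    };

    void corelock_lock(struct corelock *cl, unsigned int core) /* core is 0 or 1 */
    {
        const unsigned int other = 1 - core;
        cl->myl[core] = 1;        /* announce intent to enter */
        cl->turn = other;         /* give way if both arrive at once */
        while (cl->myl[other] != 0 && cl->turn == other)
            ;                     /* spin: other core is inside or has priority */
    }

    void corelock_unlock(struct corelock *cl, unsigned int core)
    {
        cl->myl[core] = 0;        /* matches the C reference version in thread-pp.c */
    }

The key property is that neither store needs to be atomic: each side writes only its own myl flag, and turn breaks the tie when both cores race for the lock.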
thread-pp.c
@@ -26,7 +26,21 @@
 #define IF_NO_SKIP_YIELD(...) __VA_ARGS__
 #endif
 
-#if NUM_CORES > 1
+#if NUM_CORES == 1
+/* Single-core variants for FORCE_SINGLE_CORE */
+static inline void core_sleep(void)
+{
+    sleep_core(CURRENT_CORE);
+    enable_irq();
+}
+
+/* Shared single-core build debugging version */
+void core_wake(void)
+{
+    /* No wakey - core already wakey (because this is it) */
+}
+#else /* NUM_CORES > 1 */
+/** Model-generic PP dual-core code **/
 extern uintptr_t cpu_idlestackbegin[];
 extern uintptr_t cpu_idlestackend[];
 extern uintptr_t cop_idlestackbegin[];
@@ -37,23 +51,7 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
     [COP] = cop_idlestackbegin
 };
 
-#if CONFIG_CPU == PP5002
-/* Bytes to emulate the PP502x mailbox bits */
-struct core_semaphores
-{
-    volatile uint8_t intend_wake;  /* 00h */
-    volatile uint8_t stay_awake;   /* 01h */
-    volatile uint8_t intend_sleep; /* 02h */
-    volatile uint8_t unused;       /* 03h */
-};
-
-static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
-#endif /* CONFIG_CPU == PP5002 */
-
-#endif /* NUM_CORES */
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Software core locks using Peterson's mutual exclusion algorithm */
+/* Core locks using Peterson's mutual exclusion algorithm */
 
 /*---------------------------------------------------------------------------
  * Initialize the corelock structure.
@@ -69,8 +67,7 @@ void corelock_init(struct corelock *cl)
  * Wait for the corelock to become free and acquire it when it does.
  *---------------------------------------------------------------------------
 */
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
+void __attribute__((naked)) corelock_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -96,8 +93,7 @@ void corelock_lock(struct corelock *cl)
  * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
  *---------------------------------------------------------------------------
 */
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
+int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -125,8 +121,7 @@ int corelock_try_lock(struct corelock *cl)
  * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
+void __attribute__((naked)) corelock_unlock(struct corelock *cl)
 {
     asm volatile (
         "mov r1, %0                  \n" /* r1 = PROCESSOR_ID */
@@ -138,11 +133,9 @@ void corelock_unlock(struct corelock *cl)
     );
     (void)cl;
 }
 
 #else /* C versions for reference */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and aquire it when it does.
- *---------------------------------------------------------------------------
- */
 void corelock_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
@@ -158,10 +151,6 @@ void corelock_lock(struct corelock *cl)
     }
 }
 
-/*---------------------------------------------------------------------------
- * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
 int corelock_try_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
@@ -179,256 +168,36 @@ int corelock_try_lock(struct corelock *cl)
     return 0;
 }
 
-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
 void corelock_unlock(struct corelock *cl)
 {
     cl->myl[CURRENT_CORE] = 0;
 }
 #endif /* ASM / C selection */
 
-#endif /* CONFIG_CORELOCK == SW_CORELOCK */
-
 /*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated and if
- * no other core requested a wakeup for it to perform a task.
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
  *---------------------------------------------------------------------------
  */
-#ifdef CPU_PP502x
-#if NUM_CORES == 1
-static inline void core_sleep(void)
-{
-    sleep_core(CURRENT_CORE);
-    enable_irq();
-}
-#else
-static inline void core_sleep(unsigned int core)
-{
-#if 1
-    asm volatile (
-        "mov r0, #4                          \n" /* r0 = 0x4 << core */
-        "mov r0, r0, lsl %[c]                \n"
-        "str r0, [%[mbx], #4]                \n" /* signal intent to sleep */
-        "ldr r1, [%[mbx], #0]                \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
-        "tst r1, r0, lsl #2                  \n"
-        "moveq r1, #0x80000000               \n" /* Then sleep */
-        "streq r1, [%[ctl], %[c], lsl #2]    \n"
-        "moveq r1, #0                        \n" /* Clear control reg */
-        "streq r1, [%[ctl], %[c], lsl #2]    \n"
-        "orr r1, r0, r0, lsl #2              \n" /* Signal intent to wake - clear wake flag */
-        "str r1, [%[mbx], #8]                \n"
-        "1:                                  \n" /* Wait for wake procedure to finish */
-        "ldr r1, [%[mbx], #0]                \n"
-        "tst r1, r0, lsr #2                  \n"
-        "bne 1b                              \n"
-        :
-        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
-        : "r0", "r1");
-#else /* C version for reference */
-    /* Signal intent to sleep */
-    MBX_MSG_SET = 0x4 << core;
-
-    /* Something waking or other processor intends to wake us? */
-    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
-    {
-        sleep_core(core);
-        wake_core(core);
-    }
-
-    /* Signal wake - clear wake flag */
-    MBX_MSG_CLR = 0x14 << core;
-
-    /* Wait for other processor to finish wake procedure */
-    while (MBX_MSG_STAT & (0x1 << core));
-#endif /* ASM/C selection */
-    enable_irq();
-}
-#endif /* NUM_CORES */
-#elif CONFIG_CPU == PP5002
-#if NUM_CORES == 1
-static inline void core_sleep(void)
-{
-    sleep_core(CURRENT_CORE);
-    enable_irq();
-}
-#else
-/* PP5002 has no mailboxes - emulate using bytes */
-static inline void core_sleep(unsigned int core)
-{
-#if 1
-    asm volatile (
-        "mov r0, #1                          \n" /* Signal intent to sleep */
-        "strb r0, [%[sem], #2]               \n"
-        "ldrb r0, [%[sem], #1]               \n" /* && stay_awake == 0? */
-        "cmp r0, #0                          \n"
-        "bne 2f                              \n"
-        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
-         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
-         * that the correct alternative is executed. Don't change the order
-         * of the next 4 instructions! */
-        "tst pc, #0x0c                       \n"
-        "mov r0, #0xca                       \n"
-        "strne r0, [%[ctl], %[c], lsl #2]    \n"
-        "streq r0, [%[ctl], %[c], lsl #2]    \n"
-        "nop                                 \n" /* nop's needed because of pipeline */
-        "nop                                 \n"
-        "nop                                 \n"
-        "2:                                  \n"
-        "mov r0, #0                          \n" /* Clear stay_awake and sleep intent */
-        "strb r0, [%[sem], #1]               \n"
-        "strb r0, [%[sem], #2]               \n"
-        "1:                                  \n" /* Wait for wake procedure to finish */
-        "ldrb r0, [%[sem], #0]               \n"
-        "cmp r0, #0                          \n"
-        "bne 1b                              \n"
-        :
-        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
-          [ctl]"r"(&CPU_CTL)
-        : "r0"
-        );
-#else /* C version for reference */
-    /* Signal intent to sleep */
-    core_semaphores[core].intend_sleep = 1;
-
-    /* Something waking or other processor intends to wake us? */
-    if (core_semaphores[core].stay_awake == 0)
-    {
-        sleep_core(core);
-    }
-
-    /* Signal wake - clear wake flag */
-    core_semaphores[core].stay_awake = 0;
-    core_semaphores[core].intend_sleep = 0;
-
-    /* Wait for other processor to finish wake procedure */
-    while (core_semaphores[core].intend_wake != 0);
-
-    /* Enable IRQ */
-#endif /* ASM/C selection */
-    enable_irq();
-}
-#endif /* NUM_CORES */
-#endif /* PP CPU type */
-
-/*---------------------------------------------------------------------------
- * Wake another processor core that is sleeping or prevent it from doing so
- * if it was already destined. FIQ, IRQ should be disabled before calling.
- *---------------------------------------------------------------------------
- */
-#if NUM_CORES == 1
-/* Shared single-core build debugging version */
-void core_wake(void)
-{
-    /* No wakey - core already wakey */
-}
-#elif defined (CPU_PP502x)
-void core_wake(unsigned int othercore)
-{
-#if 1
-    /* avoid r0 since that contains othercore */
-    asm volatile (
-        "mrs r3, cpsr                        \n" /* Disable IRQ */
-        "orr r1, r3, #0x80                   \n"
-        "msr cpsr_c, r1                      \n"
-        "mov r2, #0x11                       \n" /* r2 = (0x11 << othercore) */
-        "mov r2, r2, lsl %[oc]               \n" /* Signal intent to wake othercore */
-        "str r2, [%[mbx], #4]                \n"
-        "1:                                  \n" /* If it intends to sleep, let it first */
-        "ldr r1, [%[mbx], #0]                \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
-        "eor r1, r1, #0xc                    \n"
-        "tst r1, r2, lsr #2                  \n"
-        "ldr r1, [%[ctl], %[oc], lsl #2]     \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
-        "tsteq r1, #0x80000000               \n"
-        "beq 1b                              \n" /* Wait for sleep or wake */
-        "tst r1, #0x80000000                 \n" /* If sleeping, wake it */
-        "movne r1, #0x0                      \n"
-        "strne r1, [%[ctl], %[oc], lsl #2]   \n"
-        "mov r1, r2, lsr #4                  \n"
-        "str r1, [%[mbx], #8]                \n" /* Done with wake procedure */
-        "msr cpsr_c, r3                      \n" /* Restore IRQ */
-        :
-        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
-          [oc]"r"(othercore)
-        : "r1", "r2", "r3");
-#else /* C version for reference */
-    /* Disable interrupts - avoid reentrancy from the tick */
-    int oldlevel = disable_irq_save();
-
-    /* Signal intent to wake other processor - set stay awake */
-    MBX_MSG_SET = 0x11 << othercore;
-
-    /* If it intends to sleep, wait until it does or aborts */
-    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
-           (PROC_CTL(othercore) & PROC_SLEEP) == 0);
-
-    /* If sleeping, wake it up */
-    if (PROC_CTL(othercore) & PROC_SLEEP)
-        PROC_CTL(othercore) = 0;
-
-    /* Done with wake procedure */
-    MBX_MSG_CLR = 0x1 << othercore;
-    restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#elif CONFIG_CPU == PP5002
-/* PP5002 has no mailboxes - emulate using bytes */
-void core_wake(unsigned int othercore)
-{
-#if 1
-    /* avoid r0 since that contains othercore */
-    asm volatile (
-        "mrs r3, cpsr                        \n" /* Disable IRQ */
-        "orr r1, r3, #0x80                   \n"
-        "msr cpsr_c, r1                      \n"
-        "mov r1, #1                          \n" /* Signal intent to wake other core */
-        "orr r1, r1, r1, lsl #8              \n" /* and set stay_awake */
-        "strh r1, [%[sem], #0]               \n"
-        "mov r2, #0x8000                     \n"
-        "1:                                  \n" /* If it intends to sleep, let it first */
-        "ldrb r1, [%[sem], #2]               \n" /* intend_sleep != 0 ? */
-        "cmp r1, #1                          \n"
-        "ldr r1, [%[st]]                     \n" /* && not sleeping ? */
-        "tsteq r1, r2, lsr %[oc]             \n"
-        "beq 1b                              \n" /* Wait for sleep or wake */
-        "tst r1, r2, lsr %[oc]               \n"
-        "ldrne r2, =0xcf004054               \n" /* If sleeping, wake it */
-        "movne r1, #0xce                     \n"
-        "strne r1, [r2, %[oc], lsl #2]       \n"
-        "mov r1, #0                          \n" /* Done with wake procedure */
-        "strb r1, [%[sem], #0]               \n"
-        "msr cpsr_c, r3                      \n" /* Restore IRQ */
-        :
-        : [sem]"r"(&core_semaphores[othercore]),
-          [st]"r"(&PROC_STAT),
-          [oc]"r"(othercore)
-        : "r1", "r2", "r3"
-        );
-#else /* C version for reference */
-    /* Disable interrupts - avoid reentrancy from the tick */
-    int oldlevel = disable_irq_save();
-
-    /* Signal intent to wake other processor - set stay awake */
-    core_semaphores[othercore].intend_wake = 1;
-    core_semaphores[othercore].stay_awake = 1;
-
-    /* If it intends to sleep, wait until it does or aborts */
-    while (core_semaphores[othercore].intend_sleep != 0 &&
-           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
-
-    /* If sleeping, wake it up */
-    if (PROC_STAT & PROC_SLEEPING(othercore))
-        wake_core(othercore);
-
-    /* Done with wake procedure */
-    core_semaphores[othercore].intend_wake = 0;
-    restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#endif /* CPU type */
-
-#if NUM_CORES > 1
+static void INIT_ATTR core_thread_init(unsigned int core)
+{
+    if (core == CPU)
+    {
+        /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+        MBX_MSG_CLR = 0x3f;
+#endif
+        wake_core(COP);
+        /* Sleep until COP has finished */
+        sleep_core(CPU);
+    }
+    else
+    {
+        /* Wake the CPU and return */
+        wake_core(CPU);
+    }
+}
+
 /*---------------------------------------------------------------------------
  * Switches to a stack that always resides in the Rockbox core.
  *
@@ -507,35 +276,266 @@ static void __attribute__((naked))
         "mov lr, pc                  \n"
         "bx  r0                      \n"
         "ldmfd sp!, { r4-r11, pc }   \n" /* Restore non-volatile context to new core and return */
-        ".ltorg                      \n" /* Dump constant pool */
         : : "i"(IDLE_STACK_WORDS)
     );
     (void)core; (void)thread;
 }
 
-/*---------------------------------------------------------------------------
- * Do any device-specific inits for the threads and synchronize the kernel
- * initializations.
- *---------------------------------------------------------------------------
- */
-static void core_thread_init(unsigned int core) INIT_ATTR;
-static void core_thread_init(unsigned int core)
-{
-    if (core == CPU)
-    {
-        /* Wake up coprocessor and let it initialize kernel and threads */
-#ifdef CPU_PP502x
-        MBX_MSG_CLR = 0x3f;
-#endif
-        wake_core(COP);
-        /* Sleep until COP has finished */
-        sleep_core(CPU);
-    }
-    else
-    {
-        /* Wake the CPU and return */
-        wake_core(CPU);
-    }
-}
-#endif /* NUM_CORES */
+/** PP-model-specific dual-core code **/
+
+#if CONFIG_CPU == PP5002
+/* PP5002 has no mailboxes - Bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+    volatile uint8_t intend_wake;  /* 00h */
+    volatile uint8_t stay_awake;   /* 01h */
+    volatile uint8_t intend_sleep; /* 02h */
+    volatile uint8_t unused;       /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+
+#if 1 /* Select ASM */
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(unsigned int core)
+{
+    asm volatile (
+        "mov r0, #1                          \n" /* Signal intent to sleep */
+        "strb r0, [%[sem], #2]               \n"
+        "ldrb r0, [%[sem], #1]               \n" /* && stay_awake == 0? */
+        "cmp r0, #0                          \n"
+        "bne 2f                              \n"
+        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
+         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
+         * that the correct alternative is executed. Don't change the order
+         * of the next 4 instructions! */
+        "tst pc, #0x0c                       \n"
+        "mov r0, #0xca                       \n"
+        "strne r0, [%[ctl], %[c], lsl #2]    \n"
+        "streq r0, [%[ctl], %[c], lsl #2]    \n"
+        "nop                                 \n" /* nop's needed because of pipeline */
+        "nop                                 \n"
+        "nop                                 \n"
+        "2:                                  \n"
+        "mov r0, #0                          \n" /* Clear stay_awake and sleep intent */
+        "strb r0, [%[sem], #1]               \n"
+        "strb r0, [%[sem], #2]               \n"
+        "1:                                  \n" /* Wait for wake procedure to finish */
+        "ldrb r0, [%[sem], #0]               \n"
+        "cmp r0, #0                          \n"
+        "bne 1b                              \n"
+        :
+        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
+          [ctl]"r"(&CPU_CTL)
+        : "r0"
+        );
+    enable_irq();
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(unsigned int othercore)
+{
+    /* avoid r0 since that contains othercore */
+    asm volatile (
+        "mrs r3, cpsr                        \n" /* Disable IRQ */
+        "orr r1, r3, #0x80                   \n"
+        "msr cpsr_c, r1                      \n"
+        "mov r1, #1                          \n" /* Signal intent to wake other core */
+        "orr r1, r1, r1, lsl #8              \n" /* and set stay_awake */
+        "strh r1, [%[sem], #0]               \n"
+        "mov r2, #0x8000                     \n"
+        "1:                                  \n" /* If it intends to sleep, let it first */
+        "ldrb r1, [%[sem], #2]               \n" /* intend_sleep != 0 ? */
+        "cmp r1, #1                          \n"
+        "ldr r1, [%[st]]                     \n" /* && not sleeping ? */
+        "tsteq r1, r2, lsr %[oc]             \n"
+        "beq 1b                              \n" /* Wait for sleep or wake */
+        "tst r1, r2, lsr %[oc]               \n"
+        "ldrne r2, =0xcf004054               \n" /* If sleeping, wake it */
+        "movne r1, #0xce                     \n"
+        "strne r1, [r2, %[oc], lsl #2]       \n"
+        "mov r1, #0                          \n" /* Done with wake procedure */
+        "strb r1, [%[sem], #0]               \n"
+        "msr cpsr_c, r3                      \n" /* Restore IRQ */
+        :
+        : [sem]"r"(&core_semaphores[othercore]),
+          [st]"r"(&PROC_STAT),
+          [oc]"r"(othercore)
+        : "r1", "r2", "r3"
+        );
+}
+
+#else /* C version for reference */
+
+static inline void core_sleep(unsigned int core)
+{
+    /* Signal intent to sleep */
+    core_semaphores[core].intend_sleep = 1;
+
+    /* Something waking or other processor intends to wake us? */
+    if (core_semaphores[core].stay_awake == 0)
+    {
+        sleep_core(core);
+    }
+
+    /* Signal wake - clear wake flag */
+    core_semaphores[core].stay_awake = 0;
+    core_semaphores[core].intend_sleep = 0;
+
+    /* Wait for other processor to finish wake procedure */
+    while (core_semaphores[core].intend_wake != 0);
+
+    /* Enable IRQ */
+    enable_irq();
+}
+
+void core_wake(unsigned int othercore)
+{
+    /* Disable interrupts - avoid reentrancy from the tick */
+    int oldlevel = disable_irq_save();
+
+    /* Signal intent to wake other processor - set stay awake */
+    core_semaphores[othercore].intend_wake = 1;
+    core_semaphores[othercore].stay_awake = 1;
+
+    /* If it intends to sleep, wait until it does or aborts */
+    while (core_semaphores[othercore].intend_sleep != 0 &&
+           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+
+    /* If sleeping, wake it up */
+    if (PROC_STAT & PROC_SLEEPING(othercore))
+        wake_core(othercore);
+
+    /* Done with wake procedure */
+    core_semaphores[othercore].intend_wake = 0;
+    restore_irq(oldlevel);
+}
+#endif /* ASM/C selection */
+
+#elif defined (CPU_PP502x)
+
+#if 1 /* Select ASM */
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(unsigned int core)
+{
+    asm volatile (
+        "mov r0, #4                          \n" /* r0 = 0x4 << core */
+        "mov r0, r0, lsl %[c]                \n"
+        "str r0, [%[mbx], #4]                \n" /* signal intent to sleep */
+        "ldr r1, [%[mbx], #0]                \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+        "tst r1, r0, lsl #2                  \n"
+        "moveq r1, #0x80000000               \n" /* Then sleep */
+        "streq r1, [%[ctl], %[c], lsl #2]    \n"
+        "moveq r1, #0                        \n" /* Clear control reg */
+        "streq r1, [%[ctl], %[c], lsl #2]    \n"
+        "orr r1, r0, r0, lsl #2              \n" /* Signal intent to wake - clear wake flag */
+        "str r1, [%[mbx], #8]                \n"
+        "1:                                  \n" /* Wait for wake procedure to finish */
+        "ldr r1, [%[mbx], #0]                \n"
+        "tst r1, r0, lsr #2                  \n"
+        "bne 1b                              \n"
+        :
+        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
+        : "r0", "r1");
+    enable_irq();
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(unsigned int othercore)
+{
+    /* avoid r0 since that contains othercore */
+    asm volatile (
+        "mrs r3, cpsr                        \n" /* Disable IRQ */
+        "orr r1, r3, #0x80                   \n"
+        "msr cpsr_c, r1                      \n"
+        "mov r2, #0x11                       \n" /* r2 = (0x11 << othercore) */
+        "mov r2, r2, lsl %[oc]               \n" /* Signal intent to wake othercore */
+        "str r2, [%[mbx], #4]                \n"
+        "1:                                  \n" /* If it intends to sleep, let it first */
+        "ldr r1, [%[mbx], #0]                \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
+        "eor r1, r1, #0xc                    \n"
+        "tst r1, r2, lsr #2                  \n"
+        "ldr r1, [%[ctl], %[oc], lsl #2]     \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
+        "tsteq r1, #0x80000000               \n"
+        "beq 1b                              \n" /* Wait for sleep or wake */
+        "tst r1, #0x80000000                 \n" /* If sleeping, wake it */
+        "movne r1, #0x0                      \n"
+        "strne r1, [%[ctl], %[oc], lsl #2]   \n"
+        "mov r1, r2, lsr #4                  \n"
+        "str r1, [%[mbx], #8]                \n" /* Done with wake procedure */
+        "msr cpsr_c, r3                      \n" /* Restore IRQ */
+        :
+        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
+          [oc]"r"(othercore)
+        : "r1", "r2", "r3");
+}
+
+#else /* C version for reference */
+
+static inline void core_sleep(unsigned int core)
+{
+    /* Signal intent to sleep */
+    MBX_MSG_SET = 0x4 << core;
+
+    /* Something waking or other processor intends to wake us? */
+    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
+    {
+        sleep_core(core);
+        wake_core(core);
+    }
+
+    /* Signal wake - clear wake flag */
+    MBX_MSG_CLR = 0x14 << core;
+
+    /* Wait for other processor to finish wake procedure */
+    while (MBX_MSG_STAT & (0x1 << core));
+    enable_irq();
+}
+
+void core_wake(unsigned int othercore)
+{
+    /* Disable interrupts - avoid reentrancy from the tick */
+    int oldlevel = disable_irq_save();
+
+    /* Signal intent to wake other processor - set stay awake */
+    MBX_MSG_SET = 0x11 << othercore;
+
+    /* If it intends to sleep, wait until it does or aborts */
+    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
+           (PROC_CTL(othercore) & PROC_SLEEP) == 0);
+
+    /* If sleeping, wake it up */
+    if (PROC_CTL(othercore) & PROC_SLEEP)
+        PROC_CTL(othercore) = 0;
+
+    /* Done with wake procedure */
+    MBX_MSG_CLR = 0x1 << othercore;
+    restore_irq(oldlevel);
+}
+#endif /* ASM/C selection */
+
+#endif /* CPU_PPxxxx */
+
+/* Keep constant pool in range of inline ASM */
+static void __attribute__((naked, used)) dump_ltorg(void)
+{
+    asm volatile (".ltorg");
+}
+
+#endif /* NUM_CORES */
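On the usage side, every core_sleep() variant above is written to be entered with interrupts disabled and ends by calling enable_irq() itself, while core_wake() expects FIQ/IRQ to already be disabled by its caller, per the comment block. A hypothetical idle path showing that contract (nothing_ready() is a placeholder, not a Rockbox function; the IF_COP wrapper follows the config.h macros shown earlier):

    /* Sketch of the calling contract, for illustration only. */
    static void idle_this_core(void)
    {
        disable_irq();              /* close the race against core_wake() */
        if (nothing_ready())        /* placeholder for the run-queue check */
            core_sleep(IF_COP(CURRENT_CORE)); /* sleeps, then re-enables IRQ itself */
        else
            enable_irq();
    }

Checking the run queue with interrupts off is what makes the intend_sleep/stay_awake handshake race-free: a wakeup that arrives between the check and the sleep is caught by the other core's core_wake() spin loop.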