mirror of https://github.com/Rockbox/rockbox.git
kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and use different implementations.

Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
parent 8bae5f2644
commit 382d1861af
30 changed files with 1564 additions and 756 deletions

@@ -19,9 +19,10 @@
 *
 ****************************************************************************/

#include <stdbool.h>
#include "config.h"
#include "system.h"
#include <stdbool.h>
#include "kernel.h"
#include "font.h"
#include "lcd.h"
#include "button.h"

@@ -51,128 +51,6 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
    [COP] = cop_idlestackbegin
};

/* Core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

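Note (not part of the diff): the assembly below addresses the lock through cl->myl[] and a turn byte at offset 2. A sketch of the layout it assumes, inferred from that addressing; the real definition lives in the kernel's corelock header:

struct corelock
{
    volatile unsigned char myl[NUM_CORES]; /* per-core "interested" flags */
    volatile unsigned char turn;           /* byte offset 2 when NUM_CORES == 2 */
};
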
#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}

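Note on the addressing trick: the PP core IDs are the complementary byte masks 0x55 (CPU) and 0xaa (COP), so shifting an ID right by 7 bits yields the array index 0 or 1 directly, and the nonzero ID itself doubles as the "interested" flag. The equivalent C, as a sketch (mark_waiting is a hypothetical name, not from the diff):

static inline void mark_waiting(struct corelock *cl, unsigned int core)
{
    /* core is 0x55 or 0xaa; "[r0, r1, lsr #7]" computes this same byte offset */
    cl->myl[core >> 7] = core; /* 0x55 >> 7 == 0, 0xaa >> 7 == 1 */
}
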
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0;
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}

#else /* C versions for reference */

void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

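Taken together this is the classic two-party Peterson protocol: each side raises its flag, yields the turn, and may proceed once the other side is either uninterested or holds the turn. A minimal usage sketch of the API; shared_cl, shared_data and both callers are hypothetical, not from this diff:

static struct corelock shared_cl; /* corelock_init(&shared_cl) at startup */
static int shared_data;

void example_set(int v)
{
    corelock_lock(&shared_cl);    /* spin until the other core is out */
    shared_data = v;              /* keep the critical section short */
    corelock_unlock(&shared_cl);
}

int example_try_set(int v)
{
    if (!corelock_try_lock(&shared_cl))
        return 0;                 /* other core holds it; caller retries */
    shared_data = v;
    corelock_unlock(&shared_cl);
    return 1;
}
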
/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel

@@ -22,6 +22,7 @@
/* Taken from button-h10.c by Barry Wardell and reverse engineering by MrH. */

#include "system.h"
#include "kernel.h"
#include "button.h"
#include "backlight.h"
#include "powermgmt.h"

@@ -22,8 +22,10 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include "system.h"
#include "kernel.h"
#include "lcd.h"
#include "lcd-target.h"

@@ -682,3 +682,53 @@ void thread_get_name(char *buffer, int size,
        snprintf(buffer, size, fmt, name);
    }
}

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif

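These defaults let a build without a running scheduler, bootloaders in particular, intercept sleep() and yield() before they touch any threading state; a hook returns true to claim the call. A hypothetical override, assuming a target-supplied delay loop (none of these names come from the diff):

/* Hypothetical target hook: service the sleep with a busy-wait and return
 * true so sleep() below never calls sleep_thread()/switch_thread(). */
static inline bool bootloader_sleep_hook(unsigned ticks)
{
    busy_wait_ticks(ticks); /* assumed target-provided delay loop */
    return true;            /* handled; skip the scheduler entirely */
}
#define SLEEP_KERNEL_HOOK(ticks) bootloader_sleep_hook(ticks)
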
/*---------------------------------------------------------------------------
 * Suspends a thread's execution for at least the specified number of ticks.
 *
 * May result in CPU core entering wait-for-interrupt mode if no other thread
 * may be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick
 *       sleep(n) that doesn't result in rescheduling:
 *                n <= ticks suspended < n + 1
 *       n to n+1 is a lower bound. Other factors may affect the actual time
 *       a thread is suspended before it runs again.
 *---------------------------------------------------------------------------
 */
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

/*---------------------------------------------------------------------------
 * Elects another thread to run or, if no other thread may be made ready to
 * run, immediately returns control back to the calling thread.
 *---------------------------------------------------------------------------
 */
void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* handled */

    switch_thread();
}
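
In ordinary thread code the two calls pair naturally: yield() to retry soon without monopolizing the core, sleep() to give up the core for a known interval. A usage sketch, assuming the kernel's HZ ticks-per-second constant; the device functions are hypothetical:

void example_thread(void)
{
    for (;;)
    {
        if (!device_ready()) /* hypothetical readiness check */
        {
            yield();         /* let another thread run, then retry */
            continue;
        }
        process_device();    /* hypothetical work */
        sleep(HZ/10);        /* suspend for at least a tenth of a second */
    }
}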