/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/  \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
|
|
#include <stdbool.h>
|
|
#include "thread.h"
|
|
#include "panic.h"
|
|
#include "system.h"
|
|
#include "kernel.h"
|
|
#include "cpu.h"
|
|
#include "string.h"
|
|
|
|
#define DEADBEEF ((unsigned int)0xdeadbeef)
|
|
/* Cast to the the machine int type, whose size could be < 4. */
|
|
|
|
struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif

/* Define to enable additional checks for blocking violations etc. */
// #define THREAD_EXTRA_CHECKS

static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];

#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up some dummy
   stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif

/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread) ICODE_ATTR;
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread) ICODE_ATTR;
*/

void switch_thread(bool save_context, struct thread_entry **blocked_list)
    ICODE_ATTR;

static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr) __attribute__ ((always_inline));

#ifdef RB_PROFILE
#include <profile.h>
void profile_thread(void) {
    profstart(cores[CURRENT_CORE].current_thread);
}
#endif

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
        "ldr    r0, [%0, #40]          \n" /* load start pointer */
        "mov    r1, #0                 \n"
        "cmp    r0, r1                 \n" /* check for NULL */
        "strne  r1, [%0, #40]          \n" /* if not NULL, clear the start pointer.. */
        "movne  pc, r0                 \n" /* ..and jump to the thread's start address */
        : : "r" (addr) : "r0", "r1"
    );
}

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8  \n"
        "mov.l   @%0+,r9  \n"
        "mov.l   @%0+,r10 \n"
        "mov.l   @%0+,r11 \n"
        "mov.l   @%0+,r12 \n"
        "mov.l   @%0+,r13 \n"
        "mov.l   @%0+,r14 \n"
        "mov.l   @%0+,r15 \n"
        "lds.l   @%0+,pr  \n"
        "mov.l   @%0,r0   \n" /* Get start address */
        "tst     r0,r0    \n"
        "bt      .running \n" /* NULL -> already running */
        "lds     r0,pr    \n"
        "mov     #0,r0    \n"
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address (runs in the delay slot) */
    ".running:            \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr)              \
    __asm__ volatile (                   \
        "push r0,r1\n\t"                 \
        "push r2,r3\n\t"                 \
        "push r4,r5\n\t"                 \
        "push r6,r7\n\t"                 \
        "push a8,a9\n\t"                 \
        "push a10,a11\n\t"               \
        "push a12,a13\n\t"               \
        "push a14\n\t"                   \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr)               \
{                                        \
    if (!(addr)->started) {              \
        (addr)->started = 1;             \
        __asm__ volatile (               \
            "ldw a15, @[%0+0]\n\t"       \
            "ldw a14, @[%0+4]\n\t"       \
            "jmp a14\n\t" : : "a" (addr) \
        );                               \
    } else                               \
        __asm__ volatile (               \
            "ldw a15, @[%0+0]\n\t"       \
            "pop a14\n\t"                \
            "pop a13,a12\n\t"            \
            "pop a11,a10\n\t"            \
            "pop a9,a8\n\t"              \
            "pop r7,r6\n\t"              \
            "pop r5,r4\n\t"              \
            "pop r3,r2\n\t"              \
            "pop r1,r0\n\t" : : "a" (addr) \
        );                               \
                                         \
}

#endif

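/*---------------------------------------------------------------------------
 * Add a thread to a circular, doubly linked list of threads. An empty list
 * is created if necessary; otherwise the thread is linked in at the tail.
 *---------------------------------------------------------------------------
 */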
static void add_to_list(struct thread_entry **list,
                        struct thread_entry *thread)
{
    if (*list == NULL)
    {
        thread->next = thread;
        thread->prev = thread;
        *list = thread;
    }
    else
    {
        /* Insert last */
        thread->next = *list;
        thread->prev = (*list)->prev;
        thread->prev->next = thread;
        (*list)->prev = thread;

        /* Insert next
         thread->next = (*list)->next;
         thread->prev = *list;
         thread->next->prev = thread;
         (*list)->next = thread;
         */
    }
}

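/*---------------------------------------------------------------------------
 * Unlink a thread from a circular, doubly linked list. When a list head is
 * given it is updated as well, becoming NULL if the thread was the only
 * entry.
 *---------------------------------------------------------------------------
 */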
static void remove_from_list(struct thread_entry **list,
                             struct thread_entry *thread)
{
    if (list != NULL)
    {
        if (thread == thread->next)
        {
            *list = NULL;
            return;
        }

        if (thread == *list)
            *list = thread->next;
    }

    /* Fix links to jump over the removed entry. */
    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
}

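/*---------------------------------------------------------------------------
 * Wake up any sleeping thread whose timeout has expired by moving it from
 * the core's sleeping list back to the running list.
 *---------------------------------------------------------------------------
 */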
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void check_sleepers(void)
{
    struct thread_entry *current, *next;

    /* Check sleeping threads. */
    current = cores[CURRENT_CORE].sleeping;
    if (current == NULL)
        return ;

    for (;;)
    {
        next = current->next;

        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
        {
            /* Sleep timeout has been reached so bring the thread
             * back to life again. */
            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
            add_to_list(&cores[CURRENT_CORE].running, current);

            /* If there are no more processes in the list, break the loop. */
            if (cores[CURRENT_CORE].sleeping == NULL)
                break;

            current = next;
            continue;
        }

        current = next;

        /* Break the loop once we have walked through the list of all
         * sleeping processes. */
        if (current == cores[CURRENT_CORE].sleeping)
            break;
    }
}

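/*---------------------------------------------------------------------------
 * Idle the core until at least one thread is on the running list, waking
 * expired sleepers once per tick and entering the CPU's low-power sleep
 * mode in between.
 *---------------------------------------------------------------------------
 */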
static inline void sleep_core(void)
{
    static long last_tick = 0;

    for (;;)
    {
        if (last_tick != current_tick)
        {
            check_sleepers();
            last_tick = current_tick;
        }

        /* We must sleep until there is at least one process in the list
         * of running processes. */
        if (cores[CURRENT_CORE].running != NULL)
            break;

        /* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instr on CalmRisc16.
         * Unfortunately, the manual doesn't specify which arg to use.
         __asm__ volatile ("sys #0x0f");
         0x1f seems to trigger a reset;
         0x0f is the only other argument used by Archos.
         */
#elif CONFIG_CPU == S3C2440
        CLKCON |= 2;
#endif
    }
}

#ifdef RB_PROFILE
static int get_threadnum(struct thread_entry *thread)
{
    int i;

    for (i = 0; i < MAXTHREADS; i++)
    {
        if (&cores[CURRENT_CORE].threads[i] == thread)
            return i;
    }

    return -1;
}
#endif

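/*---------------------------------------------------------------------------
 * Move the currently running thread off the running list and onto either
 * the given blocked list or the core's sleeping list, depending on the
 * state requested in its statearg.
 *---------------------------------------------------------------------------
 */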
/* Compiler trick: Don't declare as static to prevent putting
 * function in IRAM. */
void change_thread_state(struct thread_entry **blocked_list)
{
    struct thread_entry *old;

    /* Remove the thread from the list of running threads. */
    old = cores[CURRENT_CORE].running;
    remove_from_list(&cores[CURRENT_CORE].running, old);

    /* And put the thread into a new list of inactive threads. */
    if (GET_STATE(old->statearg) == STATE_BLOCKED)
        add_to_list(blocked_list, old);
    else
        add_to_list(&cores[CURRENT_CORE].sleeping, old);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Reset priorities */
    if (old->priority == highest_priority)
        highest_priority = 100;
#endif
}

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
#ifdef RB_PROFILE
    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
#endif
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    if (save_context)
    {
        store_context(&cores[CURRENT_CORE].running->context);

# if CONFIG_CPU != TCC730
        /* Check if the current thread stack is overflown */
        stackptr = cores[CURRENT_CORE].running->stack;
        if(stackptr[0] != DEADBEEF)
            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
# endif

        /* Check if a thread state change has been requested. */
        if (cores[CURRENT_CORE].running->statearg)
        {
            /* Change running thread state and switch to next thread. */
            change_thread_state(blocked_list);
        }
        else
        {
            /* Switch to the next running thread. */
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
        }
    }

    /* Go through the list of sleeping tasks to check if we need to wake up
     * any of them due to timeout. Also puts the core into sleep state until
     * there is at least one running process again. */
    sleep_core();

#ifdef HAVE_PRIORITY_SCHEDULING
    /* Select the new task based on priorities and the last time a process
     * got CPU time. */
    for (;;)
    {
        int priority = cores[CURRENT_CORE].running->priority;

        if (priority < highest_priority)
            highest_priority = priority;

        if (priority == highest_priority || (current_tick
            - cores[CURRENT_CORE].running->last_run > priority * 8))
        {
            break;
        }
        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
    }

    /* Reset the value of thread's last running time to the current time. */
    cores[CURRENT_CORE].running->last_run = current_tick;
#endif

#endif
    /* And finally give control to the next thread. */
    load_context(&cores[CURRENT_CORE].running->context);

#ifdef RB_PROFILE
    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
#endif
}

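/*---------------------------------------------------------------------------
 * Put the current thread to sleep for at least the given number of ticks.
 *---------------------------------------------------------------------------
 */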
void sleep_thread(int ticks)
{
    /* Set the thread's new state and timeout and finally force a task switch
     * so that the scheduler removes the thread from the list of running
     * processes and puts it in the list of sleeping tasks. */
    cores[CURRENT_CORE].running->statearg =
        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
    switch_thread(true, NULL);

    /* Clear all flags to indicate we are up and running again. */
    cores[CURRENT_CORE].running->statearg = 0;
}

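/*---------------------------------------------------------------------------
 * Block the current thread on the given list, optionally with a timeout in
 * ticks. With a timeout the thread is put on the core's sleeping list and a
 * single pointer to it is stored in *list; without one it is linked into
 * *list and stays blocked until wakeup_thread() is called.
 *---------------------------------------------------------------------------
 */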
void block_thread(struct thread_entry **list, int timeout)
{
    struct thread_entry *current;

    /* Get the entry for the current running thread. */
    current = cores[CURRENT_CORE].running;

    /* At the next task switch the scheduler will immediately change the
     * thread state (and we also force the task switch to happen). */
    if (timeout)
    {
#ifdef THREAD_EXTRA_CHECKS
        /* Only one thread can be stored in "list" when the thread is also
         * kept on another list (such as the core's list of sleeping tasks). */
        if (*list)
            panicf("Blocking violation T->*B");
#endif

        current->statearg =
            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
        *list = current;

        /* Now force a task switch and block until we have been woken up
         * by another thread or the timeout is reached. */
        switch_thread(true, NULL);

        /* If the timeout is reached, we must set the list back to NULL here. */
        *list = NULL;
    }
    else
    {
#ifdef THREAD_EXTRA_CHECKS
        /* We are not allowed to mix blocking types in one queue. */
        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
            panicf("Blocking violation B->*T");
#endif

        current->statearg = SET_STATE(STATE_BLOCKED, 0);

        /* Now force a task switch and block until we have been woken up
         * by another thread. */
        switch_thread(true, list);
    }

    /* Clear all flags to indicate we are up and running again. */
    current->statearg = 0;
}

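/*---------------------------------------------------------------------------
 * Wake up the first thread blocked on the given list. Plain blocked threads
 * are moved back to the running list; threads blocked with a timeout have
 * their timeout cleared so the scheduler wakes them on its next pass.
 *---------------------------------------------------------------------------
 */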
void wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread;

    /* Check if there is a blocked thread at all. */
    if (*list == NULL)
        return ;

    /* Wake up the last thread first. */
    thread = *list;

    /* Determine thread's current state. */
    switch (GET_STATE(thread->statearg))
    {
        case STATE_BLOCKED:
            /* Remove thread from the list of blocked threads and add it
             * to the scheduler's list of running processes. */
            remove_from_list(list, thread);
            add_to_list(&cores[CURRENT_CORE].running, thread);
            thread->statearg = 0;
            break;

        case STATE_BLOCKED_W_TMO:
            /* Just remove the timeout to cause scheduler to immediately
             * wake up the thread. */
            thread->statearg &= 0xC0000000;
            *list = NULL;
            break;

        default:
            /* Nothing to do. The thread has already been woken up, or its
             * state is neither blocked nor blocked with timeout. */
            return ;
    }
}

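/* Sketch of how a blocking queue might pair these primitives (the names
 * "queue_head", "producer" and "consumer" are illustrative only, not part
 * of this file):
 *
 *   static struct thread_entry *queue_head;   // waiters for new data
 *
 *   void consumer(void)
 *   {
 *       while (!data_available())
 *           block_thread(&queue_head, 0);     // sleep until woken
 *       // ... consume the data ...
 *   }
 *
 *   void producer(void)
 *   {
 *       // ... produce the data ...
 *       wakeup_thread(&queue_head);           // release one waiter
 *   }
 */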
/*---------------------------------------------------------------------------
 * Create a thread on the current core.
 * Returns a pointer to the thread entry on success, else NULL.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority))
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name IF_PRIO(, priority));
}

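/* Typical call, as a sketch (the stack array, its size and the thread
 * function name are illustrative, not defined in this file):
 *
 *   static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];
 *
 *   create_thread(my_thread_func, my_stack, sizeof(my_stack),
 *                 "my thread" IF_PRIO(, PRIORITY_BACKGROUND));
 */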
/*---------------------------------------------------------------------------
 * Create a thread on a specific core.
 * Returns a pointer to the thread entry on success, else NULL if no thread
 * slot is free.
 *---------------------------------------------------------------------------
 */
struct thread_entry*
    create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size,
                          const char *name IF_PRIO(, int priority))
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    int n;
    struct regs *regs;
    struct thread_entry *thread;

    for (n = 0; n < MAXTHREADS; n++)
    {
        if (cores[core].threads[n].name == NULL)
            break;
    }

    if (n == MAXTHREADS)
        return NULL;


    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread = &cores[core].threads[n];
    thread->name = name;
    thread->stack = stack;
    thread->stack_size = stack_size;
    thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->priority = priority;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, thread);

    regs = &thread->context;
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
    regs->started = 0;
#endif
    regs->start = (void*)function;

    return thread;
}

/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the thread entry as returned from create_thread(); a NULL
 * argument removes the current thread.
 *---------------------------------------------------------------------------
 */
void remove_thread(struct thread_entry *thread)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    /* Free the entry by removing thread name. */
    thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    highest_priority = 100;
#endif

    if (thread == cores[CURRENT_CORE].running)
    {
        remove_from_list(&cores[CURRENT_CORE].running, thread);
        switch_thread(false, NULL);
        return ;
    }

    if (thread == cores[CURRENT_CORE].sleeping)
        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);

    remove_from_list(NULL, thread);
}

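/*---------------------------------------------------------------------------
 * Change a thread's scheduling priority; a NULL thread means the current
 * one. The cached highest priority is reset so the scheduler re-evaluates
 * it on the next switch.
 *---------------------------------------------------------------------------
 */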
#ifdef HAVE_PRIORITY_SCHEDULING
void thread_set_priority(struct thread_entry *thread, int priority)
{
    if (thread == NULL)
        thread = cores[CURRENT_CORE].running;

    thread->priority = priority;
    highest_priority = 100;
}
#endif

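/*---------------------------------------------------------------------------
 * Initialise the threading structures for the current core and register the
 * already running "main" thread as thread 0.
 *---------------------------------------------------------------------------
 */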
void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    memset(cores, 0, sizeof cores);
    cores[core].sleeping = NULL;
    cores[core].running = NULL;
    cores[core].threads[0].name = main_thread_name;
    cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
    highest_priority = 100;
#endif
    add_to_list(&cores[core].running, &cores[core].threads[0]);

    /* In multiple core setups, each core has a different stack. There is
     * probably a much better way to do this. */
    if (core == CPU)
    {
        cores[CPU].threads[0].stack = stackbegin;
        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
    } else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        cores[COP].threads[0].stack = cop_stackbegin;
        cores[COP].threads[0].stack_size =
            (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
#if CONFIG_CPU == TCC730
    cores[core].threads[0].context.started = 1;
#else
    cores[core].threads[0].context.start = 0; /* thread 0 already running */
#endif
}

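/*---------------------------------------------------------------------------
 * Return the percentage of a thread's stack that has ever been used, judged
 * by how much of the DEADBEEF fill pattern has been overwritten.
 *---------------------------------------------------------------------------
 */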
int thread_stack_usage(const struct thread_entry *thread)
{
    unsigned int i;
    unsigned int *stackptr = thread->stack;

    for (i = 0;i < thread->stack_size/sizeof(int);i++)
    {
        if (stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread->stack_size - i * sizeof(int)) * 100) /
        thread->stack_size;
}

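/*---------------------------------------------------------------------------
 * Return the current scheduling state (STATE_*) of a thread.
 *---------------------------------------------------------------------------
 */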
int thread_get_status(const struct thread_entry *thread)
{
    return GET_STATE(thread->statearg);
}