forked from len0rd/rockbox
Thread API enhancements.

1) block_thread -> block_thread + block_thread_w_tmo -- this call was always
   used in distinct ways, so having one call with a conditional was ugly.
2) Enhance Slasheri's scheduler-controlled boost concept: now any thread may
   trigger a boost, which will last until that thread next sleeps.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@11509 a1c6a512-1295-4272-9138-f99709370657
commit 8a82892e52 (parent 806d8f3505)
5 changed files with 146 additions and 110 deletions
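
Change 1) is easiest to see from the caller's side. Here is a minimal sketch (hypothetical call sites, not code from this commit) of how an indefinite wait and a timed wait read with the split API, using the prototypes this diff adds to thread.h:

    #include "thread.h" /* block_thread(), block_thread_w_tmo() */

    static struct thread_entry *waiter = NULL; /* hypothetical wait-list head */

    /* Indefinite wait: no timeout parameter, no conditional inside the call. */
    static void wait_for_event(void)
    {
        block_thread(&waiter);
    }

    /* Timed wait: a distinct call replaces the old block_thread(&waiter, ticks). */
    static void wait_for_event_w_tmo(int ticks)
    {
        block_thread_w_tmo(&waiter, ticks);
    }

Change 2) shows up in thread.c below: trigger_cpu_boost() marks the calling thread's state word as boosted, and the scheduler drops the boost automatically the next time that thread sleeps.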
@@ -2151,7 +2151,11 @@ static bool audio_yield_codecs(void)
     while ((pcmbuf_is_crossfade_active() || pcmbuf_is_lowdata())
             && !ci.stop_codec && playing && !audio_filebuf_is_lowdata())
     {
-        sleep(1);
+        if (filling)
+            yield();
+        else
+            sleep(2);
+
         if (!queue_empty(&audio_queue))
             return true;
     }
@@ -1229,7 +1229,7 @@ bool create_playlist(void)
     if (fd < 0)
         return false;
 
-    cpu_boost_id(true, CPUBOOSTID_TREE);
+    trigger_cpu_boost();
 
     snprintf(filename, sizeof(filename), "%s",
              tc.currdir[1] ? tc.currdir : "/");
@@ -1237,8 +1237,6 @@ bool create_playlist(void)
     add_dir(filename, sizeof(filename), fd);
     close(fd);
 
-    cpu_boost_id(false, CPUBOOSTID_TREE);
-
     sleep(HZ);
 
     return true;
@@ -78,14 +78,22 @@ struct regs
 
 #endif /* !SIMULATOR */
 
-#define STATE_RUNNING 0
-#define STATE_BLOCKED 1
-#define STATE_SLEEPING 2
-#define STATE_BLOCKED_W_TMO 3
+#define STATE_RUNNING 0x00000000
+#define STATE_BLOCKED 0x20000000
+#define STATE_SLEEPING 0x40000000
+#define STATE_BLOCKED_W_TMO 0x60000000
 
-#define GET_STATE_ARG(state) (state & 0x3FFFFFFF)
-#define GET_STATE(state) ((state >> 30) & 3)
-#define SET_STATE(state,arg) ((state << 30) | (arg))
+#define THREAD_STATE_MASK 0x60000000
+#define STATE_ARG_MASK 0x1FFFFFFF
+#define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
+#define GET_STATE(state) (state & THREAD_STATE_MASK)
+#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
+#define CLEAR_STATE_ARG(var) (var &= ~STATE_ARG_MASK)
+
+#define STATE_BOOSTED 0x80000000
+#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
+#define SET_BOOST_STATE(var) (var |= STATE_BOOSTED)
 
 struct thread_entry {
 #ifndef SIMULATOR
@@ -133,7 +141,8 @@ void trigger_cpu_boost(void);
 void remove_thread(struct thread_entry *thread);
 void switch_thread(bool save_context, struct thread_entry **blocked_list);
 void sleep_thread(int ticks);
-void block_thread(struct thread_entry **thread, int timeout);
+void block_thread(struct thread_entry **thread);
+void block_thread_w_tmo(struct thread_entry **thread, int timeout);
 void wakeup_thread(struct thread_entry **thread);
 #ifdef HAVE_PRIORITY_SCHEDULING
 int thread_set_priority(struct thread_entry *thread, int priority);
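
The new constants encode everything in one 32-bit statearg word: bit 31 is the boost flag, bits 30 and 29 select the state, and the low 29 bits carry the state argument (a wakeup tick, for instance). A small host-side illustration, with the macros copied from the hunk above so it compiles standalone:

    #include <assert.h>

    /* Copied from the thread.h hunk above */
    #define STATE_SLEEPING    0x40000000
    #define THREAD_STATE_MASK 0x60000000
    #define STATE_ARG_MASK    0x1FFFFFFF
    #define STATE_BOOSTED     0x80000000
    #define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
    #define GET_STATE(state) (state & THREAD_STATE_MASK)
    #define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
    #define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
    #define SET_BOOST_STATE(var) (var |= STATE_BOOSTED)

    int main(void)
    {
        unsigned long statearg = 0;

        /* "Sleep until tick 1234": state and argument share one word. */
        SET_STATE(statearg, STATE_SLEEPING, 1234);
        assert(GET_STATE(statearg) == STATE_SLEEPING);
        assert(GET_STATE_ARG(statearg) == 1234);

        /* The boost flag sits above both masks, so setting it disturbs
         * neither the state nor the argument... */
        SET_BOOST_STATE(statearg);
        assert(GET_STATE(statearg) == STATE_SLEEPING);
        assert(GET_STATE_ARG(statearg) == 1234);

        /* ...but SET_STATE() assigns the whole word, so it clears the
         * boost bit; thread.c below is careful to order these. */
        SET_STATE(statearg, STATE_SLEEPING, 1234);
        assert(!STATE_IS_BOOSTED(statearg));
        return 0;
    }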
@@ -132,7 +132,7 @@ void queue_wait(struct event_queue *q, struct event *ev)
 {
     if (q->read == q->write)
     {
-        block_thread(&q->thread, 0);
+        block_thread(&q->thread);
     }
 
     *ev = q->events[(q->read++) & QUEUE_LENGTH_MASK];
@@ -142,7 +142,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 {
     if (q->read == q->write && ticks > 0)
     {
-        block_thread(&q->thread, ticks);
+        block_thread_w_tmo(&q->thread, ticks);
     }
 
     if (q->read != q->write)
@@ -469,7 +469,7 @@ void mutex_lock(struct mutex *m)
     if (m->locked)
     {
         /* Wait until the lock is open... */
-        block_thread(&m->thread, 0);
+        block_thread(&m->thread);
     }
 
     /* ...and lock it */
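
All three call sites are woken the same way. Below is a hedged sketch of the posting side that pairs with these waits, loosely modeled on how queue_post() uses wakeup_thread(); the event storage and interrupt locking are elided, and queue_post_sketch is a made-up name:

    /* Sketch only: the real posting code also stores the event and masks
     * interrupts; the point is the single wakeup path for both wait styles. */
    void queue_post_sketch(struct event_queue *q)
    {
        /* ... store the event at q->write ... */

        /* wakeup_thread() serves both blocking styles: it moves a
         * STATE_BLOCKED thread back to the run list, and for
         * STATE_BLOCKED_W_TMO it clears the timeout so the scheduler wakes
         * the thread at the next switch (see the thread.c hunk below). */
        wakeup_thread(&q->thread);
    }

Note that a given wait list must stick to one blocking style; with THREAD_EXTRA_CHECKS enabled (which this commit turns on), mixing them panics with a "Blocking violation" message.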
@@ -36,11 +36,11 @@ struct core_entry cores[NUM_CORES] IBSS_ATTR;
 static unsigned short highest_priority IBSS_ATTR;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-static bool cpu_boosted IBSS_ATTR;
+static int boosted_threads IBSS_ATTR;
 #endif
 
 /* Define to enable additional checks for blocking violations etc. */
-// #define THREAD_EXTRA_CHECKS
+#define THREAD_EXTRA_CHECKS
 
 static const char main_thread_name[] = "main";
 
@@ -52,9 +52,8 @@ extern int stackend[];
 extern int cop_stackbegin[];
 extern int cop_stackend[];
 #else
-/* The coprocessor stack is not set up in the bootloader code, but the
-   threading is. No threads are run on the coprocessor, so set up some dummy
-   stack */
+/* The coprocessor stack is not set up in the bootloader code, but the threading
+ * is. No threads are run on the coprocessor, so set up some dummy stack */
 int *cop_stackbegin = stackbegin;
 int *cop_stackend = stackend;
 #endif
@@ -71,7 +70,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
         ICODE_ATTR;
 
 static inline void store_context(void* addr) __attribute__ ((always_inline));
-static inline void load_context(const void* addr) __attribute__ ((always_inline));
+static inline void load_context(const void* addr)
+    __attribute__ ((always_inline));
 
 #if defined(CPU_ARM)
 /*---------------------------------------------------------------------------
@@ -188,8 +188,7 @@ static inline void load_context(const void* addr)
 
 #endif
 
-static void add_to_list(struct thread_entry **list,
-                        struct thread_entry *thread)
+static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
 {
     if (*list == NULL)
     {
@@ -255,6 +254,7 @@ void check_sleepers(void)
              * back to life again. */
             remove_from_list(&cores[CURRENT_CORE].sleeping, current);
             add_to_list(&cores[CURRENT_CORE].running, current);
+            current->statearg = 0;
 
             /* If there is no more processes in the list, break the loop. */
             if (cores[CURRENT_CORE].sleeping == NULL)
@@ -290,14 +290,6 @@ static inline void sleep_core(void)
         if (cores[CURRENT_CORE].running != NULL)
             break;
 
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-        if (cpu_boosted)
-        {
-            cpu_boost(false);
-            cpu_boosted = false;
-        }
-#endif
-
         /* Enter sleep mode to reduce power usage, woken up on interrupt */
 #ifdef CPU_COLDFIRE
         asm volatile ("stop #0x2000");
@@ -338,22 +330,33 @@ void profile_thread(void) {
 void change_thread_state(struct thread_entry **blocked_list)
 {
     struct thread_entry *old;
+    unsigned long new_state;
 
     /* Remove the thread from the list of running threads. */
     old = cores[CURRENT_CORE].running;
-    remove_from_list(&cores[CURRENT_CORE].running, old);
+    new_state = GET_STATE(old->statearg);
 
-    /* And put the thread into a new list of inactive threads. */
-    if (GET_STATE(old->statearg) == STATE_BLOCKED)
-        add_to_list(blocked_list, old);
-    else
-        add_to_list(&cores[CURRENT_CORE].sleeping, old);
+    /* Check if a thread state change has been requested. */
+    if (new_state)
+    {
+        /* Change running thread state and switch to next thread. */
+        remove_from_list(&cores[CURRENT_CORE].running, old);
+
+        /* And put the thread into a new list of inactive threads. */
+        if (new_state == STATE_BLOCKED)
+            add_to_list(blocked_list, old);
+        else
+            add_to_list(&cores[CURRENT_CORE].sleeping, old);
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-    /* Reset priorities */
-    if (old->priority == highest_priority)
-        highest_priority = 100;
+        /* Reset priorities */
+        if (old->priority == highest_priority)
+            highest_priority = 100;
 #endif
+    }
+    else
+        /* Switch to the next running thread. */
+        cores[CURRENT_CORE].running = old->next;
 }
 
 /*---------------------------------------------------------------------------
@@ -381,19 +384,10 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
         /* Check if the current thread stack is overflown */
         stackptr = cores[CURRENT_CORE].running->stack;
         if(stackptr[0] != DEADBEEF)
             panicf("Stkov %s", cores[CURRENT_CORE].running->name);
 
-        /* Check if a thread state change has been requested. */
-        if (cores[CURRENT_CORE].running->statearg)
-        {
-            /* Change running thread state and switch to next thread. */
-            change_thread_state(blocked_list);
-        }
-        else
-        {
-            /* Switch to the next running thread. */
-            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
-        }
+        /* Rearrange thread lists as needed */
+        change_thread_state(blocked_list);
     }
 
     /* Go through the list of sleeping task to check if we need to wake up
@@ -411,11 +405,11 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
             if (priority < highest_priority)
                 highest_priority = priority;
 
-            if (priority == highest_priority || (current_tick
-                - cores[CURRENT_CORE].running->last_run > priority * 8))
-            {
+            if (priority == highest_priority ||
+                (current_tick - cores[CURRENT_CORE].running->last_run >
+                 priority * 8))
                 break;
-            }
             cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
         }
 
@@ -434,63 +428,94 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
 
 void sleep_thread(int ticks)
 {
+    struct thread_entry *current;
+
+    current = cores[CURRENT_CORE].running;
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    if (STATE_IS_BOOSTED(current->statearg)) {
+        boosted_threads--;
+        if (!boosted_threads)
+            cpu_boost(false);
+    }
+#endif
+
     /* Set the thread's new state and timeout and finally force a task switch
      * so that scheduler removes thread from the list of running processes
      * and puts it in list of sleeping tasks. */
-    cores[CURRENT_CORE].running->statearg =
-        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
+    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
+
     switch_thread(true, NULL);
-
-    /* Clear all flags to indicate we are up and running again. */
-    cores[CURRENT_CORE].running->statearg = 0;
 }
 
-void block_thread(struct thread_entry **list, int timeout)
+void block_thread(struct thread_entry **list)
 {
     struct thread_entry *current;
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
 
-    /* At next task switch scheduler will immediately change the thread
-     * state (and we also force the task switch to happen). */
-    if (timeout)
-    {
-#ifdef THREAD_EXTRA_CHECKS
-        /* We can store only one thread to the "list" if thread is used
-         * in other list (such as core's list for sleeping tasks). */
-        if (*list)
-            panicf("Blocking violation T->*B");
-#endif
-
-        current->statearg =
-            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
-        *list = current;
-
-        /* Now force a task switch and block until we have been woken up
-         * by another thread or timeout is reached. */
-        switch_thread(true, NULL);
-
-        /* If timeout is reached, we must set list back to NULL here. */
-        *list = NULL;
-    }
-    else
-    {
-#ifdef THREAD_EXTRA_CHECKS
-        /* We are not allowed to mix blocking types in one queue. */
-        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
-            panicf("Blocking violation B->*T");
-#endif
-
-        current->statearg = SET_STATE(STATE_BLOCKED, 0);
-
-        /* Now force a task switch and block until we have been woken up
-         * by another thread or timeout is reached. */
-        switch_thread(true, list);
-    }
-
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* Keep the boosted state over indefinite block calls, because
+     * we are waiting until the earliest time that someone else
+     * completes an action */
+    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
+#endif
+
+#ifdef THREAD_EXTRA_CHECKS
+    /* We are not allowed to mix blocking types in one queue. */
+    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
+        panicf("Blocking violation B->*T");
+#endif
+
+    /* Set the state to blocked and ask the scheduler to switch tasks,
+     * this takes us off of the run queue until we are explicitly woken */
+    SET_STATE(current->statearg, STATE_BLOCKED, 0);
+    switch_thread(true, list);
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* Reset only the boosted flag to indicate we are up and running again. */
+    current->statearg = boost_flag;
+#else
     /* Clear all flags to indicate we are up and running again. */
     current->statearg = 0;
+#endif
+}
+
+void block_thread_w_tmo(struct thread_entry **list, int timeout)
+{
+    struct thread_entry *current;
+    /* Get the entry for the current running thread. */
+    current = cores[CURRENT_CORE].running;
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* A block with a timeout is a sleep situation, whatever we are waiting
+     * for _may or may not_ happen, regardless of boost state, (user input
+     * for instance), so this thread no longer needs to boost */
+    if (STATE_IS_BOOSTED(current->statearg)) {
+        boosted_threads--;
+        if (!boosted_threads)
+            cpu_boost(false);
+    }
+#endif
+
+#ifdef THREAD_EXTRA_CHECKS
+    /* We can store only one thread to the "list" if thread is used
+     * in other list (such as core's list for sleeping tasks). */
+    if (*list)
+        panicf("Blocking violation T->*B");
+#endif
+
+    /* Set the state to blocked with the specified timeout */
+    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
+
+    /* Set the "list" for explicit wakeup */
+    *list = current;
+
+    /* Now force a task switch and block until we have been woken up
+     * by another thread or timeout is reached. */
+    switch_thread(true, NULL);
+
+    /* It is now safe for another thread to block on this "list" */
+    *list = NULL;
 }
 
 void wakeup_thread(struct thread_entry **list)
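
The two functions treat an existing boost differently, as their comments explain: an indefinite block is expected to end as soon as someone else completes an action, so the boost flag is saved and restored around the block, while a timed block is treated like a sleep and gives the boost up. A hypothetical timeline making that concrete (boost_across_blocks and list are made-up names):

    static void boost_across_blocks(struct thread_entry **list)
    {
        trigger_cpu_boost();          /* STATE_BOOSTED set; boosted_threads
                                         goes 0 -> 1, so cpu_boost(true) fires */

        block_thread(list);           /* boost flag saved and restored around
                                         the block: still boosted on wakeup */

        block_thread_w_tmo(list, HZ); /* treated as a sleep: boosted_threads
                                         drops 1 -> 0 and cpu_boost(false)
                                         fires before blocking */
    }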
@@ -512,14 +537,11 @@ void wakeup_thread(struct thread_entry **list)
              * to the scheduler's list of running processes. */
             remove_from_list(list, thread);
             add_to_list(&cores[CURRENT_CORE].running, thread);
-            thread->statearg = 0;
-            break;
 
         case STATE_BLOCKED_W_TMO:
             /* Just remove the timeout to cause scheduler to immediately
              * wake up the thread. */
-            thread->statearg &= 0xC0000000;
-            *list = NULL;
+            thread->statearg = 0;
             break;
 
         default:
@@ -600,10 +622,12 @@ struct thread_entry*
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void)
 {
-    if (!cpu_boosted)
+    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
     {
-        cpu_boost(true);
-        cpu_boosted = true;
+        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
+        if (!boosted_threads)
+            cpu_boost(true);
+        boosted_threads++;
     }
 }
 #endif
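
This reference count is what lets the create_playlist() hunk near the top drop its explicit cpu_boost_id(false, CPUBOOSTID_TREE) call: the boost now belongs to the thread's state word, and the first sleep hands it back. A sketch of that pattern under the new scheme, with the directory work elided and boost_until_sleep a made-up name:

    /* Pattern from the create_playlist() hunk above. */
    static void boost_until_sleep(void)
    {
        trigger_cpu_boost();  /* boost while generating the playlist */

        /* ... add_dir(filename, sizeof(filename), fd); close(fd); ... */

        sleep(HZ);            /* sleep_thread() sees STATE_BOOSTED, decrements
                                 boosted_threads, and calls cpu_boost(false)
                                 once no boosted thread remains */
    }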
@@ -675,12 +699,12 @@ void init_threads(void)
     highest_priority = 100;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-    cpu_boosted = false;
+    boosted_threads = 0;
 #endif
     add_to_list(&cores[core].running, &cores[core].threads[0]);
 
-/* In multiple core setups, each core has a different stack. There is probably
-   a much better way to do this. */
+    /* In multiple core setups, each core has a different stack. There is
+     * probably a much better way to do this. */
     if (core == CPU)
     {
         cores[CPU].threads[0].stack = stackbegin;
@@ -688,7 +712,8 @@ void init_threads(void)
     } else {
 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
         cores[COP].threads[0].stack = cop_stackbegin;
-        cores[COP].threads[0].stack_size = (int)cop_stackend - (int)cop_stackbegin;
+        cores[COP].threads[0].stack_size =
+            (int)cop_stackend - (int)cop_stackbegin;
 #endif
     }
     cores[core].threads[0].context.start = 0; /* thread 0 already running */