Fix a hole in the scheduler where collisions between waking blocked threads in mutexes with interrupts waking blocked threads in message queues can occur. Queue posts will put the threads on a separate list that is then added to the running list with IRQs disabled on the next task switch or CPU wakeup. Basically no overhead for other operations. Seems a likely cause of my occasional observation of the backlight fade causing playback threads to stop running and a recently reported blocking violation upon USB plugging. Time will tell but banging the backlight on and off frequently hasn't hiccuped again for me on H120.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12915 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2007-03-26 03:24:36 +00:00
parent 5e2984ad80
commit 165f62d0cd
3 changed files with 70 additions and 7 deletions

View file

@ -119,6 +119,8 @@ struct core_entry {
struct thread_entry threads[MAXTHREADS];
struct thread_entry *running;
struct thread_entry *sleeping;
struct thread_entry *waking;
struct thread_entry **wakeup_list;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
int switch_to_irq_level;
#define STAY_IRQ_LEVEL -1
@ -193,6 +195,7 @@ void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
#endif
#endif
void wakeup_thread(struct thread_entry **thread);
void wakeup_thread_irq_safe(struct thread_entry **thread);
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);

View file

@ -126,7 +126,7 @@ static void queue_release_sender(struct thread_entry **sender,
intptr_t retval)
{
(*sender)->retval = retval;
wakeup_thread(sender);
wakeup_thread_irq_safe(sender);
#if 0
/* This should _never_ happen - there must never be multiple
threads in this list and it is a corrupt state */
@ -289,11 +289,14 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
}
#endif
wakeup_thread(&q->thread);
wakeup_thread_irq_safe(&q->thread);
set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
   to use this function - we only aim to protect the queue's integrity
   by turning interrupts off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

View file

@ -289,15 +289,56 @@ void check_sleepers(void)
}
}
/* Safely finish waking all threads potentially woken by interrupts -
 * statearg already zeroed in wakeup_thread. */
static void wake_list_awaken(void)
{
    /* Mask interrupts so an ISR cannot post to the waking list while it
       is being spliced into the running list. */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    /* No need for another check in the IRQ lock since IRQs are allowed
       only to add threads to the waking list. They won't be adding more
       until we're done here though. */
    struct thread_entry *waking = cores[CURRENT_CORE].waking;
    struct thread_entry *running = cores[CURRENT_CORE].running;
    if (running != NULL)
    {
        /* Place waking threads at the end of the running list. Both
           lists are circular and doubly-linked (the ->prev->next
           manipulation below relies on that): link waking's tail to
           running's head, running's tail to waking's head, then swap
           the two heads' prev pointers so the merged ring is closed. */
        struct thread_entry *tmp;
        waking->prev->next = running;
        running->prev->next = waking;
        tmp = running->prev;
        running->prev = waking->prev;
        waking->prev = tmp;
    }
    else
    {
        /* Just transfer the list as-is - just came out of a core
         * sleep. */
        cores[CURRENT_CORE].running = waking;
    }
    /* Done with waking list - clear it so ISRs start a fresh list and
       the next-switch check sees nothing pending. */
    cores[CURRENT_CORE].waking = NULL;
    set_irq_level(oldlevel);
}
static inline void sleep_core(void)
{
static long last_tick = 0;
#if CONFIG_CPU == S3C2440
int i;
#endif
for (;;)
{
/* We want to do these ASAP as it may change the decision to sleep
the core or the core has woken because an interrupt occurred
and posted a message to a queue. */
if (cores[CURRENT_CORE].waking != NULL)
wake_list_awaken();
if (last_tick != current_tick)
{
check_sleepers();
@ -397,7 +438,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
#ifdef SIMULATOR
/* Do nothing */
#else
/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */
@ -593,10 +634,12 @@ void wakeup_thread(struct thread_entry **list)
{
case STATE_BLOCKED:
/* Remove thread from the list of blocked threads and add it
* to the scheduler's list of running processes. */
* to the scheduler's list of running processes. List removal
* is safe since each object maintains its own list of
* sleepers and queues protect against reentrancy. */
remove_from_list(list, thread);
add_to_list(&cores[CURRENT_CORE].running, thread);
add_to_list(cores[CURRENT_CORE].wakeup_list, thread);
case STATE_BLOCKED_W_TMO:
/* Just remove the timeout to cause scheduler to immediately
* wake up the thread. */
@ -610,6 +653,18 @@ void wakeup_thread(struct thread_entry **list)
}
}
/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)
{
struct core_entry *core = &cores[CURRENT_CORE];
/* Switch wakeup lists and call wakeup_thread */
core->wakeup_list = &core->waking;
wakeup_thread(list);
/* Switch back to normal running list */
core->wakeup_list = &core->running;
}
/*---------------------------------------------------------------------------
* Create a thread
* If using a dual core architecture, specify which core to start the thread
@ -794,6 +849,8 @@ void init_threads(void)
memset(cores, 0, sizeof cores);
cores[core].sleeping = NULL;
cores[core].running = NULL;
cores[core].waking = NULL;
cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif