1
0
Fork 0
forked from len0rd/rockbox

Remove the event object in the kernel since it's rather extraneous at the moment. This makes the codecs and the plugins incompatible, so update fully.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@18867 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2008-10-23 13:13:00 +00:00
parent 188e898e3c
commit effceea229
10 changed files with 15 additions and 170 deletions

View file

@ -1264,122 +1264,6 @@ void semaphore_release(struct semaphore *s)
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
/****************************************************************************
* Simple event functions ;)
****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
/* Prepare an event object for first use: both wait queues start empty,
 * and the initial signaled state plus auto-reset behavior are taken
 * from the caller's flag bits (STATE_SIGNALED, EVENT_AUTOMATIC). */
void event_init(struct event *e, unsigned int flags)
{
    /* nobody can be blocked on a freshly initialized event */
    e->queues[STATE_SIGNALED] = NULL;
    e->queues[STATE_NONSIGNALED] = NULL;
    /* auto-reset mode if requested; stored as plain 0/1 */
    e->automatic = (flags & EVENT_AUTOMATIC) != 0;
    /* begin signaled or not, per the flags */
    e->state = flags & STATE_SIGNALED;
    corelock_init(&e->cl);
}
/* Block the calling thread until the event is in for_state
 * (STATE_SIGNALED or STATE_NONSIGNALED).
 *
 * For an automatic (auto-reset) event, a wait for NONSIGNALED is always
 * satisfied immediately, and a satisfied wait for SIGNALED consumes the
 * signal: the event is flipped back to NONSIGNALED before returning.
 * For a manual event, the call returns immediately if the event is
 * already in the requested state. Otherwise the thread blocks on the
 * queue for that state until event_set_state() wakes it. */
void event_wait(struct event *e, unsigned int for_state)
{
    struct thread_entry *current;

    corelock_lock(&e->cl);

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            corelock_unlock(&e->cl);
            return;
        }
        /* block until state matches */
    }
    else if(for_state == e->state)
    {
        /* the state being waited for is the current state */
        corelock_unlock(&e->cl);
        return;
    }

    /* block until state matches what callers requests */
    current = cores[CURRENT_CORE].running;
    /* NOTE(review): obj_cl presumably lets the scheduler manage this
     * object's corelock across the block on multicore — confirm */
    IF_COP( current->obj_cl = &e->cl; )
    /* enqueue on the wait list for the state this thread wants */
    current->bqp = &e->queues[for_state];

    /* IRQs must be off while marking the thread blocked; the corelock
     * is released before yielding so another core can signal us */
    disable_irq();
    block_thread(current);

    corelock_unlock(&e->cl);

    /* turn control over to next thread */
    switch_thread();
}
/* Drive the event to STATE_SIGNALED or STATE_NONSIGNALED, waking any
 * threads blocked waiting for the new state. No-op if the state is
 * already `state`.
 *
 * Automatic (auto-reset) events "pulse" on signal: one waiting thread
 * (if any) is handed the signal and the event returns to NONSIGNALED;
 * with no waiter the event stays signaled for the next event_wait().
 * Manual events wake every thread queued for the new state. */
void event_set_state(struct event *e, unsigned int state)
{
    unsigned int result;
    int oldlevel;

    corelock_lock(&e->cl);

    if(e->state == state)
    {
        /* no change */
        corelock_unlock(&e->cl);
        return;
    }

    /* default: no switch needed (only tracked with priority scheduling) */
    IF_PRIO( result = THREAD_OK; )

    oldlevel = disable_irq_save();

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            /* no thread should have ever blocked for nonsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S\n");
            /* pass to next thread and keep unsignaled - "pulse" */
            result = wakeup_thread(&e->queues[STATE_SIGNALED]);
            /* a woken waiter consumed the signal; otherwise latch it */
            e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            e->state = STATE_SIGNALED;
            IF_PRIO( result = )
            thread_queue_wake(&e->queues[STATE_SIGNALED]);
        }
    }
    else
    {
        /* release all threads waiting for nonsignaled */
        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS\n");

        e->state = STATE_NONSIGNALED;
        IF_PRIO( result = )
        thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
    }

    restore_irq(oldlevel);
    corelock_unlock(&e->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    /* NOTE(review): THREAD_SWITCH presumably means a higher-priority
     * thread was just woken — yield to it now; confirm against
     * wakeup_thread()'s contract */
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
#ifdef HAVE_WAKEUP_OBJECTS
/****************************************************************************
* Lightweight IRQ-compatible wakeup object
@ -1456,4 +1340,3 @@ int wakeup_signal(struct wakeup *w)
return ret;
}
#endif /* HAVE_WAKEUP_OBJECTS */