forked from len0rd/rockbox
Be sure to register a new mutex owner _before_ waking it. This isn't an issue now, but it would be with mutex recursion on a mutex used by more than one core, where ownership transfer and critical-section entry/recursion are allowed to run in parallel (by design). TODO: add a true exchange to wakeup_thread, but that's not really important for the time being.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15251 a1c6a512-1295-4272-9138-f99709370657
parent d4f382252d
commit 6fac8fcc93
1 changed file with 8 additions and 1 deletion
@@ -1076,7 +1076,13 @@ void mutex_unlock(struct mutex *m)
 #endif
 
     /* transfer to next queued thread if any */
-    m->thread = wakeup_thread_no_listlock(&m->queue);
+
+    /* This can become busy using SWP but is safe since only one thread
+       will be changing things at a time. Allowing timeout waits will
+       change that however but not now. There is also a hazard the thread
+       could be killed before performing the wakeup but that's just
+       irresponsible. :-) */
+    m->thread = m->queue;
 
     if(m->thread == NULL)
     {
@@ -1087,6 +1093,7 @@ void mutex_unlock(struct mutex *m)
     }
     else /* another thread is waiting - remain locked */
     {
+        wakeup_thread_no_listlock(&m->queue);
 #if CONFIG_CORELOCK == SW_CORELOCK
         corelock_unlock(&m->cl);
 #elif CONFIG_CORELOCK == CORELOCK_SWAP
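For readers skimming the diff, here is a minimal C sketch of the ordering the change enforces: the head of the wait queue is registered as the owner before the wakeup is issued, so a thread that resumes immediately on another core already sees itself in m->thread. The struct mutex_sketch and thread_entry definitions and the stub wakeup_thread_no_listlock below are stand-ins invented for this sketch; the real kernel types, the corelock handling, and the no-waiter release path differ in detail.

#include <stdio.h>

/* Stand-in types, invented for this sketch only. */
struct thread_entry { const char *name; };

struct mutex_sketch
{
    struct thread_entry *queue;   /* head of the wait queue */
    struct thread_entry *thread;  /* current owner */
};

/* Stub standing in for the kernel's wakeup_thread_no_listlock(). */
static void wakeup_thread_no_listlock(struct thread_entry **list)
{
    printf("waking %s (already registered as owner)\n", (*list)->name);
    *list = NULL; /* the real code unlinks the thread from the queue */
}

/* The ordering this commit enforces: transfer ownership first, wake second. */
static void mutex_unlock_sketch(struct mutex_sketch *m)
{
    /* Register the next owner BEFORE waking it, so that if the woken
       thread resumes instantly on another core it already observes
       itself in m->thread. */
    m->thread = m->queue;

    if (m->thread != NULL)
    {
        /* Mutex stays locked on behalf of the new owner; just wake it. */
        wakeup_thread_no_listlock(&m->queue);
    }
    /* else: no waiter - the real code clears the locked state here. */
}

int main(void)
{
    struct thread_entry waiter = { "waiter" };
    struct mutex_sketch m = { &waiter, NULL };
    mutex_unlock_sketch(&m);
    printf("owner is now %s\n", m.thread->name);
    return 0;
}

The old code assigned m->thread from the wakeup's return value, so on a multi-core build the woken thread could briefly run while m->thread still pointed at the previous owner; doing the assignment first, as the diff above does, closes that window.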