
Certain data accesses in the kernel must have volatile semantics to be correct, rather than depending on the whims of the compiler. Also change queue clearing to simply catch read up to write, instead of resetting both to 0, so that queue_count and queue_empty return sane results under concurrency. Binsize may increase slightly, depending on whether the compiler's output was already as intended everywhere; the incorrect output was unlikely to cause real problems anyway.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28909 a1c6a512-1295-4272-9138-f99709370657
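
For illustration, a minimal sketch of the clearing change the message describes. The function names match the kernel API the message mentions (queue_clear, queue_count, queue_empty), but the bodies are simplified: the real implementations also serialize with the kernel's locking, which is omitted here.

#include <stdbool.h>

/* Trimmed-down event_queue: only the fields the index arithmetic needs. */
struct event_queue
{
    unsigned int volatile read;  /* head of queue */
    unsigned int volatile write; /* tail of queue */
};

/* Old clearing scheme: q->read = 0; q->write = 0; -- two separate
 * stores, so a concurrent reader could briefly observe read and write
 * in an inconsistent pairing and compute a garbage count. */

/* New scheme: one store that advances read to meet write. A reader
 * sees either the old read (events still pending) or the new one
 * (queue empty), never an inconsistent pair. */
void queue_clear(struct event_queue *q)
{
    q->read = q->write;
}

unsigned int queue_count(const struct event_queue *q)
{
    /* Unsigned subtraction stays correct across index wraparound. */
    return q->write - q->read;
}

bool queue_empty(const struct event_queue *q)
{
    return q->read == q->write;
}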
Author: Michael Sevakis
Date:   2010-12-27 10:05:09 +00:00
Parent: 479414facc
Commit: 7b4eb44395

3 changed files with 54 additions and 38 deletions


@@ -106,7 +106,7 @@ struct queue_sender_list
     struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
     struct thread_entry *list;                  /* list of senders in map */
     /* Send info for last message dequeued or NULL if replied or not sent */
-    struct thread_entry *curr_sender;
+    struct thread_entry * volatile curr_sender;
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker blocker;
 #endif
@@ -126,10 +126,10 @@ struct event_queue
 {
     struct thread_entry *queue;                 /* waiter list */
     struct queue_event events[QUEUE_LENGTH];    /* list of events */
-    unsigned int read;                          /* head of queue */
-    unsigned int write;                         /* tail of queue */
+    unsigned int volatile read;                 /* head of queue */
+    unsigned int volatile write;                /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    struct queue_sender_list *send;             /* list of threads waiting for
+    struct queue_sender_list * volatile send;   /* list of threads waiting for
                                                    reply to an event */
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker *blocker_p;                  /* priority inheritance info
@@ -171,7 +171,7 @@ struct semaphore
 struct wakeup
 {
     struct thread_entry *queue;                 /* waiter list */
-    bool signalled;                             /* signalled status */
+    bool volatile signalled;                    /* signalled status */
     IF_COP( struct corelock cl; )               /* multiprocessor sync */
 };
 #endif
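
One detail worth noting in the diff above is where the qualifier sits in the pointer members: struct thread_entry * volatile curr_sender makes the pointer itself volatile, so every access to curr_sender is a real memory access, whereas volatile struct thread_entry *curr_sender would instead qualify the object pointed to. A short sketch of the distinction (the member names here are illustrative, not from the Rockbox source):

struct thread_entry; /* opaque, as in the kernel headers */

struct example
{
    /* The pointer itself is volatile: loads and stores of p_vol
     * cannot be cached or elided by the compiler. This is the form
     * used for curr_sender and send in the diff above. */
    struct thread_entry * volatile p_vol;

    /* The pointee is volatile; the pointer value itself may still
     * be held in a register across accesses. */
    volatile struct thread_entry *vol_p;
};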