Mirror of https://github.com/Rockbox/rockbox.git, synced 2025-12-10 13:45:10 -05:00
Update sync queues to use a statically allocated return value in order to facilitate upcoming COP updates.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12881 a1c6a512-1295-4272-9138-f99709370657
parent a83a94ea9c
commit 0caf3b8cae
3 changed files with 75 additions and 79 deletions
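For orientation: a sender thread calls queue_send() and blocks until the receiving thread answers. Before this commit the reply value was parked in a struct queue_sender on the sender's stack; after it, the value is parked in the sender's own thread_entry, so it can live in statically allocated (non-cached) RAM. A minimal usage sketch, assuming the queue_wait()/queue_reply() half of the kernel API that this diff does not show; the event id, queue name and "work" are illustrative only:

#include "kernel.h"      /* struct event_queue, queue_send(), queue_wait() */

#define MY_REQUEST 1     /* hypothetical event id, for illustration only */

static struct event_queue worker_q;

/* Sender side: queue_send() blocks until the receiver replies and returns
 * the reply value - after this commit that value travels through
 * thread_get_current()->retval instead of a stack-local queue_sender. */
static intptr_t ask_worker(intptr_t arg)
{
    return queue_send(&worker_q, MY_REQUEST, arg);
}

/* Receiver side: queue_wait() dequeues the event, queue_reply() stores the
 * return value for the blocked sender and wakes it (assumed API, not part
 * of this diff). */
static void worker_thread(void)
{
    struct event ev;
    for (;;)
    {
        queue_wait(&worker_q, &ev);
        if (ev.id == MY_REQUEST)
            queue_reply(&worker_q, ev.data + 1); /* trivial stand-in "work" */
    }
}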
firmware/export/kernel.h
@@ -57,18 +57,13 @@ struct event
 };
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-struct queue_sender
-{
-    struct thread_entry *thread;
-    intptr_t retval;
-};
-
 struct queue_sender_list
 {
     /* If non-NULL, there is a thread waiting for the corresponding event */
-    struct queue_sender *senders[QUEUE_LENGTH];
+    /* Must be statically allocated to put in non-cached ram. */
+    struct thread_entry *senders[QUEUE_LENGTH];
     /* Send info for last message dequeued or NULL if replied or not sent */
-    struct queue_sender *curr_sender;
+    struct thread_entry *curr_sender;
 };
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
firmware/export/thread.h
@@ -20,6 +20,7 @@
 #define THREAD_H
 
 #include "config.h"
+#include <inttypes.h>
 #include <stdbool.h>
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
@@ -109,6 +110,9 @@ struct thread_entry {
     long last_run;
 #endif
     struct thread_entry *next, *prev;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    intptr_t retval;
+#endif
 };
 
 struct core_entry {
@@ -127,6 +131,44 @@ struct core_entry {
 #define IF_PRIO(empty, type)
 #endif
 
+/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
+ * Just use it for ARM, Coldfire and whatever else well...why not?
+ */
+
+/* Macros generate better code than an inline function is this case */
+#if defined (CPU_PP) || defined (CPU_ARM)
+#define test_and_set(x_, v_) \
+({ \
+    uint32_t old; \
+    asm volatile ( \
+        "swpb %[old], %[v], [%[x]] \r\n" \
+        : [old]"=r"(old) \
+        : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
+    ); \
+    old; \
+})
+#elif defined (CPU_COLDFIRE)
+#define test_and_set(x_, v_) \
+({ \
+    uint8_t old; \
+    asm volatile ( \
+        "bset.l %[v], (%[x]) \r\n" \
+        "sne.b %[old] \r\n" \
+        : [old]"=d,d"(old) \
+        : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
+    ); \
+    old; \
+})
+#else
+/* default for no asm version */
+#define test_and_set(x_, v_) \
+({ \
+    uint32_t old = *(uint32_t *)x_; \
+    *(uint32_t *)x_ = v_; \
+    old; \
+})
+#endif
+
 struct thread_entry*
     create_thread(void (*function)(void), void* stack, int stack_size,
                   const char *name IF_PRIO(, int priority)
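For reference, the test_and_set() macros moved into thread.h above atomically write v_ and hand back the word's previous value, which is what makes them usable as a lock primitive. A minimal sketch of that usage, with hypothetical spin_lock()/spin_unlock() names; the kernel's real mutex_lock() (see the kernel.c hunks below) blocks the thread instead of spinning:

#include <inttypes.h>
#include "thread.h"   /* test_and_set() lives here after this commit */

static volatile uint32_t lock_word = 0;   /* illustrative lock flag */

static void spin_lock(void)
{
    /* test_and_set() returns the old value: 0 means the flag was clear
     * and this caller now owns the lock; nonzero means somebody holds it. */
    while (test_and_set(&lock_word, 1) != 0)
        ;   /* busy-wait; Rockbox's mutex_lock() blocks the thread instead */
}

static void spin_unlock(void)
{
    lock_word = 0;
}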
firmware/kernel.c
@@ -101,7 +101,7 @@ static void queue_fetch_sender(struct queue_sender_list *send,
                                unsigned int i)
 {
     int old_level = set_irq_level(HIGHEST_IRQ_LEVEL);
-    struct queue_sender **spp = &send->senders[i];
+    struct thread_entry **spp = &send->senders[i];
 
     if (*spp)
     {
@@ -114,11 +114,11 @@ static void queue_fetch_sender(struct queue_sender_list *send,
 
 /* Puts the specified return value in the waiting thread's return value
    and wakes the thread - a sender should be confirmed to exist first */
-static void queue_release_sender(struct queue_sender **sender,
+static void queue_release_sender(struct thread_entry **sender,
                                  intptr_t retval)
 {
     (*sender)->retval = retval;
-    wakeup_thread(&(*sender)->thread);
+    wakeup_thread(sender);
     *sender = NULL;
 }
 
@@ -131,8 +131,9 @@ static void queue_release_all_senders(struct event_queue *q)
     unsigned int i;
     for(i = q->read; i != q->write; i++)
     {
-        struct queue_sender **spp =
+        struct thread_entry **spp =
             &q->send->senders[i & QUEUE_LENGTH_MASK];
+
         if(*spp)
         {
             queue_release_sender(spp, 0);
@@ -261,7 +262,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
     {
-        struct queue_sender **spp = &q->send->senders[wr];
+        struct thread_entry **spp = &q->send->senders[wr];
 
         if (*spp)
         {
@@ -280,14 +281,12 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
 
     q->events[wr].id = id;
     q->events[wr].data = data;
 
     if(q->send)
     {
-        struct queue_sender **spp = &q->send->senders[wr];
-        struct queue_sender sender;
-
+        struct thread_entry **spp = &q->send->senders[wr];
         if (*spp)
         {
@@ -295,17 +294,15 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
             queue_release_sender(spp, 0);
         }
 
-        *spp = &sender;
-        sender.thread = NULL;
 
         wakeup_thread(&q->thread);
-        set_irq_level_and_block_thread(&sender.thread, oldlevel);
-        return sender.retval;
+        set_irq_level_and_block_thread(spp, oldlevel);
+        return thread_get_current()->retval;
     }
 
     /* Function as queue_post if sending is not enabled */
     wakeup_thread(&q->thread);
     set_irq_level(oldlevel);
 
     return 0;
 }
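Net effect of the two hunks above: the sender no longer parks a struct queue_sender on its stack; its own thread_entry goes into senders[wr], and the reply lands in that thread_entry's statically allocated retval field, which is what the commit message means by a statically allocated return value. A rough sketch of the receiver-side flow under that reading; apart from queue_fetch_sender() and queue_release_sender() (both shown above), the names here are illustrative, not lines from this commit:

/* Illustrative only - roughly what the dequeue/reply path amounts to now. */
static void handle_one_message(struct event_queue *q, unsigned int rd)
{
    /* Dequeuing moves the waiting sender (now a struct thread_entry *)
     * from senders[rd] into curr_sender - queue_fetch_sender() above. */
    queue_fetch_sender(q->send, rd);

    /* ... act on q->events[rd] ... */

    /* Replying writes the sender's thread_entry retval and wakes it -
     * queue_release_sender() above. */
    if (q->send->curr_sender)
        queue_release_sender(&q->send->curr_sender, 0 /* reply value */);
}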
@@ -363,7 +360,7 @@ void queue_remove_from_head(struct event_queue *q, long id)
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
     {
-        struct queue_sender **spp = &q->send->senders[rd];
+        struct thread_entry **spp = &q->send->senders[rd];
 
         if (*spp)
         {
@@ -681,44 +678,6 @@ void mutex_init(struct mutex *m)
     m->thread = NULL;
 }
 
-/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
- * Just use it for ARM, Coldfire and whatever else well...why not?
- */
-
-/* Macros generate better code than an inline function is this case */
-#if defined (CPU_PP) || defined (CPU_ARM)
-#define test_and_set(x_, v_) \
-({ \
-    uint32_t old; \
-    asm volatile ( \
-        "swpb %[old], %[v], [%[x]] \r\n" \
-        : [old]"=r"(old) \
-        : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
-    ); \
-    old; \
-})
-#elif defined (CPU_COLDFIRE)
-#define test_and_set(x_, v_) \
-({ \
-    uint8_t old; \
-    asm volatile ( \
-        "bset.l %[v], (%[x]) \r\n" \
-        "sne.b %[old] \r\n" \
-        : [old]"=d,d"(old) \
-        : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
-    ); \
-    old; \
-})
-#else
-/* default for no asm version */
-#define test_and_set(x_, v_) \
-({ \
-    uint32_t old = *(uint32_t *)x_; \
-    *(uint32_t *)x_ = v_; \
-    old; \
-})
-#endif
-
 void mutex_lock(struct mutex *m)
 {
     if (test_and_set(&m->locked, 1))