
Make scheduler functions thread-safe core-wise. A big step towards running playback on the COP (not yet possible, because more protection is needed at the file-system level).

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12926 a1c6a512-1295-4272-9138-f99709370657
Miika Pekkarinen 2007-03-26 16:55:17 +00:00
parent 6c487eb5d1
commit 66258a30a4
10 changed files with 368 additions and 148 deletions
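
The core of the patch is a single cross-core lock guarded by a per-core "already holding it" flag. Below is a condensed, self-contained sketch of that scheme for orientation; names mirror the thread.c hunk further down, while the fixed CURRENT_CORE constant and the GCC __sync_lock_test_and_set stand-in for test_and_set() are assumptions for illustration only.

/* Condensed sketch of the core-wise lock this commit introduces.
 * NUM_CORES, CURRENT_CORE and test_and_set() are stubbed so the sketch
 * compiles standalone; the real definitions live in the firmware. */
#include <stdbool.h>

#define NUM_CORES    2
#define CURRENT_CORE 0   /* stub; the real macro reads the core ID register */

static long cores_locked;                /* word shared by CPU and COP */
static bool lock_issued[NUM_CORES];      /* per-core "I already hold it" flag */

static long test_and_set(long *addr, long value)
{
    return __sync_lock_test_and_set(addr, value);  /* atomic swap (GCC builtin) */
}

void lock_cores(void)
{
    /* The flag makes the lock effectively recursive per core: nested
     * scheduler paths (queue_wait -> block_thread -> switch_thread) can
     * all call lock_cores(), but only the outermost call spins. */
    if (!lock_issued[CURRENT_CORE])
    {
        while (test_and_set(&cores_locked, 1))
            ;   /* busy-wait until the other core releases the lock */
        lock_issued[CURRENT_CORE] = true;
    }
}

void unlock_cores(void)
{
    if (lock_issued[CURRENT_CORE])
    {
        lock_issued[CURRENT_CORE] = false;
        cores_locked = 0;   /* plain store releases the lock */
    }
}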

View file

@@ -92,7 +92,7 @@
 /*---------------------------------------------------*/
 extern int ata_device;
 extern int ata_io_address;
-extern struct core_entry cores[NUM_CORES];
+extern struct thread_entry threads[MAXTHREADS];
 
 #ifndef SIMULATOR
 static char thread_status_char(int status)
@@ -112,34 +112,33 @@ static char thread_status_char(int status)
 #else
 #define IF_COP2(...)
 #endif
 
-/* the MSB of thread_ids[..] is the core, so this will need changing
-   if we ever get a target with more than 2 cores...
-   The next 7 bits are used for the thread number on that core...
-   SO, MAXTHREADS must be kept under 256... which shouldnt be a problem */
-static unsigned char thread_ids[NUM_CORES * MAXTHREADS];
-
 static char* dbg_os_getname(int selected_item, void * data, char *buffer)
 {
     (void)data;
     struct thread_entry *thread = NULL;
     int status, usage;
-    int core = (thread_ids[selected_item]&0x80)>>7;
-    int thread_number = thread_ids[selected_item]&0x7F;
-    thread = &cores[core].threads[thread_number];
-
-    if (thread == NULL)
-        return "";
+    thread = &threads[selected_item];
+
+    if (thread->name == NULL)
+    {
+        snprintf(buffer, MAX_PATH, "%2d: ---", selected_item);
+        return buffer;
+    }
 
     usage = thread_stack_usage(thread);
     status = thread_get_status(thread);
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-    snprintf(buffer, MAX_PATH, IF_COP2("(%d) ") "%c%c %d %2d%% %s",
-             IF_COP2(core,)
+    snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %d %2d%% %s",
+             selected_item,
+             IF_COP2(thread->core,)
              (status == STATE_RUNNING) ? '*' : ' ',
             thread_status_char(status),
             thread->priority,
             usage, thread->name);
 #else
-    snprintf(buffer, MAX_PATH, IF_COP2("(%d) ") "%c%c %2d%% %s",
-             IF_COP2(core,)
+    snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %2d%% %s",
+             selected_item,
+             IF_COP2(thread->core,)
             (status == STATE_RUNNING) ? '*' : ' ',
            thread_status_char(status),
            usage, thread->name);
@@ -151,30 +150,12 @@ static char* dbg_os_getname(int selected_item, void * data, char *buffer)
 static bool dbg_os(void)
 {
     struct gui_synclist lists;
-    struct thread_entry *thread = NULL;
-    int action, i;
-    int thread_count = 0;
-    int core = 0;
-
-#if NUM_CORES > 1
-    for(core = 0; core < NUM_CORES; core++)
-    {
-#endif
-        for(i = 0;i < MAXTHREADS; i++)
-        {
-            thread = &cores[core].threads[i];
-            if (thread->name != NULL)
-            {
-                thread_ids[thread_count] = (core<<7)|i;
-                thread_count++;
-            }
-        }
-#if NUM_CORES > 1
-    }
-#endif
+    int action;
 
     gui_synclist_init(&lists, dbg_os_getname, NULL, false, 1);
     gui_synclist_set_title(&lists, IF_COP2("Core and ") "Stack usage:", NOICON);
     gui_synclist_set_icon_callback(&lists, NULL);
-    gui_synclist_set_nb_items(&lists, thread_count);
+    gui_synclist_set_nb_items(&lists, MAXTHREADS);
 
     action_signalscreenchange();
     while(1)
     {

View file

@@ -3806,7 +3806,7 @@ void audio_init(void)
     codec_thread_p = create_thread(
             codec_thread, codec_stack, sizeof(codec_stack),
             codec_thread_name IF_PRIO(, PRIORITY_PLAYBACK)
-            IF_COP(, COP, true));
+            IF_COP(, CPU, true));
 
     create_thread(audio_thread, audio_stack, sizeof(audio_stack),
                   audio_thread_name IF_PRIO(, PRIORITY_BUFFERING)

View file

@@ -37,6 +37,14 @@
 #include "lcd-remote.h"
 #endif
 
+#if 0
+/* Button events older than MAX_EVENT_AGE are ignored. Used to prevent,
+ * for example, the volume from rising uncontrollably when events are
+ * getting queued and the UI is lagging too much.
+ */
+#define MAX_EVENT_AGE HZ
+#endif
+
 struct event_queue button_queue;
 static long lastbtn;   /* Last valid button status */
@@ -290,6 +298,14 @@ long button_get(bool block)
     if ( block || pending_count )
     {
         queue_wait(&button_queue, &ev);
+
+#if 0
+        /* Ignore the event if it is too old; for simplicity, just wait
+         * for a new button_get() request. */
+        if (current_tick - ev.tick > MAX_EVENT_AGE)
+            return BUTTON_NONE;
+#endif
+
         return ev.id;
     }
@@ -318,6 +334,11 @@ void button_init(void)
     button_init_device();
 
     queue_init(&button_queue, true);
+
+    /* Disable the core-wise lock protection, which would otherwise kill the
+     * IRQ handler. Writing to the queue is no longer core-wise thread safe. */
+    queue_set_irq_safe(&button_queue, true);
+
     button_read();
     lastbtn = button_read();
     tick_add_task(button_tick);

View file

@@ -73,6 +73,9 @@ struct event_queue
     struct thread_entry *thread;
     unsigned int read;
     unsigned int write;
+#if NUM_CORES > 1
+    bool irq_safe;
+#endif
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     struct queue_sender_list *send;
 #endif
@@ -105,6 +108,11 @@ int tick_add_task(void (*f)(void));
 int tick_remove_task(void (*f)(void));
 
 extern void queue_init(struct event_queue *q, bool register_queue);
+#if NUM_CORES > 1
+extern void queue_set_irq_safe(struct event_queue *q, bool state);
+#else
+#define queue_set_irq_safe(q,state)
+#endif
 extern void queue_delete(struct event_queue *q);
 extern void queue_wait(struct event_queue *q, struct event *ev);
 extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
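
The intended call pattern for the new queue_set_irq_safe() hook, as the button and USB drivers elsewhere in this commit use it; a sketch only, with my_queue and my_driver_init() being hypothetical names:

/* Sketch: a queue posted to from interrupt context must opt out of the
 * core-wise locking (queue_set_irq_safe), trading cross-core thread
 * safety for IRQ safety. On single-core builds the call compiles away. */
static struct event_queue my_queue;          /* hypothetical queue */

void my_driver_init(void)                    /* hypothetical init hook */
{
    queue_init(&my_queue, true);
    queue_set_irq_safe(&my_queue, true);     /* this queue is written from IRQs */
}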

View file

@@ -58,7 +58,7 @@ bool detect_flashed_ramimage(void);
 bool detect_original_firmware(void);
 
 #if defined(HAVE_ADJUSTABLE_CPU_FREQ) \
-        && defined(ROCKBOX_HAS_LOGF)
+        && defined(ROCKBOX_HAS_LOGF) && (NUM_CORES == 1)
 #define CPU_BOOST_LOGGING
 #endif

View file

@@ -105,8 +105,11 @@ struct thread_entry {
     unsigned long statearg;
     unsigned short stack_size;
 #ifdef HAVE_PRIORITY_SCHEDULING
-    unsigned short priority;
-    unsigned long priority_x;
+    unsigned char priority;
+    unsigned char priority_x;
+# if NUM_CORES > 1
+    unsigned char core; /* The core this thread belongs to. */
+# endif
     long last_run;
 #endif
     struct thread_entry *next, *prev;
@@ -116,11 +119,18 @@ struct thread_entry {
 };
 
 struct core_entry {
-    struct thread_entry threads[MAXTHREADS];
     struct thread_entry *running;
     struct thread_entry *sleeping;
     struct thread_entry *waking;
     struct thread_entry **wakeup_list;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    long highest_priority;
+#endif
+#if NUM_CORES > 1
+    volatile bool lock_issued;
+    volatile bool kernel_running;
+#endif
+    long last_tick;
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     int switch_to_irq_level;
 #define STAY_IRQ_LEVEL -1
@@ -171,6 +181,14 @@ struct core_entry {
     })
 #endif
 
+#if NUM_CORES > 1
+inline void lock_cores(void);
+inline void unlock_cores(void);
+#else
+#define lock_cores(...)
+#define unlock_cores(...)
+#endif
+
 struct thread_entry*
     create_thread(void (*function)(void), void* stack, int stack_size,
                   const char *name IF_PRIO(, int priority)

View file

@@ -32,8 +32,8 @@ long current_tick NOCACHEDATA_ATTR = 0;
 static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
 
 /* This array holds all queues that are initiated. It is used for broadcast. */
-static struct event_queue *all_queues[32];
-static int num_queues;
+static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
+static int num_queues NOCACHEBSS_ATTR;
 
 void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;
@@ -163,7 +163,7 @@ void queue_enable_queue_send(struct event_queue *q,
                              struct queue_sender_list *send)
 {
     q->send = send;
-    memset(send, 0, sizeof(*send));
+    memset(send, 0, sizeof(struct queue_sender_list));
 }
 
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
@@ -173,6 +173,9 @@ void queue_init(struct event_queue *q, bool register_queue)
     q->read = 0;
     q->write = 0;
     q->thread = NULL;
+#if NUM_CORES > 1
+    q->irq_safe = false;
+#endif
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
 #endif
@@ -184,11 +187,28 @@ void queue_init(struct event_queue *q, bool register_queue)
     }
 }
 
+#if NUM_CORES > 1
+/**
+ * If IRQ mode is enabled, the core-wise locking mechanism is disabled,
+ * so accessing the queue is no longer thread safe from the other core.
+ * That locking mechanism would, however, also kill IRQ handlers.
+ *
+ * @param q struct of an event_queue
+ * @param state enable/disable IRQ mode
+ * @default state disabled
+ */
+void queue_set_irq_safe(struct event_queue *q, bool state)
+{
+    q->irq_safe = state;
+}
+#endif
+
 void queue_delete(struct event_queue *q)
 {
     int i;
     bool found = false;
 
+    lock_cores();
     wakeup_thread(&q->thread);
 
     /* Find the queue to be deleted */
@@ -219,15 +239,20 @@ void queue_delete(struct event_queue *q)
         num_queues--;
     }
+
+    unlock_cores();
 }
 
 void queue_wait(struct event_queue *q, struct event *ev)
 {
     unsigned int rd;
 
+    lock_cores();
+
     if (q->read == q->write)
     {
         block_thread(&q->thread);
+        lock_cores();
     }
 
     rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -240,13 +265,18 @@ void queue_wait(struct event_queue *q, struct event *ev)
         queue_fetch_sender(q->send, rd);
     }
 #endif
+
+    unlock_cores();
 }
 
 void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 {
+    lock_cores();
+
     if (q->read == q->write && ticks > 0)
     {
         block_thread_w_tmo(&q->thread, ticks);
+        lock_cores();
     }
 
     if (q->read != q->write)
@@ -266,12 +296,21 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
     {
         ev->id = SYS_TIMEOUT;
     }
+
+    unlock_cores();
 }
 
 void queue_post(struct event_queue *q, long id, intptr_t data)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
+    unsigned int wr;
+
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        lock_cores();
+#endif
+
+    wr = q->write++ & QUEUE_LENGTH_MASK;
 
     q->events[wr].id = id;
     q->events[wr].data = data;
@@ -290,7 +329,12 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
 #endif
 
     wakeup_thread_irq_safe(&q->thread);
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        unlock_cores();
+#endif
     set_irq_level(oldlevel);
 }
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -300,7 +344,12 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
 intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
+    unsigned int wr;
+
+    lock_cores();
+
+    wr = q->write++ & QUEUE_LENGTH_MASK;
 
     q->events[wr].id = id;
     q->events[wr].data = data;
@@ -321,6 +370,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     /* Function as queue_post if sending is not enabled */
     wakeup_thread(&q->thread);
 
+    unlock_cores();
     set_irq_level(oldlevel);
 
     return 0;
@@ -337,6 +387,7 @@ bool queue_in_queue_send(struct event_queue *q)
 /* Replies with retval to any dequeued message sent with queue_send */
 void queue_reply(struct event_queue *q, intptr_t retval)
 {
+    lock_cores();
     if(q->send && q->send->curr_sender)
     {
         int level = set_irq_level(HIGHEST_IRQ_LEVEL);
@@ -346,18 +397,37 @@ void queue_reply(struct event_queue *q, intptr_t retval)
         }
         set_irq_level(level);
     }
+    unlock_cores();
 }
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
 bool queue_empty(const struct event_queue* q)
 {
-    return ( q->read == q->write );
+    bool is_empty;
+
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        lock_cores();
+#endif
+
+    is_empty = ( q->read == q->write );
+
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        unlock_cores();
+#endif
+
+    return is_empty;
 }
 
 void queue_clear(struct event_queue* q)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        lock_cores();
+#endif
+
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     /* Release all threads waiting in the queue for a reply -
        dequeued sent message will be handled by owning thread */
@@ -366,6 +436,12 @@ void queue_clear(struct event_queue* q)
     q->read = 0;
     q->write = 0;
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        unlock_cores();
+#endif
+
     set_irq_level(oldlevel);
 }
 
@@ -373,6 +449,11 @@ void queue_remove_from_head(struct event_queue *q, long id)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        lock_cores();
+#endif
+
     while(q->read != q->write)
     {
         unsigned int rd = q->read & QUEUE_LENGTH_MASK;
@@ -397,6 +478,11 @@ void queue_remove_from_head(struct event_queue *q, long id)
         q->read++;
     }
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        unlock_cores();
+#endif
+
     set_irq_level(oldlevel);
 }
 
@@ -411,11 +497,21 @@ int queue_count(const struct event_queue *q)
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     int result = 0;
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        lock_cores();
+#endif
+
     if (q->read <= q->write)
         result = q->write - q->read;
     else
         result = QUEUE_LENGTH - (q->read - q->write);
 
+#if NUM_CORES > 1
+    if (!q->irq_safe)
+        unlock_cores();
+#endif
+
     set_irq_level(oldlevel);
 
     return result;
@@ -712,10 +808,14 @@ void mutex_lock(struct mutex *m)
 void mutex_unlock(struct mutex *m)
 {
+    lock_cores();
+
     if (m->thread == NULL)
         m->locked = 0;
     else
         wakeup_thread(&m->thread);
+
+    unlock_cores();
 }
 
 void spinlock_lock(struct mutex *m)
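
Every queue operation above repeats the same guard. Distilled into one place (a sketch leaning on the firmware's own set_irq_level() and the types from this patch, not code from the commit itself), the pattern is:

/* Distilled pattern from the kernel.c hunks above: mask IRQs first, then
 * take the cross-core lock only for queues that are NOT marked IRQ-safe.
 * An IRQ-safe queue may be touched from an interrupt handler, and spinning
 * on the core lock there could deadlock against the interrupted thread. */
static intptr_t queue_op_pattern(struct event_queue *q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    intptr_t result;

#if NUM_CORES > 1
    if (!q->irq_safe)
        lock_cores();
#endif

    result = 0;   /* ... read or modify q->read / q->write here ... */

#if NUM_CORES > 1
    if (!q->irq_safe)
        unlock_cores();
#endif

    set_irq_level(oldlevel);
    return result;
}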

View file

@@ -23,17 +23,20 @@
 #include "font.h"
 #include "system.h"
 #include "kernel.h"
+#include "thread.h"
 #include "timer.h"
 #include "inttypes.h"
 #include "string.h"
 
 #ifndef SIMULATOR
-long cpu_frequency = CPU_FREQ;
+long cpu_frequency NOCACHEBSS_ATTR = CPU_FREQ;
 #endif
 
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
-static int boost_counter = 0;
-static bool cpu_idle = false;
+static int boost_counter NOCACHEBSS_ATTR = 0;
+static bool cpu_idle NOCACHEBSS_ATTR = false;
+
+struct mutex boostctrl_mtx NOCACHEBSS_ATTR;
 
 int get_cpu_boost_counter(void)
 {
@@ -722,8 +725,9 @@ void set_cpu_frequency(long frequency)
 {
     unsigned long postmult;
 
-    if (CURRENT_CORE == CPU)
-    {
+    /* Using mutex or spinlock isn't safe here. */
+    while (test_and_set(&boostctrl_mtx.locked, 1)) ;
+
     if (frequency == CPUFREQ_NORMAL)
         postmult = CPUFREQ_NORMAL_MULT;
     else if (frequency == CPUFREQ_MAX)
@@ -747,7 +751,7 @@ void set_cpu_frequency(long frequency)
         /* Select PLL as clock source? */
         outl((inl(0x60006020) & 0x0fffff0f) | 0x20000070, 0x60006020);
 
-#if defined(IPOD_COLOR) || defined(IPOD_4G) || defined(IPOD_MINI) || defined(IRIVER_H10) || defined(IRIVER_H10_5GB)
+# if defined(IPOD_COLOR) || defined(IPOD_4G) || defined(IPOD_MINI) || defined(IRIVER_H10) || defined(IRIVER_H10_5GB)
         /* We don't know why the timer interrupt gets disabled on the PP5020
            based ipods, but without the following line, the 4Gs will freeze
           when CPU frequency changing is enabled.
@@ -761,8 +765,9 @@ void set_cpu_frequency(long frequency)
         /* unmask interrupt source */
         CPU_INT_EN |= TIMER1_MASK;
         COP_INT_EN |= TIMER1_MASK;
-#endif
-    }
+# endif
+
+    boostctrl_mtx.locked = 0;
 }
 #elif !defined(BOOTLOADER)
 void ipod_set_cpu_frequency(void)
@@ -804,6 +809,8 @@ void system_init(void)
             outl(-1, 0x60001038);
             outl(-1, 0x60001028);
             outl(-1, 0x6000101c);
+
+            spinlock_init(&boostctrl_mtx);
 
 #if (!defined HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES == 1)
             ipod_set_cpu_frequency();
 #endif
@@ -890,6 +897,7 @@ static void ipod_init_cache(void)
     outl(0x3, 0xcf004024);
 }
 #endif
+
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
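
set_cpu_frequency() above claims boostctrl_mtx with a raw test_and_set() rather than mutex_lock() or spinlock_lock(); the patch only notes that neither is safe there. The idiom, reduced to its skeleton (a sketch with a hypothetical function name; the PLL programming is elided):

/* Skeleton of the raw busy-wait used by set_cpu_frequency() above: the
 * mutex word is claimed with an atomic swap and released with a plain
 * store, never touching the scheduler. */
void freq_change_critical_section(struct mutex *mtx)
{
    while (test_and_set(&mtx->locked, 1))
        ;                         /* spin until the old value was 0 */

    /* ... reprogram the clock source / PLL here ... */

    mtx->locked = 0;              /* release without waking any thread */
}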

View file

@@ -28,13 +28,17 @@
 #include <profile.h>
 #endif
 
+#if NUM_CORES > 1
+# define IF_COP2(x) x
+#else
+# define IF_COP2(x) CURRENT_CORE
+#endif
+
 #define DEADBEEF ((unsigned int)0xdeadbeef)
 /* Cast to the machine int type, whose size could be < 4. */
 
 struct core_entry cores[NUM_CORES] IBSS_ATTR;
-#ifdef HAVE_PRIORITY_SCHEDULING
-static unsigned short highest_priority IBSS_ATTR;
-#endif
+struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 static int boosted_threads IBSS_ATTR;
 #endif
@@ -59,8 +63,29 @@ int *cop_stackend = stackend;
 #endif
 #endif
 
-#if (NUM_CORES > 1)
-bool IDATA_ATTR kernel_running_on_cop = false;
+#if NUM_CORES > 1
+static long cores_locked IBSS_ATTR;
+
+#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
+#define UNLOCK(...) cores_locked = 0
+
+inline void lock_cores(void)
+{
+    if (!cores[CURRENT_CORE].lock_issued)
+    {
+        LOCK();
+        cores[CURRENT_CORE].lock_issued = true;
+    }
+}
+
+inline void unlock_cores(void)
+{
+    if (cores[CURRENT_CORE].lock_issued)
+    {
+        cores[CURRENT_CORE].lock_issued = false;
+        UNLOCK();
+    }
+}
 #endif
 
 /* Conserve IRAM
@@ -326,7 +351,6 @@ static void wake_list_awaken(void)
 static inline void sleep_core(void)
 {
-    static long last_tick = 0;
 #if CONFIG_CPU == S3C2440
     int i;
 #endif
@@ -339,10 +363,10 @@ static inline void sleep_core(void)
         if (cores[CURRENT_CORE].waking != NULL)
             wake_list_awaken();
 
-        if (last_tick != current_tick)
+        if (cores[CURRENT_CORE].last_tick != current_tick)
         {
             check_sleepers();
-            last_tick = current_tick;
+            cores[CURRENT_CORE].last_tick = current_tick;
         }
 
         /* We must sleep until there is at least one process in the list
@@ -357,17 +381,22 @@
         and_b(0x7F, &SBYCR);
         asm volatile ("sleep");
 #elif defined (CPU_PP)
+        unlock_cores();
+
         /* This should sleep the CPU. It appears to wake by itself on
            interrupts */
         if (CURRENT_CORE == CPU)
             CPU_CTL = PROC_SLEEP;
         else
             COP_CTL = PROC_SLEEP;
+
+        lock_cores();
 #elif CONFIG_CPU == S3C2440
         CLKCON |= (1 << 2); /* set IDLE bit */
         for(i=0; i<10; i++); /* wait for IDLE */
         CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
 #endif
     }
 }
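
Note the ordering in the CPU_PP branch above: a core must drop the cross-core lock before halting, otherwise the other core would spin on it for the entire sleep; the lock is contended for again once an interrupt wakes the core. As a sketch (hypothetical helper name, registers as in the hunk above):

/* Sketch of the sleep sequence in sleep_core() for PP targets:
 * release the shared lock, halt this core, re-acquire on wakeup. */
static void sleep_this_core(void)
{
    unlock_cores();               /* never hold the core lock while halted */

    if (CURRENT_CORE == CPU)
        CPU_CTL = PROC_SLEEP;     /* halt; an interrupt wakes the core */
    else
        COP_CTL = PROC_SLEEP;

    lock_cores();                 /* contend for the lock again on wakeup */
}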
@@ -378,7 +407,7 @@ static int get_threadnum(struct thread_entry *thread)
     for (i = 0; i < MAXTHREADS; i++)
     {
-        if (&cores[CURRENT_CORE].threads[i] == thread)
+        if (&threads[i] == thread)
             return i;
     }
@@ -415,8 +444,8 @@ void change_thread_state(struct thread_entry **blocked_list)
 #ifdef HAVE_PRIORITY_SCHEDULING
         /* Reset priorities */
-        if (old->priority == highest_priority)
-            highest_priority = 100;
+        if (old->priority == cores[CURRENT_CORE].highest_priority)
+            cores[CURRENT_CORE].highest_priority = 100;
 #endif
     }
     else
@@ -439,6 +468,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
     /* Do nothing */
 #else
 
+    lock_cores();
+
     /* Begin task switching by saving our current context so that we can
      * restore the state of the current thread later to the point prior
      * to this call. */
@@ -479,14 +510,16 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
         {
             int priority = cores[CURRENT_CORE].running->priority;
 
-            if (priority < highest_priority)
-                highest_priority = priority;
+            if (priority < cores[CURRENT_CORE].highest_priority)
+                cores[CURRENT_CORE].highest_priority = priority;
 
-            if (priority == highest_priority ||
+            if (priority == cores[CURRENT_CORE].highest_priority ||
                 (current_tick - cores[CURRENT_CORE].running->last_run >
                  priority * 8) ||
                 cores[CURRENT_CORE].running->priority_x != 0)
+            {
                 break;
+            }
 
             cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
         }
@@ -496,6 +529,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
 #endif
 #endif
 
+    unlock_cores();
+
     /* And finally give control to the next thread. */
     load_context(&cores[CURRENT_CORE].running->context);
@@ -508,10 +543,13 @@ void sleep_thread(int ticks)
 {
     struct thread_entry *current;
 
+    lock_cores();
+
     current = cores[CURRENT_CORE].running;
 
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-    if (STATE_IS_BOOSTED(current->statearg)) {
+    if (STATE_IS_BOOSTED(current->statearg))
+    {
         boosted_threads--;
         if (!boosted_threads)
         {
@@ -524,12 +562,16 @@ void sleep_thread(int ticks)
      * so that scheduler removes thread from the list of running processes
      * and puts it in list of sleeping tasks. */
     SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
+
     switch_thread(true, NULL);
 }
 
 void block_thread(struct thread_entry **list)
 {
     struct thread_entry *current;
+
+    lock_cores();
+
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
@@ -567,11 +609,13 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
 
+    lock_cores();
+
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
     /* A block with a timeout is a sleep situation, whatever we are waiting
      * for _may or may not_ happen, regardless of boost state, (user input
      * for instance), so this thread no longer needs to boost */
-    if (STATE_IS_BOOSTED(current->statearg)) {
+    if (STATE_IS_BOOSTED(current->statearg))
+    {
         boosted_threads--;
         if (!boosted_threads)
         {
@@ -624,7 +668,9 @@ void wakeup_thread(struct thread_entry **list)
     /* Check if there is a blocked thread at all. */
     if (*list == NULL)
+    {
         return ;
+    }
 
     /* Wake up the last thread first. */
     thread = *list;
@@ -638,7 +684,7 @@ void wakeup_thread(struct thread_entry **list)
          * is safe since each object maintains its own list of
          * sleepers and queues protect against reentrancy. */
         remove_from_list(list, thread);
-        add_to_list(cores[CURRENT_CORE].wakeup_list, thread);
+        add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);
 
     case STATE_BLOCKED_W_TMO:
         /* Just remove the timeout to cause scheduler to immediately
@@ -653,6 +699,19 @@ void wakeup_thread(struct thread_entry **list)
     }
 }
 
+inline static int find_empty_thread_slot(void)
+{
+    int n;
+
+    for (n = 0; n < MAXTHREADS; n++)
+    {
+        if (threads[n].name == NULL)
+            return n;
+    }
+
+    return -1;
+}
+
 /* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
    before calling. */
 void wakeup_thread_irq_safe(struct thread_entry **list)
@@ -680,7 +739,7 @@ struct thread_entry*
     unsigned int i;
     unsigned int stacklen;
     unsigned int *stackptr;
-    int n;
+    int slot;
     struct regs *regs;
     struct thread_entry *thread;
@@ -697,7 +756,7 @@ struct thread_entry*
     /* If the kernel hasn't initialised on the COP (most likely due to an old
      * bootloader) then refuse to start threads on the COP
      */
-    if((core == COP) && !kernel_running_on_cop)
+    if ((core == COP) && !cores[core].kernel_running)
     {
         if (fallback)
             return create_thread(function, stack, stack_size, name
@@ -707,20 +766,12 @@ struct thread_entry*
     }
 #endif
 
-    for (n = 0; n < MAXTHREADS; n++)
-    {
-        if (cores[core].threads[n].name == NULL)
-            break;
-    }
+    lock_cores();
 
-    if (n == MAXTHREADS)
+    slot = find_empty_thread_slot();
+    if (slot < 0)
     {
-#if NUM_CORES > 1
-        if (fallback)
-            return create_thread(function, stack, stack_size, name
-                                 IF_PRIO(, priority) IF_COP(, 1 - core, fallback));
-        else
-#endif
+        unlock_cores();
         return NULL;
     }
@@ -733,7 +784,7 @@ struct thread_entry*
     }
 
     /* Store interesting information */
-    thread = &cores[core].threads[n];
+    thread = &threads[slot];
     thread->name = name;
     thread->stack = stack;
     thread->stack_size = stack_size;
@@ -741,9 +792,12 @@ struct thread_entry*
 #ifdef HAVE_PRIORITY_SCHEDULING
     thread->priority_x = 0;
     thread->priority = priority;
-    highest_priority = 100;
+    cores[core].highest_priority = 100;
+#endif
+
+#if NUM_CORES > 1
+    thread->core = core;
 #endif
-    add_to_list(&cores[core].running, thread);
 
     regs = &thread->context;
     /* Align stack to an even 32 bit boundary */
@@ -754,6 +808,9 @@ struct thread_entry*
        to have access to valid data */
     THREAD_CPU_INIT(core, thread);
 
+    add_to_list(&cores[core].running, thread);
+    unlock_cores();
+
     return thread;
 #if NUM_CORES == 1
 #undef core
@@ -763,6 +820,8 @@ struct thread_entry*
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void)
 {
+    lock_cores();
+
     if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
     {
         SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
@@ -772,6 +831,8 @@ void trigger_cpu_boost(void)
         }
         boosted_threads++;
     }
+
+    unlock_cores();
 }
 #endif
 
@@ -782,26 +843,30 @@ void trigger_cpu_boost(void)
  */
 void remove_thread(struct thread_entry *thread)
 {
+    lock_cores();
+
     if (thread == NULL)
-        thread = cores[CURRENT_CORE].running;
+        thread = cores[IF_COP2(thread->core)].running;
 
     /* Free the entry by removing thread name. */
     thread->name = NULL;
 #ifdef HAVE_PRIORITY_SCHEDULING
-    highest_priority = 100;
+    cores[IF_COP2(thread->core)].highest_priority = 100;
 #endif
 
-    if (thread == cores[CURRENT_CORE].running)
+    if (thread == cores[IF_COP2(thread->core)].running)
     {
-        remove_from_list(&cores[CURRENT_CORE].running, thread);
+        remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
         switch_thread(false, NULL);
         return ;
     }
 
-    if (thread == cores[CURRENT_CORE].sleeping)
-        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
+    if (thread == cores[IF_COP2(thread->core)].sleeping)
+        remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
     else
         remove_from_list(NULL, thread);
+
+    unlock_cores();
 }
 
 #ifdef HAVE_PRIORITY_SCHEDULING
@@ -809,12 +874,14 @@ int thread_set_priority(struct thread_entry *thread, int priority)
 {
     int old_priority;
 
+    lock_cores();
     if (thread == NULL)
         thread = cores[CURRENT_CORE].running;
 
     old_priority = thread->priority;
     thread->priority = priority;
-    highest_priority = 100;
+    cores[IF_COP2(thread->core)].highest_priority = 100;
+    unlock_cores();
 
     return old_priority;
 }
@@ -844,9 +911,19 @@ struct thread_entry * thread_get_current(void)
 void init_threads(void)
 {
     unsigned int core = CURRENT_CORE;
+    int slot;
+
+    /* Let main CPU initialize first. */
+#if NUM_CORES > 1
+    if (core != CPU)
+    {
+        while (!cores[CPU].kernel_running) ;
+    }
+#endif
+
+    lock_cores();
+    slot = find_empty_thread_slot();
 
-    memset(cores, 0, sizeof cores);
+    if (core == CPU)
+        memset(cores, 0, sizeof cores);
+
     cores[core].sleeping = NULL;
     cores[core].running = NULL;
     cores[core].waking = NULL;
@@ -854,36 +931,41 @@ void init_threads(void)
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
 #endif
-    cores[core].threads[0].name = main_thread_name;
-    cores[core].threads[0].statearg = 0;
+    threads[slot].name = main_thread_name;
+    threads[slot].statearg = 0;
+    threads[slot].context.start = 0; /* core's main thread already running */
+#if NUM_CORES > 1
+    threads[slot].core = core;
+#endif
 #ifdef HAVE_PRIORITY_SCHEDULING
-    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
-    cores[core].threads[0].priority_x = 0;
-    highest_priority = 100;
+    threads[slot].priority = PRIORITY_USER_INTERFACE;
+    threads[slot].priority_x = 0;
+    cores[core].highest_priority = 100;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
     boosted_threads = 0;
 #endif
-    add_to_list(&cores[core].running, &cores[core].threads[0]);
+    add_to_list(&cores[core].running, &threads[slot]);
 
     /* In multiple core setups, each core has a different stack. There is
      * probably a much better way to do this. */
     if (core == CPU)
     {
-        cores[CPU].threads[0].stack = stackbegin;
-        cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
-    } else {
-#if NUM_CORES > 1 /* This code path will not be run on single core targets */
-        cores[COP].threads[0].stack = cop_stackbegin;
-        cores[COP].threads[0].stack_size =
-            (int)cop_stackend - (int)cop_stackbegin;
-#endif
+        threads[slot].stack = stackbegin;
+        threads[slot].stack_size = (int)stackend - (int)stackbegin;
     }
-    cores[core].threads[0].context.start = 0; /* thread 0 already running */
-#if NUM_CORES > 1
-    if(core == COP)
-        kernel_running_on_cop = true; /* can we use context.start for this? */
+#if NUM_CORES > 1 /* This code path will not be run on single core targets */
+    else
+    {
+        threads[slot].stack = cop_stackbegin;
+        threads[slot].stack_size =
+            (int)cop_stackend - (int)cop_stackbegin;
+    }
+
+    cores[core].kernel_running = true;
 #endif
+
+    unlock_cores();
 }
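
init_threads() now doubles as a boot barrier between the two cores. Schematically (a sketch with a hypothetical function name; the thread-slot setup in the middle is elided):

/* Sketch of the startup handshake in init_threads() above: the COP parks
 * until the CPU has finished kernel init, so both cores only ever see
 * fully initialised scheduler structures. */
void init_threads_handshake(unsigned int core)
{
    if (core != CPU)
    {
        while (!cores[CPU].kernel_running)
            ;                     /* COP waits for the CPU side */
    }

    lock_cores();
    /* ... claim a thread slot and set up this core's main thread ... */
    cores[core].kernel_running = true;   /* lets the other core proceed */
    unlock_cores();
}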
int thread_stack_usage(const struct thread_entry *thread) int thread_stack_usage(const struct thread_entry *thread)

View file

@@ -427,6 +427,8 @@ void usb_init(void)
 #ifndef BOOTLOADER
     queue_init(&usb_queue, true);
+    queue_set_irq_safe(&usb_queue, true);
+
     create_thread(usb_thread, usb_stack, sizeof(usb_stack),
                   usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)
                   IF_COP(, CPU, false));