forked from len0rd/rockbox
Make scheduler functions thread-safe core-wise. A big step towards running playback on the COP (not yet possible because more protection is needed at the file-system level).
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12926 a1c6a512-1295-4272-9138-f99709370657
parent 6c487eb5d1
commit 66258a30a4
10 changed files with 368 additions and 148 deletions
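What "core wise" means in practice: the scheduler and queue entry points below now bracket their critical sections with lock_cores()/unlock_cores(), which spin on a shared flag via test_and_set() on dual-core PortalPlayer targets and compile away to nothing on single-core builds. A minimal sketch of the pattern, assuming the lock_cores()/unlock_cores() primitives introduced in this commit (the function below is hypothetical, for illustration only):

```c
#include "kernel.h"
#include "thread.h"   /* lock_cores() / unlock_cores() */

/* Hypothetical kernel-side operation showing the locking pattern. */
static void my_kernel_op(struct event_queue *q)
{
    lock_cores();   /* spin on the shared cores_locked flag (no-op if NUM_CORES == 1) */

    /* ... manipulate q->read, q->write or the per-core thread lists ... */

    unlock_cores(); /* release so the other core may enter the kernel */
}
```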
@@ -92,7 +92,7 @@
/*---------------------------------------------------*/
extern int ata_device;
extern int ata_io_address;
extern struct core_entry cores[NUM_CORES];
extern struct thread_entry threads[MAXTHREADS];

#ifndef SIMULATOR
static char thread_status_char(int status)

@@ -112,34 +112,33 @@ static char thread_status_char(int status)
#else
#define IF_COP2(...)
#endif
/* the MSB of thread_ids[..] is the core, so this will need changing
if we ever get a target with more than 2 cores...
The next 7 bits are used for the thread number on that core...
SO, MAXTHREADS must be kept under 256... which shouldnt be a problem */
static unsigned char thread_ids[NUM_CORES * MAXTHREADS];
static char* dbg_os_getname(int selected_item, void * data, char *buffer)
{
(void)data;
struct thread_entry *thread = NULL;
int status, usage;
int core = (thread_ids[selected_item]&0x80)>>7;
int thread_number = thread_ids[selected_item]&0x7F;
thread = &cores[core].threads[thread_number];
thread = &threads[selected_item];

if (thread->name == NULL)
{
snprintf(buffer, MAX_PATH, "%2d: ---", selected_item);
return buffer;
}

if (thread == NULL)
return "";
usage = thread_stack_usage(thread);
status = thread_get_status(thread);
#ifdef HAVE_PRIORITY_SCHEDULING
snprintf(buffer, MAX_PATH, IF_COP2("(%d) ") "%c%c %d %2d%% %s",
IF_COP2(core,)
snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %d %2d%% %s",
selected_item,
IF_COP2(thread->core,)
(status == STATE_RUNNING) ? '*' : ' ',
thread_status_char(status),
thread->priority,
usage, thread->name);
#else
snprintf(buffer, MAX_PATH, IF_COP2("(%d) ") "%c%c %2d%% %s",
IF_COP2(core,)
snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %2d%% %s",
selected_item,
IF_COP2(thread->core,)
(status == STATE_RUNNING) ? '*' : ' ',
thread_status_char(status),
usage, thread->name);

@@ -151,30 +150,12 @@ static char* dbg_os_getname(int selected_item, void * data, char *buffer)
static bool dbg_os(void)
{
struct gui_synclist lists;
struct thread_entry *thread = NULL;
int action, i;
int thread_count = 0;
int core = 0;
#if NUM_CORES > 1
for(core = 0; core < NUM_CORES; core++)
{
#endif
for(i = 0;i < MAXTHREADS; i++)
{
thread = &cores[core].threads[i];
if (thread->name != NULL)
{
thread_ids[thread_count] = (core<<7)|i;
thread_count++;
}
}
#if NUM_CORES > 1
}
#endif
int action;

gui_synclist_init(&lists, dbg_os_getname, NULL, false, 1);
gui_synclist_set_title(&lists, IF_COP2("Core and ") "Stack usage:", NOICON);
gui_synclist_set_icon_callback(&lists, NULL);
gui_synclist_set_nb_items(&lists, thread_count);
gui_synclist_set_nb_items(&lists, MAXTHREADS);
action_signalscreenchange();
while(1)
{
@@ -3806,7 +3806,7 @@ void audio_init(void)
codec_thread_p = create_thread(
codec_thread, codec_stack, sizeof(codec_stack),
codec_thread_name IF_PRIO(, PRIORITY_PLAYBACK)
IF_COP(, COP, true));
IF_COP(, CPU, true));

create_thread(audio_thread, audio_stack, sizeof(audio_stack),
audio_thread_name IF_PRIO(, PRIORITY_BUFFERING)
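The hunk above moves the codec thread back to the CPU for now. For reference, a hedged sketch of how a caller would request a thread on the coprocessor with fallback to the other core, using the IF_PRIO()/IF_COP() argument macros exactly as the diff does (thread name, stack size and function here are made up for illustration):

```c
#include "thread.h"

static long demo_stack[256];   /* arbitrary size, illustration only */

static void demo_thread(void)
{
    /* ... */
}

static void start_demo_thread(void)
{
    /* The last IF_COP() argument requests fallback to the other core if
     * the COP kernel is not running (e.g. an old bootloader). */
    create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                  "demo" IF_PRIO(, PRIORITY_SYSTEM)
                  IF_COP(, COP, true));
}
```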
@@ -37,6 +37,14 @@
#include "lcd-remote.h"
#endif

#if 0
/* Older than MAX_EVENT_AGE button events are going to be ignored.
* Used to prevent for example volume going up uncontrollable when events
* are getting queued and UI is lagging too much.
*/
#define MAX_EVENT_AGE HZ
#endif

struct event_queue button_queue;

static long lastbtn; /* Last valid button status */

@@ -290,6 +298,14 @@ long button_get(bool block)
if ( block || pending_count )
{
queue_wait(&button_queue, &ev);

#if 0
/* Ignore if the event was too old and for simplicity, just
* wait for a new button_get() request. */
if (current_tick - ev.tick > MAX_EVENT_AGE)
return BUTTON_NONE;
#endif

return ev.id;
}

@@ -318,6 +334,11 @@ void button_init(void)
button_init_device();

queue_init(&button_queue, true);

/* Enable less protection which would kill IRQ handler. Writing queue is
* no longer core-wise thread safe. */
queue_set_irq_safe(&button_queue, true);

button_read();
lastbtn = button_read();
tick_add_task(button_tick);
@@ -73,6 +73,9 @@ struct event_queue
struct thread_entry *thread;
unsigned int read;
unsigned int write;
#if NUM_CORES > 1
bool irq_safe;
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list *send;
#endif

@@ -105,6 +108,11 @@ int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));

extern void queue_init(struct event_queue *q, bool register_queue);
#if NUM_CORES > 1
extern void queue_set_irq_safe(struct event_queue *q, bool state);
#else
#define queue_set_irq_safe(q,state)
#endif
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
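Note that on single-core builds the queue_set_irq_safe() declaration above is replaced by an empty macro, so drivers can flag IRQ-posted queues unconditionally. A sketch of the intended call sequence, mirroring what button_init() and usb_init() do elsewhere in this commit (queue and function names are illustrative):

```c
#include "kernel.h"

static struct event_queue my_irq_queue;   /* hypothetical driver queue */

static void my_driver_init(void)
{
    queue_init(&my_irq_queue, true);

    /* This queue is posted to from an interrupt handler, so drop the
     * core-wise locking that would otherwise kill the IRQ handler.
     * Expands to nothing when NUM_CORES == 1. */
    queue_set_irq_safe(&my_irq_queue, true);
}
```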
@@ -58,7 +58,7 @@ bool detect_flashed_ramimage(void);
bool detect_original_firmware(void);

#if defined(HAVE_ADJUSTABLE_CPU_FREQ) \
&& defined(ROCKBOX_HAS_LOGF)
&& defined(ROCKBOX_HAS_LOGF) && (NUM_CORES == 1)
#define CPU_BOOST_LOGGING
#endif
@@ -105,8 +105,11 @@ struct thread_entry {
unsigned long statearg;
unsigned short stack_size;
#ifdef HAVE_PRIORITY_SCHEDULING
unsigned short priority;
unsigned long priority_x;
unsigned char priority;
unsigned char priority_x;
# if NUM_CORES > 1
unsigned char core; /* To which core threads belongs to. */
# endif
long last_run;
#endif
struct thread_entry *next, *prev;

@@ -116,11 +119,18 @@ struct thread_entry {
};

struct core_entry {
struct thread_entry threads[MAXTHREADS];
struct thread_entry *running;
struct thread_entry *sleeping;
struct thread_entry *waking;
struct thread_entry **wakeup_list;
#ifdef HAVE_PRIORITY_SCHEDULING
long highest_priority;
#endif
#if NUM_CORES > 1
volatile bool lock_issued;
volatile bool kernel_running;
#endif
long last_tick;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
int switch_to_irq_level;
#define STAY_IRQ_LEVEL -1

@@ -171,6 +181,14 @@ struct core_entry {
})
#endif

#if NUM_CORES > 1
inline void lock_cores(void);
inline void unlock_cores(void);
#else
#define lock_cores(...)
#define unlock_cores(...)
#endif

struct thread_entry*
create_thread(void (*function)(void), void* stack, int stack_size,
const char *name IF_PRIO(, int priority)
@@ -32,8 +32,8 @@ long current_tick NOCACHEDATA_ATTR = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[32];
static int num_queues;
static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
static int num_queues NOCACHEBSS_ATTR;

void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;

@@ -163,7 +163,7 @@ void queue_enable_queue_send(struct event_queue *q,
struct queue_sender_list *send)
{
q->send = send;
memset(send, 0, sizeof(*send));
memset(send, 0, sizeof(struct queue_sender_list));
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

@@ -173,6 +173,9 @@ void queue_init(struct event_queue *q, bool register_queue)
q->read = 0;
q->write = 0;
q->thread = NULL;
#if NUM_CORES > 1
q->irq_safe = false;
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
q->send = NULL; /* No message sending by default */
#endif

@@ -184,11 +187,28 @@ void queue_init(struct event_queue *q, bool register_queue)
}
}

#if NUM_CORES > 1
/**
* If IRQ mode is enabled, some core-wise locking mechanisms are disabled
* causing accessing queue to be no longer thread safe from the other core.
* However, that locking mechanism would also kill IRQ handlers.
*
* @param q struct of an event_queue
* @param state enable/disable IRQ mode
* @default state disabled
*/
void queue_set_irq_safe(struct event_queue *q, bool state)
{
q->irq_safe = state;
}
#endif

void queue_delete(struct event_queue *q)
{
int i;
bool found = false;

lock_cores();
wakeup_thread(&q->thread);

/* Find the queue to be deleted */

@@ -219,15 +239,20 @@ void queue_delete(struct event_queue *q)

num_queues--;
}

unlock_cores();
}

void queue_wait(struct event_queue *q, struct event *ev)
{
unsigned int rd;

lock_cores();

if (q->read == q->write)
{
block_thread(&q->thread);
lock_cores();
}

rd = q->read++ & QUEUE_LENGTH_MASK;

@@ -240,13 +265,18 @@ void queue_wait(struct event_queue *q, struct event *ev)
queue_fetch_sender(q->send, rd);
}
#endif

unlock_cores();
}

void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
lock_cores();

if (q->read == q->write && ticks > 0)
{
block_thread_w_tmo(&q->thread, ticks);
lock_cores();
}

if (q->read != q->write)

@@ -266,12 +296,21 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
ev->id = SYS_TIMEOUT;
}

unlock_cores();
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
unsigned int wr;

#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif

wr = q->write++ & QUEUE_LENGTH_MASK;

q->events[wr].id = id;
q->events[wr].data = data;

@@ -290,7 +329,12 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
#endif

wakeup_thread_irq_safe(&q->thread);
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
set_irq_level(oldlevel);

}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME

@@ -300,7 +344,12 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
unsigned int wr;

lock_cores();

wr = q->write++ & QUEUE_LENGTH_MASK;

q->events[wr].id = id;
q->events[wr].data = data;

@@ -321,6 +370,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)

/* Function as queue_post if sending is not enabled */
wakeup_thread(&q->thread);
unlock_cores();
set_irq_level(oldlevel);

return 0;

@@ -337,6 +387,7 @@ bool queue_in_queue_send(struct event_queue *q)
/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
lock_cores();
if(q->send && q->send->curr_sender)
{
int level = set_irq_level(HIGHEST_IRQ_LEVEL);

@@ -346,18 +397,37 @@ void queue_reply(struct event_queue *q, intptr_t retval)
}
set_irq_level(level);
}
unlock_cores();
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
return ( q->read == q->write );
bool is_empty;

#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif

is_empty = ( q->read == q->write );
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif

return is_empty;
}

void queue_clear(struct event_queue* q)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Release all thread waiting in the queue for a reply -
dequeued sent message will be handled by owning thread */

@@ -366,6 +436,12 @@ void queue_clear(struct event_queue* q)

q->read = 0;
q->write = 0;

#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif

set_irq_level(oldlevel);
}

@@ -373,6 +449,11 @@ void queue_remove_from_head(struct event_queue *q, long id)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif

while(q->read != q->write)
{
unsigned int rd = q->read & QUEUE_LENGTH_MASK;

@@ -397,6 +478,11 @@ void queue_remove_from_head(struct event_queue *q, long id)
q->read++;
}

#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif

set_irq_level(oldlevel);
}

@@ -411,11 +497,21 @@ int queue_count(const struct event_queue *q)
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
int result = 0;

#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif

if (q->read <= q->write)
result = q->write - q->read;
else
result = QUEUE_LENGTH - (q->read - q->write);

#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif

set_irq_level(oldlevel);

return result;

@@ -712,10 +808,14 @@ void mutex_lock(struct mutex *m)

void mutex_unlock(struct mutex *m)
{
lock_cores();

if (m->thread == NULL)
m->locked = 0;
else
wakeup_thread(&m->thread);

unlock_cores();
}

void spinlock_lock(struct mutex *m)
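With the queue changes above, queue_post() only skips lock_cores() for queues flagged irq-safe (trading cross-core protection for IRQ-handler safety, as the button driver comment notes), while queue_send() always takes the core lock and blocks for a reply, so it is for thread context only. A hedged usage sketch (queue and event names are hypothetical):

```c
#include "kernel.h"

#define MY_EVENT_TICK 1                    /* illustrative event id */

static struct event_queue my_irq_queue;    /* marked irq-safe at init time */
static struct event_queue my_other_queue;  /* ordinary thread-to-thread queue */

/* Interrupt context: acceptable only because queue_set_irq_safe() was
 * called on this queue, so queue_post() does not spin on the core lock. */
void my_timer_isr(void)
{
    queue_post(&my_irq_queue, MY_EVENT_TICK, 0);
}

/* Thread context: queue_send() locks the cores and waits for the reply. */
intptr_t ask_other_thread(void)
{
    return queue_send(&my_other_queue, MY_EVENT_TICK, 42);
}
```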
@@ -23,17 +23,20 @@
#include "font.h"
#include "system.h"
#include "kernel.h"
#include "thread.h"
#include "timer.h"
#include "inttypes.h"
#include "string.h"

#ifndef SIMULATOR
long cpu_frequency = CPU_FREQ;
long cpu_frequency NOCACHEBSS_ATTR = CPU_FREQ;
#endif

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
static int boost_counter = 0;
static bool cpu_idle = false;
static int boost_counter NOCACHEBSS_ATTR = 0;
static bool cpu_idle NOCACHEBSS_ATTR = false;

struct mutex boostctrl_mtx NOCACHEBSS_ATTR;

int get_cpu_boost_counter(void)
{

@@ -722,47 +725,49 @@ void set_cpu_frequency(long frequency)
{
unsigned long postmult;

if (CURRENT_CORE == CPU)
{
if (frequency == CPUFREQ_NORMAL)
postmult = CPUFREQ_NORMAL_MULT;
else if (frequency == CPUFREQ_MAX)
postmult = CPUFREQ_MAX_MULT;
else
postmult = CPUFREQ_DEFAULT_MULT;
cpu_frequency = frequency;
/* Using mutex or spinlock isn't safe here. */
while (test_and_set(&boostctrl_mtx.locked, 1)) ;

/* Enable PLL? */
outl(inl(0x70000020) | (1<<30), 0x70000020);
if (frequency == CPUFREQ_NORMAL)
postmult = CPUFREQ_NORMAL_MULT;
else if (frequency == CPUFREQ_MAX)
postmult = CPUFREQ_MAX_MULT;
else
postmult = CPUFREQ_DEFAULT_MULT;
cpu_frequency = frequency;

/* Select 24MHz crystal as clock source? */
outl((inl(0x60006020) & 0x0fffff0f) | 0x20000020, 0x60006020);
/* Enable PLL? */
outl(inl(0x70000020) | (1<<30), 0x70000020);

/* Clock frequency = (24/8)*postmult */
outl(0xaa020000 | 8 | (postmult << 8), 0x60006034);
/* Select 24MHz crystal as clock source? */
outl((inl(0x60006020) & 0x0fffff0f) | 0x20000020, 0x60006020);

/* Wait for PLL relock? */
udelay(2000);
/* Clock frequency = (24/8)*postmult */
outl(0xaa020000 | 8 | (postmult << 8), 0x60006034);

/* Select PLL as clock source? */
outl((inl(0x60006020) & 0x0fffff0f) | 0x20000070, 0x60006020);
/* Wait for PLL relock? */
udelay(2000);

#if defined(IPOD_COLOR) || defined(IPOD_4G) || defined(IPOD_MINI) || defined(IRIVER_H10) || defined(IRIVER_H10_5GB)
/* We don't know why the timer interrupt gets disabled on the PP5020
based ipods, but without the following line, the 4Gs will freeze
when CPU frequency changing is enabled.
/* Select PLL as clock source? */
outl((inl(0x60006020) & 0x0fffff0f) | 0x20000070, 0x60006020);

Note also that a simple "CPU_INT_EN = TIMER1_MASK;" (as used
elsewhere to enable interrupts) doesn't work, we need "|=".
# if defined(IPOD_COLOR) || defined(IPOD_4G) || defined(IPOD_MINI) || defined(IRIVER_H10) || defined(IRIVER_H10_5GB)
/* We don't know why the timer interrupt gets disabled on the PP5020
based ipods, but without the following line, the 4Gs will freeze
when CPU frequency changing is enabled.

It's not needed on the PP5021 and PP5022 ipods.
*/
Note also that a simple "CPU_INT_EN = TIMER1_MASK;" (as used
elsewhere to enable interrupts) doesn't work, we need "|=".

/* unmask interrupt source */
CPU_INT_EN |= TIMER1_MASK;
COP_INT_EN |= TIMER1_MASK;
#endif
}
It's not needed on the PP5021 and PP5022 ipods.
*/

/* unmask interrupt source */
CPU_INT_EN |= TIMER1_MASK;
COP_INT_EN |= TIMER1_MASK;
# endif

boostctrl_mtx.locked = 0;
}
#elif !defined(BOOTLOADER)
void ipod_set_cpu_frequency(void)

@@ -804,6 +809,8 @@ void system_init(void)
outl(-1, 0x60001038);
outl(-1, 0x60001028);
outl(-1, 0x6000101c);

spinlock_init(&boostctrl_mtx);
#if (!defined HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES == 1)
ipod_set_cpu_frequency();
#endif

@@ -890,6 +897,7 @@ static void ipod_init_cache(void)

outl(0x3, 0xcf004024);
}

#endif

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
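For reference on the "Clock frequency = (24/8)*postmult" comment in the set_cpu_frequency() hunk above: the 24 MHz crystal is divided by 8 and then multiplied by postmult, so a postmult of 25 would give (24/8)*25 = 75 MHz and a postmult of 10 would give 30 MHz. The actual CPUFREQ_*_MULT values are target-defined constants that do not appear in this diff, so those numbers are only illustrative.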
@@ -28,13 +28,17 @@
#include <profile.h>
#endif

#if NUM_CORES > 1
# define IF_COP2(x) x
#else
# define IF_COP2(x) CURRENT_CORE
#endif

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the the machine int type, whose size could be < 4. */

struct core_entry cores[NUM_CORES] IBSS_ATTR;
#ifdef HAVE_PRIORITY_SCHEDULING
static unsigned short highest_priority IBSS_ATTR;
#endif
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif

@@ -59,8 +63,29 @@ int *cop_stackend = stackend;
#endif
#endif

#if (NUM_CORES > 1)
bool IDATA_ATTR kernel_running_on_cop = false;
#if NUM_CORES > 1
static long cores_locked IBSS_ATTR;

#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0

inline void lock_cores(void)
{
if (!cores[CURRENT_CORE].lock_issued)
{
LOCK();
cores[CURRENT_CORE].lock_issued = true;
}
}

inline void unlock_cores(void)
{
if (cores[CURRENT_CORE].lock_issued)
{
cores[CURRENT_CORE].lock_issued = false;
UNLOCK();
}
}
#endif

/* Conserve IRAM
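The lock_issued flag above is what makes the lock re-entrant per core: a nested lock_cores() call from the same core (for example sleep_thread() taking the lock and then calling switch_thread(), which takes it again) sees lock_issued already set and does not spin on cores_locked a second time. A minimal illustration, assuming the primitives defined above (both helper functions are hypothetical):

```c
#include "thread.h"

static void inner_op(void)
{
    lock_cores();    /* nested: lock_issued is already true on this core, so this returns at once */
    /* ... */
}

static void outer_op(void)
{
    lock_cores();    /* takes the global cores_locked flag and sets lock_issued */
    inner_op();
    unlock_cores();  /* clears lock_issued and releases cores_locked */
}
```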
@@ -326,7 +351,6 @@ static void wake_list_awaken(void)

static inline void sleep_core(void)
{
static long last_tick = 0;
#if CONFIG_CPU == S3C2440
int i;
#endif

@@ -339,10 +363,10 @@ static inline void sleep_core(void)
if (cores[CURRENT_CORE].waking != NULL)
wake_list_awaken();

if (last_tick != current_tick)
if (cores[CURRENT_CORE].last_tick != current_tick)
{
check_sleepers();
last_tick = current_tick;
cores[CURRENT_CORE].last_tick = current_tick;
}

/* We must sleep until there is at least one process in the list

@@ -357,17 +381,22 @@ static inline void sleep_core(void)
and_b(0x7F, &SBYCR);
asm volatile ("sleep");
#elif defined (CPU_PP)
unlock_cores();

/* This should sleep the CPU. It appears to wake by itself on
interrupts */
if (CURRENT_CORE == CPU)
CPU_CTL = PROC_SLEEP;
else
COP_CTL = PROC_SLEEP;

lock_cores();
#elif CONFIG_CPU == S3C2440
CLKCON |= (1 << 2); /* set IDLE bit */
for(i=0; i<10; i++); /* wait for IDLE */
CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
#endif

}
}

@@ -378,7 +407,7 @@ static int get_threadnum(struct thread_entry *thread)

for (i = 0; i < MAXTHREADS; i++)
{
if (&cores[CURRENT_CORE].threads[i] == thread)
if (&threads[i] == thread)
return i;
}

@@ -415,8 +444,8 @@ void change_thread_state(struct thread_entry **blocked_list)

#ifdef HAVE_PRIORITY_SCHEDULING
/* Reset priorities */
if (old->priority == highest_priority)
highest_priority = 100;
if (old->priority == cores[CURRENT_CORE].highest_priority)
cores[CURRENT_CORE].highest_priority = 100;
#endif
}
else

@@ -439,6 +468,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* Do nothing */
#else

lock_cores();

/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */

@@ -479,14 +510,16 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
int priority = cores[CURRENT_CORE].running->priority;

if (priority < highest_priority)
highest_priority = priority;
if (priority < cores[CURRENT_CORE].highest_priority)
cores[CURRENT_CORE].highest_priority = priority;

if (priority == highest_priority ||
if (priority == cores[CURRENT_CORE].highest_priority ||
(current_tick - cores[CURRENT_CORE].running->last_run >
priority * 8) ||
cores[CURRENT_CORE].running->priority_x != 0)
{
break;
}

cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
}

@@ -496,6 +529,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
#endif

#endif
unlock_cores();

/* And finally give control to the next thread. */
load_context(&cores[CURRENT_CORE].running->context);

@@ -508,10 +543,13 @@ void sleep_thread(int ticks)
{
struct thread_entry *current;

lock_cores();

current = cores[CURRENT_CORE].running;

#ifdef HAVE_SCHEDULER_BOOSTCTRL
if (STATE_IS_BOOSTED(current->statearg)) {
if (STATE_IS_BOOSTED(current->statearg))
{
boosted_threads--;
if (!boosted_threads)
{

@@ -524,12 +562,16 @@ void sleep_thread(int ticks)
* so that scheduler removes thread from the list of running processes
* and puts it in list of sleeping tasks. */
SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);

switch_thread(true, NULL);
}

void block_thread(struct thread_entry **list)
{
struct thread_entry *current;

lock_cores();

/* Get the entry for the current running thread. */
current = cores[CURRENT_CORE].running;

@@ -567,11 +609,13 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
/* Get the entry for the current running thread. */
current = cores[CURRENT_CORE].running;

lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* A block with a timeout is a sleep situation, whatever we are waiting
* for _may or may not_ happen, regardless of boost state, (user input
* for instance), so this thread no longer needs to boost */
if (STATE_IS_BOOSTED(current->statearg)) {
if (STATE_IS_BOOSTED(current->statearg))
{
boosted_threads--;
if (!boosted_threads)
{

@@ -624,7 +668,9 @@ void wakeup_thread(struct thread_entry **list)

/* Check if there is a blocked thread at all. */
if (*list == NULL)
{
return ;
}

/* Wake up the last thread first. */
thread = *list;

@@ -638,7 +684,7 @@ void wakeup_thread(struct thread_entry **list)
* is safe since each object maintains it's own list of
* sleepers and queues protect against reentrancy. */
remove_from_list(list, thread);
add_to_list(cores[CURRENT_CORE].wakeup_list, thread);
add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);

case STATE_BLOCKED_W_TMO:
/* Just remove the timeout to cause scheduler to immediately

@@ -653,6 +699,19 @@ void wakeup_thread(struct thread_entry **list)
}
}

inline static int find_empty_thread_slot(void)
{
int n;

for (n = 0; n < MAXTHREADS; n++)
{
if (threads[n].name == NULL)
return n;
}

return -1;
}

/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
before calling. */
void wakeup_thread_irq_safe(struct thread_entry **list)

@@ -680,7 +739,7 @@ struct thread_entry*
unsigned int i;
unsigned int stacklen;
unsigned int *stackptr;
int n;
int slot;
struct regs *regs;
struct thread_entry *thread;

@@ -697,7 +756,7 @@ struct thread_entry*
/* If the kernel hasn't initialised on the COP (most likely due to an old
* bootloader) then refuse to start threads on the COP
*/
if((core == COP) && !kernel_running_on_cop)
if ((core == COP) && !cores[core].kernel_running)
{
if (fallback)
return create_thread(function, stack, stack_size, name

@@ -707,21 +766,13 @@ struct thread_entry*
}
#endif

for (n = 0; n < MAXTHREADS; n++)
{
if (cores[core].threads[n].name == NULL)
break;
}
lock_cores();

if (n == MAXTHREADS)
slot = find_empty_thread_slot();
if (slot < 0)
{
#if NUM_CORES > 1
if (fallback)
return create_thread(function, stack, stack_size, name
IF_PRIO(, priority) IF_COP(, 1 - core, fallback));
else
#endif
return NULL;
unlock_cores();
return NULL;
}

/* Munge the stack to make it easy to spot stack overflows */

@@ -733,7 +784,7 @@ struct thread_entry*
}

/* Store interesting information */
thread = &cores[core].threads[n];
thread = &threads[slot];
thread->name = name;
thread->stack = stack;
thread->stack_size = stack_size;

@@ -741,9 +792,12 @@ struct thread_entry*
#ifdef HAVE_PRIORITY_SCHEDULING
thread->priority_x = 0;
thread->priority = priority;
highest_priority = 100;
cores[core].highest_priority = 100;
#endif

#if NUM_CORES > 1
thread->core = core;
#endif
add_to_list(&cores[core].running, thread);

regs = &thread->context;
/* Align stack to an even 32 bit boundary */

@@ -754,6 +808,9 @@ struct thread_entry*
to have access to valid data */
THREAD_CPU_INIT(core, thread);

add_to_list(&cores[core].running, thread);
unlock_cores();

return thread;
#if NUM_CORES == 1
#undef core

@@ -763,6 +820,8 @@ struct thread_entry*
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
lock_cores();

if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
{
SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);

@@ -772,6 +831,8 @@ void trigger_cpu_boost(void)
}
boosted_threads++;
}

unlock_cores();
}
#endif

@@ -782,26 +843,30 @@ void trigger_cpu_boost(void)
*/
void remove_thread(struct thread_entry *thread)
{
lock_cores();

if (thread == NULL)
thread = cores[CURRENT_CORE].running;
thread = cores[IF_COP2(thread->core)].running;

/* Free the entry by removing thread name. */
thread->name = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
highest_priority = 100;
cores[IF_COP2(thread->core)].highest_priority = 100;
#endif

if (thread == cores[CURRENT_CORE].running)
if (thread == cores[IF_COP2(thread->core)].running)
{
remove_from_list(&cores[CURRENT_CORE].running, thread);
remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
switch_thread(false, NULL);
return ;
}

if (thread == cores[CURRENT_CORE].sleeping)
remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
if (thread == cores[IF_COP2(thread->core)].sleeping)
remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
else
remove_from_list(NULL, thread);

unlock_cores();
}

#ifdef HAVE_PRIORITY_SCHEDULING

@@ -809,12 +874,14 @@ int thread_set_priority(struct thread_entry *thread, int priority)
{
int old_priority;

lock_cores();
if (thread == NULL)
thread = cores[CURRENT_CORE].running;

old_priority = thread->priority;
thread->priority = priority;
highest_priority = 100;
cores[IF_COP2(thread->core)].highest_priority = 100;
unlock_cores();

return old_priority;
}

@@ -844,9 +911,19 @@ struct thread_entry * thread_get_current(void)
void init_threads(void)
{
unsigned int core = CURRENT_CORE;
int slot;

/* Let main CPU initialize first. */
#if NUM_CORES > 1
if (core != CPU)
{
while (!cores[CPU].kernel_running) ;
}
#endif

lock_cores();
slot = find_empty_thread_slot();

if (core == CPU)
memset(cores, 0, sizeof cores);
cores[core].sleeping = NULL;
cores[core].running = NULL;
cores[core].waking = NULL;

@@ -854,36 +931,41 @@ void init_threads(void)
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif
cores[core].threads[0].name = main_thread_name;
cores[core].threads[0].statearg = 0;
threads[slot].name = main_thread_name;
threads[slot].statearg = 0;
threads[slot].context.start = 0; /* core's main thread already running */
#if NUM_CORES > 1
threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
cores[core].threads[0].priority_x = 0;
highest_priority = 100;
threads[slot].priority = PRIORITY_USER_INTERFACE;
threads[slot].priority_x = 0;
cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
boosted_threads = 0;
#endif
add_to_list(&cores[core].running, &cores[core].threads[0]);
add_to_list(&cores[core].running, &threads[slot]);

/* In multiple core setups, each core has a different stack. There is
* probably a much better way to do this. */
if (core == CPU)
{
cores[CPU].threads[0].stack = stackbegin;
cores[CPU].threads[0].stack_size = (int)stackend - (int)stackbegin;
} else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
cores[COP].threads[0].stack = cop_stackbegin;
cores[COP].threads[0].stack_size =
(int)cop_stackend - (int)cop_stackbegin;
#endif
threads[slot].stack = stackbegin;
threads[slot].stack_size = (int)stackend - (int)stackbegin;
}
cores[core].threads[0].context.start = 0; /* thread 0 already running */
#if NUM_CORES > 1
if(core == COP)
kernel_running_on_cop = true; /* can we use context.start for this? */
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
else
{
threads[slot].stack = cop_stackbegin;
threads[slot].stack_size =
(int)cop_stackend - (int)cop_stackbegin;
}

cores[core].kernel_running = true;
#endif

unlock_cores();
}

int thread_stack_usage(const struct thread_entry *thread)
@@ -427,6 +427,8 @@ void usb_init(void)

#ifndef BOOTLOADER
queue_init(&usb_queue, true);
queue_set_irq_safe(&usb_queue, true);

create_thread(usb_thread, usb_stack, sizeof(usb_stack),
usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)
IF_COP(, CPU, false));