
Remove struct spinlock to clean up some mess and simplify. It's only used in boosting for multiprocessors, and a pure two-corelock hierarchy will do just fine.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19910 a1c6a512-1295-4272-9138-f99709370657
Michael Sevakis 2009-02-03 12:16:45 +00:00
parent e2a169bce5
commit 3cf148945e
4 changed files with 18 additions and 99 deletions
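For context, the removed struct spinlock wrapped a corelock with an owner pointer and a recursion count; after this change the remaining callers take a bare corelock directly. The sketch below is illustrative only and is not taken from the Rockbox tree: the corelock is modelled with a C11 atomic flag instead of the real inter-core lock, and all names (toy_corelock, boost_cl, toy_cpu_boost) are hypothetical.

/* Illustrative sketch only, not Rockbox code: a "corelock" is modelled
 * here with a C11 atomic flag, whereas the real one is a software lock
 * between the two PortalPlayer cores. */
#include <stdatomic.h>
#include <stdbool.h>

struct toy_corelock { atomic_flag locked; };

static void toy_corelock_init(struct toy_corelock *cl)
{
    atomic_flag_clear(&cl->locked);
}

static void toy_corelock_lock(struct toy_corelock *cl)
{
    /* spin until the other core releases the lock */
    while (atomic_flag_test_and_set_explicit(&cl->locked,
                                             memory_order_acquire))
        ;
}

static void toy_corelock_unlock(struct toy_corelock *cl)
{
    atomic_flag_clear_explicit(&cl->locked, memory_order_release);
}

/* Before this commit: struct spinlock layered owner tracking and a
 * recursion count on top of the corelock. */
struct toy_spinlock
{
    void *owner;             /* owning thread (hypothetical handle)   */
    int count;               /* recursion depth for the owning thread */
    struct toy_corelock cl;  /* the actual inter-core lock            */
};

/* After this commit: boost-control code just takes a corelock. */
static struct toy_corelock boost_cl;
static int boost_counter;

static void toy_cpu_boost(bool on_off)
{
    toy_corelock_lock(&boost_cl);
    boost_counter += on_off ? 1 : -1;   /* the state being protected */
    toy_corelock_unlock(&boost_cl);
}

int main(void)
{
    toy_corelock_init(&boost_cl);
    toy_cpu_boost(true);
    toy_cpu_boost(false);
    return boost_counter;               /* 0 */
}

The owner and count fields in the removed struct existed only so the same thread could re-take the lock; as the diff shows, none of the remaining call sites need that, so one plain corelock per resource (boostctrl_cl, cpufreq_cl) is enough.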


@@ -159,15 +159,6 @@ struct mutex
unsigned char locked; /* locked semaphore */
};
#if NUM_CORES > 1
struct spinlock
{
struct thread_entry *thread; /* lock owner */
int count; /* lock owner recursion count */
struct corelock cl; /* multiprocessor sync */
};
#endif
#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
@@ -284,11 +275,6 @@ extern void mutex_unlock(struct mutex *m);
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
{ m->no_preempt = !preempt; }
#endif
#if NUM_CORES > 1
extern void spinlock_init(struct spinlock *l);
extern void spinlock_lock(struct spinlock *l);
extern void spinlock_unlock(struct spinlock *l);
#endif
#ifdef HAVE_SEMAPHORE_OBJECTS
extern void semaphore_init(struct semaphore *s, int max, int start);
extern void semaphore_wait(struct semaphore *s);


@@ -961,57 +961,6 @@ void mutex_unlock(struct mutex *m)
}
}
/****************************************************************************
* Simpl-er mutex functions ;)
****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
corelock_init(&l->cl);
l->thread = NULL;
l->count = 0;
}
void spinlock_lock(struct spinlock *l)
{
const unsigned int core = CURRENT_CORE;
struct thread_entry *current = cores[core].running;
if(l->thread == current)
{
/* current core already owns it */
l->count++;
return;
}
/* lock against other processor cores */
corelock_lock(&l->cl);
/* take ownership */
l->thread = current;
}
void spinlock_unlock(struct spinlock *l)
{
/* unlocker not being the owner is an unlocking violation */
KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
"spinlock_unlock->wrong thread\n");
if(l->count > 0)
{
/* this core still owns lock */
l->count--;
return;
}
/* clear owner */
l->thread = NULL;
/* release lock */
corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */
/****************************************************************************
* Simple semaphore functions ;)
****************************************************************************/


@@ -33,10 +33,10 @@ long cpu_frequency SHAREDBSS_ATTR = CPU_FREQ;
static int boost_counter SHAREDBSS_ATTR = 0;
static bool cpu_idle SHAREDBSS_ATTR = false;
#if NUM_CORES > 1
struct spinlock boostctrl_spin SHAREDBSS_ATTR;
static struct corelock boostctrl_cl SHAREDBSS_ATTR;
void cpu_boost_init(void)
{
spinlock_init(&boostctrl_spin);
corelock_init(&boostctrl_cl);
}
#endif
@@ -57,9 +57,7 @@ int cpu_boost_log_getcount(void)
char * cpu_boost_log_getlog_first(void)
{
char *first;
#if NUM_CORES > 1
spinlock_lock(&boostctrl_spin);
#endif
corelock_lock(&boostctrl_cl);
first = NULL;
@@ -69,10 +67,7 @@ char * cpu_boost_log_getlog_first(void)
first = cpu_boost_calls[cpu_boost_first];
}
#if NUM_CORES > 1
spinlock_unlock(&boostctrl_spin);
#endif
corelock_unlock(&boostctrl_cl);
return first;
}
@@ -81,9 +76,7 @@ char * cpu_boost_log_getlog_next(void)
int message;
char *next;
#if NUM_CORES > 1
spinlock_lock(&boostctrl_spin);
#endif
corelock_lock(&boostctrl_cl);
message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
next = NULL;
@@ -94,18 +87,13 @@ char * cpu_boost_log_getlog_next(void)
next = cpu_boost_calls[message];
}
#if NUM_CORES > 1
spinlock_unlock(&boostctrl_spin);
#endif
corelock_unlock(&boostctrl_cl);
return next;
}
void cpu_boost_(bool on_off, char* location, int line)
{
#if NUM_CORES > 1
spinlock_lock(&boostctrl_spin);
#endif
corelock_lock(&boostctrl_cl);
if (cpu_boost_calls_count == MAX_BOOST_LOG)
{
@@ -124,10 +112,7 @@ void cpu_boost_(bool on_off, char* location, int line)
#else
void cpu_boost(bool on_off)
{
#if NUM_CORES > 1
spinlock_lock(&boostctrl_spin);
#endif
corelock_lock(&boostctrl_cl);
#endif /* CPU_BOOST_LOGGING */
if(on_off)
{
@@ -153,16 +138,12 @@ void cpu_boost(bool on_off)
}
}
#if NUM_CORES > 1
spinlock_unlock(&boostctrl_spin);
#endif
corelock_unlock(&boostctrl_cl);
}
void cpu_idle_mode(bool on_off)
{
#if NUM_CORES > 1
spinlock_lock(&boostctrl_spin);
#endif
corelock_lock(&boostctrl_cl);
cpu_idle = on_off;
@@ -176,9 +157,7 @@ void cpu_idle_mode(bool on_off)
set_cpu_frequency(CPUFREQ_NORMAL);
}
#if NUM_CORES > 1
spinlock_unlock(&boostctrl_spin);
#endif
corelock_unlock(&boostctrl_cl);
}
#endif /* HAVE_ADJUSTABLE_CPU_FREQ */


@@ -35,6 +35,10 @@ extern void SERIAL0(void);
extern void ipod_mini_button_int(void); /* iPod Mini 1st gen only */
extern void ipod_4g_button_int(void); /* iPod 4th gen and higher only */
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
static struct corelock cpufreq_cl SHAREDBSS_ATTR;
#endif
void __attribute__((interrupt("IRQ"))) irq_handler(void)
{
if(CURRENT_CORE == CPU)
@@ -236,7 +240,7 @@ static void pp_set_cpu_frequency(long frequency)
#endif
{
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
spinlock_lock(&boostctrl_spin);
corelock_lock(&cpufreq_cl);
#endif
switch (frequency)
@@ -347,7 +351,7 @@ static void pp_set_cpu_frequency(long frequency)
}
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
spinlock_unlock(&boostctrl_spin);
corelock_unlock(&cpufreq_cl);
#endif
}
#endif /* !BOOTLOADER || SANSA_E200 || SANSA_C200 */
@@ -475,6 +479,7 @@ void system_init(void)
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
#if NUM_CORES > 1
corelock_init(&cpufreq_cl);
cpu_boost_init();
#endif
#else