Switch iPod 3G to use EABI toolchain. Make necessary threading changes to avoid use of stack after switching to idle stack.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26898 a1c6a512-1295-4272-9138-f99709370657
parent 69028d5d31
commit 2b640ba4b8
5 changed files with 54 additions and 33 deletions
@@ -694,7 +694,7 @@ static void init(void)
 }
 
 #ifdef CPU_PP
-void cop_main(void)
+void __attribute__((noreturn)) cop_main(void)
 {
     /* This is the entry point for the coprocessor
        Anyone not running an upgraded bootloader will never reach this point,
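Marking cop_main() noreturn tells GCC that the coprocessor entry point never hands control back, so the compiler may drop the function epilogue and will flag any accidental return path. A minimal sketch of the attribute's contract (names here are hypothetical, not from the Rockbox tree):

#include <stdio.h>

/* noreturn promises the caller never regains control; GCC may omit the
 * epilogue and treats code after a call to this function as unreachable. */
static void __attribute__((noreturn)) idle_loop(void)
{
    for (;;)
        ;   /* must not fall off the end, or behavior is undefined */
}

int main(void)
{
    puts("handing off to the idle loop");
    idle_loop();
    /* GCC knows execution never reaches this point. */
}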
@@ -705,7 +705,6 @@ void cop_main(void)
        destroyed for purposes of continuity. The cop sits idle until at least
        one thread exists on it. */
 
-    /* 3G doesn't have Rolo or dual core support yet */
 #if NUM_CORES > 1
     system_init();
     kernel_init();
@@ -717,5 +716,4 @@ void cop_main(void)
 }
 #endif /* CPU_PP */
 
-#endif
-
+#endif /* SIMULATOR */
@@ -375,7 +375,7 @@ void thread_thaw(unsigned int thread_id);
 /* Wait for a thread to exit */
 void thread_wait(unsigned int thread_id);
 /* Exit the current thread */
-void thread_exit(void);
+void thread_exit(void) __attribute__((noreturn));
 #if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
 #define ALLOW_REMOVE_THREAD
 /* Remove a thread from the scheduler */
@@ -630,6 +630,11 @@ void remove_thread(unsigned int thread_id)
 void thread_exit(void)
 {
     remove_thread(THREAD_ID_CURRENT);
+    /* This should never and must never be reached - if it is, the
+     * state is corrupted */
+    THREAD_PANICF("thread_exit->K:*R",
+                  thread_id_entry(THREAD_ID_CURRENT));
+    while (1);
 }
 
 void thread_wait(unsigned int thread_id)
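Since thread.h now declares thread_exit() noreturn, this separate implementation must uphold the same contract: the added THREAD_PANICF() plus while (1) guarantee it can never return, even if remove_thread() unexpectedly comes back.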
@@ -156,6 +156,14 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
     __attribute__((always_inline));
 
+#if NUM_CORES > 1
+static void __attribute__((noinline, noreturn))
+    thread_final_exit(struct thread_entry *current);
+#else
+static void __attribute__((always_inline, noreturn))
+    thread_final_exit(struct thread_entry *current);
+#endif
+
 void switch_thread(void)
     __attribute__((noinline));
 
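The asymmetric declaration is the heart of the fix: on multicore builds thread_final_exit() is forced out of line, so the code that runs after the idle-stack switch lives in its own minimal frame instead of sharing thread_exit()'s stack slots, while single-core builds never switch stacks on this path and can safely inline it. Even then the commit only "hopes" GCC needs nothing from the old stack past the switch, as the comment in the function body below says.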
@@ -219,7 +227,7 @@ static void thread_stkov(struct thread_entry *thread)
 #define LOCK_THREAD(thread) \
     ({ corelock_lock(&(thread)->slot_cl); })
 #define TRY_LOCK_THREAD(thread) \
-    ({ corelock_try_lock(&thread->slot_cl); })
+    ({ corelock_try_lock(&(thread)->slot_cl); })
 #define UNLOCK_THREAD(thread) \
     ({ corelock_unlock(&(thread)->slot_cl); })
 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
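The TRY_LOCK_THREAD change is a classic macro-hygiene fix: without parentheses around the argument, any caller passing an expression rather than a plain identifier expands incorrectly, because postfix -> binds tighter than the operators inside the argument. A small sketch of the failure mode (hypothetical struct and names):

struct slot { int cl; };

#define FIELD_BAD(t)  (&t->cl)      /* only safe when t is a plain name */
#define FIELD_GOOD(t) (&(t)->cl)    /* safe for any expression          */

int main(void)
{
    struct slot s[2];
    struct slot *p = s;
    int *ok = FIELD_GOOD(p + 1);    /* &((p + 1)->cl), as intended      */
    /* FIELD_BAD(p + 1) expands to (&p + 1->cl) and does not compile.   */
    (void)ok;
    return 0;
}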
@@ -854,7 +862,8 @@ struct thread_entry *
  * catch something.
  *---------------------------------------------------------------------------
  */
-static void check_for_obj_waiters(const char *function, struct thread_entry *thread)
+static void __attribute__((noinline)) check_for_obj_waiters(
+    const char *function, struct thread_entry *thread)
 {
     /* Only one bit in the mask should be set with a frequency on 1 which
      * represents the thread's own base priority */
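The motivation for forcing this debug helper out of line is not stated in the commit, but it is presumably the same stack discipline: keeping its DEBUG-only locals out of the exiting thread's frame, which must not be relied upon once that thread has left its stack.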
@@ -1663,10 +1672,39 @@ void thread_wait(unsigned int thread_id)
  * Exit the current thread. The Right Way to Do Things (TM).
  *---------------------------------------------------------------------------
  */
+/* This is done to foil optimizations that may require the current stack,
+ * such as optimizing subexpressions that put variables on the stack that
+ * get used after switching stacks. */
+static void thread_final_exit(struct thread_entry *current)
+{
+#if NUM_CORES > 1
+    cpucache_flush();
+
+    /* Switch to the idle stack if not on the main core (where "main"
+     * runs) - we can hope gcc doesn't need the old stack beyond this
+     * point. */
+    if (current->core != CPU)
+    {
+        switch_to_idle_stack(current->core);
+    }
+
+    /* At this point, this thread isn't using resources allocated for
+     * execution except the slot itself. */
+#endif /* NUM_CORES */
+
+    /* Signal this thread */
+    thread_queue_wake(&current->queue);
+    corelock_unlock(&current->waiter_cl);
+    switch_thread();
+    /* This should never and must never be reached - if it is, the
+     * state is corrupted */
+    THREAD_PANICF("thread_exit->K:*R", current);
+    while (1);
+}
+
 void thread_exit(void)
 {
-    const unsigned int core = CURRENT_CORE;
-    struct thread_entry *current = cores[core].running;
+    register struct thread_entry * current = cores[CURRENT_CORE].running;
 
     /* Cancel CPU boost if any */
     cancel_cpu_boost();
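Two supporting details are worth noting: thread_final_exit() takes current by value and the slimmed-down thread_exit() now keeps it in a register variable, both nudging GCC to hold the pointer outside the doomed stack frame; and cpucache_flush() now runs before the idle-stack switch rather than after it (compare the removed block in the next hunk), so nothing after the switch revisits the old stack.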
@@ -1701,34 +1739,14 @@ void thread_exit(void)
     /* Switch tasks and never return */
     block_thread_on_l(current, STATE_KILLED);
 
-#if NUM_CORES > 1
-    /* Switch to the idle stack if not on the main core (where "main"
-     * runs) - we can hope gcc doesn't need the old stack beyond this
-     * point. */
-    if (core != CPU)
-    {
-        switch_to_idle_stack(core);
-    }
-
-    cpucache_flush();
-
-    /* At this point, this thread isn't using resources allocated for
-     * execution except the slot itself. */
-#endif
+    /* Slot must be unusable until thread is really gone */
+    UNLOCK_THREAD_AT_TASK_SWITCH(current);
 
     /* Update ID for this slot */
     new_thread_id(current->id, current);
     current->name = NULL;
 
-    /* Signal this thread */
-    thread_queue_wake(&current->queue);
-    corelock_unlock(&current->waiter_cl);
-    /* Slot must be unusable until thread is really gone */
-    UNLOCK_THREAD_AT_TASK_SWITCH(current);
-    switch_thread();
-    /* This should never and must never be reached - if it is, the
-     * state is corrupted */
-    THREAD_PANICF("thread_exit->K:*R", current);
+    thread_final_exit(current);
 }
 
 #ifdef ALLOW_REMOVE_THREAD
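Taken together, thread_exit() now does everything that may still legitimately use its own stack (blocking, unlocking the slot, renaming) and then tail-calls thread_final_exit(), which alone runs past the stack switch. A compilable sketch of the pattern (all names hypothetical; the actual SP switch is stubbed out, since the real one is per-target assembly):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for switch_to_idle_stack(): a real port reloads SP from a
 * dedicated idle stack here, in assembly. */
static void switch_stack_stub(void) { }

/* noinline: the post-switch code gets its own minimal frame instead of
 * sharing the caller's stack slots; noreturn: no path leads back into
 * the caller's now-dead frame. */
static void __attribute__((noinline, noreturn)) final_exit(int id)
{
    switch_stack_stub();                /* old stack is dead past here */
    printf("thread %d is gone\n", id);  /* uses only the by-value arg  */
    exit(0);
}

static void exit_thread(void)
{
    int scratch[64];        /* locals that live in the dying frame      */
    scratch[0] = 42;
    final_exit(scratch[0]); /* pass by value; this call never returns   */
}

int main(void)
{
    exit_thread();
}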
tools/configure: 2 changes (vendored)
@@ -40,7 +40,7 @@ prefixtools () {
 }
 
 findarmgcc() {
-    models_not_checked_with_eabi="ipodnano1g ipod3g ipod4g ipodmini1g ipod1g2g vibe500 cowond2"
+    models_not_checked_with_eabi="ipodnano1g ipod4g ipodmini1g ipod1g2g vibe500 cowond2"
     if [ "$ARG_ARM_EABI" != 1 ]; then # eabi not explicitely enabled
         for model in $models_not_checked_with_eabi; do
             if [ "$modelname" = "$model" ]; then