Create firmware/asm directory for assembly-optimized code.

This directory is for code that doesn't fit into the target tree, e.g. because
it also builds on hosted platforms. It also has a generic subfolder with
fallback C implementations so that not every arch needs to provide asm files.

SOURCES should only contain "foo.c", where foo.c includes the arch-specific
<arch>/foo.c files from the subdirectories via the preprocessor. This way the
asm version or the generic C version is selected automatically.
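
For illustration, a minimal sketch of such a dispatcher; the thread.c name and
the arm/, m68k/ and mips/ subfolders are assumptions, only the generic fallback
folder is named above:

    /* firmware/asm/thread.c -- the "foo.c" listed in SOURCES */
    #if defined(CPU_ARM)
    #include "arm/thread.c"        /* hand-written asm context switch */
    #elif defined(CPU_COLDFIRE)
    #include "m68k/thread.c"
    #elif defined(CPU_MIPS)
    #include "mips/thread.c"
    #else
    #include "generic/thread.c"    /* fallback C implementation */
    #endif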

As a start, the thread support files are moved, since the asm thread code can
be used on hosted platforms as well. Since core_sleep() remains platform
specific, it is moved into the corresponding system.h headers.

Change-Id: Iebff272f3407a6eaafeb7656ceb0ae9eca3f7cb9
Thomas Martitz 2012-01-04 18:07:21 +01:00
parent eaa83bd647
commit 991ae1e395
24 changed files with 352 additions and 295 deletions

View file

@@ -347,4 +347,31 @@ static inline uint32_t swaw32_hw(uint32_t value)
}
#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \
|| CONFIG_CPU == S5L8702
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
asm volatile (
"mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
"nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
: : "r"(0)
);
enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233
static inline void core_sleep(void)
{
/* TODO: core_sleep not implemented, battery life will be decreased */
enable_irq();
}
#endif /* CPU_PP */
#endif
#endif /* SYSTEM_ARM_H */

View file

@@ -1,121 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2005 by Thom Johansen
*
* Generic ARM threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
static void __attribute__((naked)) USED_ATTR start_thread(void)
{
/* r0 = context */
asm volatile (
"ldr sp, [r0, #32] \n" /* Load initial sp */
"ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
"mov r1, #0 \n" /* Mark thread as running */
"str r1, [r0, #40] \n"
#if NUM_CORES > 1
"ldr r0, =commit_discard_idcache \n" /* Invalidate this core's cache. */
"mov lr, pc \n" /* This could be the first entry into */
"bx r0 \n" /* plugin or codec code for this core. */
#endif
"mov lr, pc \n" /* Call thread function */
"bx r4 \n"
); /* No clobber list - new thread doesn't care */
thread_exit();
#if 0
asm volatile (".ltorg"); /* Dump constant pool */
#endif
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
* slot, and thread function pointer in context.start. See load_context for
* what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
(thread)->context.r[1] = (uint32_t)start_thread, \
(thread)->context.start = (uint32_t)function; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void store_context(void* addr)
{
asm volatile(
"stmia %0, { r4-r11, sp, lr } \n"
: : "r" (addr)
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void load_context(const void* addr)
{
asm volatile(
"ldr r0, [%0, #40] \n" /* Load start pointer */
"cmp r0, #0 \n" /* Check for NULL */
/* If not already running, jump to start */
#if ARM_ARCH == 4 && defined(USE_THUMB)
"ldmneia %0, { r0, r12 } \n"
"bxne r12 \n"
#else
"ldmneia %0, { r0, pc } \n"
#endif
"ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
: : "r" (addr) : "r0" /* only! */
);
}
#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \
|| CONFIG_CPU == S5L8702
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
asm volatile (
"mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
"nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
: : "r"(0)
);
enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233
static inline void core_sleep(void)
{
/* TODO: core_sleep not implemented, battery life will be decreased */
enable_irq();
}
#endif /* CPU_PP */
#endif

View file

@@ -223,4 +223,14 @@ void commit_discard_idcache(void);
static inline void commit_discard_dcache(void) {}
static inline void commit_dcache(void) {}
/*---------------------------------------------------------------------------
* Put core in a power-saving state if waking list wasn't repopulated.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
/* Supervisor mode, interrupts enabled upon wakeup */
asm volatile ("stop #0x2000");
};
#endif /* SYSTEM_TARGET_H */

View file

@@ -1,111 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2004 by Linus Nielsen Feltzing
*
* Coldfire processor threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
static void USED_ATTR __start_thread(void)
{
/* a0=macsr, a1=context */
asm volatile (
"start_thread: \n" /* Start here - no naked attribute */
"move.l %a0, %macsr \n" /* Set initial mac status reg */
"lea.l 48(%a1), %a1 \n"
"move.l (%a1)+, %sp \n" /* Set initial stack */
"move.l (%a1), %a2 \n" /* Fetch thread function pointer */
"clr.l (%a1) \n" /* Mark thread running */
"jsr (%a2) \n" /* Call thread function */
);
thread_exit();
}
/* Set EMAC unit to fractional mode with saturation for each new thread,
* since that's what'll be the most useful for most things which the dsp
* will do. Codecs should still initialize their preferred modes
* explicitly. Context pointer is placed in d2 slot and start_thread
* pointer in d3 slot. thread function pointer is placed in context.start.
* See load_context for what happens when thread is initially going to
* run.
*/
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
(thread)->context.d[0] = (uint32_t)&(thread)->context, \
(thread)->context.d[1] = (uint32_t)start_thread, \
(thread)->context.start = (uint32_t)(function); })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void store_context(void* addr)
{
asm volatile (
"move.l %%macsr,%%d0 \n"
"movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
: : "a" (addr) : "d0" /* only! */
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void load_context(const void* addr)
{
asm volatile (
"move.l 52(%0), %%d0 \n" /* Get start address */
"beq.b 1f \n" /* NULL -> already running */
"movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
"jmp (%%a2) \n" /* Start the thread */
"1: \n"
"movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
"move.l %%d0, %%macsr \n"
: : "a" (addr) : "d0" /* only! */
);
}
/*---------------------------------------------------------------------------
* Put core in a power-saving state if waking list wasn't repopulated.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
/* Supervisor mode, interrupts enabled upon wakeup */
asm volatile ("stop #0x2000");
};
/*---------------------------------------------------------------------------
* Call this from asm to make sure the sp is pointing to the
* correct place before the context is saved.
*---------------------------------------------------------------------------
*/
static inline void _profile_thread_stopped(int current_thread)
{
asm volatile ("move.l %[id], -(%%sp)\n\t"
"jsr profile_thread_stopped\n\t"
"addq.l #4, %%sp\n\t"
:: [id] "r" (current_thread)
: "cc", "memory");
}

View file

@@ -23,10 +23,7 @@
#define __SYSTEM_TARGET_H__
#include "kernel-unix.h"
static inline void commit_dcache(void) {}
static inline void commit_discard_dcache(void) {}
static inline void commit_discard_idcache(void) {}
#include "system-hosted.h"
/* don't pull in jni.h for every user of this file, it should be only needed
* within the target tree (if at all)

View file

@@ -41,6 +41,8 @@ int set_irq_level(int level);
#define restore_irq(level) \
((void)set_irq_level(level))
#include "system-hosted.h"
void sim_enter_irq_handler(void);
void sim_exit_irq_handler(void);
void sim_kernel_shutdown(void);
@@ -48,17 +50,10 @@ void sys_poweroff(void);
void sys_handle_argv(int argc, char *argv[]);
void gui_message_loop(void);
void sim_do_exit(void) NORETURN_ATTR;
#ifndef HAVE_SDL_THREADS
void wait_for_interrupt(void);
#endif
extern bool background; /* True if the background image is enabled */
extern bool showremote;
extern int display_zoom;
extern long start_tick;
static inline void commit_dcache(void) {}
static inline void commit_discard_dcache(void) {}
static inline void commit_discard_idcache(void) {}
#endif /* _SYSTEM_SDL_H_ */

View file

@@ -0,0 +1,37 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2010 by Thomas Martitz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef __SYSTEM_HOSTED_H__
#define __SYSTEM_HOSTED_H__
#include "system.h"
static inline void commit_dcache(void) {}
static inline void commit_discard_dcache(void) {}
static inline void commit_discard_idcache(void) {}
static inline void core_sleep(void)
{
enable_irq();
wait_for_interrupt();
}
#endif

View file

@@ -1,99 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2005 by Thom Johansen
* Copyright (C) 2010 by Thomas Martitz (Android-suitable core_sleep())
*
* Generic ARM threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include <system.h>
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
static void __attribute__((naked)) USED_ATTR start_thread(void)
{
/* r0 = context */
asm volatile (
"ldr sp, [r0, #32] \n" /* Load initial sp */
"ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
"mov r1, #0 \n" /* Mark thread as running */
"str r1, [r0, #40] \n"
"mov lr, pc \n" /* Call thread function */
"bx r4 \n"
); /* No clobber list - new thread doesn't care */
thread_exit();
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
* slot, and thread function pointer in context.start. See load_context for
* what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
(thread)->context.r[1] = (uint32_t)start_thread, \
(thread)->context.start = (uint32_t)function; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void store_context(void* addr)
{
asm volatile(
"stmia %0, { r4-r11, sp, lr } \n"
: : "r" (addr)
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void load_context(const void* addr)
{
asm volatile(
"ldr r0, [%0, #40] \n" /* Load start pointer */
"cmp r0, #0 \n" /* Check for NULL */
/* If not already running, jump to start */
"ldmneia %0, { r0, pc } \n"
"ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
: : "r" (addr) : "r0" /* only! */
);
}
/*
* this core sleep suspends the OS thread rockbox runs under, which greatly
* reduces cpu usage (~100% to <10%)
*
* it returns when the tick timer fires or other interrupt-like
* events occur
*
* wait_for_interrupt is implemented in kernel-<platform>.c
**/
static inline void core_sleep(void)
{
enable_irq();
wait_for_interrupt();
}

View file

@@ -1,316 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2011 by Thomas Martitz
*
* Generic unix threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include <stdlib.h>
#include <stdbool.h>
#include <signal.h>
#include <stdio.h>
#include <setjmp.h>
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include "debug.h"
static volatile bool sig_handler_called;
static volatile jmp_buf tramp_buf;
static volatile jmp_buf bootstrap_buf;
static void (*thread_func)(void);
static const int trampoline_sig = SIGUSR1;
static pthread_t main_thread;
static struct ctx {
jmp_buf thread_buf;
} thread_bufs[MAXTHREADS];
static struct ctx* thread_context, *target_context;
static int curr_uc;
static void trampoline(int sig);
static void bootstrap_context(void) __attribute__((noinline));
/* The *_context functions are heavily based on Gnu pth
* http://www.gnu.org/software/pth/
*
* adjusted to work in a multi-thread environment to
* offer a ucontext-like API
*/
/*
* VARIANT 2: THE SIGNAL STACK TRICK
*
* This uses sigstack/sigaltstack() and friends and is really the
* most tricky part of Pth. When you understand the following
* stuff you're a good Unix hacker and then you've already
* understood the gory ingredients of Pth. So, either welcome to
* the club of hackers, or do yourself a favor and skip this ;)
*
* The ingenious fact is that this variant runs really on _all_ POSIX
* compliant systems without special platform kludges. But be _VERY_
* careful when you change something in the following code. The slightest
* change or reordering can lead to horribly broken code. Really every
* function call in the following case is intended to be how it is, doubt
* me...
*
* For more details we strongly recommend you to read the companion
* paper ``Portable Multithreading -- The Signal Stack Trick for
* User-Space Thread Creation'' from Ralf S. Engelschall. A copy of the
* draft of this paper you can find in the file rse-pmt.ps inside the
* GNU Pth distribution.
*/
static int make_context(struct ctx *ctx, void (*f)(void), char *sp, size_t stack_size)
{
struct sigaction sa;
struct sigaction osa;
stack_t ss;
stack_t oss;
sigset_t osigs;
sigset_t sigs;
disable_irq();
/*
* Preserve the trampoline_sig signal state, block trampoline_sig,
* and establish our signal handler. The signal will
* later transfer control onto the signal stack.
*/
sigemptyset(&sigs);
sigaddset(&sigs, trampoline_sig);
sigprocmask(SIG_BLOCK, &sigs, &osigs);
sa.sa_handler = trampoline;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK;
if (sigaction(trampoline_sig, &sa, &osa) != 0)
{
DEBUGF("%s(): %s\n", __func__, strerror(errno));
return false;
}
/*
* Set the new stack.
*
* For sigaltstack we're lucky [from sigaltstack(2) on
* FreeBSD 3.1]: ``Signal stacks are automatically adjusted
* for the direction of stack growth and alignment
* requirements''
*
* For sigstack we have to decide ourself [from sigstack(2)
* on Solaris 2.6]: ``The direction of stack growth is not
* indicated in the historical definition of struct sigstack.
* The only way to portably establish a stack pointer is for
* the application to determine stack growth direction.''
*/
ss.ss_sp = sp;
ss.ss_size = stack_size;
ss.ss_flags = 0;
if (sigaltstack(&ss, &oss) < 0)
{
DEBUGF("%s(): %s\n", __func__, strerror(errno));
return false;
}
/*
* Now transfer control onto the signal stack and set it up.
* It will return immediately via "return" after the setjmp()
* was performed. Be careful here with race conditions. The
* signal can be delivered the first time sigsuspend() is
* called.
*/
sig_handler_called = false;
main_thread = pthread_self();
sigfillset(&sigs);
sigdelset(&sigs, trampoline_sig);
pthread_kill(main_thread, trampoline_sig);
while(!sig_handler_called)
sigsuspend(&sigs);
/*
* Inform the system that we are back off the signal stack by
* removing the alternative signal stack. Be careful here: It
* first has to be disabled, before it can be removed.
*/
sigaltstack(NULL, &ss);
ss.ss_flags = SS_DISABLE;
if (sigaltstack(&ss, NULL) < 0)
{
DEBUGF("%s(): %s\n", __func__, strerror(errno));
return false;
}
sigaltstack(NULL, &ss);
if (!(ss.ss_flags & SS_DISABLE))
{
DEBUGF("%s(): %s\n", __func__, strerror(errno));
return false;
}
if (!(oss.ss_flags & SS_DISABLE))
sigaltstack(&oss, NULL);
/*
* Restore the old trampoline_sig signal handler and mask
*/
sigaction(trampoline_sig, &osa, NULL);
sigprocmask(SIG_SETMASK, &osigs, NULL);
/*
* Tell the trampoline and bootstrap function where to dump
* the new machine context, and what to do afterwards...
*/
thread_func = f;
thread_context = ctx;
/*
* Now enter the trampoline again, but this time not as a signal
* handler. Instead we jump into it directly. The functionally
* redundant ping-pong pointer arithmetic is necessary to avoid
* type-conversion warnings related to the `volatile' qualifier and
* the fact that `jmp_buf' usually is an array type.
*/
if (setjmp(*((jmp_buf *)&bootstrap_buf)) == 0)
longjmp(*((jmp_buf *)&tramp_buf), 1);
/*
* Ok, we returned again, so now we're finished
*/
enable_irq();
return true;
}
static void trampoline(int sig)
{
(void)sig;
/* sanity check, no other thread should be here */
if (pthread_self() != main_thread)
return;
if (setjmp(*((jmp_buf *)&tramp_buf)) == 0)
{
sig_handler_called = true;
return;
}
/* longjump'd back in */
bootstrap_context();
}
void bootstrap_context(void)
{
/* copy to local storage so we can spawn further threads
* in the meantime */
void (*thread_entry)(void) = thread_func;
struct ctx *t = thread_context;
/*
* Save current machine state (on new stack) and
* go back to caller until we're scheduled for real...
*/
if (setjmp(t->thread_buf) == 0)
longjmp(*((jmp_buf *)&bootstrap_buf), 1);
/*
* The new thread is now running: GREAT!
* Now we just invoke its init function....
*/
thread_entry();
DEBUGF("thread left\n");
thread_exit();
}
static inline void set_context(struct ctx *c)
{
longjmp(c->thread_buf, 1);
}
static inline void swap_context(struct ctx *old, struct ctx *new)
{
if (setjmp(old->thread_buf) == 0)
longjmp(new->thread_buf, 1);
}
static inline void get_context(struct ctx *c)
{
setjmp(c->thread_buf);
}
static void setup_thread(struct regs *context);
#define INIT_MAIN_THREAD
static void init_main_thread(void *addr)
{
/* get a context for the main thread so that we can jump to it from
* other threads */
struct regs *context = (struct regs*)addr;
context->uc = &thread_bufs[curr_uc++];
get_context(context->uc);
}
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.stack_size = (thread)->stack_size, \
(thread)->context.stack = (uintptr_t)(thread)->stack; \
(thread)->context.start = function; })
/*
* Prepare context to make the thread runnable by calling swapcontext on it
*/
static void setup_thread(struct regs *context)
{
void (*fn)(void) = context->start;
context->uc = &thread_bufs[curr_uc++];
while (!make_context(context->uc, fn, (char*)context->stack, context->stack_size))
DEBUGF("Thread creation failed. Retrying");
}
/*
* Save the ucontext_t pointer for later use in swapcontext()
*
* Cannot do getcontext() here, because jumping back to the context
* resumes after the getcontext call (i.e. store_context), but we need
* to resume from load_context()
*/
static inline void store_context(void* addr)
{
struct regs *r = (struct regs*)addr;
target_context = r->uc;
}
/*
* Perform context switch
*/
static inline void load_context(const void* addr)
{
struct regs *r = (struct regs*)addr;
if (UNLIKELY(r->start))
{
setup_thread(r);
r->start = NULL;
}
swap_context(target_context, r->uc);
}
/*
* play nice with the host and sleep while waiting for the tick */
extern void wait_for_interrupt(void);
static inline void core_sleep(void)
{
enable_irq();
wait_for_interrupt();
}

View file

@@ -1,85 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2010 by Thomas Martitz
*
* Generic ARM threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include <windows.h>
#include "system.h"
#define INIT_MAIN_THREAD
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.stack_size = (thread)->stack_size, \
(thread)->context.stack = (uintptr_t)(thread)->stack; \
(thread)->context.start = function; })
static void init_main_thread(void *addr)
{
struct regs *context = (struct regs*)addr;
/* we must convert the current main thread to a fiber to be able to
* schedule other fibers */
context->uc = ConvertThreadToFiber(NULL);
context->stack_size = 0;
}
static inline void store_context(void* addr)
{
(void)addr;
/* nothing to do here, Fibers continue after the SwitchToFiber call */
}
static void start_thread(void)
{
void (*func)(void) = GetFiberData();
func();
/* go out if thread function returns */
thread_exit();
}
/*
* Load context and run it
*
* Resume execution from the last load_context call for the thread
*/
static inline void load_context(const void* addr)
{
struct regs *context = (struct regs*)addr;
if (UNLIKELY(context->start))
{ /* need setup before switching to it */
context->uc = CreateFiber(context->stack_size,
(LPFIBER_START_ROUTINE)start_thread, context->start);
/* can't assign stack pointer, only stack size */
context->stack_size = 0;
context->start = NULL;
}
SwitchToFiber(context->uc);
}
/*
* play nice with the host and sleep while waiting for the tick */
static inline void core_sleep(void)
{
enable_irq();
wait_for_interrupt();
}

View file

@@ -22,10 +22,7 @@
#define __SYSTEM_TARGET_H__
#include "kernel-unix.h"
static inline void commit_dcache(void) {}
static inline void commit_discard_dcache(void) {}
static inline void commit_discard_idcache(void) {}
#include "system-hosted.h"
#define NEED_GENERIC_BYTESWAPS
#endif /* __SYSTEM_TARGET_H__ */

View file

@@ -97,4 +97,28 @@ void dma_disable(void);
#define DMA_IRQ(n) (IRQ_DMA_0 + (n))
#define GPIO_IRQ(n) (IRQ_GPIO_0 + (n))
/*---------------------------------------------------------------------------
* Put core in a power-saving state.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
#if CONFIG_CPU == JZ4732
__cpm_idle_mode();
#endif
asm volatile(".set mips32r2 \n"
"mfc0 $8, $12 \n" /* mfc t0, $12 */
"move $9, $8 \n" /* move t1, t0 */
"la $10, 0x8000000 \n" /* la t2, 0x8000000 */
"or $8, $8, $10 \n" /* Enable reduced power mode */
"mtc0 $8, $12 \n" /* mtc t0, $12 */
"wait \n"
"mtc0 $9, $12 \n" /* mtc t1, $12 */
".set mips0 \n"
::: "t0", "t1", "t2"
);
enable_irq();
}
#endif /* __SYSTEM_TARGET_H_ */

View file

@@ -1,133 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2008 by Maurus Cuelenaere
*
* 32-bit MIPS threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
static void USED_ATTR _start_thread(void)
{
/* t1 = context */
asm volatile (
"start_thread: \n"
".set noreorder \n"
".set noat \n"
"lw $8, 4($9) \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
"lw $29, 36($9) \n" /* Set initial sp(=$29) */
"jalr $8 \n" /* Start the thread */
"sw $0, 44($9) \n" /* Clear start address */
".set at \n"
".set reorder \n"
);
thread_exit();
}
/* Place context pointer in s0 slot, function pointer in s1 slot, and
* start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
(thread)->context.r[1] = (uint32_t)(function), \
(thread)->context.start = (uint32_t)start_thread; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void store_context(void* addr)
{
asm volatile (
".set noreorder \n"
".set noat \n"
"sw $16, 0(%0) \n" /* s0 */
"sw $17, 4(%0) \n" /* s1 */
"sw $18, 8(%0) \n" /* s2 */
"sw $19, 12(%0) \n" /* s3 */
"sw $20, 16(%0) \n" /* s4 */
"sw $21, 20(%0) \n" /* s5 */
"sw $22, 24(%0) \n" /* s6 */
"sw $23, 28(%0) \n" /* s7 */
"sw $30, 32(%0) \n" /* fp */
"sw $29, 36(%0) \n" /* sp */
"sw $31, 40(%0) \n" /* ra */
".set at \n"
".set reorder \n"
: : "r" (addr)
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void load_context(const void* addr)
{
asm volatile (
".set noat \n"
".set noreorder \n"
"lw $8, 44(%0) \n" /* Get start address ($8 = t0) */
"beqz $8, running \n" /* NULL -> already running */
"nop \n"
"jr $8 \n"
"move $9, %0 \n" /* t1 = context */
"running: \n"
"lw $16, 0(%0) \n" /* s0 */
"lw $17, 4(%0) \n" /* s1 */
"lw $18, 8(%0) \n" /* s2 */
"lw $19, 12(%0) \n" /* s3 */
"lw $20, 16(%0) \n" /* s4 */
"lw $21, 20(%0) \n" /* s5 */
"lw $22, 24(%0) \n" /* s6 */
"lw $23, 28(%0) \n" /* s7 */
"lw $30, 32(%0) \n" /* fp */
"lw $29, 36(%0) \n" /* sp */
"lw $31, 40(%0) \n" /* ra */
".set at \n"
".set reorder \n"
: : "r" (addr) : "t0", "t1"
);
}
/*---------------------------------------------------------------------------
* Put core in a power-saving state.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
#if CONFIG_CPU == JZ4732
__cpm_idle_mode();
#endif
asm volatile(".set mips32r2 \n"
"mfc0 $8, $12 \n" /* mfc t0, $12 */
"move $9, $8 \n" /* move t1, t0 */
"la $10, 0x8000000 \n" /* la t2, 0x8000000 */
"or $8, $8, $10 \n" /* Enable reduced power mode */
"mtc0 $8, $12 \n" /* mtc t0, $12 */
"wait \n"
"mtc0 $9, $12 \n" /* mtc t1, $12 */
".set mips0 \n"
::: "t0", "t1", "t2"
);
enable_irq();
}

View file

@@ -137,4 +137,18 @@ static inline void commit_dcache(void) {}
static inline void commit_discard_dcache(void) {}
static inline void commit_discard_idcache(void) {}
/*---------------------------------------------------------------------------
* Put core in a power-saving state.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
asm volatile (
"and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
"mov #0, r1 \n" /* Enable interrupts */
"ldc r1, sr \n" /* Following instruction cannot be interrupted */
"sleep \n" /* Execute standby */
: : "z"(&SBYCR-GBR) : "r1");
}
#endif /* SYSTEM_TARGET_H */

View file

@@ -1,109 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* SH processor threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
static void USED_ATTR __start_thread(void)
{
/* r8 = context */
asm volatile (
"_start_thread: \n" /* Start here - no naked attribute */
"mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
"mov.l @(28, r8), r15 \n" /* Set initial sp */
"mov #0, r1 \n" /* Start the thread */
"jsr @r0 \n"
"mov.l r1, @(36, r8) \n" /* Clear start address */
);
thread_exit();
}
/* Place context pointer in r8 slot, function pointer in r9 slot, and
* start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
(thread)->context.r[1] = (uint32_t)(function), \
(thread)->context.start = (uint32_t)start_thread; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void store_context(void* addr)
{
asm volatile (
"add #36, %0 \n" /* Start at last reg. By the time routine */
"sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
"mov.l r15,@-%0 \n"
"mov.l r14,@-%0 \n"
"mov.l r13,@-%0 \n"
"mov.l r12,@-%0 \n"
"mov.l r11,@-%0 \n"
"mov.l r10,@-%0 \n"
"mov.l r9, @-%0 \n"
"mov.l r8, @-%0 \n"
: : "r" (addr)
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static inline void load_context(const void* addr)
{
asm volatile (
"mov.l @(36, %0), r0 \n" /* Get start address */
"tst r0, r0 \n"
"bt .running \n" /* NULL -> already running */
"jmp @r0 \n" /* r8 = context */
".running: \n"
"mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
"mov.l @%0+, r9 \n"
"mov.l @%0+, r10 \n"
"mov.l @%0+, r11 \n"
"mov.l @%0+, r12 \n"
"mov.l @%0+, r13 \n"
"mov.l @%0+, r14 \n"
"mov.l @%0+, r15 \n"
"lds.l @%0+, pr \n"
: : "r" (addr) : "r0" /* only! */
);
}
/*---------------------------------------------------------------------------
* Put core in a power-saving state.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
{
asm volatile (
"and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
"mov #0, r1 \n" /* Enable interrupts */
"ldc r1, sr \n" /* Following instruction cannot be interrupted */
"sleep \n" /* Execute standby */
: : "z"(&SBYCR-GBR) : "r1");
}