arm: add initial ARM Cortex-M support

M-profile cores manage interrupts differently from classic cores
and lack the FIQ exception. Split the interrupt management parts
into separate per-profile headers, but keep the endian-swapping
routines (which are not profile-dependent) in the common system-arm
header.
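For illustration, a minimal sketch of why the split keeps callers unchanged: profile-independent code continues to use the common IRQ-masking API and each header supplies the profile-specific implementation. The driver function and counter below are hypothetical; only disable_irq_save()/restore_irq() come from the headers in this commit.

```c
/* Minimal sketch, not part of this commit: a profile-independent driver
 * guards a critical section with the common API. On classic ARM this maps
 * to CPSR I-bit manipulation, on M-profile to PRIMASK; the caller does not
 * need to know which. driver_bump_counter() and shared_counter are made up. */
#include "system.h"

static volatile unsigned int shared_counter;

void driver_bump_counter(void)
{
    int oldstate = disable_irq_save();  /* mask IRQs, remember old state */
    shared_counter++;                   /* short critical section */
    restore_irq(oldstate);              /* restore previous IRQ state */
}
```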

The initial part of the vector table is common to all Cortex-M
CPUs and is intended to be included by the target linker script,
with the vendor-specific part of the vector table appended to it.
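A hypothetical sketch of what the appended vendor-specific continuation might look like; the section name ".vectors.soc", the array, and the peripheral handler names are assumptions for illustration only. Just the idea that SoC entries follow the common table (and the UIE default handler) comes from this commit.

```c
/* Hypothetical vendor-specific vector table continuation. The target
 * linker script would emit the common .vectors.arm entries (slots [0]..[15])
 * first and then an array like this one; ".vectors.soc" and the handler
 * names are invented for the example. */
extern void timer_irq_handler(void);
extern void uart_irq_handler(void);
extern void UIE(void);  /* default "unhandled interrupt" handler */

__attribute__((used, section(".vectors.soc")))
void (*const soc_vector_table[])(void) =
{
    timer_irq_handler,  /* [16] first external (SoC-specific) interrupt */
    uart_irq_handler,   /* [17] second external interrupt */
    UIE,                /* [18] unused slot falls back to UIE */
};
```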

Change-Id: Ib2ad5b9dc41db27940e39033cfef4308923db66d
Author: Aidan MacDonald
Date:   2024-11-14 15:41:52 +00:00
Parent: 96b6a7b4e4
Commit: 3ed9fb3115
10 changed files with 520 additions and 256 deletions

@@ -500,7 +500,7 @@ static void init(void)
power_init();
enable_irq();
#ifdef CPU_ARM
#if defined(CPU_ARM_CLASSIC)
enable_fiq();
#endif
/* current_tick should be ticking by now */

@@ -609,7 +609,13 @@ target/arm/mmu-arm.S
target/arm/bits-armv6.S
target/arm/mmu-armv6.S
# endif
target/arm/system-arm.c
#if defined(CPU_ARM_CLASSIC)
target/arm/system-arm-classic.c
#elif defined(CPU_ARM_MICRO)
target/arm/system-arm-micro.c
target/arm/vectors-arm-micro.S
#endif
#if CONFIG_STORAGE & STORAGE_ATA
# ifdef CPU_PP502x
@@ -828,7 +834,7 @@ target/arm/s5l8702/crt0.S
target/arm/imx233/crt0.S
#elif CONFIG_CPU==RK27XX
target/arm/rk27xx/crt0.S
#elif defined(CPU_ARM)
#elif defined(CPU_ARM_CLASSIC)
target/arm/crt0.S
#endif /* defined(CPU_*) */

@@ -86,7 +86,7 @@ void panicf( const char *fmt, ...)
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
/* Disable interrupts */
#ifdef CPU_ARM
#if defined(CPU_ARM_CLASSIC)
disable_interrupt(IRQ_FIQ_STATUS);
#else
set_irq_level(DISABLE_INTERRUPTS);

@@ -332,7 +332,7 @@ int rolo_load(const char* filename)
#endif
#if CONFIG_CPU != IMX31L /* We're not finished yet */
#ifdef CPU_ARM
#if defined(CPU_ARM_CLASSIC)
/* Should do these together since some ARM version should never have
* FIQ disabled and not IRQ (imx31 errata). */
disable_interrupt(IRQ_FIQ_STATUS);

@@ -0,0 +1,280 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Alan Korr
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef SYSTEM_ARM_CLASSIC_H
#define SYSTEM_ARM_CLASSIC_H
#define IRQ_ENABLED 0x00
#define IRQ_DISABLED 0x80
#define IRQ_STATUS 0x80
#define FIQ_ENABLED 0x00
#define FIQ_DISABLED 0x40
#define FIQ_STATUS 0x40
#define IRQ_FIQ_ENABLED 0x00
#define IRQ_FIQ_DISABLED 0xc0
#define IRQ_FIQ_STATUS 0xc0
#define HIGHEST_IRQ_LEVEL IRQ_DISABLED
#define set_irq_level(status) \
set_interrupt_status((status), IRQ_STATUS)
#define set_fiq_status(status) \
set_interrupt_status((status), FIQ_STATUS)
#define disable_irq_save() \
disable_interrupt_save(IRQ_STATUS)
#define disable_fiq_save() \
disable_interrupt_save(FIQ_STATUS)
#define restore_irq(cpsr) \
restore_interrupt(cpsr)
#define restore_fiq(cpsr) \
restore_interrupt(cpsr)
#define disable_irq() \
disable_interrupt(IRQ_STATUS)
#define enable_irq() \
enable_interrupt(IRQ_STATUS)
#define disable_fiq() \
disable_interrupt(FIQ_STATUS)
#define enable_fiq() \
enable_interrupt(FIQ_STATUS)
#define irq_enabled() \
interrupt_enabled(IRQ_STATUS)
#define fiq_enabled() \
interrupt_enabled(FIQ_STATUS)
#define ints_enabled() \
interrupt_enabled(IRQ_FIQ_STATUS)
#define irq_enabled_checkval(val) \
(((val) & IRQ_STATUS) == 0)
#define fiq_enabled_checkval(val) \
(((val) & FIQ_STATUS) == 0)
#define ints_enabled_checkval(val) \
(((val) & IRQ_FIQ_STATUS) == 0)
#define CPU_MODE_USER 0x10
#define CPU_MODE_FIQ 0x11
#define CPU_MODE_IRQ 0x12
#define CPU_MODE_SVC 0x13
#define CPU_MODE_ABT 0x17
#define CPU_MODE_UNDEF 0x1b
#define CPU_MODE_SYS 0x1f
/* We run in SYS mode */
#define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS
#define is_thread_context() \
(get_processor_mode() == CPU_MODE_THREAD_CONTEXT)
/* Assert that the processor is in the desired execution mode
* mode: Processor mode value to test for
* rstatus...: Provide if you already have the value saved, otherwise leave
* blank to get it automatically.
*/
#define ASSERT_CPU_MODE(mode, rstatus...) \
({ unsigned long __massert = (mode); \
unsigned long __mproc = *#rstatus ? \
((rstatus +0) & 0x1f) : get_processor_mode(); \
if (__mproc != __massert) \
panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \
__func__, __mproc, __massert); })
/* Core-level interrupt masking */
static inline int set_interrupt_status(int status, int mask)
{
unsigned long cpsr;
int oldstatus;
/* Read the old levels and set the new ones */
#if (defined(CREATIVE_ZVM) || defined(CREATIVE_ZV)) && defined(BOOTLOADER)
// FIXME: This workaround is for a problem with inlining;
// for some reason 'mask' gets treated as a variable/non-immediate constant
// but only on this build. All others (including the nearly-identical mrobe500boot) are fine
asm volatile (
"mrs %1, cpsr \n"
"bic %0, %1, %[mask] \n"
"orr %0, %0, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r,r"(cpsr), "=&r,r"(oldstatus)
: "r,i"(status & mask), [mask]"r,i"(mask));
#else
asm volatile (
"mrs %1, cpsr \n"
"bic %0, %1, %[mask] \n"
"orr %0, %0, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r,r"(cpsr), "=&r,r"(oldstatus)
: "r,i"(status & mask), [mask]"i,i"(mask));
#endif
return oldstatus;
}
static inline void restore_interrupt(int cpsr)
{
/* Set cpsr_c from value returned by disable_interrupt_save
* or set_interrupt_status */
asm volatile ("msr cpsr_c, %0" : : "r"(cpsr));
}
static inline bool interrupt_enabled(int status)
{
unsigned long cpsr;
asm ("mrs %0, cpsr" : "=r"(cpsr));
return (cpsr & status) == 0;
}
static inline unsigned long get_processor_mode(void)
{
unsigned long cpsr;
asm ("mrs %0, cpsr" : "=r"(cpsr));
return cpsr & 0x1f;
}
/* ARM_ARCH version section for architecture*/
#if ARM_ARCH >= 6
static inline void enable_interrupt(int mask)
{
/* Clear I and/or F disable bit */
/* mask is expected to be constant and so only relevant branch
* is preserved */
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsie i");
break;
case FIQ_STATUS:
asm volatile ("cpsie f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsie if");
break;
}
}
static inline void disable_interrupt(int mask)
{
/* Set I and/or F disable bit */
/* mask is expected to be constant and so only relevant branch
* is preserved */
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsid i");
break;
case FIQ_STATUS:
asm volatile ("cpsid f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsid if");
break;
}
}
static inline int disable_interrupt_save(int mask)
{
/* Set I and/or F disable bit and return old cpsr value */
int cpsr;
/* mask is expected to be constant and so only relevant branch
* is preserved */
asm volatile("mrs %0, cpsr" : "=r"(cpsr));
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsid i");
break;
case FIQ_STATUS:
asm volatile ("cpsid f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsid if");
break;
}
return cpsr;
}
#else /* ARM_ARCH < 6 */
static inline void enable_interrupt(int mask)
{
/* Clear I and/or F disable bit */
int tmp;
asm volatile (
"mrs %0, cpsr \n"
"bic %0, %0, %1 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp) : "i"(mask));
}
static inline void disable_interrupt(int mask)
{
/* Set I and/or F disable bit */
int tmp;
asm volatile (
"mrs %0, cpsr \n"
"orr %0, %0, %1 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp) : "i"(mask));
}
static inline int disable_interrupt_save(int mask)
{
/* Set I and/or F disable bit and return old cpsr value */
int cpsr, tmp;
asm volatile (
"mrs %1, cpsr \n"
"orr %0, %1, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp), "=&r"(cpsr)
: "i"(mask));
return cpsr;
}
#endif /* ARM_ARCH */
#if defined(CPU_TCC780X) /* Single core only for now */ \
|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \
|| CONFIG_CPU == S5L8702 || CONFIG_CPU == S5L8720
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
asm volatile (
"mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
"nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
: : "r"(0)
);
enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233
static inline void core_sleep(void)
{
/* TODO: core_sleep not implemented, battery life will be decreased */
enable_irq();
}
#endif /* CPU_PP */
#endif
#endif /* SYSTEM_ARM_CLASSIC_H */
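A usage sketch for the mode-assertion macro in the header above, covering both forms its documentation describes (with and without an already-saved status value). kernel_helper() is hypothetical; ASSERT_CPU_MODE, CPU_MODE_THREAD_CONTEXT, disable_irq_save() and restore_irq() come from this header.

```c
/* Usage sketch, not part of this commit; kernel_helper() is invented. */
#include "system.h"

void kernel_helper(void)
{
    /* No saved status handy: the macro reads CPSR itself. */
    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);

    int status = disable_irq_save();
    /* A saved CPSR is available: pass it so the macro reuses it
     * instead of reading CPSR a second time. */
    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, status);
    /* ... critical section ... */
    restore_irq(status);
}
```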

@@ -0,0 +1,58 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2025 by Aidan MacDonald
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "system.h"
struct armv7m_exception_frame
{
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r12;
unsigned long lr;
unsigned long pc;
unsigned long xpsr;
};
void UIE(void)
{
while (1);
}
#define ATTR_IRQ_HANDLER __attribute__((weak, alias("UIE")))
void nmi_handler(void) ATTR_IRQ_HANDLER;
void hardfault_handler(void) ATTR_IRQ_HANDLER;
void pendsv_handler(void) ATTR_IRQ_HANDLER;
void svcall_handler(void) ATTR_IRQ_HANDLER;
void systick_handler(void) ATTR_IRQ_HANDLER;
#if ARCH_VERSION >= 7
void memmanage_handler(void) ATTR_IRQ_HANDLER;
void busfault_handler(void) ATTR_IRQ_HANDLER;
void usagefault_handler(void) ATTR_IRQ_HANDLER;
void debugmonitor_handler(void) ATTR_IRQ_HANDLER;
#endif
#if ARCH_VERSION >= 8
void securefault_handler(void) ATTR_IRQ_HANDLER;
#endif
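Because every handler above is a weak alias for UIE, a target only needs to provide strong definitions for the exceptions it actually uses; anything left undefined traps in UIE. A minimal sketch of such an override (the handler body and counter are hypothetical; only the systick_handler name and the weak-alias mechanism come from this file):

```c
/* Hypothetical target code, not part of this commit: a strong definition
 * of systick_handler() overrides the weak alias to UIE declared in
 * system-arm-micro.c. */
#include "system.h"

static volatile unsigned long tick_count;  /* made-up counter for the example */

void systick_handler(void)
{
    tick_count++;  /* placeholder body: do the target's periodic work here */
}
```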

@@ -0,0 +1,99 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2025 by Aidan MacDonald
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef SYSTEM_ARM_MICRO_H
#define SYSTEM_ARM_MICRO_H
#define IRQ_ENABLED 0x00
#define IRQ_DISABLED 0x01
#define IRQ_STATUS 0x01
#define HIGHEST_IRQ_LEVEL IRQ_DISABLED
#define disable_irq_save() \
set_irq_level(IRQ_DISABLED)
/* For compatibility with ARM classic */
#define CPU_MODE_THREAD_CONTEXT 0
#define is_thread_context() \
(get_interrupt_number() == CPU_MODE_THREAD_CONTEXT)
/* Assert that the processor is in the desired execution mode
* mode: Processor mode value to test for
* rstatus...: Ignored; for compatibility with ARM classic only.
*/
#define ASSERT_CPU_MODE(mode, rstatus...) \
({ unsigned long __massert = (mode); \
unsigned long __mproc = get_interrupt_number(); \
if (__mproc != __massert) \
panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \
__func__, __mproc, __massert); })
/* Core-level interrupt masking */
static inline int set_irq_level(int primask)
{
int oldvalue;
asm volatile ("mrs %0, primask\n"
"msr primask, %1\n"
: "=r"(oldvalue) : "r"(primask));
return oldvalue;
}
static inline void restore_irq(int primask)
{
asm volatile ("msr primask, %0" :: "r"(primask));
}
static inline void enable_irq(void)
{
asm volatile ("cpsie i");
}
static inline void disable_irq(void)
{
asm volatile ("cpsid i");
}
static inline bool irq_enabled(void)
{
int primask;
asm volatile ("mrs %0, primask" : "=r"(primask));
return !(primask & 1);
}
static inline unsigned long get_interrupt_number(void)
{
unsigned long ipsr;
asm volatile ("mrs %0, ipsr" : "=r"(ipsr));
return ipsr & 0x1ff;
}
static inline void core_sleep(void)
{
asm volatile ("wfi");
enable_irq();
}
#endif /* SYSTEM_ARM_MICRO_H */
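On M-profile there are no banked processor modes, so the header above distinguishes thread from exception context via the IPSR exception number (0 in thread mode). A small sketch of how calling code might use that; the function is hypothetical, while is_thread_context(), get_interrupt_number() and panicf() exist in the tree.

```c
/* Hypothetical sketch, not part of this commit: refuse to run from an
 * exception handler. On M-profile is_thread_context() reads IPSR, which
 * is 0 in thread mode and the exception number otherwise. */
#include "system.h"
#include "panic.h"

void must_run_in_thread_context(void)
{
    if (!is_thread_context())
        panicf("called from exception %lu", get_interrupt_number());

    /* ... normal thread-context work ... */
}
```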

@@ -21,6 +21,14 @@
#ifndef SYSTEM_ARM_H
#define SYSTEM_ARM_H
#if ARM_PROFILE == ARM_PROFILE_CLASSIC
# include "system-arm-classic.h"
#elif ARM_PROFILE == ARM_PROFILE_MICRO
# include "system-arm-micro.h"
#else
# error "Unknown ARM architecture profile!"
#endif
/* Common to all ARM_ARCH */
#define nop \
asm volatile ("nop")
@@ -29,135 +37,6 @@
void __div0(void);
#endif
#define IRQ_ENABLED 0x00
#define IRQ_DISABLED 0x80
#define IRQ_STATUS 0x80
#define FIQ_ENABLED 0x00
#define FIQ_DISABLED 0x40
#define FIQ_STATUS 0x40
#define IRQ_FIQ_ENABLED 0x00
#define IRQ_FIQ_DISABLED 0xc0
#define IRQ_FIQ_STATUS 0xc0
#define HIGHEST_IRQ_LEVEL IRQ_DISABLED
#define set_irq_level(status) \
set_interrupt_status((status), IRQ_STATUS)
#define set_fiq_status(status) \
set_interrupt_status((status), FIQ_STATUS)
#define disable_irq_save() \
disable_interrupt_save(IRQ_STATUS)
#define disable_fiq_save() \
disable_interrupt_save(FIQ_STATUS)
#define restore_irq(cpsr) \
restore_interrupt(cpsr)
#define restore_fiq(cpsr) \
restore_interrupt(cpsr)
#define disable_irq() \
disable_interrupt(IRQ_STATUS)
#define enable_irq() \
enable_interrupt(IRQ_STATUS)
#define disable_fiq() \
disable_interrupt(FIQ_STATUS)
#define enable_fiq() \
enable_interrupt(FIQ_STATUS)
#define irq_enabled() \
interrupt_enabled(IRQ_STATUS)
#define fiq_enabled() \
interrupt_enabled(FIQ_STATUS)
#define ints_enabled() \
interrupt_enabled(IRQ_FIQ_STATUS)
#define irq_enabled_checkval(val) \
(((val) & IRQ_STATUS) == 0)
#define fiq_enabled_checkval(val) \
(((val) & FIQ_STATUS) == 0)
#define ints_enabled_checkval(val) \
(((val) & IRQ_FIQ_STATUS) == 0)
#define CPU_MODE_USER 0x10
#define CPU_MODE_FIQ 0x11
#define CPU_MODE_IRQ 0x12
#define CPU_MODE_SVC 0x13
#define CPU_MODE_ABT 0x17
#define CPU_MODE_UNDEF 0x1b
#define CPU_MODE_SYS 0x1f
/* We run in SYS mode */
#define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS
#define is_thread_context() \
(get_processor_mode() == CPU_MODE_THREAD_CONTEXT)
/* Assert that the processor is in the desired execution mode
* mode: Processor mode value to test for
* rstatus...: Provide if you already have the value saved, otherwise leave
* blank to get it automatically.
*/
#define ASSERT_CPU_MODE(mode, rstatus...) \
({ unsigned long __massert = (mode); \
unsigned long __mproc = *#rstatus ? \
((rstatus +0) & 0x1f) : get_processor_mode(); \
if (__mproc != __massert) \
panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \
__func__, __mproc, __massert); })
/* Core-level interrupt masking */
static inline int set_interrupt_status(int status, int mask)
{
unsigned long cpsr;
int oldstatus;
/* Read the old levels and set the new ones */
#if (defined(CREATIVE_ZVM) ||defined(CREATIVE_ZV)) && defined(BOOTLOADER)
// FIXME: This workaround is for a problem with inlining;
// for some reason 'mask' gets treated as a variable/non-immediate constant
// but only on this build. All others (including the nearly-identical mrobe500boot) are fine
asm volatile (
"mrs %1, cpsr \n"
"bic %0, %1, %[mask] \n"
"orr %0, %0, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r,r"(cpsr), "=&r,r"(oldstatus)
: "r,i"(status & mask), [mask]"r,i"(mask));
#else
asm volatile (
"mrs %1, cpsr \n"
"bic %0, %1, %[mask] \n"
"orr %0, %0, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r,r"(cpsr), "=&r,r"(oldstatus)
: "r,i"(status & mask), [mask]"i,i"(mask));
#endif
return oldstatus;
}
static inline void restore_interrupt(int cpsr)
{
/* Set cpsr_c from value returned by disable_interrupt_save
* or set_interrupt_status */
asm volatile ("msr cpsr_c, %0" : : "r"(cpsr));
}
static inline bool interrupt_enabled(int status)
{
unsigned long cpsr;
asm ("mrs %0, cpsr" : "=r"(cpsr));
return (cpsr & status) == 0;
}
static inline unsigned long get_processor_mode(void)
{
unsigned long cpsr;
asm ("mrs %0, cpsr" : "=r"(cpsr));
return cpsr & 0x1f;
}
/* ARM_ARCH version section for architecture*/
#if ARM_ARCH >= 6
static inline uint16_t swap16_hw(uint16_t value)
/*
@@ -197,66 +76,6 @@ static inline uint32_t swap_odd_even32_hw(uint32_t value)
return retval;
}
static inline void enable_interrupt(int mask)
{
/* Clear I and/or F disable bit */
/* mask is expected to be constant and so only relevent branch
* is preserved */
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsie i");
break;
case FIQ_STATUS:
asm volatile ("cpsie f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsie if");
break;
}
}
static inline void disable_interrupt(int mask)
{
/* Set I and/or F disable bit */
/* mask is expected to be constant and so only relevent branch
* is preserved */
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsid i");
break;
case FIQ_STATUS:
asm volatile ("cpsid f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsid if");
break;
}
}
static inline int disable_interrupt_save(int mask)
{
/* Set I and/or F disable bit and return old cpsr value */
int cpsr;
/* mask is expected to be constant and so only relevent branch
* is preserved */
asm volatile("mrs %0, cpsr" : "=r"(cpsr));
switch (mask & IRQ_FIQ_STATUS)
{
case IRQ_STATUS:
asm volatile ("cpsid i");
break;
case FIQ_STATUS:
asm volatile ("cpsid f");
break;
case IRQ_FIQ_STATUS:
asm volatile ("cpsid if");
break;
}
return cpsr;
}
#else /* ARM_ARCH < 6 */
static inline uint16_t swap16_hw(uint16_t value)
@@ -326,41 +145,6 @@ static inline uint32_t swap_odd_even32_hw(uint32_t value)
return value;
}
static inline void enable_interrupt(int mask)
{
/* Clear I and/or F disable bit */
int tmp;
asm volatile (
"mrs %0, cpsr \n"
"bic %0, %0, %1 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp) : "i"(mask));
}
static inline void disable_interrupt(int mask)
{
/* Set I and/or F disable bit */
int tmp;
asm volatile (
"mrs %0, cpsr \n"
"orr %0, %0, %1 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp) : "i"(mask));
}
static inline int disable_interrupt_save(int mask)
{
/* Set I and/or F disable bit and return old cpsr value */
int cpsr, tmp;
asm volatile (
"mrs %1, cpsr \n"
"orr %0, %1, %2 \n"
"msr cpsr_c, %0 \n"
: "=&r"(tmp), "=&r"(cpsr)
: "i"(mask));
return cpsr;
}
#endif /* ARM_ARCH */
static inline uint32_t swaw32_hw(uint32_t value)
@@ -384,31 +168,4 @@ static inline uint32_t swaw32_hw(uint32_t value)
}
#if defined(CPU_TCC780X) /* Single core only for now */ \
|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2 \
|| CONFIG_CPU == S5L8702 || CONFIG_CPU == S5L8720
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
asm volatile (
"mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
"nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
: : "r"(0)
);
enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#if !(defined(CPU_PP)) && CONFIG_CPU != RK27XX && CONFIG_CPU != IMX233
static inline void core_sleep(void)
{
/* TODO: core_sleep not implemented, battery life will be decreased */
enable_irq();
}
#endif /* CPU_PP */
#endif
#endif /* SYSTEM_ARM_H */

@@ -0,0 +1,64 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2025 by Aidan MacDonald
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#if ARCH_VERSION < 7
# define memmanage_handler UIE
# define busfault_handler UIE
# define usagefault_handler UIE
#endif
#if ARCH_VERSION < 8
# define securefault_handler UIE
#endif
.syntax unified
.text
/*
* Architecturally defined exception vectors for ARMv6/7/8-M.
*
* The target linker script is expected to place these in the
* correct location and then append the implementation-specific
* vector table entries, which would normally be defined at the
* manufacturer level of the target tree.
*/
.section .vectors.arm,"ax",%progbits
.global __vectors_arm
__vectors_arm:
.word irqstackend /* [ 0] Stack pointer */
.word reset_handler /* [ 1] Reset */
.word nmi_handler /* [ 2] Non-maskable interrupt */
.word hardfault_handler /* [ 3] */
.word memmanage_handler /* [ 4] (ARMv7-M and later only) */
.word busfault_handler /* [ 5] (ARMv7-M and later only) */
.word usagefault_handler /* [ 6] (ARMv7-M and later only) */
.word securefault_handler /* [ 7] (ARMv8-M only) */
.word UIE /* [ 8] Reserved */
.word UIE /* [ 9] Reserved */
.word UIE /* [10] Reserved */
.word svcall_handler /* [11] */
.word debugmonitor_handler /* [12] (ARMv7-M and later only) */
.word UIE /* [13] Reserved */
.word pendsv_handler /* [14] */
.word systick_handler /* [15] (Optional on ARMv6-M) */