forked from len0rd/rockbox
Add CPU mode asserts to kernel on blocking functions.
This scourge finds its way back in far too often. Right now, the asserts are only defined for ARM. Have fun!

Change-Id: Ib21be09ebf71dec10dc652a7a664779251f49644
This commit is contained in:
parent da46457231
commit 3e73866110
7 changed files with 54 additions and 1 deletion
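For context, a minimal host-side sketch of what these asserts catch, assuming a GCC build (the macro uses statement expressions). get_processor_mode() and panicf() here are simplified stand-ins for the firmware versions, not the real implementations; the CPU_MODE_* values match the ARM hunk below.

/* Host-side stand-in illustrating the pattern this commit adds: blocking
 * kernel primitives assert that they are entered from thread context. */
#include <stdio.h>
#include <stdlib.h>

#define CPU_MODE_IRQ            0x12
#define CPU_MODE_SYS            0x1f
#define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS

/* Pretend the caller is currently executing in an interrupt handler. */
static unsigned long get_processor_mode(void) { return CPU_MODE_IRQ; }

static void panicf(const char *func, unsigned long got, unsigned long want)
{
    fprintf(stderr, "Incorrect CPU mode in %s (0x%02lx!=0x%02lx)\n",
            func, got, want);
    exit(1);
}

#define ASSERT_CPU_MODE(mode)                           \
    ({ unsigned long __massert = (mode);                \
       unsigned long __mproc = get_processor_mode();    \
       if (__mproc != __massert)                        \
           panicf(__func__, __mproc, __massert); })

static void mutex_lock_sketch(void)
{
    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT); /* panics outside SYS mode */
    /* ... block until the mutex becomes free ... */
}

int main(void)
{
    mutex_lock_sketch(); /* trips the assert, since we "are" in IRQ mode */
    return 0;
}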
@@ -204,6 +204,16 @@ enum {
 #include "bitswap.h"
 #include "rbendian.h"
 
+#ifndef ASSERT_CPU_MODE
+/* Very useful to have defined properly for your architecture */
+#define ASSERT_CPU_MODE(mode, rstatus...) \
+    ({ (mode); rstatus; })
+#endif
+
+#ifndef CPU_MODE_THREAD_CONTEXT
+#define CPU_MODE_THREAD_CONTEXT 0
+#endif
+
 #ifndef BIT_N
 #define BIT_N(n) (1U << (n))
 #endif
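On targets that don't provide their own definition, the fallback above is deliberately inert. A rough illustration of the expansion (fragment only, shown for clarity):

/* ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel) becomes: */
({ (CPU_MODE_THREAD_CONTEXT); oldlevel; });
/* both arguments are evaluated, nothing is checked, and with no side
 * effects the compiler can discard the whole expression. */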
@@ -159,6 +159,8 @@ void mrsw_init(struct mrsw_lock *mrsw)
  * access recursively. The current writer is ignored and gets access. */
 void mrsw_read_acquire(struct mrsw_lock *mrsw)
 {
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);
+
     struct thread_entry *current = __running_self_entry();
 
     if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
@@ -268,6 +270,8 @@ void mrsw_read_release(struct mrsw_lock *mrsw)
  * safely call recursively. */
 void mrsw_write_acquire(struct mrsw_lock *mrsw)
 {
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);
+
     struct thread_entry *current = __running_self_entry();
 
     if (current == mrsw->splay.blocker.thread)
@@ -39,6 +39,8 @@ void mutex_init(struct mutex *m)
 /* Gain ownership of a mutex object or block until it becomes free */
 void mutex_lock(struct mutex *m)
 {
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);
+
     struct thread_entry *current = __running_self_entry();
 
     if(current == m->blocker.thread)
@@ -283,6 +283,9 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 #endif
 
     oldlevel = disable_irq_save();
+
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);
+
     corelock_lock(&q->cl);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -335,6 +338,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 #endif
 
     oldlevel = disable_irq_save();
+
+    if (ticks != TIMEOUT_NOBLOCK)
+        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);
+
     corelock_lock(&q->cl);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -421,6 +428,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     unsigned int wr;
 
     oldlevel = disable_irq_save();
+
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);
+
     corelock_lock(&q->cl);
 
     wr = q->write++ & QUEUE_LENGTH_MASK;
@@ -57,6 +57,8 @@ int semaphore_wait(struct semaphore *s, int timeout)
     }
     else if(timeout != 0)
     {
+        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);
+
         /* too many waits - block until count is upped... */
         struct thread_entry *current = __running_self_entry();
 
@@ -1234,6 +1234,8 @@ unsigned int create_thread(void (*function)(void),
  */
 void thread_wait(unsigned int thread_id)
 {
+    ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);
+
     struct thread_entry *current = __running_self_entry();
     struct thread_entry *thread = __thread_id_entry(thread_id);
 
@@ -76,9 +76,32 @@ void __div0(void);
 #define ints_enabled_checkval(val) \
     (((val) & IRQ_FIQ_STATUS) == 0)
 
+#define CPU_MODE_USER   0x10
+#define CPU_MODE_FIQ    0x11
+#define CPU_MODE_IRQ    0x12
+#define CPU_MODE_SVC    0x13
+#define CPU_MODE_ABT    0x17
+#define CPU_MODE_UNDEF  0x1b
+#define CPU_MODE_SYS    0x1f
+
 /* We run in SYS mode */
+#define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS
+
 #define is_thread_context() \
-    (get_processor_mode() == 0x1f)
+    (get_processor_mode() == CPU_MODE_THREAD_CONTEXT)
+
+/* Assert that the processor is in the desired execution mode
+ * mode: Processor mode value to test for
+ * rstatus...: Provide if you already have the value saved, otherwise leave
+ *             blank to get it automatically.
+ */
+#define ASSERT_CPU_MODE(mode, rstatus...) \
+    ({ unsigned long __massert = (mode);                          \
+       unsigned long __mproc = *#rstatus ?                        \
+            ((rstatus +0) & 0x1f) : get_processor_mode();         \
+       if (__mproc != __massert)                                  \
+           panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)",  \
+                  __func__, __mproc, __massert); })
 
 /* Core-level interrupt masking */
 
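get_processor_mode() itself is not part of this diff; on ARM it would look something along these lines, assuming it returns only the low five mode bits of the CPSR (which is what the comparisons above rely on):

/* Hypothetical sketch, not the firmware source: read the current mode
 * bits (M[4:0]) out of the CPSR. */
static inline unsigned long get_processor_mode(void)
{
    unsigned long cpsr;
    asm volatile ("mrs %0, cpsr" : "=r" (cpsr));
    return cpsr & 0x1f;
}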