kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and use different implementations.

Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
commit 382d1861af
parent 8bae5f2644
30 changed files with 1564 additions and 756 deletions
40
firmware/kernel/corelock.c
Normal file
@@ -0,0 +1,40 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Daniel Ankers
 *
 * PP5002 and PP502x SoC threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <string.h>
#include "corelock.h"

/* Core locks using Peterson's mutual exclusion algorithm */


/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

/* other corelock methods are ASM-optimized */
#include "asm/corelock.c"
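The C file above only provides initialization; the lock, try_lock and unlock operations come from the per-architecture asm/corelock.c it includes. As a rough illustration of what Peterson's algorithm looks like for the two-core case, here is a hypothetical portable sketch - not the commit's implementation - assuming NUM_CORES == 2 and that CURRENT_CORE yields 0 or 1:

/* Hypothetical portable sketch of a two-core Peterson lock; the commit's
 * real implementations live in asm/corelock.c. */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;   /* assumed to be 0 or 1 */
    const unsigned int othercore = 1 - core;

    cl->myl[core] = 1;       /* announce interest in the lock */
    cl->turn = othercore;    /* concede the tie-break to the other core */

    /* spin while the other core wants the lock and holds the tie-break */
    while (cl->myl[othercore] != 0 && cl->turn == othercore);
}

void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0; /* withdraw interest; releases any spinner */
}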
53
firmware/kernel/include/corelock.h
Normal file
@@ -0,0 +1,53 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/


#ifndef CORELOCK_H
#define CORELOCK_H

#include "config.h"

#ifndef HAVE_CORELOCK_OBJECT

/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)

#else

/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

/* Too big to inline everywhere */
extern void corelock_init(struct corelock *cl);
extern void corelock_lock(struct corelock *cl);
extern int corelock_try_lock(struct corelock *cl);
extern void corelock_unlock(struct corelock *cl);

#endif /* HAVE_CORELOCK_OBJECT */

#endif /* CORELOCK_H */
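Because the whole API collapses to empty macros on single-core targets, kernel objects embed the lock conditionally yet call the functions unconditionally. A hypothetical object in the same style as the structs later in this commit - IF_COP() comes from system.h, outside this diff, and my_obj is an illustrative name:

struct my_obj
{
    int value;                     /* state shared across cores */
    IF_COP( struct corelock cl; )  /* vanishes on single-core builds */
};

void my_obj_bump(struct my_obj *o)
{
    corelock_lock(&o->cl);   /* expands to nothing without HAVE_CORELOCK_OBJECT */
    o->value++;
    corelock_unlock(&o->cl);
}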
69
firmware/kernel/include/kernel.h
Normal file
@@ -0,0 +1,69 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef KERNEL_H
#define KERNEL_H

#include "config.h"

#include "system.h"
#include "queue.h"
#include "mutex.h"
#include "tick.h"

#ifdef INCLUDE_TIMEOUT_API
#include "timeout.h"
#endif

#ifdef HAVE_SEMAPHORE_OBJECTS
#include "semaphore.h"
#endif

#ifdef HAVE_CORELOCK_OBJECT
#include "corelock.h"
#endif

#define OBJ_WAIT_TIMEDOUT     (-1)
#define OBJ_WAIT_FAILED       0
#define OBJ_WAIT_SUCCEEDED    1

#define TIMEOUT_BLOCK   -1
#define TIMEOUT_NOBLOCK 0

static inline void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        init_queues();
        init_tick();
#ifdef KDEV_INIT
        kernel_device_init();
#endif
    }
}


#endif /* KERNEL_H */
62
firmware/kernel/include/mutex.h
Normal file
@@ -0,0 +1,62 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef MUTEX_H
#define MUTEX_H

#include <stdbool.h>
#include "config.h"
#include "thread.h"

struct mutex
{
    struct thread_entry *queue;   /* waiter list */
    int recursion;                /* lock owner recursion count */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;       /* priority inheritance info
                                     for waiters */
    bool no_preempt;              /* don't allow higher-priority thread
                                     to be scheduled even if woken */
#else
    struct thread_entry *thread;  /* Indicates owner thread - an owner
                                     implies a locked state - same goes
                                     for priority scheduling
                                     (in blocker struct for that) */
#endif
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};

extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Deprecated temporary function to disable mutex preempting a thread on
 * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
 * reliance on it is a bug! */
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
    { m->no_preempt = !preempt; }
#else
/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
static inline bool mutex_test(const struct mutex *m)
    { return m->thread != NULL; }
#endif /* HAVE_PRIORITY_SCHEDULING */

#endif /* MUTEX_H */
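A hypothetical usage sketch of the API above - my_lock and my_count are illustrative names, not part of the commit:

static struct mutex my_lock;
static int my_count;

void my_setup(void)
{
    mutex_init(&my_lock);   /* once, before the mutex is shared */
}

void my_increment(void)
{
    mutex_lock(&my_lock);   /* blocks until free; the owner may re-lock
                               recursively */
    my_count++;
    mutex_unlock(&my_lock); /* one unlock per lock, owning thread only */
}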
157
firmware/kernel/include/queue.h
Normal file
@@ -0,0 +1,157 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef QUEUE_H
#define QUEUE_H

#include <stdint.h>
#include "config.h"
#include "thread.h"

/* System defined message ID's - |sign bit = 1|class|id| */
/* Event class list */
#define SYS_EVENT_CLS_QUEUE   0
#define SYS_EVENT_CLS_USB     1
#define SYS_EVENT_CLS_POWER   2
#define SYS_EVENT_CLS_FILESYS 3
#define SYS_EVENT_CLS_PLUG    4
#define SYS_EVENT_CLS_MISC    5
#define SYS_EVENT_CLS_PRIVATE 7 /* For use inside plugins */
/* make sure SYS_EVENT_CLS_BITS has enough range */

/* Bit 31->|S|c...c|i...i| */
#define SYS_EVENT               ((long)(int)(1 << 31))
#define SYS_EVENT_CLS_BITS      (3)
#define SYS_EVENT_CLS_SHIFT     (31-SYS_EVENT_CLS_BITS)
#define SYS_EVENT_CLS_MASK      (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_CLS_SHIFT)
#define MAKE_SYS_EVENT(cls, id) (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
/* Macros for extracting codes */
#define SYS_EVENT_CLS(e)        (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_CLS_SHIFT)
#define SYS_EVENT_ID(e)         ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))

#define SYS_TIMEOUT              MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
#define SYS_USB_CONNECTED        MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
#define SYS_USB_CONNECTED_ACK    MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
#define SYS_USB_DISCONNECTED     MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
#define SYS_USB_LUN_LOCKED       MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 4)
#define SYS_USB_READ_DATA        MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 5)
#define SYS_USB_WRITE_DATA       MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 6)
#define SYS_POWEROFF             MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
#define SYS_CHARGER_CONNECTED    MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
#define SYS_CHARGER_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
#define SYS_BATTERY_UPDATE       MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
#define SYS_FS_CHANGED           MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
#define SYS_HOTSWAP_INSERTED     MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
#define SYS_HOTSWAP_EXTRACTED    MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
#define SYS_PHONE_PLUGGED        MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
#define SYS_PHONE_UNPLUGGED      MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
#define SYS_REMOTE_PLUGGED       MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
#define SYS_REMOTE_UNPLUGGED     MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
#define SYS_CAR_ADAPTER_RESUME   MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CALL_INCOMING        MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 3)
#define SYS_CALL_HUNG_UP         MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 4)
#define SYS_VOLUME_CHANGED       MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 5)

#define IS_SYSEVENT(ev)          ((ev & SYS_EVENT) == SYS_EVENT)

#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)

struct queue_event
{
    long     id;
    intptr_t data;
};

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list
{
    /* If non-NULL, there is a thread waiting for the corresponding event */
    /* Must be statically allocated to put in non-cached ram. */
    struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
    struct thread_entry *list;                  /* list of senders in map */
    /* Send info for last message dequeued or NULL if replied or not sent */
    struct thread_entry * volatile curr_sender;
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;
#endif
};
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

#ifdef HAVE_PRIORITY_SCHEDULING
#define QUEUE_GET_THREAD(q) \
    (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
#else
/* Queues without priority enabled have no owner provision _at this time_ */
#define QUEUE_GET_THREAD(q) \
    (NULL)
#endif

struct event_queue
{
    struct thread_entry *queue;              /* waiter list */
    struct queue_event events[QUEUE_LENGTH]; /* list of events */
    unsigned int volatile read;              /* head of queue */
    unsigned int volatile write;             /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    struct queue_sender_list * volatile send; /* list of threads waiting for
                                                 reply to an event */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker *blocker_p;               /* priority inheritance info
                                                for sync message senders */
#endif
#endif
    IF_COP( struct corelock cl; )            /* multiprocessor sync */
};

extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct queue_event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
                             int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
extern void queue_enable_queue_send(struct event_queue *q,
                                    struct queue_sender_list *send,
                                    unsigned int owner_id);
extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
extern void queue_reply(struct event_queue *q, intptr_t retval);
extern bool queue_in_queue_send(struct event_queue *q);
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
extern bool queue_empty(const struct event_queue* q);
extern bool queue_peek(struct event_queue *q, struct queue_event *ev);

#define QPEEK_FILTER_COUNT_MASK (0xffu)   /* 0x00=1 filter, 0xff=256 filters */
#define QPEEK_FILTER_HEAD_ONLY  (1u << 8) /* Ignored if no filters */
#define QPEEK_REMOVE_EVENTS     (1u << 9) /* Remove or discard events */
extern bool queue_peek_ex(struct event_queue *q,
                          struct queue_event *ev,
                          unsigned int flags,
                          const long (*filters)[2]);

extern void queue_clear(struct event_queue* q);
extern void queue_remove_from_head(struct event_queue *q, long id);
extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);
extern void init_queues(void);

#endif /* QUEUE_H */
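A hypothetical consumer loop against this API - every "my_" name is illustrative and not part of the commit; HZ comes from tick.h:

static struct event_queue my_queue;

static void my_consumer(void)
{
    struct queue_event ev;

    queue_init(&my_queue, true); /* true: register queue for queue_broadcast */

    while (1)
    {
        /* wake up at least once per second even with no traffic */
        queue_wait_w_tmo(&my_queue, &ev, HZ);

        switch (ev.id)
        {
        case SYS_TIMEOUT:       /* nothing arrived within HZ ticks */
            break;
        case SYS_USB_CONNECTED: /* system event: class SYS_EVENT_CLS_USB, id 0 */
            break;
        default:                /* application-defined message ids */
            break;
        }
    }
}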
40
firmware/kernel/include/semaphore.h
Normal file
@@ -0,0 +1,40 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef SEMAPHORE_H
#define SEMAPHORE_H

#include "config.h"
#include "thread.h"

struct semaphore
{
    struct thread_entry *queue;   /* Waiter list */
    int volatile count;           /* # of waits remaining before unsignaled */
    int max;                      /* maximum # of waits to remain signaled */
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};

extern void semaphore_init(struct semaphore *s, int max, int start);
extern int  semaphore_wait(struct semaphore *s, int timeout);
extern void semaphore_release(struct semaphore *s);

#endif /* SEMAPHORE_H */
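A hypothetical producer/consumer sketch - data_sem is an illustrative name; TIMEOUT_BLOCK and the OBJ_WAIT_* result codes come from kernel.h earlier in this commit:

static struct semaphore data_sem;

void my_setup(void)
{
    semaphore_init(&data_sem, 8, 0); /* up to 8 pending units, starts empty */
}

void my_producer_isr(void)
{
    semaphore_release(&data_sem);    /* signal one unit of work */
}

void my_consumer(void)
{
    while (1)
    {
        /* block indefinitely until a unit is available */
        if (semaphore_wait(&data_sem, TIMEOUT_BLOCK) == OBJ_WAIT_SUCCEEDED)
        {
            /* consume one unit */
        }
    }
}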
387
firmware/kernel/include/thread.h
Normal file
@@ -0,0 +1,387 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
#include "corelock.h"

/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential such that the priority
 * difference between a lower priority runnable thread and the highest priority
 * runnable thread determines the amount of aging necessary for the lower
 * priority thread to be scheduled in order to prevent starvation.
 *
 * If the software playback codec pcm buffer is going down to critical, the
 * codec can gradually raise its own priority to override the user interface
 * and prevent playback skipping.
 */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */

#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32


#if CONFIG_CODEC == SWCODEC
# ifdef HAVE_HARDWARE_CLICK
#  define BASETHREADS  17
# else
#  define BASETHREADS  16
# endif
#else
# define BASETHREADS   11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
/*
 * We need more stack when we run under a host
 * maybe more expensive C lib functions?
 *
 * simulator (possibly) doesn't simulate stack usage anyway but well ... */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS || __PCTOOL__ */

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);


struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                 /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;        /* Register context at switch -
                                   _must_ be first member */
    uintptr_t *stack;           /* Pointer to top of stack */
    const char *name;           /* Thread name */
    long tmo_tick;              /* Tick when thread should be woken from
                                   timeout -
                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;       /* Links for blocked/waking/running -
                                   circular linkage in both directions */
    struct thread_list tmo;     /* Links for timeout list -
                                   Circular in reverse direction, NULL-terminated in
                                   forward direction -
                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp;  /* Pointer to list variable in kernel
                                   object where thread is blocked - used
                                   for implicit unblock and explicit wake
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;    /* Object corelock where thread is blocked -
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl;  /* Corelock for thread_wait */
    struct corelock slot_cl;    /* Corelock to lock thread slot */
    unsigned char core;         /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                   performs special steps needed when being
                                   forced off of an object's wait queue that
                                   go beyond the standard wait queue removal
                                   and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;   /* Return value from a blocked operation/
                                   misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;    /* Pointer to blocker when this thread is blocked
                                   on an object that supports PIP -
                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                   that have blocked threads and thread's own
                                   base priority */
    int skip_count;             /* Number of times skipped if higher priority
                                   thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;     /* Scheduled priority (higher of base or
                                   all threads blocked by this one) */
#endif
    uint16_t id;                /* Current slot id */
    unsigned short stack_size;  /* Size of stack in bytes */
    unsigned char state;        /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;    /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};

/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))

#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};

extern void yield(void);
extern unsigned sleep(unsigned ticks);

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));

/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif

/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */
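A hypothetical sketch of creating and starting a frozen thread with the API above - every "my_" name is illustrative and not from the commit; DEFAULT_STACK_SIZE comes from asm/thread.h (or the SDL fallback above), CPU from system.h and HZ from tick.h:

static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void my_thread_func(void)
{
    while (1)
        sleep(HZ / 10); /* block ~100 ms, letting other threads run */
}

void my_start(void)
{
    unsigned int id = create_thread(my_thread_func,
                                    my_stack, sizeof(my_stack),
                                    CREATE_THREAD_FROZEN, "my thread"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    thread_thaw(id); /* CREATE_THREAD_FROZEN threads run only after this */
}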
67
firmware/kernel/include/tick.h
Normal file
@@ -0,0 +1,67 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef TICK_H
#define TICK_H

#include "config.h"
#include "system.h" /* for NULL */
extern void init_tick(void);

#define HZ 100 /* number of ticks per second */

#define MAX_NUM_TICK_TASKS 8

/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER) && \
    !defined(HAVE_BOOTLOADER_USB_MODE)
/* We don't enable interrupts in the PP bootloader unless USB mode is
   enabled for it, so we need to fake the current_tick variable */
#define current_tick (signed)(USEC_TIMER/10000)

static inline void call_tick_tasks(void)
{
}
#else
extern volatile long current_tick;

/* inline helper for implementing target interrupt handler */
static inline void call_tick_tasks(void)
{
    extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
    void (**p)(void) = tick_funcs;
    void (*fn)(void);

    current_tick++;

    for(fn = *p; fn != NULL; fn = *(++p))
    {
        fn();
    }
}
#endif

/* implemented in target tree */
extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;

extern int tick_add_task(void (*f)(void));
extern int tick_remove_task(void (*f)(void));

#endif /* TICK_H */
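A hypothetical tick task using this API - my_counter and my_tick_task are illustrative names. Since call_tick_tasks() runs the registered functions from the target's timer interrupt handler, a tick task must be short and interrupt-safe:

static volatile long my_counter;

static void my_tick_task(void)
{
    my_counter++; /* runs HZ times per second, in interrupt context */
}

void my_install(void)
{
    tick_add_task(my_tick_task);
    /* ... later, when no longer needed: tick_remove_task(my_tick_task); */
}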
46
firmware/kernel/include/timeout.h
Normal file
@@ -0,0 +1,46 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef TIMEOUT_H
#define TIMEOUT_H

#include "config.h"

struct timeout;

/* timeout callback type
 * tmo - pointer to struct timeout associated with event
 * return next interval or <= 0 to stop event
 */
#define MAX_NUM_TIMEOUTS 8
typedef int (* timeout_cb_type)(struct timeout *tmo);

struct timeout
{
    timeout_cb_type callback; /* callback - returning <= 0 cancels */
    intptr_t data;            /* data passed to callback */
    long expires;             /* expiration tick */
};

void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);

#endif /* TIMEOUT_H */
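A hypothetical one-shot timeout using this API - my_tmo, my_timeout_cb and my_arm are illustrative names; the servicing code lives in a timeout.c elsewhere in this commit, and in Rockbox the callback is driven from the tick, so it should stay short:

static struct timeout my_tmo;

static int my_timeout_cb(struct timeout *tmo)
{
    (void)tmo;  /* tmo->data carries the value passed at registration */
    return 0;   /* <= 0: one-shot, do not re-arm; a positive tick count
                   would re-arm the timeout for that many ticks */
}

void my_arm(void)
{
    /* call my_timeout_cb once, HZ ticks (one second) from now, data = 0 */
    timeout_register(&my_tmo, my_timeout_cb, HZ, 0);
}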
49
firmware/kernel/kernel-internal.h
Normal file
@@ -0,0 +1,49 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef KERNEL_INTERNAL_H
#define KERNEL_INTERNAL_H

#include "config.h"
#include "debug.h"

/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#include <stdlib.h>
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif


#endif /* KERNEL_INTERNAL_H */
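An illustrative call, mirroring how mutex.c and queue.c below use the macro; q here is a hypothetical queue pointer. Note that the whole statement compiles away when KERNEL_OBJECT_CHECKS is 0, so neither the expression nor the message arguments may have side effects:

KERNEL_ASSERT(q->read != q->write, "dequeue from empty queue q=%08lX", (long)q);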
152
firmware/kernel/mutex.c
Normal file
@@ -0,0 +1,152 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | |_) (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/


/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

#include <stdbool.h>
#include "config.h"
#include "system.h"
#include "mutex.h"
#include "corelock.h"
#include "thread-internal.h"
#include "kernel-internal.h"

static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    mtx->blocker.thread = td;
#else
    mtx->thread = td;
#endif
}

static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    return mtx->blocker.thread;
#else
    return mtx->thread;
#endif
}

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    corelock_init(&m->cl);
    m->queue = NULL;
    m->recursion = 0;
    mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->blocker.priority = PRIORITY_IDLE;
    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    m->no_preempt = false;
#endif
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = thread_self_entry();

    if(current == mutex_get_thread(m))
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(mutex_get_thread(m) == NULL))
    {
        /* lock is open */
        mutex_set_thread(m, current);
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    IF_COP( current->obj_cl = &m->cl; )
    IF_PRIO( current->blocker = &m->blocker; )
    current->bqp = &m->queue;

    disable_irq();
    block_thread(current);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}

/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  mutex_get_thread(m)->name,
                  thread_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    if(LIKELY(m->queue == NULL))
    {
        /* no threads waiting - open the lock */
        mutex_set_thread(m, NULL);
        corelock_unlock(&m->cl);
        return;
    }
    else
    {
        const int oldlevel = disable_irq_save();
        /* Transfer of owning thread is handled in the wakeup protocol
         * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( mutex_set_thread(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
        restore_irq(oldlevel);

        corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
        if((result & THREAD_SWITCH) && !m->no_preempt)
            switch_thread();
#endif
    }
}
786
firmware/kernel/queue.c
Normal file
|
@ -0,0 +1,786 @@
|
|||
/***************************************************************************
|
||||
* __________ __ ___.
|
||||
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
|
||||
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
|
||||
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
|
||||
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
|
||||
* \/ \/ \/ \/ \/
|
||||
* $Id$
|
||||
*
|
||||
* Copyright (C) 2002 by Björn Stenberg
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
||||
* KIND, either express or implied.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#include <string.h>
|
||||
#include "config.h"
|
||||
#include "kernel.h"
|
||||
#include "system.h"
|
||||
#include "queue.h"
|
||||
#include "corelock.h"
|
||||
#include "kernel-internal.h"
|
||||
#include "general.h"
|
||||
#include "panic.h"
|
||||
|
||||
/* This array holds all queues that are initiated. It is used for broadcast. */
|
||||
static struct
|
||||
{
|
||||
struct event_queue *queues[MAX_NUM_QUEUES+1];
|
||||
#ifdef HAVE_CORELOCK_OBJECT
|
||||
struct corelock cl;
|
||||
#endif
|
||||
} all_queues SHAREDBSS_ATTR;
|
||||
|
||||
/****************************************************************************
|
||||
* Queue handling stuff
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
|
||||
/****************************************************************************
|
||||
* Sender thread queue structure that aids implementation of priority
|
||||
* inheritance on queues because the send list structure is the same as
|
||||
* for all other kernel objects:
|
||||
*
|
||||
* Example state:
|
||||
* E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
|
||||
* E3 was posted with queue_post
|
||||
* 4 events remain enqueued (E1-E4)
|
||||
*
|
||||
* rd wr
|
||||
* q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
|
||||
* q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
|
||||
* \/ \/ \/
|
||||
* q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
|
||||
* q->send->curr_sender: /\
|
||||
*
|
||||
* Thread has E0 in its own struct queue_event.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
/* Puts the specified return value in the waiting thread's return value
|
||||
* and wakes the thread.
|
||||
*
|
||||
* A sender should be confirmed to exist before calling which makes it
|
||||
* more efficent to reject the majority of cases that don't need this
|
||||
* called.
|
||||
*/
|
||||
static void queue_release_sender(struct thread_entry * volatile * sender,
|
||||
intptr_t retval)
|
||||
{
|
||||
struct thread_entry *thread = *sender;
|
||||
|
||||
*sender = NULL; /* Clear slot. */
|
||||
#ifdef HAVE_WAKEUP_EXT_CB
|
||||
thread->wakeup_ext_cb = NULL; /* Clear callback. */
|
||||
#endif
|
||||
thread->retval = retval; /* Assign thread-local return value. */
|
||||
*thread->bqp = thread; /* Move blocking queue head to thread since
|
||||
wakeup_thread wakes the first thread in
|
||||
the list. */
|
||||
wakeup_thread(thread->bqp);
|
||||
}
|
||||
|
||||
/* Releases any waiting threads that are queued with queue_send -
|
||||
* reply with 0.
|
||||
*/
|
||||
static void queue_release_all_senders(struct event_queue *q)
|
||||
{
|
||||
if(q->send)
|
||||
{
|
||||
unsigned int i;
|
||||
for(i = q->read; i != q->write; i++)
|
||||
{
|
||||
struct thread_entry **spp =
|
||||
&q->send->senders[i & QUEUE_LENGTH_MASK];
|
||||
|
||||
if(*spp)
|
||||
{
|
||||
queue_release_sender(spp, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Callback to do extra forced removal steps from sender list in addition
|
||||
* to the normal blocking queue removal and priority dis-inherit */
|
||||
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
|
||||
{
|
||||
*((struct thread_entry **)thread->retval) = NULL;
|
||||
#ifdef HAVE_WAKEUP_EXT_CB
|
||||
thread->wakeup_ext_cb = NULL;
|
||||
#endif
|
||||
thread->retval = 0;
|
||||
}
|
||||
|
||||
/* Enables queue_send on the specified queue - caller allocates the extra
|
||||
* data structure. Only queues which are taken to be owned by a thread should
|
||||
* enable this however an official owner is not compulsory but must be
|
||||
* specified for priority inheritance to operate.
|
||||
*
|
||||
* Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
|
||||
* messages results in an undefined order of message replies or possible default
|
||||
* replies if two or more waits happen before a reply is done.
|
||||
*/
|
||||
void queue_enable_queue_send(struct event_queue *q,
|
||||
struct queue_sender_list *send,
|
||||
unsigned int owner_id)
|
||||
{
|
||||
int oldlevel = disable_irq_save();
|
||||
corelock_lock(&q->cl);
|
||||
|
||||
if(send != NULL && q->send == NULL)
|
||||
{
|
||||
memset(send, 0, sizeof(*send));
|
||||
#ifdef HAVE_PRIORITY_SCHEDULING
|
||||
send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
|
||||
send->blocker.priority = PRIORITY_IDLE;
|
||||
if(owner_id != 0)
|
||||
{
|
||||
send->blocker.thread = thread_id_entry(owner_id);
|
||||
q->blocker_p = &send->blocker;
|
||||
}
|
||||
#endif
|
||||
q->send = send;
|
||||
}
|
||||
|
||||
corelock_unlock(&q->cl);
|
||||
restore_irq(oldlevel);
|
||||
|
||||
(void)owner_id;
|
||||
}
|
||||
|
||||
/* Unblock a blocked thread at a given event index */
|
||||
static inline void queue_do_unblock_sender(struct queue_sender_list *send,
|
||||
unsigned int i)
|
||||
{
|
||||
if(send)
|
||||
{
|
||||
struct thread_entry **spp = &send->senders[i];
|
||||
|
||||
if(UNLIKELY(*spp))
|
||||
{
|
||||
queue_release_sender(spp, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Perform the auto-reply sequence */
|
||||
static inline void queue_do_auto_reply(struct queue_sender_list *send)
|
||||
{
|
||||
if(send && send->curr_sender)
|
||||
{
|
||||
/* auto-reply */
|
||||
queue_release_sender(&send->curr_sender, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/* Moves waiting thread's refrence from the senders array to the
|
||||
* current_sender which represents the thread waiting for a reponse to the
|
||||
* last message removed from the queue. This also protects the thread from
|
||||
* being bumped due to overflow which would not be a valid action since its
|
||||
* message _is_ being processed at this point. */
|
||||
static inline void queue_do_fetch_sender(struct queue_sender_list *send,
|
||||
unsigned int rd)
|
||||
{
|
||||
if(send)
|
||||
{
|
||||
struct thread_entry **spp = &send->senders[rd];
|
||||
|
||||
if(*spp)
|
||||
{
|
||||
/* Move thread reference from array to the next thread
|
||||
that queue_reply will release */
|
||||
send->curr_sender = *spp;
|
||||
(*spp)->retval = (intptr_t)spp;
|
||||
*spp = NULL;
|
||||
}
|
||||
/* else message was posted asynchronously with queue_post */
|
||||
}
|
||||
}
|
||||
#else
|
||||
/* Empty macros for when synchoronous sending is not made */
|
||||
#define queue_release_all_senders(q)
|
||||
#define queue_do_unblock_sender(send, i)
|
||||
#define queue_do_auto_reply(send)
|
||||
#define queue_do_fetch_sender(send, rd)
|
||||
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
|
||||
|
||||
/* Queue must not be available for use during this call */
|
||||
void queue_init(struct event_queue *q, bool register_queue)
|
||||
{
|
||||
int oldlevel = disable_irq_save();
|
||||
|
||||
if(register_queue)
|
||||
{
|
||||
corelock_lock(&all_queues.cl);
|
||||
}
|
||||
|
||||
corelock_init(&q->cl);
|
||||
q->queue = NULL;
|
||||
/* What garbage is in write is irrelevant because of the masking design-
|
||||
* any other functions the empty the queue do this as well so that
|
||||
* queue_count and queue_empty return sane values in the case of a
|
||||
* concurrent change without locking inside them. */
|
||||
q->read = q->write;
|
||||
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
|
||||
q->send = NULL; /* No message sending by default */
|
||||
IF_PRIO( q->blocker_p = NULL; )
|
||||
#endif
|
||||
|
||||
if(register_queue)
|
||||
{
|
||||
void **queues = (void **)all_queues.queues;
|
||||
void **p = find_array_ptr(queues, q);
|
||||
|
||||
if(p - queues >= MAX_NUM_QUEUES)
|
||||
{
|
||||
panicf("queue_init->out of queues");
|
||||
}
|
||||
|
||||
if(*p == NULL)
|
||||
{
|
||||
/* Add it to the all_queues array */
|
||||
*p = q;
|
||||
corelock_unlock(&all_queues.cl);
|
||||
}
|
||||
}
|
||||
|
||||
restore_irq(oldlevel);
|
||||
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Remove the queue if registered */
    remove_array_ptr((void **)all_queues.queues, q);

    corelock_unlock(&all_queues.cl);

    /* Release thread(s) waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        /* Release threads waiting for replies */
        queue_release_all_senders(q);

        /* Reply to any dequeued message waiting for one */
        queue_do_auto_reply(q->send);

        q->send = NULL;
        IF_PRIO( q->blocker_p = NULL; )
    }
#endif

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

#ifdef HAVE_PRIORITY_SCHEDULING
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    while(1)
    {
        struct thread_entry *current;

        rd = q->read;
        if (rd != q->write) /* A waking message could disappear */
            break;

        current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        q->read = rd + 1;
        rd &= QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

        /* Get data for a waiting thread if one */
        queue_do_fetch_sender(q->send, rd);
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;
    unsigned int rd, wr;

    /* this function works only with a positive number (or zero) of ticks */
    if (ticks == TIMEOUT_BLOCK)
    {
        queue_wait(q, ev);
        return;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
                  QUEUE_GET_THREAD(q) == thread_self_entry(),
                  "queue_wait_w_tmo->wrong thread\n");
#endif

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
    queue_do_auto_reply(q->send);
#endif

    rd = q->read;
    wr = q->write;
    if (rd == wr && ticks > 0)
    {
        struct thread_entry *current = thread_self_entry();

        IF_COP( current->obj_cl = &q->cl; )
        current->bqp = &q->queue;

        block_thread_w_tmo(current, ticks);
        corelock_unlock(&q->cl);

        switch_thread();

        disable_irq();
        corelock_lock(&q->cl);

        rd = q->read;
        wr = q->write;
    }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(ev)
#endif
    {
        /* no worry about a removed message here - status is checked inside
           locks - perhaps verify if timeout or false alarm */
        if (rd != wr)
        {
            q->read = rd + 1;
            rd &= QUEUE_LENGTH_MASK;
            *ev = q->events[rd];
            /* Get data for a waiting thread if one */
            queue_do_fetch_sender(q->send, rd);
        }
        else
        {
            ev->id = SYS_TIMEOUT;
        }
    }
    /* else just waiting on non-empty */

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
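
For orientation, a minimal consumer loop built on queue_init(), queue_wait_w_tmo() and queue_post() might look like the hedged sketch below; the queue, the MY_EV_* id and the thread body are invented for illustration and are not part of this commit.

/* Illustrative sketch only - my_queue and MY_EV_* are made-up names. */
static struct event_queue my_queue;

static void my_thread(void)
{
    struct queue_event ev;

    queue_init(&my_queue, true); /* register in the all_queues array */

    while(1)
    {
        /* Wake at least once every HZ ticks, even with nothing posted */
        queue_wait_w_tmo(&my_queue, &ev, HZ);

        switch(ev.id)
        {
        case SYS_TIMEOUT:
            break; /* periodic housekeeping */
        default:
            break; /* handle MY_EV_* ids posted elsewhere with
                      queue_post(&my_queue, MY_EV_SOMETHING, 0); */
        }
    }
}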

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_post ovf q=%08lX", (long)q);

    q->events[wr].id = id;
    q->events[wr].data = data;

    /* overflow protect - unblock any thread waiting at this index */
    queue_do_unblock_sender(q->send, wr);

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed to use this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
                  "queue_send ovf q=%08lX", (long)q);

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(LIKELY(q->send))
    {
        struct queue_sender_list *send = q->send;
        struct thread_entry **spp = &send->senders[wr];
        struct thread_entry *current = thread_self_entry();

        if(UNLIKELY(*spp))
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

        /* Save thread in slot, add to list and wait for reply */
        *spp = current;
        IF_COP( current->obj_cl = &q->cl; )
        IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
        current->retval = (intptr_t)spp;
        current->bqp = &send->list;

        block_thread(current);

        corelock_unlock(&q->cl);
        switch_thread();

        return current->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        struct queue_sender_list *sender;

        int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);

        sender = q->send;

        /* Double-check locking */
        if(LIKELY(sender && sender->curr_sender))
            queue_release_sender(&sender->curr_sender, retval);

        corelock_unlock(&q->cl);
        restore_irq(oldlevel);
    }
}
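
The send/reply pair gives a synchronous round trip. A hedged sketch (names invented for illustration): the sender blocks inside queue_send() until the consumer answers, and the consumer must reply to every dequeued sent message or rely on the auto-reply in queue_wait(_w_tmo).

/* Sender side - blocks until the consumer replies (illustrative only) */
intptr_t ok = queue_send(&my_queue, MY_CMD_FLUSH, 0);

/* Consumer side - after dequeuing MY_CMD_FLUSH via queue_wait() */
queue_reply(&my_queue, 1); /* wakes the sender; its queue_send() returns 1 */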

#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Scan the event queue from head to tail, returning any event from the
   filter list that was found, optionally removing the event. If an
   event is returned, synchronous events are handled in the same manner as
   with queue_wait(_w_tmo); if discarded, then as queue_clear.
   If filters are NULL, any event matches. If filters exist, the default
   is to search the full queue depth.
   Earlier filters take precedence.

   Return true if an event was found, false otherwise. */
bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
                   unsigned int flags, const long (*filters)[2])
{
    bool have_msg;
    unsigned int rd, wr;
    int oldlevel;

    if(LIKELY(q->read == q->write))
        return false; /* Empty: do nothing further */

    have_msg = false;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Starting at the head, find first match */
    for(rd = q->read, wr = q->write; rd != wr; rd++)
    {
        struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];

        if(filters)
        {
            /* Have filters - find the first thing that passes */
            const long (* f)[2] = filters;
            const long (* const f_last)[2] =
                &filters[flags & QPEEK_FILTER_COUNT_MASK];
            long id = e->id;

            do
            {
                if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
                    goto passed_filter;
            }
            while(++f <= f_last);

            if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
                continue; /* No match; test next event */
            else
                break;    /* Only check the head */
        }
        /* else - anything passes */

    passed_filter:

        /* Found a matching event */
        have_msg = true;

        if(ev)
            *ev = *e; /* Caller wants the event */

        if(flags & QPEEK_REMOVE_EVENTS)
        {
            /* Do event removal */
            unsigned int r = q->read;
            q->read = r + 1; /* Advance head */

            if(ev)
            {
                /* Auto-reply */
                queue_do_auto_reply(q->send);
                /* Get the thread waiting for reply, if any */
                queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }
            else
            {
                /* Release any thread waiting on this message */
                queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
            }

            /* Slide messages forward into the gap if not at the head */
            while(rd != r)
            {
                unsigned int dst = rd & QUEUE_LENGTH_MASK;
                unsigned int src = --rd & QUEUE_LENGTH_MASK;

                q->events[dst] = q->events[src];
                /* Keep sender wait list in sync */
                if(q->send)
                    q->send->senders[dst] = q->send->senders[src];
            }
        }

        break;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    return queue_peek_ex(q, ev, 0, NULL);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    const long f[2] = { id, id };
    while (queue_peek_ex(q, NULL,
            QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
}
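
Judging from the filter loop above, the low bits of 'flags' hold the number of filter ranges minus one (QPEEK_FILTER_COUNT_MASK), and each filter is an inclusive [first, last] id range. A hedged sketch with invented event ids:

/* Illustrative only - MY_EV_* ids are made up. Remove the first queued
 * event whose id falls in either range, leaving other events in place. */
static const long filt[2][2] = {
    { MY_EV_FIRST, MY_EV_LAST }, /* any id in this range matches */
    { MY_EV_QUIT,  MY_EV_QUIT }, /* a single id, expressed as a range */
};
struct queue_event ev;

if(queue_peek_ex(&my_queue, &ev, 1 | QPEEK_REMOVE_EVENTS, filt))
{
    /* ev now holds the removed event; senders were handled as above */
}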

#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */

/* The more powerful routines aren't required */
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;

    if(q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    rd = q->read;
    if(rd != q->write)
    {
        *ev = q->events[rd & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    return have_msg;
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

        /* Release any thread waiting on this message */
        queue_do_unblock_sender(q->send, rd);

        q->read++;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);

    q->read = q->write;

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q pointer to the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    struct event_queue **p = all_queues.queues;
    struct event_queue *q;

#if NUM_CORES > 1
    int oldlevel = disable_irq_save();
    corelock_lock(&all_queues.cl);
#endif

    for(q = *p; q != NULL; q = *(++p))
    {
        queue_post(q, id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    restore_irq(oldlevel);
#endif

    return p - all_queues.queues;
}

void init_queues(void)
{
    corelock_init(&all_queues.cl);
}
142
firmware/kernel/semaphore.c
Normal file
@@ -0,0 +1,142 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdbool.h>
#include "config.h"
#include "kernel.h"
#include "semaphore.h"
#include "kernel-internal.h"
#include "thread-internal.h"

/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/

/* Initialize the semaphore object.
 * max = maximum up count the semaphore may assume (max >= 1)
 * start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg\n");
    s->queue = NULL;
    s->max = max;
    s->count = start;
    corelock_init(&s->cl);
}

/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}

/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
    unsigned int result = THREAD_NONE;
    int oldlevel;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    if(LIKELY(s->queue != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
        result = wakeup_thread(&s->queue);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
    /* No thread switch if not thread context */
    if((result & THREAD_SWITCH) && is_thread_context())
        switch_thread();
#endif
    (void)result;
}
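
As a hedged illustration (names invented, not part of this file), the pair maps onto the classic ISR-to-thread signalling pattern: semaphore_release() never blocks, and the guard above defers the post-release thread switch when called outside thread context.

/* Illustrative only: signal a worker thread from an interrupt handler. */
static struct semaphore data_ready;

void my_setup(void)
{
    semaphore_init(&data_ready, 1, 0); /* binary semaphore, initially empty */
}

void my_isr(void)
{
    semaphore_release(&data_ready);    /* safe: never blocks */
}

void my_worker(void)
{
    while(1)
    {
        if(semaphore_wait(&data_ready, HZ) == OBJ_WAIT_SUCCEEDED)
            ; /* process the data */
        /* OBJ_WAIT_TIMEDOUT: no signal within HZ ticks */
    }
}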
357
firmware/kernel/thread-internal.h
Normal file
@@ -0,0 +1,357 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"

/*
 * We need more stack when we run under a host -
 * maybe more expensive C lib functions?
 *
 * simulator (possibly) doesn't simulate stack usage anyway but well ... */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */

#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
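
The shipped implementations are hand-written assembly (see the asm/corelock.c include in corelock.c). Purely as a hedged C sketch of the two-core Peterson protocol this struct encodes - not the shipped code - using the CURRENT_CORE macro from the system headers:

/* Hedged C sketch of Peterson's lock for NUM_CORES == 2; illustration only. */
static inline void corelock_lock_sketch(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = 1;     /* declare interest (nonzero) */
    cl->turn = othercore;  /* yield the tie-break to the other core */

    /* spin while the other core is interested and it is its turn */
    while (cl->myl[othercore] != 0 && cl->turn == othercore)
        ;
}

static inline void corelock_unlock_sketch(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0; /* drop interest; releases any waiter */
}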

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);


struct priority_distribution
{
    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};

/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
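
To make the packing concrete, a hedged example (the helper functions are invented, not part of this header): a 16-bit thread id carries an 8-bit slot version above an 8-bit slot index, presumably so stale ids stop matching once a slot is reused.

/* Illustrative helpers only - not part of this header. */
static inline unsigned int thread_id_slot(unsigned int id)
{
    return id & THREAD_ID_SLOT_MASK;       /* low byte: slot index */
}

static inline unsigned int thread_id_version(unsigned int id)
{
    return (id & THREAD_ID_VERSION_MASK)
               >> THREAD_ID_VERSION_SHIFT; /* high byte: slot version */
}
/* THREAD_ID_INIT(5) == 0x0105: slot 5, version 1. */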

#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));

/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif

/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */
2442
firmware/kernel/thread.c
Normal file
File diff suppressed because it is too large
74
firmware/kernel/tick.c
Normal file
@@ -0,0 +1,74 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include "tick.h"
#include "general.h"
#include "panic.h"

/****************************************************************************
 * Timer tick
 ****************************************************************************/


/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif

/* Timer initialization and the interrupt handler are defined at
 * the target level: tick_start() is implemented in the target tree */

int tick_add_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    void **arr = (void **)tick_funcs;
    void **p = find_array_ptr(arr, f);

    /* Add a task if there is room */
    if(p - arr < MAX_NUM_TICK_TASKS)
    {
        *p = f; /* If already in list, no problem. */
    }
    else
    {
        panicf("Error! tick_add_task(): out of tasks");
    }

    restore_irq(oldlevel);
    return 0;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tick_funcs, f);
    restore_irq(oldlevel);
    return rc;
}

void init_tick(void)
{
    tick_start(1000/HZ);
}
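
A hedged usage sketch (the callback name is invented): a tick task is simply a function called from the timer interrupt on every tick, so it must stay short and ISR-safe.

/* Illustrative only: run a short poll from the tick interrupt. The
 * callback executes in interrupt context, so it must not block. */
static void my_tick_task(void)
{
    /* e.g. sample a button GPIO, kick a watchdog, count ticks */
}

void my_start_polling(void) { tick_add_task(my_tick_task); }
void my_stop_polling(void)  { tick_remove_task(my_tick_task); }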
97
firmware/kernel/timeout.c
Normal file
@@ -0,0 +1,97 @@

/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/

#include "config.h"
#include "system.h" /* TIME_AFTER */
#include "kernel.h"
#include "timeout.h"
#include "general.h"

/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload */
        }
        else
        {
            timeout_cancel(curr); /* cancel */
        }
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = disable_irq_save();
    int rc = remove_array_ptr((void **)tmo_list, tmo);

    if(rc >= 0 && *tmo_list == NULL)
    {
        tick_remove_task(timeout_tick); /* Last one - remove task */
    }

    restore_irq(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    void **arr, **p;

    if(tmo == NULL)
        return;

    oldlevel = disable_irq_save();

    /* See if this one is already registered */
    arr = (void **)tmo_list;
    p = find_array_ptr(arr, tmo);

    if(p - arr < MAX_NUM_TIMEOUTS)
    {
        /* Vacancy */
        if(*p == NULL)
        {
            /* Not present */
            if(*tmo_list == NULL)
            {
                tick_add_task(timeout_tick); /* First one - add task */
            }

            *p = tmo;
        }

        tmo->callback = callback;
        tmo->data = data;
        tmo->expires = current_tick + ticks;
    }

    restore_irq(oldlevel);
}
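
To close, a hedged one-shot example (the callback and struct instance are invented): as timeout_tick() above shows, returning 0 from the callback cancels the event automatically, while returning a positive tick count re-arms it.

/* Illustrative only: fire once, half a second from now. */
static struct timeout my_tmo;

static int my_timeout_cb(struct timeout *tmo)
{
    /* runs from the tick task, i.e. interrupt context */
    return 0; /* 0 = one-shot; return >0 ticks to re-arm */
}

void my_arm(void)
{
    timeout_register(&my_tmo, my_timeout_cb, HZ/2, 0);
}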