
Unify kernel list management for ticks, registered queues and timeout objects by using NULL-terminated lists of pointers. Redo the timeout API a bit to simplify it and integrate it. Should give some small binsize reduction across the board, but more if timeout objects are being included.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19808 a1c6a512-1295-4272-9138-f99709370657
Michael Sevakis 2009-01-21 02:44:20 +00:00
parent a7ec73cddd
commit 580d91f097
5 changed files with 122 additions and 120 deletions
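
The commit message describes one pattern applied throughout: every kernel list (tick tasks, registered queues, timeout objects) becomes a fixed-size array with one spare slot that always holds a terminating NULL, managed by a pair of small helpers (find_array_ptr / remove_array_ptr in kernel.c below). A minimal standalone sketch of that pattern follows; the struct item type and the names in main() are illustrative and not part of the commit:

#include <stdio.h>
#include <stddef.h>

#define MAX_NUM_ITEMS 8

struct item
{
    const char *name;
};

/* NULL-terminated list: the extra slot always holds the terminator,
 * so no separate element count is needed. */
static struct item *item_list[MAX_NUM_ITEMS+1];

/* Find a pointer in a pointer array. Returns the address of the matching
 * element, or the address of the terminating NULL if it is not present. */
static void ** find_array_ptr(void **arr, void *ptr)
{
    void *curr;
    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
    return arr;
}

/* Remove a pointer from the array and compact it so no gaps remain.
 * Returns 0 on success and -1 if the pointer was not in the list. */
static int remove_array_ptr(void **arr, void *ptr)
{
    void *curr;
    arr = find_array_ptr(arr, ptr);
    if(*arr == NULL)
        return -1;
    do
    {
        void **next = arr + 1;
        *arr++ = curr = *next; /* slide the following entries up */
    }
    while(curr != NULL);
    return 0;
}

int main(void)
{
    struct item a = { "a" }, b = { "b" };
    /* The (void **) cast mirrors the commit's usage; it assumes object
     * pointers of different types share a representation. */
    void **arr = (void **)item_list;
    void **p;
    struct item *curr;

    /* Register: writing to the slot returned by find_array_ptr is a
     * no-op if the pointer is already in the list. */
    p = find_array_ptr(arr, &a);
    if(p - arr < MAX_NUM_ITEMS)
        *p = &a;

    p = find_array_ptr(arr, &b);
    if(p - arr < MAX_NUM_ITEMS)
        *p = &b;

    /* Iterate/broadcast: walk until the NULL terminator. */
    for(p = arr, curr = *p; curr != NULL; curr = *(++p))
        printf("visiting %s\n", curr->name);

    remove_array_ptr(arr, &a); /* unregister and compact */
    return 0;
}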

View file

@@ -85,13 +85,13 @@ static int button_read(void);
 static struct timeout hp_detect_timeout; /* Debouncer for headphone plug/unplug */
 
 /* This callback can be used for many different functions if needed -
    just check to which object tmo points */
-static bool btn_detect_callback(struct timeout *tmo)
+static int btn_detect_callback(struct timeout *tmo)
 {
     /* Try to post only transistions */
     const long id = tmo->data ? SYS_PHONE_PLUGGED : SYS_PHONE_UNPLUGGED;
     queue_remove_from_head(&button_queue, id);
     queue_post(&button_queue, id, 0);
-    return false;
+    return 0;
 }
 #endif

View file

@@ -7,7 +7,7 @@
  *                     \/            \/     \/    \/            \/
  * $Id$
  *
  * Copyright (C) 2002 by Björn Stenberg
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -204,12 +204,15 @@ extern volatile long current_tick;
 static inline void call_tick_tasks(void)
 {
     extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
-    int i;
+    void (**p)(void) = tick_funcs;
+    void (*fn)(void);
 
     current_tick++;
 
-    for (i = 0; tick_funcs[i] != NULL; i++)
-        tick_funcs[i]();
+    for(fn = *p; fn != NULL; fn = *(++p))
+    {
+        fn();
+    }
 }
 
 #endif
@@ -229,18 +232,16 @@ struct timeout;
 
 /* timeout callback type
  * tmo - pointer to struct timeout associated with event
+ * return next interval or <= 0 to stop event
  */
-typedef bool (* timeout_cb_type)(struct timeout *tmo);
+#define MAX_NUM_TIMEOUTS 8
+typedef int (* timeout_cb_type)(struct timeout *tmo);
 
 struct timeout
 {
-    /* for use by callback/internal - read/write */
     timeout_cb_type callback;/* callback - returning false cancels */
-    int ticks;               /* timeout period in ticks */
     intptr_t data;           /* data passed to callback */
-    /* internal use - read-only */
-    const struct timeout * const next; /* next timeout in list */
-    const long expires;      /* expiration tick */
+    long expires;            /* expiration tick */
 };
 
 void timeout_register(struct timeout *tmo, timeout_cb_type callback,
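
For reference, a brief usage sketch of the reworked timeout API declared above: the callback now returns the next interval in ticks, or <= 0 to stop the event. The callback, repeat count and intervals below are hypothetical, not code from this commit; it assumes kernel.h is included:

#include "kernel.h"

static struct timeout example_tmo;

/* Called from the tick when the timeout expires. A positive return value
 * re-arms the timeout for that many ticks; returning 0 cancels it. */
static int example_callback(struct timeout *tmo)
{
    if(tmo->data-- > 0)
        return HZ/2; /* run again in half a second */

    return 0; /* done - the timeout is removed from the list */
}

static void example_start(void)
{
    /* First expiry after HZ ticks; data here counts the remaining repeats. */
    timeout_register(&example_tmo, example_callback, HZ, 3);
}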

View file

@@ -56,18 +56,50 @@ volatile long current_tick SHAREDDATA_ATTR = 0;
 
 /* List of tick tasks - final element always NULL for termination */
 void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
-static int num_tick_funcs = 0;
 
 extern struct core_entry cores[NUM_CORES];
 
 /* This array holds all queues that are initiated. It is used for broadcast. */
 static struct
 {
-    int count;
-    struct event_queue *queues[MAX_NUM_QUEUES];
+    struct event_queue *queues[MAX_NUM_QUEUES+1];
     IF_COP( struct corelock cl; )
 } all_queues SHAREDBSS_ATTR;
 
+/****************************************************************************
+ * Common utilities
+ ****************************************************************************/
+
+/* Find a pointer in a pointer array. Returns the addess of the element if
+ * found or the address of the terminating NULL otherwise. */
+static void ** find_array_ptr(void **arr, void *ptr)
+{
+    void *curr;
+    for(curr = *arr; curr != NULL && curr != ptr; curr = *(++arr));
+    return arr;
+}
+
+/* Remove a pointer from a pointer array if it exists. Compacts it so that
+ * no gaps exist. Returns 0 on success and -1 if the element wasn't found. */
+static int remove_array_ptr(void **arr, void *ptr)
+{
+    void *curr;
+    arr = find_array_ptr(arr, ptr);
+
+    if(*arr == NULL)
+        return -1;
+
+    /* Found. Slide up following items. */
+    do
+    {
+        void **arr1 = arr + 1;
+        *arr++ = curr = *arr1;
+    }
+    while(curr != NULL);
+
+    return 0;
+}
+
 /****************************************************************************
  * Standard kernel stuff
  ****************************************************************************/
@@ -98,43 +130,29 @@ void kernel_init(void)
 int tick_add_task(void (*f)(void))
 {
     int oldlevel = disable_irq_save();
+    void **arr = (void **)tick_funcs;
+    void **p = find_array_ptr(arr, f);
 
     /* Add a task if there is room */
-    if(num_tick_funcs < MAX_NUM_TICK_TASKS)
+    if(p - arr < MAX_NUM_TICK_TASKS)
     {
-        tick_funcs[num_tick_funcs++] = f;
-        restore_irq(oldlevel);
-        return 0;
+        *p = f; /* If already in list, no problem. */
+    }
+    else
+    {
+        panicf("Error! tick_add_task(): out of tasks");
     }
 
     restore_irq(oldlevel);
-    panicf("Error! tick_add_task(): out of tasks");
-    return -1;
+    return 0;
 }
 
 int tick_remove_task(void (*f)(void))
 {
-    int i;
     int oldlevel = disable_irq_save();
+    int rc = remove_array_ptr((void **)tick_funcs, f);
 
-    /* Remove a task if it is there */
-    for(i = 0;i < num_tick_funcs;i++)
-    {
-        if(tick_funcs[i] == f)
-        {
-            /* Compact function list - propagates NULL-terminator as well */
-            for(; i < num_tick_funcs; i++)
-                tick_funcs[i] = tick_funcs[i+1];
-
-            num_tick_funcs--;
-
-            restore_irq(oldlevel);
-            return 0;
-        }
-    }
-
     restore_irq(oldlevel);
-    return -1;
+    return rc;
 }
 
 /****************************************************************************
@@ -143,28 +161,35 @@ int tick_remove_task(void (*f)(void))
  * time and be cancelled without further software intervention.
  ****************************************************************************/
 #ifdef INCLUDE_TIMEOUT_API
-static struct timeout *tmo_list = NULL; /* list of active timeout events */
+/* list of active timeout events */
+static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
 
 /* timeout tick task - calls event handlers when they expire
- * Event handlers may alter ticks, callback and data during operation.
+ * Event handlers may alter expiration, callback and data during operation.
  */
 static void timeout_tick(void)
 {
     unsigned long tick = current_tick;
-    struct timeout *curr, *next;
+    struct timeout **p = tmo_list;
+    struct timeout *curr;
 
-    for (curr = tmo_list; curr != NULL; curr = next)
+    for(curr = *p; curr != NULL; curr = *(++p))
     {
-        next = (struct timeout *)curr->next;
+        int ticks;
 
-        if (TIME_BEFORE(tick, curr->expires))
+        if(TIME_BEFORE(tick, curr->expires))
             continue;
 
         /* this event has expired - call callback */
-        if (curr->callback(curr))
-            *(long *)&curr->expires = tick + curr->ticks; /* reload */
+        ticks = curr->callback(curr);
+        if(ticks > 0)
+        {
+            curr->expires = tick + ticks; /* reload */
+        }
         else
+        {
             timeout_cancel(curr); /* cancel */
+        }
     }
 }
@@ -172,30 +197,12 @@ static void timeout_tick(void)
 void timeout_cancel(struct timeout *tmo)
 {
     int oldlevel = disable_irq_save();
+    void **arr = (void **)tmo_list;
+    int rc = remove_array_ptr(arr, tmo);
 
-    if (tmo_list != NULL)
+    if(rc >= 0 && *arr == NULL)
     {
-        struct timeout *curr = tmo_list;
-        struct timeout *prev = NULL;
-
-        while (curr != tmo && curr != NULL)
-        {
-            prev = curr;
-            curr = (struct timeout *)curr->next;
-        }
-
-        if (curr != NULL)
-        {
-            /* in list */
-            if (prev == NULL)
-                tmo_list = (struct timeout *)curr->next;
-            else
-                *(const struct timeout **)&prev->next = curr->next;
-
-            if (tmo_list == NULL)
-                tick_remove_task(timeout_tick); /* last one - remove task */
-        }
-        /* not in list or tmo == NULL */
+        tick_remove_task(timeout_tick); /* Last one - remove task */
     }
 
     restore_irq(oldlevel);
@@ -207,33 +214,36 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                       int ticks, intptr_t data)
 {
     int oldlevel;
-    struct timeout *curr;
+    void **arr, **p;
 
-    if (tmo == NULL)
+    if(tmo == NULL)
         return;
 
     oldlevel = disable_irq_save();
 
-    /* see if this one is already registered */
-    curr = tmo_list;
-    while (curr != tmo && curr != NULL)
-        curr = (struct timeout *)curr->next;
+    /* See if this one is already registered */
+    arr = (void **)tmo_list;
+    p = find_array_ptr(arr, tmo);
 
-    if (curr == NULL)
+    if(p - arr < MAX_NUM_TIMEOUTS)
     {
-        /* not found - add it */
-        if (tmo_list == NULL)
-            tick_add_task(timeout_tick); /* first one - add task */
-
-        *(struct timeout **)&tmo->next = tmo_list;
-        tmo_list = tmo;
-    }
-
-    tmo->callback = callback;
-    tmo->ticks = ticks;
-    tmo->data = data;
-    *(long *)&tmo->expires = current_tick + ticks;
+        /* Vacancy */
+        if(*p == NULL)
+        {
+            /* Not present */
+            if(*arr == NULL)
+            {
+                tick_add_task(timeout_tick); /* First one - add task */
+            }
+
+            *p = tmo;
+        }
+
+        tmo->callback = callback;
+        tmo->data = data;
+        tmo->expires = current_tick + ticks;
+    }
 
     restore_irq(oldlevel);
 }
@@ -460,13 +470,20 @@ void queue_init(struct event_queue *q, bool register_queue)
 
     if(register_queue)
     {
-        if(all_queues.count >= MAX_NUM_QUEUES)
+        void **queues = (void **)all_queues.queues;
+        void **p = find_array_ptr(queues, q);
+
+        if(p - queues >= MAX_NUM_QUEUES)
         {
             panicf("queue_init->out of queues");
         }
-        /* Add it to the all_queues array */
-        all_queues.queues[all_queues.count++] = q;
-        corelock_unlock(&all_queues.cl);
+
+        if(*p == NULL)
+        {
+            /* Add it to the all_queues array */
+            *p = q;
+            corelock_unlock(&all_queues.cl);
+        }
     }
 
     restore_irq(oldlevel);
@@ -475,29 +492,12 @@ void queue_init(struct event_queue *q, bool register_queue)
 /* Queue must not be available for use during this call */
 void queue_delete(struct event_queue *q)
 {
-    int oldlevel;
-    int i;
-
-    oldlevel = disable_irq_save();
+    int oldlevel = disable_irq_save();
 
     corelock_lock(&all_queues.cl);
     corelock_lock(&q->cl);
 
-    /* Find the queue to be deleted */
-    for(i = 0;i < all_queues.count;i++)
-    {
-        if(all_queues.queues[i] == q)
-        {
-            /* Move the following queues up in the list */
-            all_queues.count--;
-
-            for(;i < all_queues.count;i++)
-            {
-                all_queues.queues[i] = all_queues.queues[i+1];
-            }
-
-            break;
-        }
-    }
+    /* Remove the queue if registered */
+    remove_array_ptr((void **)all_queues.queues, q);
 
     corelock_unlock(&all_queues.cl);
@@ -834,16 +834,17 @@ int queue_count(const struct event_queue *q)
 
 int queue_broadcast(long id, intptr_t data)
 {
-    int i;
+    struct event_queue **p = all_queues.queues;
+    struct event_queue *q;
 
 #if NUM_CORES > 1
     int oldlevel = disable_irq_save();
     corelock_lock(&all_queues.cl);
 #endif
 
-    for(i = 0;i < all_queues.count;i++)
+    for(q = *p; q != NULL; q = *(++p))
     {
-        queue_post(all_queues.queues[i], id, data);
+        queue_post(q, id, data);
     }
 
 #if NUM_CORES > 1
@@ -851,7 +852,7 @@ int queue_broadcast(long id, intptr_t data)
     restore_irq(oldlevel);
 #endif
 
-    return i;
+    return p - all_queues.queues;
 }
 
 /****************************************************************************

View file

@@ -142,7 +142,7 @@ static void mci_set_clock_divider(const int drive, int divider)
 
 #ifdef HAVE_HOTSWAP
 #if defined(SANSA_E200V2) || defined(SANSA_FUZE)
-static bool sd1_oneshot_callback(struct timeout *tmo)
+static int sd1_oneshot_callback(struct timeout *tmo)
 {
     (void)tmo;
@@ -155,7 +155,7 @@ static bool sd1_oneshot_callback(struct timeout *tmo)
     else
         queue_broadcast(SYS_HOTSWAP_EXTRACTED, 0);
 
-    return false;
+    return 0;
 }
 
 void INT_GPIOA(void)

View file

@@ -1215,7 +1215,7 @@ bool card_detect_target(void)
 }
 
 #ifdef HAVE_HOTSWAP
-static bool sd1_oneshot_callback(struct timeout *tmo)
+static int sd1_oneshot_callback(struct timeout *tmo)
 {
     (void)tmo;
@@ -1226,7 +1226,7 @@ static bool sd1_oneshot_callback(struct timeout *tmo)
     else
         queue_broadcast(SYS_HOTSWAP_EXTRACTED, 0);
 
-    return false;
+    return 0;
 }
 
 /* called on insertion/removal interrupt */