Mirror of https://github.com/Rockbox/rockbox.git (synced 2025-10-13 18:17:39 -04:00)
Remove buflib from the pluginlib and use the core one.
buflib_get_data() isn't inlined for plugins anymore, but can be if really needed.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@30387 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
parent 456170afdf
commit 42f10e04df

6 changed files with 39 additions and 417 deletions
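For plugin code, the practical effect is that the pluginlib buflib calls become calls through the plugin API. A minimal sketch of the new usage, not taken from this commit: the buffer size and helper name are made up, rb->plugin_get_buffer() and rb->memset() are pre-existing API entries, and the rb->buflib_* entries are the ones added below.

    static struct buflib_context ctx;

    static bool demo_core_buflib(void)
    {
        size_t bufsize;
        void *buf = rb->plugin_get_buffer(&bufsize); /* plugin buffer from the core */

        rb->buflib_init(&ctx, buf, bufsize);         /* was buflib_init() from the pluginlib */

        int handle = rb->buflib_alloc(&ctx, 1024);   /* 0 means the allocation failed */
        if (!handle)
            return false;

        char *data = rb->buflib_get_data(&ctx, handle); /* now an API call, no longer inlined */
        rb->memset(data, 0, 1024);

        rb->buflib_free(&ctx, handle);
        return true;
    }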
@@ -791,6 +791,19 @@ static const struct plugin_api rockbox_api = {
        the API gets incompatible */
     tree_get_entries,
     tree_get_entry_at,
+
+    /* the buflib memory management library */
+    buflib_init,
+    buflib_available,
+    buflib_alloc,
+    buflib_alloc_ex,
+    buflib_alloc_maximum,
+    buflib_buffer_in,
+    buflib_buffer_out,
+    buflib_free,
+    buflib_shrink,
+    buflib_get_data,
+    buflib_get_name,
 };

 int plugin_load(const char* plugin, const void* parameter)
@@ -91,6 +91,7 @@ void* plugin_get_buffer(size_t *buffer_size);
 #include "list.h"
 #include "tree.h"
 #include "color_picker.h"
+#include "buflib.h"
 #include "buffering.h"
 #include "tagcache.h"
 #include "viewport.h"
@@ -928,6 +929,23 @@ struct plugin_api {
        the API gets incompatible */
     struct entry* (*tree_get_entries)(struct tree_context* t);
     struct entry* (*tree_get_entry_at)(struct tree_context* t, int index);
+
+    /* the buflib memory management library */
+    void (*buflib_init)(struct buflib_context* ctx, void* buf, size_t size);
+    size_t (*buflib_available)(struct buflib_context* ctx);
+    int (*buflib_alloc)(struct buflib_context* ctx, size_t size);
+    int (*buflib_alloc_ex)(struct buflib_context* ctx, size_t size,
+                           const char* name, struct buflib_callbacks *ops);
+    int (*buflib_alloc_maximum)(struct buflib_context* ctx, const char* name,
+                                size_t* size, struct buflib_callbacks *ops);
+    void (*buflib_buffer_in)(struct buflib_context* ctx, int size);
+    void* (*buflib_buffer_out)(struct buflib_context* ctx, size_t* size);
+    int (*buflib_free)(struct buflib_context* ctx, int handle);
+    bool (*buflib_shrink)(struct buflib_context* ctx, int handle,
+                          void* new_start, size_t new_size);
+    void* (*buflib_get_data)(struct buflib_context* ctx, int handle);
+    const char* (*buflib_get_name)(struct buflib_context* ctx, int handle);
+
 };

 /* plugin header */
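Note that the entries above expose more than the removed pluginlib version did: named allocations, shrinking, and move callbacks (buflib_alloc_ex, buflib_alloc_maximum, buflib_shrink, buflib_get_name). A rough sketch of a movable allocation, continuing the sketch above; struct buflib_callbacks lives in the core firmware header and is not part of this diff, so its member names and the BUFLIB_CB_OK return code are assumptions here:

    static char *slide_mem;              /* cached pointer that must track the block */

    static int slide_moved(int handle, void *current, void *new)
    {
        (void)handle; (void)current;
        slide_mem = new;                 /* re-cache after the allocator moved the block */
        return BUFLIB_CB_OK;             /* assumed success code from the core header */
    }

    static struct buflib_callbacks slide_ops = {
        .move_callback   = slide_moved,  /* assumed member names, check the core buflib.h */
        .shrink_callback = NULL,
    };

    static int alloc_movable_slide(size_t size)
    {
        int handle = rb->buflib_alloc_ex(&ctx, size, "slide", &slide_ops);
        if (handle > 0)
            slide_mem = rb->buflib_get_data(&ctx, handle);
        return handle;
    }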
@@ -7,7 +7,6 @@ configfile.c
 fixedpoint.c
 playback_control.c
 rgb_hsv.c
-buflib.c
 highscore.c
 simple_viewer.c
 display_text.c
@@ -1,349 +0,0 @@
-/***************************************************************************
- *             __________               __   ___.
- *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
- *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
- *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
- *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
- *                     \/            \/     \/    \/            \/
- * $Id$
- *
- * This is a memory allocator designed to provide reasonable management of free
- * space and fast access to allocated data. More than one allocator can be used
- * at a time by initializing multiple contexts.
- *
- * Copyright (C) 2009 Andrew Mahone
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ****************************************************************************/
-
-#include <stdlib.h> /* for abs() */
-#include "buflib.h"
-/* The main goal of this design is fast fetching of the pointer for a handle.
- * For that reason, the handles are stored in a table at the end of the buffer
- * with a fixed address, so that returning the pointer for a handle is a simple
- * table lookup. To reduce the frequency with which allocated blocks will need
- * to be moved to free space, allocations grow up in address from the start of
- * the buffer. The buffer is treated as an array of union buflib_data. Blocks
- * start with a length marker, which is included in their length. Free blocks
- * are marked by negative length, allocated ones use the second buflib_data in
- * the block to store a pointer to their handle table entry, so that it can be
- * quickly found and updated during compaction. The allocator functions are
- * passed a context struct so that two allocators can be run, for example, one
- * per core may be used, with convenience wrappers for the single-allocator
- * case that use a predefined context.
- */
-
-/* Initialize buffer manager */
-void
-buflib_init(struct buflib_context *ctx, void *buf, size_t size)
-{
-    union buflib_data *bd_buf = buf;
-
-    /* Align on sizeof(buflib_data), to prevent unaligned access */
-    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
-    size /= sizeof(union buflib_data);
-    /* The handle table is initialized with no entries */
-    ctx->handle_table = bd_buf + size;
-    ctx->last_handle = bd_buf + size;
-    ctx->first_free_handle = bd_buf + size - 1;
-    ctx->first_free_block = bd_buf;
-    ctx->buf_start = bd_buf;
-    /* A marker is needed for the end of allocated data, to make sure that it
-     * does not collide with the handle table, and to detect end-of-buffer.
-     */
-    ctx->alloc_end = bd_buf;
-    ctx->compact = true;
-}
-
-/* Allocate a new handle, returning 0 on failure */
-static inline
-union buflib_data* handle_alloc(struct buflib_context *ctx)
-{
-    union buflib_data *handle;
-    /* first_free_handle is a lower bound on free handles, work through the
-     * table from there until a handle containing NULL is found, or the end
-     * of the table is reached.
-     */
-    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
-        if (!handle->ptr)
-            break;
-    /* If the search went past the end of the table, it means we need to extend
-     * the table to get a new handle.
-     */
-    if (handle < ctx->last_handle)
-    {
-        if (handle >= ctx->alloc_end)
-            ctx->last_handle--;
-        else
-            return NULL;
-    }
-    handle->val = -1;
-    return handle;
-}
-
-/* Free one handle, shrinking the handle table if it's the last one */
-static inline
-void handle_free(struct buflib_context *ctx, union buflib_data *handle)
-{
-    handle->ptr = 0;
-    /* Update free handle lower bound if this handle has a lower index than the
-     * old one.
-     */
-    if (handle > ctx->first_free_handle)
-        ctx->first_free_handle = handle;
-    if (handle == ctx->last_handle)
-        ctx->last_handle++;
-    else
-        ctx->compact = false;
-}
-
-/* Shrink the handle table, returning true if its size was reduced, false if
- * not
- */
-static inline
-bool
-handle_table_shrink(struct buflib_context *ctx)
-{
-    bool rv;
-    union buflib_data *handle;
-    for (handle = ctx->last_handle; !(handle->ptr); handle++);
-    if (handle > ctx->first_free_handle)
-        ctx->first_free_handle = handle - 1;
-    rv = handle == ctx->last_handle;
-    ctx->last_handle = handle;
-    return rv;
-}
-
-/* Compact allocations and handle table, adjusting handle pointers as needed.
- * Return true if any space was freed or consolidated, false otherwise.
- */
-static bool
-buflib_compact(struct buflib_context *ctx)
-{
-    union buflib_data *block = ctx->first_free_block, *new_block;
-    int shift = 0, len;
-    /* Store the results of attempting to shrink the handle table */
-    bool ret = handle_table_shrink(ctx);
-    for(; block != ctx->alloc_end; block += len)
-    {
-        len = block->val;
-        /* This block is free, add its length to the shift value */
-        if (len < 0)
-        {
-            shift += len;
-            len = -len;
-            continue;
-        }
-        /* If shift is non-zero, it represents the number of places to move
-         * blocks down in memory. Calculate the new address for this block,
-         * update its entry in the handle table, and then move its contents.
-         */
-        if (shift)
-        {
-            new_block = block + shift;
-            block[1].ptr->ptr = new_block + 2;
-            rb->memmove(new_block, block, len * sizeof(union buflib_data));
-        }
-    }
-    /* Move the end-of-allocation mark, and return true if any new space has
-     * been freed.
-     */
-    ctx->alloc_end += shift;
-    ctx->first_free_block = ctx->alloc_end;
-    ctx->compact = true;
-    return ret || shift;
-}
-
-/* Shift buffered items by size units, and update handle pointers. The shift
- * value must be determined to be safe *before* calling.
- */
-static void
-buflib_buffer_shift(struct buflib_context *ctx, int shift)
-{
-    rb->memmove(ctx->buf_start + shift, ctx->buf_start,
-        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
-    union buflib_data *ptr;
-    for (ptr = ctx->last_handle; ptr < ctx->handle_table; ptr++)
-        if (ptr->ptr)
-            ptr->ptr += shift;
-    ctx->first_free_block += shift;
-    ctx->buf_start += shift;
-    ctx->alloc_end += shift;
-}
-
-/* Shift buffered items up by size bytes, or as many as possible if size == 0.
- * Set size to the number of bytes freed.
- */
-void*
-buflib_buffer_out(struct buflib_context *ctx, size_t *size)
-{
-    if (!ctx->compact)
-        buflib_compact(ctx);
-    size_t avail = ctx->last_handle - ctx->alloc_end;
-    size_t avail_b = avail * sizeof(union buflib_data);
-    if (*size && *size < avail_b)
-    {
-        avail = (*size + sizeof(union buflib_data) - 1)
-            / sizeof(union buflib_data);
-        avail_b = avail * sizeof(union buflib_data);
-    }
-    *size = avail_b;
-    void *ret = ctx->buf_start;
-    buflib_buffer_shift(ctx, avail);
-    return ret;
-}
-
-/* Shift buffered items down by size bytes */
-void
-buflib_buffer_in(struct buflib_context *ctx, int size)
-{
-    size /= sizeof(union buflib_data);
-    buflib_buffer_shift(ctx, -size);
-}
-
-/* Allocate a buffer of size bytes, returning a handle for it */
-int
-buflib_alloc(struct buflib_context *ctx, size_t size)
-{
-    union buflib_data *handle, *block;
-    bool last = false;
-    /* This really is assigned a value before use */
-    int block_len;
-    size = (size + sizeof(union buflib_data) - 1) /
-           sizeof(union buflib_data) + 2;
-handle_alloc:
-    handle = handle_alloc(ctx);
-    if (!handle)
-    {
-        /* If allocation has failed, and compaction has succeded, it may be
-         * possible to get a handle by trying again.
-         */
-        if (!ctx->compact && buflib_compact(ctx))
-            goto handle_alloc;
-        else
-            return 0;
-    }
-
-buffer_alloc:
-    for (block = ctx->first_free_block;; block += block_len)
-    {
-        /* If the last used block extends all the way to the handle table, the
-         * block "after" it doesn't have a header. Because of this, it's easier
-         * to always find the end of allocation by saving a pointer, and always
-         * calculate the free space at the end by comparing it to the
-         * last_handle pointer.
-         */
-        if(block == ctx->alloc_end)
-        {
-            last = true;
-            block_len = ctx->last_handle - block;
-            if ((size_t)block_len < size)
-                block = NULL;
-            break;
-        }
-        block_len = block->val;
-        /* blocks with positive length are already allocated. */
-        if(block_len > 0)
-            continue;
-        block_len = -block_len;
-        /* The search is first-fit, any fragmentation this causes will be
-         * handled at compaction.
-         */
-        if ((size_t)block_len >= size)
-            break;
-    }
-    if (!block)
-    {
-        /* Try compacting if allocation failed, but only if the handle
-         * allocation did not trigger compaction already, since there will
-         * be no further gain.
-         */
-        if (!ctx->compact && buflib_compact(ctx))
-        {
-            goto buffer_alloc;
-        } else {
-            handle->val=1;
-            handle_free(ctx, handle);
-            return 0;
-        }
-    }
-
-    /* Set up the allocated block, by marking the size allocated, and storing
-     * a pointer to the handle.
-     */
-    block->val = size;
-    block[1].ptr = handle;
-    handle->ptr = block + 2;
-    /* If we have just taken the first free block, the next allocation search
-     * can save some time by starting after this block.
-     */
-    if (block == ctx->first_free_block)
-        ctx->first_free_block += size;
-    block += size;
-    /* alloc_end must be kept current if we're taking the last block. */
-    if (last)
-        ctx->alloc_end = block;
-    /* Only free blocks *before* alloc_end have tagged length. */
-    else if ((size_t)block_len > size)
-        block->val = size - block_len;
-    /* Return the handle index as a positive integer. */
-    return ctx->handle_table - handle;
-}
-
-/* Free the buffer associated with handle_num. */
-void
-buflib_free(struct buflib_context *ctx, int handle_num)
-{
-    union buflib_data *handle = ctx->handle_table - handle_num,
-                      *freed_block = handle->ptr - 2,
-                      *block = ctx->first_free_block,
-                      *next_block = block;
-    /* We need to find the block before the current one, to see if it is free
-     * and can be merged with this one.
-     */
-    while (next_block < freed_block)
-    {
-        block = next_block;
-        next_block += abs(block->val);
-    }
-    /* If next_block == block, the above loop didn't go anywhere. If it did,
-     * and the block before this one is empty, we can combine them.
-     */
-    if (next_block == freed_block && next_block != block && block->val < 0)
-        block->val -= freed_block->val;
-    /* Otherwise, set block to the newly-freed block, and mark it free, before
-     * continuing on, since the code below exects block to point to a free
-     * block which may have free space after it.
-     */
-    else
-    {
-        block = freed_block;
-        block->val = -block->val;
-    }
-    next_block = block - block->val;
-    /* Check if we are merging with the free space at alloc_end. */
-    if (next_block == ctx->alloc_end)
-        ctx->alloc_end = block;
-    /* Otherwise, the next block might still be a "normal" free block, and the
-     * mid-allocation free means that the buffer is no longer compact.
-     */
-    else {
-        ctx->compact = false;
-        if (next_block->val < 0)
-            block->val += next_block->val;
-    }
-    handle_free(ctx, handle);
-    handle->ptr = NULL;
-    /* If this block is before first_free_block, it becomes the new starting
-     * point for free-block search.
-     */
-    if (block < ctx->first_free_block)
-        ctx->first_free_block = block;
-}
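The comment block at the top of the removed file describes the layout that makes handle lookups cheap, and the same idea carries over to the core allocator: blocks grow up from buf_start, the handle table grows down from the end of the buffer, block[0] holds the length (negative while free), block[1] points back at the handle entry, and user data starts at block + 2. Roughly:

    buf_start                                       last_handle   handle_table
      v                                                  v             v
      [len][handle*][user data ...][len][...] ... free ...[handle entries]
       block[0] block[1] block+2                          (grows downward)

The handle returned to callers is simply the entry's distance from the table end, which is why buflib_get_data() is a one-line lookup. A standalone restatement of that arithmetic (illustrative, not Rockbox code):

    union buflib_data { intptr_t val; union buflib_data *ptr; };

    /* handle 1 is the entry just below handle_table, handle 2 the next one down;
     * each live entry holds the pointer to its block's user data (block + 2) */
    static void *get_data(union buflib_data *handle_table, int handle)
    {
        return handle_table[-handle].ptr;
    }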
@@ -1,58 +0,0 @@
-/***************************************************************************
- *             __________               __   ___.
- *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
- *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
- *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
- *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
- *                     \/            \/     \/    \/            \/
- * $Id$
- *
- * This is a memory allocator designed to provide reasonable management of free
- * space and fast access to allocated data. More than one allocator can be used
- * at a time by initializing multiple contexts.
- *
- * Copyright (C) 2009 Andrew Mahone
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ****************************************************************************/
-
-#ifndef _BUFLIB_H_
-#include <plugin.h>
-
-union buflib_data
-{
-    intptr_t val;
-    union buflib_data *ptr;
-};
-
-struct buflib_context
-{
-    union buflib_data *handle_table;
-    union buflib_data *first_free_handle;
-    union buflib_data *last_handle;
-    union buflib_data *first_free_block;
-    union buflib_data *buf_start;
-    union buflib_data *alloc_end;
-    bool compact;
-};
-
-void buflib_init(struct buflib_context *context, void *buf, size_t size);
-int buflib_alloc(struct buflib_context *context, size_t size);
-void buflib_free(struct buflib_context *context, int handle);
-void* buflib_buffer_out(struct buflib_context *ctx, size_t *size);
-void buflib_buffer_in(struct buflib_context *ctx, int size);
-
-
-
-static inline void* buflib_get_data(struct buflib_context *context, int handle)
-{
-    return (void*)(context->handle_table[-handle].ptr);
-}
-#endif
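The commit message says buflib_get_data() is no longer inlined for plugins but can be again if really needed. A sketch of what such a local wrapper could look like, assuming the core buflib_context resolves handles through its handle table exactly as the removed pluginlib header above did; the core context's member layout is not shown in this diff, so verify against firmware's buflib header before relying on this:

    static inline void* my_buflib_get_data(struct buflib_context *ctx, int handle)
    {
        /* assumption: same handle-table layout as the removed pluginlib version */
        return (void*)(ctx->handle_table[-handle].ptr);
    }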
@@ -33,7 +33,6 @@
 #include "lib/grey.h"
 #include "lib/mylcd.h"
 #include "lib/feature_wrappers.h"
-#include "lib/buflib.h"



@@ -924,7 +923,7 @@ void create_track_index(const int slide_index)
     int string_index = 0, track_num;
     int disc_num;
     size_t out = 0;
-    track_names = (char *)buflib_buffer_out(&buf_ctx, &out);
+    track_names = rb->buflib_buffer_out(&buf_ctx, &out);
     borrowed += out;
     int avail = borrowed;
     tracks = (struct track_data*)(track_names + borrowed);
@@ -980,7 +979,7 @@ retry:
     if (!free_slide_prio(0))
         goto fail;
     out = 0;
-    buflib_buffer_out(&buf_ctx, &out);
+    rb->buflib_buffer_out(&buf_ctx, &out);
     avail += out;
     borrowed += out;

@@ -1457,7 +1456,7 @@ static inline void lla_insert_before(int *head, int i, int p)
 static inline void free_slide(int i)
 {
     if (cache[i].hid != empty_slide_hid)
-        buflib_free(&buf_ctx, cache[i].hid);
+        rb->buflib_free(&buf_ctx, cache[i].hid);
     cache[i].index = -1;
     lla_pop_item(&cache_used, i);
     lla_insert_tail(&cache_free, i);
@@ -1521,7 +1520,7 @@ int read_pfraw(char* filename, int prio)
                 sizeof( pix_t ) * bmph.width * bmph.height;

     int hid;
-    while (!(hid = buflib_alloc(&buf_ctx, size)) && free_slide_prio(prio));
+    while (!(hid = rb->buflib_alloc(&buf_ctx, size)) && free_slide_prio(prio));

     if (!hid) {
         rb->close( fh );
@@ -1529,7 +1528,7 @@ int read_pfraw(char* filename, int prio)
     }

     rb->yield(); /* allow audio to play when fast scrolling */
-    struct dim *bm = buflib_get_data(&buf_ctx, hid);
+    struct dim *bm = rb->buflib_get_data(&buf_ctx, hid);

     bm->width = bmph.width;
     bm->height = bmph.height;
@@ -1694,7 +1693,7 @@ static inline struct dim *get_slide(const int hid)

     struct dim *bmp;

-    bmp = buflib_get_data(&buf_ctx, hid);
+    bmp = rb->buflib_get_data(&buf_ctx, hid);

     return bmp;
 }
@@ -2697,7 +2696,7 @@ int main(void)
         configfile_save(CONFIG_FILE, config, CONFIG_NUM_ITEMS, CONFIG_VERSION);
     }

-    buflib_init(&buf_ctx, (void *)buf, buf_size);
+    rb->buflib_init(&buf_ctx, (void *)buf, buf_size);

     if (!(empty_slide_hid = read_pfraw(EMPTY_SLIDE, 0)))
     {
@@ -2832,7 +2831,7 @@ int main(void)
             case PF_BACK:
                 if ( pf_state == pf_show_tracks )
                 {
-                    buflib_buffer_in(&buf_ctx, borrowed);
+                    rb->buflib_buffer_in(&buf_ctx, borrowed);
                     borrowed = 0;
                     track_index = -1;
                     pf_state = pf_cover_out;
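The pictureflow hunks show the borrow pattern surviving the switch unchanged: buflib_buffer_out() lends out the free tail of the managed buffer and buflib_buffer_in() hands it back. Condensed from the hunks above (variable names as in the plugin, surrounding logic stripped):

    size_t out = 0;
    track_names = rb->buflib_buffer_out(&buf_ctx, &out);  /* borrow as much as possible */
    borrowed += out;

    /* ... use the borrowed bytes for the track list ... */

    rb->buflib_buffer_in(&buf_ctx, borrowed);             /* give it back when leaving */
    borrowed = 0;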