Convert a number of allocations to use buflib pinning

Several places in the codebase implemented an ad-hoc form of pinning;
they can be converted to use buflib pinning instead.

Change-Id: I4450be007e80f6c9cc9f56c2929fa4b9b85ebff3
Author: Aidan MacDonald
Date:   2022-04-03 11:16:39 +01:00
Parent: b16bae6fe6
Commit: 1718cf5f8a
10 changed files with 94 additions and 158 deletions
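For orientation, the conversion follows one pattern throughout the diff. The sketch below is illustrative only, not code from the commit: the names my_move_cb, my_ops and load_into_buffer are invented, and it assumes the Rockbox core_alloc/buflib API that the changed files already use (core_alloc_ex(), core_get_data(), core_pin(), core_unpin(), struct buflib_callbacks, BUFLIB_CB_OK/BUFLIB_CB_CANNOT_MOVE) plus the usual firmware headers.

#include "core_alloc.h"   /* core_alloc_ex, core_get_data, core_pin, core_unpin */
#include "buflib.h"       /* struct buflib_callbacks, BUFLIB_CB_OK */
#include "file.h"         /* read(); header name assumed */

static int my_move_cb(int handle, void *current, void *new)
{
    (void)handle; (void)current; (void)new;
    /* Before this commit, an ad-hoc check such as
     *     if (my_lock_count > 0) return BUFLIB_CB_CANNOT_MOVE;
     * lived here. With pinning, buflib itself refuses to move a pinned
     * handle, so the callback only has to relocate cached pointers
     * (none in this sketch). */
    return BUFLIB_CB_OK;
}

static struct buflib_callbacks my_ops = {my_move_cb, NULL, NULL};

static ssize_t load_into_buffer(int fd, size_t size)
{
    int handle = core_alloc_ex("example", size, &my_ops);
    if (handle <= 0)
        return -1;

    core_pin(handle);                       /* buffer cannot move while pinned */
    ssize_t ret = read(fd, core_get_data(handle), size);
    core_unpin(handle);                     /* movable again after the I/O */
    return ret;
}

Each file below follows this shape: the ad-hoc lock state (handle_locked, handle_being_loaded, currently_loading_handle, move_lock, lock_count, buflocked, handle_locks) and its move-callback check are deleted, and the region that may yield during I/O is bracketed with core_pin()/core_unpin() instead.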

View file

@@ -63,7 +63,6 @@ static struct iconset {
struct bitmap bmp;
bool loaded;
int handle;
int handle_locked;
} iconsets[Iconset_Count][NB_SCREENS];
#define ICON_HEIGHT(screen) (!iconsets[Iconset_user][screen].loaded ? \
@@ -160,8 +159,6 @@ static int buflib_move_callback(int handle, void* current, void* new)
struct iconset *set = &iconsets[i][j];
if (set->bmp.data == current)
{
if (set->handle_locked > 0)
return BUFLIB_CB_CANNOT_MOVE;
set->bmp.data = new;
return BUFLIB_CB_OK;
}
@@ -201,11 +198,10 @@ static void load_icons(const char* filename, enum Iconset iconset,
goto finished;
}
lseek(fd, 0, SEEK_SET);
core_pin(ic->handle);
ic->bmp.data = core_get_data(ic->handle);
ic->handle_locked = 1;
size_read = read_bmp_fd(fd, &ic->bmp, buf_size, bmpformat, NULL);
ic->handle_locked = 0;
core_unpin(ic->handle);
if (size_read < 0)
{

View file

@@ -41,7 +41,6 @@ static struct skin_backdrop {
} backdrops[NB_BDROPS];
#define NB_BDROPS SKINNABLE_SCREENS_COUNT*NB_SCREENS
static int handle_being_loaded;
static int current_lcd_backdrop[NB_SCREENS];
bool skin_backdrop_get_debug(int index, char **path, int *ref_count, size_t *size)
@@ -65,8 +64,8 @@ bool skin_backdrop_get_debug(int index, char **path, int *ref_count, size_t *siz
static int buflib_move_callback(int handle, void* current, void* new)
{
if (handle == handle_being_loaded)
return BUFLIB_CB_CANNOT_MOVE;
(void)handle;
for (int i=0; i<NB_BDROPS; i++)
{
if (backdrops[i].buffer == current)
@@ -96,7 +95,6 @@ bool skin_backdrop_init(void)
}
FOR_NB_SCREENS(i)
current_lcd_backdrop[i] = -1;
handle_being_loaded = -1;
first_go = false;
}
return go_status;
@@ -182,16 +180,16 @@ bool skin_backdrops_preload(void)
backdrops[i].buffer = core_get_data(backdrops[i].buflib_handle);
if (strcmp(filename, BACKDROP_BUFFERNAME))
{
handle_being_loaded = backdrops[i].buflib_handle;
core_pin(backdrops[i].buflib_handle);
backdrops[i].loaded =
screens[screen].backdrop_load(filename, backdrops[i].buffer);
core_unpin(backdrops[i].buflib_handle);
if (!backdrops[i].loaded)
{
core_free(backdrops[i].buflib_handle);
backdrops[i].buflib_handle = -1;
retval = false;
}
handle_being_loaded = -1;
}
else
backdrops[i].loaded = true;
@@ -295,12 +293,12 @@ void skin_backdrop_load_setting(void)
return;
}
bool loaded;
core_pin(backdrops[i].buflib_handle);
backdrops[i].buffer = core_get_data(backdrops[i].buflib_handle);
handle_being_loaded = backdrops[i].buflib_handle;
loaded = screens[SCREEN_MAIN].backdrop_load(
global_settings.backdrop_file,
backdrops[i].buffer);
handle_being_loaded = -1;
core_unpin(backdrops[i].buflib_handle);
backdrops[i].name[2] = loaded ? '.' : '\0';
backdrops[i].loaded = loaded;
return;

View file

@@ -1870,13 +1870,11 @@ static void skin_data_reset(struct wps_data *wps_data)
}
#ifndef __PCTOOL__
static int currently_loading_handle = -1;
static int buflib_move_callback(int handle, void* current, void* new)
{
(void)handle;
(void)current;
(void)new;
if (handle == currently_loading_handle)
return BUFLIB_CB_CANNOT_MOVE;
/* Any active skins may be scrolling - which means using viewports which
* will be moved after this callback returns. This is a hammer to make that
* safe. TODO: use a screwdriver instead.
@@ -1890,14 +1888,6 @@ static int buflib_move_callback(int handle, void* current, void* new)
return BUFLIB_CB_OK;
}
static struct buflib_callbacks buflib_ops = {buflib_move_callback, NULL, NULL};
static void lock_handle(int handle)
{
currently_loading_handle = handle;
}
static void unlock_handle(void)
{
currently_loading_handle = -1;
}
#endif
static int load_skin_bmp(struct wps_data *wps_data, struct bitmap *bitmap, char* bmpdir)
@@ -1944,12 +1934,12 @@ static int load_skin_bmp(struct wps_data *wps_data, struct bitmap *bitmap, char*
_stats->buflib_handles++;
_stats->images_size += buf_size;
lseek(fd, 0, SEEK_SET);
lock_handle(handle);
core_pin(handle);
bitmap->data = core_get_data(handle);
int ret = read_bmp_fd(fd, bitmap, buf_size, format, NULL);
bitmap->data = NULL; /* do this to force a crash later if the
caller doesnt call core_get_data() */
unlock_handle();
core_unpin(handle);
close(fd);
if (ret > 0)
{

View file

@@ -277,17 +277,16 @@ static struct tcramcache
{
struct ramcache_header *hdr; /* allocated ramcache_header */
int handle; /* buffer handle */
int move_lock;
} tcramcache;
static inline void tcrc_buffer_lock(void)
{
tcramcache.move_lock++;
core_pin(tcramcache.handle);
}
static inline void tcrc_buffer_unlock(void)
{
tcramcache.move_lock--;
core_unpin(tcramcache.handle);
}
#else /* ndef HAVE_TC_RAMCACHE */
@@ -3861,10 +3860,9 @@ static bool delete_entry(long idx_id)
struct tagfile_entry *tfe;
int32_t *seek = &tcramcache.hdr->indices[idx_id].tag_seek[tag];
/* crc_32 is assumed not to yield (why would it...?) */
tfe = (struct tagfile_entry *)&tcramcache.hdr->tags[tag][*seek];
tcrc_buffer_lock(); /* protect tfe and seek if crc_32() yield()s */
*seek = crc_32(tfe->tag_data, strlen(tfe->tag_data), 0xffffffff);
tcrc_buffer_unlock();
myidx.tag_seek[tag] = *seek;
}
else
@@ -4005,9 +4003,6 @@ static void fix_ramcache(void* old_addr, void* new_addr)
static int move_cb(int handle, void* current, void* new)
{
(void)handle;
if (tcramcache.move_lock > 0)
return BUFLIB_CB_CANNOT_MOVE;
fix_ramcache(current, new);
tcramcache.hdr = new;
return BUFLIB_CB_OK;
@@ -4092,8 +4087,8 @@ static bool tagcache_dumpload(void)
return false;
}
tcrc_buffer_lock();
tcramcache.handle = handle;
tcrc_buffer_lock();
tcramcache.hdr = core_get_data(handle);
rc = read(fd, tcramcache.hdr, shdr.tc_stat.ramcache_allocated);
tcrc_buffer_unlock();

View file

@@ -192,7 +192,7 @@ static int current_entry_count;
static struct tree_context *tc;
/* a few memory alloc helper */
static int tagtree_handle, lock_count;
static int tagtree_handle;
static size_t tagtree_bufsize, tagtree_buf_used;
#define UPDATE(x, y) { x = (typeof(x))((char*)(x) + (y)); }
@@ -201,9 +198,6 @@ static int move_callback(int handle, void* current, void* new)
(void)handle; (void)current; (void)new;
ptrdiff_t diff = new - current;
if (lock_count > 0)
return BUFLIB_CB_CANNOT_MOVE;
if (menu)
UPDATE(menu, diff);
@@ -251,16 +248,6 @@ static int move_callback(int handle, void* current, void* new)
}
#undef UPDATE
static inline void tagtree_lock(void)
{
lock_count++;
}
static inline void tagtree_unlock(void)
{
lock_count--;
}
static struct buflib_callbacks ops = {
.move_callback = move_callback,
.shrink_callback = NULL,
@@ -623,7 +610,7 @@ static int add_format(const char *buf)
int clause_count = 0;
strp++;
tagtree_lock();
core_pin(tagtree_handle);
while (1)
{
struct tagcache_search_clause *new_clause;
@@ -646,7 +633,7 @@ static int add_format(const char *buf)
clause_count++;
}
tagtree_unlock();
core_unpin(tagtree_handle);
formats[format_count]->clause_count = clause_count;
}
@@ -719,9 +706,9 @@ static int get_condition(struct search_instruction *inst)
}
else
{
tagtree_lock();
core_pin(tagtree_handle);
bool ret = read_clause(new_clause);
tagtree_unlock();
core_unpin(tagtree_handle);
if (!ret)
return -1;
}
@@ -812,9 +799,9 @@ static bool parse_search(struct menu_entry *entry, const char *str)
logf("tag: %d", inst->tagorder[inst->tagorder_count]);
tagtree_lock();
core_pin(tagtree_handle);
while ( (ret = get_condition(inst)) > 0 ) ;
tagtree_unlock();
core_unpin(tagtree_handle);
if (ret < 0)
return false;
@@ -1176,10 +1163,10 @@ static int parse_line(int n, char *buf, void *parameters)
logf("tagtree failed to allocate %s", "menu items");
return -2;
}
tagtree_lock();
core_pin(tagtree_handle);
if (parse_search(menu->items[menu->itemcount], buf))
menu->itemcount++;
tagtree_unlock();
core_unpin(tagtree_handle);
return 0;
}
@@ -1212,8 +1199,8 @@ static bool parse_menu(const char *filename)
static void tagtree_unload(struct tree_context *c)
{
int i;
tagtree_lock();
/* may be spurious... */
core_pin(tagtree_handle);
remove_event(PLAYBACK_EVENT_TRACK_BUFFER, tagtree_buffer_event);
remove_event(PLAYBACK_EVENT_TRACK_FINISH, tagtree_track_finish_event);
@@ -1229,7 +1216,7 @@ static void tagtree_unload(struct tree_context *c)
return;
}
for (i = 0; i < menu->itemcount; i++)
for (int i = 0; i < menu->itemcount; i++)
{
dptr->name = NULL;
dptr->newtable = 0;
@@ -1238,11 +1225,11 @@ static void tagtree_unload(struct tree_context *c)
}
}
for (i = 0; i < menu_count; i++)
for (int i = 0; i < menu_count; i++)
menus[i] = NULL;
menu_count = 0;
for (i = 0; i < format_count; i++)
for (int i = 0; i < format_count; i++)
formats[i] = NULL;
format_count = 0;
@@ -1253,9 +1240,6 @@ static void tagtree_unload(struct tree_context *c)
if (c)
tree_unlock_cache(c);
tagtree_unlock();
if (lock_count > 0)
tagtree_unlock();/* second unlock to enable re-init */
}
void tagtree_init(void)
@@ -1285,8 +1269,6 @@ void tagtree_init(void)
if (sizeof(struct tagentry) != sizeof(struct entry))
panicf("tagentry(%zu) and entry mismatch(%zu)",
sizeof(struct tagentry), sizeof(struct entry));
if (lock_count > 0)
panicf("tagtree locked after parsing");
/* If no root menu is set, assume it's the first single menu
* we have. That shouldn't normally happen. */
@@ -1502,7 +1484,7 @@ static int retrieve_entries(struct tree_context *c, int offset, bool init)
/* because tagcache saves the clauses, we need to lock the buffer
* for the entire duration of the search */
tagtree_lock();
core_pin(tagtree_handle);
for (i = 0; i <= level; i++)
{
int j;
@@ -1579,7 +1561,7 @@ static int retrieve_entries(struct tree_context *c, int offset, bool init)
fmt = NULL;
/* Check the format */
tagtree_lock();
core_pin(tagtree_handle);
for (i = 0; i < format_count; i++)
{
if (formats[i]->group_id != csi->format_id[level])
@@ -1592,7 +1574,7 @@ static int retrieve_entries(struct tree_context *c, int offset, bool init)
break;
}
}
tagtree_unlock();
core_unpin(tagtree_handle);
if (strcmp(tcs.result, UNTAGGED) == 0)
{
@@ -1634,7 +1616,7 @@ static int retrieve_entries(struct tree_context *c, int offset, bool init)
logf("format_str() failed");
tagcache_search_finish(&tcs);
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
return 0;
}
else
@@ -1675,7 +1657,7 @@ entry_skip_formatter:
{ /* user aborted */
tagcache_search_finish(&tcs);
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
return current_entry_count;
}
}
@@ -1694,7 +1676,7 @@ entry_skip_formatter:
{
tagcache_search_finish(&tcs);
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
return current_entry_count;
}
@@ -1710,7 +1692,7 @@ entry_skip_formatter:
tagcache_search_finish(&tcs);
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
if (!sort && (sort_inverse || sort_limit))
{
@@ -1870,7 +1852,7 @@ int tagtree_enter(struct tree_context* c)
/* lock buflib for possible I/O to protect dptr */
tree_lock_cache(c);
tagtree_lock();
core_pin(tagtree_handle);
bool reset_selection = true;
@@ -1940,7 +1922,7 @@ int tagtree_enter(struct tree_context* c)
{
tagtree_exit(c);
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
return 0;
}
if (csi->clause[i][j]->numeric)
@@ -1999,7 +1981,7 @@ int tagtree_enter(struct tree_context* c)
}
tree_unlock_cache(c);
tagtree_unlock();
core_unpin(tagtree_handle);
return rc;
}

View file

@@ -314,6 +314,18 @@ struct tree_context* tree_get_context(void)
return &tc;
}
void tree_lock_cache(struct tree_context *t)
{
core_pin(t->cache.name_buffer_handle);
core_pin(t->cache.entries_handle);
}
void tree_unlock_cache(struct tree_context *t)
{
core_unpin(t->cache.name_buffer_handle);
core_unpin(t->cache.entries_handle);
}
/*
* Returns the position of a given file in the current directory
* returns -1 if not found
@@ -1020,9 +1032,6 @@ int rockbox_browse(struct browse_context *browse)
static int move_callback(int handle, void* current, void* new)
{
struct tree_cache* cache = &tc.cache;
if (cache->lock_count > 0)
return BUFLIB_CB_CANNOT_MOVE;
ptrdiff_t diff = new - current;
/* FIX_PTR makes sure to not accidentally update static allocations */
#define FIX_PTR(x) \

View file

@@ -52,7 +52,6 @@ struct tree_cache {
int name_buffer_handle; /* handle to the name cache */
int max_entries; /* Max entries in the cache */
int name_buffer_size; /* in bytes */
volatile int lock_count; /* non-0 if buffers may not move */
};
struct browse_context {
@@ -120,14 +119,10 @@ void browse_context_init(struct browse_context *browse,
int rockbox_browse(struct browse_context *browse);
int create_playlist(void);
void resume_directory(const char *dir);
static inline void tree_lock_cache(struct tree_context *t)
{
t->cache.lock_count++;
}
static inline void tree_unlock_cache(struct tree_context *t)
{
t->cache.lock_count--;
}
void tree_lock_cache(struct tree_context *t);
void tree_unlock_cache(struct tree_context *t);
#ifdef WIN32
/* it takes an int on windows */
#define getcwd_size_t int

View file

@@ -249,7 +249,6 @@ static struct dircache_runinfo
/* cache buffer info */
int handle; /* buflib buffer handle */
size_t bufsize; /* size of buflib allocation - 1 */
int buflocked; /* don't move due to other allocs */
union {
void *p; /* address of buffer - ENTRYSIZE */
struct dircache_entry *pentry; /* alias of .p to assist entry resolution */
@@ -329,29 +328,9 @@ static inline void dumpster_clean_buffer(void *p, size_t size)
*/
static int move_callback(int handle, void *current, void *new)
{
if (dircache_runinfo.buflocked)
return BUFLIB_CB_CANNOT_MOVE;
dircache_runinfo.p = new - ENTRYSIZE;
return BUFLIB_CB_OK;
(void)handle; (void)current;
}
/**
* add a "don't move" lock count
*/
static inline void buffer_lock(void)
{
dircache_runinfo.buflocked++;
}
/**
* remove a "don't move" lock count
*/
static inline void buffer_unlock(void)
{
dircache_runinfo.buflocked--;
dircache_runinfo.p = new - ENTRYSIZE;
return BUFLIB_CB_OK;
}
@@ -530,14 +509,14 @@ static void set_buffer(int handle, size_t size)
/**
* remove the allocation from dircache control and return the handle
* Note that dircache must not be using the buffer!
*/
static int reset_buffer(void)
{
int handle = dircache_runinfo.handle;
if (handle > 0)
{
/* don't mind .p; it might get changed by the callback even after
this call; buffer presence is determined by the following: */
/* don't mind .p; buffer presence is determined by the following: */
dircache_runinfo.handle = 0;
dircache_runinfo.bufsize = 0;
}
@@ -1857,7 +1836,7 @@ static void reset_cache(void)
*/
static void build_volumes(void)
{
buffer_lock();
core_pin(dircache_runinfo.handle);
for (int i = 0; i < NUM_VOLUMES; i++)
{
@@ -1903,12 +1882,14 @@ static void build_volumes(void)
logf("Done, %ld KiB used", dircache.size / 1024);
buffer_unlock();
core_unpin(dircache_runinfo.handle);
}
/**
* allocate buffer and return whether or not a synchronous build should take
* place; if 'realloced' is NULL, it's just a query about what will happen
*
* Note this must be called with the dircache_lock() active.
*/
static int prepare_build(bool *realloced)
{
@@ -1958,16 +1939,14 @@ static int prepare_build(bool *realloced)
*realloced = true;
reset_cache();
buffer_lock();
int handle = reset_buffer();
dircache_unlock();
dircache_unlock(); /* release lock held by caller */
core_free(handle);
handle = alloc_cache(size);
dircache_lock();
dircache_lock(); /* reacquire lock */
if (dircache_runinfo.suspended && handle > 0)
{
@@ -1979,13 +1958,9 @@ static int prepare_build(bool *realloced)
}
if (handle <= 0)
{
buffer_unlock();
return -1;
}
set_buffer(handle, size);
buffer_unlock();
return syncbuild;
}
@@ -2384,9 +2359,9 @@ void dircache_fileop_create(struct file_base_info *dirinfop,
if ((dinp->attr & ATTR_DIRECTORY) && !is_dotdir_name(basename))
{
/* scan-in the contents of the new directory at this level only */
buffer_lock();
core_pin(dircache_runinfo.handle);
sab_process_dir(infop, false);
buffer_unlock();
core_unpin(dircache_runinfo.handle);
}
}
@@ -2915,7 +2890,7 @@ void dircache_dump(void)
if (dircache_runinfo.handle)
{
buffer_lock();
core_pin(dircache_runinfo.handle);
/* bin */
write(fdbin, dircache_runinfo.p + ENTRYSIZE,
@@ -2985,7 +2960,7 @@ void dircache_dump(void)
tm.tm_hour, tm.tm_min, tm.tm_sec);
}
buffer_unlock();
core_unpin(dircache_runinfo.handle);
}
dircache_unlock();
@@ -3104,7 +3079,6 @@ int dircache_load(void)
}
dircache_lock();
buffer_lock();
if (!dircache_is_clean(false))
goto error;
@@ -3113,6 +3087,7 @@ int dircache_load(void)
dircache = maindata.dircache;
set_buffer(handle, bufsize);
core_pin(handle);
hasbuffer = true;
/* convert back to in-RAM representation */
@@ -3167,13 +3142,13 @@ int dircache_load(void)
dircache_enable_internal(false);
/* cache successfully loaded */
core_unpin(handle);
logf("Done, %ld KiB used", dircache.size / 1024);
rc = 0;
error:
if (rc < 0 && hasbuffer)
reset_buffer();
buffer_unlock();
dircache_unlock();
error_nolock:
@@ -3199,8 +3174,9 @@ int dircache_save(void)
if (fd < 0)
return -1;
/* it seems the handle *must* exist if this function is called */
dircache_lock();
buffer_lock();
core_pin(dircache_runinfo.handle);
int rc = -1;
@@ -3269,7 +3245,7 @@ int dircache_save(void)
that makes what was saved completely invalid */
rc = 0;
error:
buffer_unlock();
core_unpin(dircache_runinfo.handle);
dircache_unlock();
if (rc < 0)

View file

@@ -168,6 +168,10 @@ static unsigned short default_cp_table_buf[MAX_CP_TABLE_SIZE+1];
do {} while (0)
#define cp_table_alloc(filename, size, opsp) \
({ (void)(opsp); 1; })
#define cp_table_pin(handle) \
do { (void)handle; } while(0)
#define cp_table_unpin(handle) \
do { (void)handle; } while(0)
#else
#define cp_table_alloc(filename, size, opsp) \
core_alloc_ex((filename), (size), (opsp))
@@ -175,6 +179,10 @@ static unsigned short default_cp_table_buf[MAX_CP_TABLE_SIZE+1];
core_free(handle)
#define cp_table_get_data(handle) \
core_get_data(handle)
#define cp_table_pin(handle) \
core_pin(handle)
#define cp_table_unpin(handle) \
core_unpin(handle)
#endif
static const unsigned char utf8comp[6] =
@@ -191,21 +199,8 @@ static inline void cptable_tohw16(uint16_t *buf, unsigned int count)
(void)buf; (void)count;
}
static int move_callback(int handle, void *current, void *new)
{
/* we don't keep a pointer but we have to stop it if this applies to a
buffer not yet swapped-in since it will likely be in use in an I/O
call */
return (handle != default_cp_handle || default_cp_table_ref != 0) ?
BUFLIB_CB_CANNOT_MOVE : BUFLIB_CB_OK;
(void)current; (void)new;
}
static int alloc_and_load_cp_table(int cp, void *buf)
{
static struct buflib_callbacks ops =
{ .move_callback = move_callback };
/* alloc and read only if there is an associated file */
const char *filename = cp_info[cp].filename;
if (!filename)
@@ -228,13 +223,17 @@ static int alloc_and_load_cp_table(int cp, void *buf)
!(size % (off_t)sizeof (uint16_t))) {
/* if the buffer is provided, use that but don't alloc */
int handle = buf ? 0 : cp_table_alloc(filename, size, &ops);
if (handle > 0)
int handle = buf ? 0 : cp_table_alloc(filename, size, NULL);
if (handle > 0) {
cp_table_pin(handle);
buf = cp_table_get_data(handle);
}
if (buf && read(fd, buf, size) == size) {
close(fd);
cptable_tohw16(buf, size / sizeof (uint16_t));
if (handle > 0)
cp_table_unpin(handle);
return handle;
}

View file

@@ -90,7 +90,6 @@ extern struct font sysfont;
struct buflib_alloc_data {
struct font font; /* must be the first member! */
int handle_locks; /* is the buflib handle currently locked? */
int refcount; /* how many times has this font been loaded? */
unsigned char buffer[];
};
@@ -107,9 +106,6 @@ static int buflibmove_callback(int handle, void* current, void* new)
struct buflib_alloc_data *alloc = (struct buflib_alloc_data*)current;
ptrdiff_t diff = new - current;
if (alloc->handle_locks > 0)
return BUFLIB_CB_CANNOT_MOVE;
#define UPDATE(x) if (x) { x = PTR_ADD(x, diff); }
UPDATE(alloc->font.bits);
@@ -129,18 +125,18 @@ static void lock_font_handle(int handle, bool lock)
{
if ( handle < 0 )
return;
struct buflib_alloc_data *alloc = core_get_data(handle);
if (lock)
alloc->handle_locks++;
core_pin(handle);
else
alloc->handle_locks--;
core_unpin(handle);
}
void font_lock(int font_id, bool lock)
{
if( font_id < 0 || font_id >= MAXFONTS )
return;
if( buflib_allocations[font_id] >= 0 )
if( buflib_allocations[font_id] > 0 )
lock_font_handle(buflib_allocations[font_id], lock);
}
@@ -522,8 +518,8 @@ int font_load_ex( const char *path, size_t buf_size, int glyphs )
return -1;
}
struct buflib_alloc_data *pdata;
core_pin(handle);
pdata = core_get_data(handle);
pdata->handle_locks = 1;
pdata->refcount = 1;
/* load and init */