authorThomas Martitz <kugel@rockbox.org>2011-08-30 14:01:33 +0000
committerThomas Martitz <kugel@rockbox.org>2011-08-30 14:01:33 +0000
commitd0b72e25903574acb1cf9184a6052cdd646dbc37 (patch)
tree5be8db5ee00b2a727e4821cf51a5f7bcf3991073 /firmware
parentc940811ade7d99a0e0d414df7c6509672413684a (diff)
downloadrockbox-d0b72e25903574acb1cf9184a6052cdd646dbc37.tar.gz
rockbox-d0b72e25903574acb1cf9184a6052cdd646dbc37.zip
GSoC/Buflib: Add buflib memory allocator to the core.
The buflib memory allocator is handle based and can free, compact, move or resize memory on demand. This allows memory to be allocated dynamically and efficiently without an MMU, avoiding fragmentation through memory compaction. This patch adds the buflib library to the core, along with convenience wrappers that omit the context parameter. Compaction is not yet enabled, but will be in a later patch. For now, this acts as a replacement for buffer_alloc/buffer_get_buffer() with the benefit of a debug menu. See buflib.h for some API documentation. git-svn-id: svn://svn.rockbox.org/rockbox/trunk@30380 a1c6a512-1295-4272-9138-f99709370657
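As orientation for reviewers, here is a minimal sketch (not part of the commit; the allocation name and size are illustrative) of how a call site migrates from buffer_alloc() to the new handle-based wrappers:

    #include "core_alloc.h"

    static int handle;

    void example_init(void)
    {
        /* core_alloc() returns a handle > 0, or 0 on failure */
        handle = core_alloc("example", 1024);
        if (handle <= 0)
            return;
        /* always fetch the pointer through the handle; once compaction
         * is enabled, allocations may move between fetches */
        char *data = core_get_data(handle);
        memset(data, 0, 1024);
    }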
Diffstat (limited to 'firmware')
-rw-r--r--firmware/SOURCES2
-rw-r--r--firmware/buflib.c777
-rw-r--r--firmware/common/dircache.c23
-rw-r--r--firmware/core_alloc.c57
-rw-r--r--firmware/export/audio.h1
-rw-r--r--firmware/include/buflib.h319
-rw-r--r--firmware/include/core_alloc.h36
-rw-r--r--firmware/rolo.c8
-rw-r--r--firmware/target/arm/ata-nand-telechips.c15
-rw-r--r--firmware/target/arm/tms320dm320/creative-zvm/ata-creativezvm.c21
10 files changed, 1242 insertions, 17 deletions
diff --git a/firmware/SOURCES b/firmware/SOURCES
index f685ed7dc7..4517c37e7f 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -2,6 +2,8 @@ ata_idle_notify.c
2events.c 2events.c
3backlight.c 3backlight.c
4buffer.c 4buffer.c
5buflib.c
6core_alloc.c
5general.c 7general.c
6load_code.c 8load_code.c
7powermgmt.c 9powermgmt.c
diff --git a/firmware/buflib.c b/firmware/buflib.c
new file mode 100644
index 0000000000..51cf86bf5b
--- /dev/null
+++ b/firmware/buflib.c
@@ -0,0 +1,777 @@
1/***************************************************************************
2* __________ __ ___.
3* Open \______ \ ____ ____ | | _\_ |__ _______ ___
4* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7* \/ \/ \/ \/ \/
8* $Id$
9*
10* This is a memory allocator designed to provide reasonable management of free
11* space and fast access to allocated data. More than one allocator can be used
12* at a time by initializing multiple contexts.
13*
14* Copyright (C) 2009 Andrew Mahone
15* Copyright (C) 2011 Thomas Martitz
16*
17*
18* This program is free software; you can redistribute it and/or
19* modify it under the terms of the GNU General Public License
20* as published by the Free Software Foundation; either version 2
21* of the License, or (at your option) any later version.
22*
23* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
24* KIND, either express or implied.
25*
26****************************************************************************/
27
28#include <stdlib.h> /* for abs() */
29#include <stdio.h> /* for snprintf() */
30#include "buflib.h"
31#include "string-extra.h" /* strlcpy() */
32#include "debug.h"
33#include "buffer.h"
34#include "system.h" /* for ALIGN_*() */
35
36/* The main goal of this design is fast fetching of the pointer for a handle.
37 * For that reason, the handles are stored in a table at the end of the buffer
38 * with a fixed address, so that returning the pointer for a handle is a simple
39 * table lookup. To reduce the frequency with which allocated blocks will need
40 * to be moved to free space, allocations grow up in address from the start of
41 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
42 * start with a length marker, which is included in their length. Free blocks
43 * are marked by negative length. Allocated blocks have a positive length marker,
44 * and additional metadata following it: first comes a pointer
45 * (union buflib_data*) to the corresponding handle table entry, so that it can
46 * be quickly found and updated during compaction. After that follows
47 * the pointer to the struct buflib_callbacks associated with this allocation
48 * (may be NULL). That pointer is followed by a variable-length character array
49 * containing the nul-terminated string identifier of the allocation. After this
50 * array there's a length marker for the length of the character array including
51 * this length marker (counted in n*sizeof(union buflib_data)), which allows
52 * finding the start of the character array (and therefore the start of the
53 * entire block) when only the handle or payload start is known.
54 *
55 * Example:
56 * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
57 * |L|H|C|cccc|L2|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|XXXXXXXXXXXXX|AAA|
58 *
59 * L - length marker (negative if block unallocated)
60 * H - handle table entry pointer
61 * C - pointer to struct buflib_callbacks
62 * c - variable sized string identifier
63 * L2 - second length marker for string identifier
64 * X - actual payload
65 * Y - unallocated space
66 *
67 * A - pointer to start of payload (first X) in the handle table (may be null)
68 *
69 * The blocks can be walked by jumping by the abs() of the L length marker, i.e.
70 * union buflib_data* L;
71 * for(L = start; L < end; L += abs(L->val)) { .... }
72 *
73 *
74 * The allocator functions are passed a context struct so that two allocators
75 * can be run, for example, one per core may be used, with convenience wrappers
76 * for the single-allocator case that use a predefined context.
77 */
78
79#define B_ALIGN_DOWN(x) \
80 ALIGN_DOWN(x, sizeof(union buflib_data))
81
82#define B_ALIGN_UP(x) \
83 ALIGN_UP(x, sizeof(union buflib_data))
84
85#ifdef DEBUG
86 #include <stdio.h>
87 #define BDEBUGF DEBUGF
88#else
89 #define BDEBUGF(...) do { } while(0)
90#endif
91
92/* Initialize buffer manager */
93void
94buflib_init(struct buflib_context *ctx, void *buf, size_t size)
95{
96 union buflib_data *bd_buf = buf;
97
98 /* Align on sizeof(buflib_data), to prevent unaligned access */
99 ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
100 size /= sizeof(union buflib_data);
101 /* The handle table is initialized with no entries */
102 ctx->handle_table = bd_buf + size;
103 ctx->last_handle = bd_buf + size;
104 ctx->first_free_handle = bd_buf + size - 1;
105 ctx->first_free_block = bd_buf;
106 ctx->buf_start = bd_buf;
107 /* A marker is needed for the end of allocated data, to make sure that it
108 * does not collide with the handle table, and to detect end-of-buffer.
109 */
110 ctx->alloc_end = bd_buf;
111 ctx->compact = true;
112
113 BDEBUGF("buflib initialized with %d.%02d kiB", size / 1024, ((size % 1024) * 100) / 1024);
114}
115
116/* Allocate a new handle, returning 0 on failure */
117static inline
118union buflib_data* handle_alloc(struct buflib_context *ctx)
119{
120 union buflib_data *handle;
121 /* first_free_handle is a lower bound on free handles, work through the
122 * table from there until a handle containing NULL is found, or the end
123 * of the table is reached.
124 */
125 for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
126 if (!handle->alloc)
127 break;
128 /* If the search went past the end of the table, it means we need to extend
129 * the table to get a new handle.
130 */
131 if (handle < ctx->last_handle)
132 {
133 if (handle >= ctx->alloc_end)
134 ctx->last_handle--;
135 else
136 return NULL;
137 }
138 handle->val = -1;
139 return handle;
140}
141
142/* Free one handle, shrinking the handle table if it's the last one */
143static inline
144void handle_free(struct buflib_context *ctx, union buflib_data *handle)
145{
146 handle->alloc = 0;
147 /* Update free handle lower bound if this handle has a lower index than the
148 * old one.
149 */
150 if (handle > ctx->first_free_handle)
151 ctx->first_free_handle = handle;
152 if (handle == ctx->last_handle)
153 ctx->last_handle++;
154 else
155 ctx->compact = false;
156}
157
158/* Get the start block of an allocation */
159static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
160{
161 union buflib_data* name_field =
162 (union buflib_data*)buflib_get_name(ctx, handle);
163
164 return name_field - 3;
165}
166
167/* Shrink the handle table, returning true if its size was reduced, false if
168 * not
169 */
170static inline
171bool
172handle_table_shrink(struct buflib_context *ctx)
173{
174 bool rv;
175 union buflib_data *handle;
176 for (handle = ctx->last_handle; !(handle->alloc); handle++);
177 if (handle > ctx->first_free_handle)
178 ctx->first_free_handle = handle - 1;
179 rv = handle == ctx->last_handle;
180 ctx->last_handle = handle;
181 return rv;
182}
183
184
185/* If shift is non-zero, it represents the number of places to move
186 * blocks in memory. Calculate the new address for this block,
187 * update its entry in the handle table, and then move its contents.
188 *
189 * Returns false if moving was unsuccessful
190 * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned)
191 */
192static bool
193move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
194{
195#if 1 /* moving temporarily disabled */
196 (void)ctx;(void)block;(void)shift;
197 return false;
198#else
199 char* new_start;
200 union buflib_data *new_block, *tmp = block[1].handle;
201 struct buflib_callbacks *ops = block[2].ops;
202 if (ops && !ops->move_callback)
203 return false;
204
205 int handle = ctx->handle_table - tmp;
206 BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
207 handle, shift, shift*sizeof(union buflib_data));
208 new_block = block + shift;
209 new_start = tmp->alloc + shift*sizeof(union buflib_data);
210 /* call the callback before moving */
211 if (ops)
212 {
213 if (ops->move_callback(handle, tmp->alloc, new_start)
214 == BUFLIB_CB_CANNOT_MOVE)
215 return false;
216 }
217 tmp->alloc = new_start; /* update handle table */
218 memmove(new_block, block, block->val * sizeof(union buflib_data));
219
220 return true;
221#endif
222}
223
224/* Compact allocations and handle table, adjusting handle pointers as needed.
225 * Return true if any space was freed or consolidated, false otherwise.
226 */
227static bool
228buflib_compact(struct buflib_context *ctx)
229{
230 BDEBUGF("%s(): Compacting!\n", __func__);
231 union buflib_data *block;
232 int shift = 0, len;
233 /* Store the results of attempting to shrink the handle table */
234 bool ret = handle_table_shrink(ctx);
235 for(block = ctx->first_free_block; block != ctx->alloc_end; block += len)
236 {
237 len = block->val;
238 /* This block is free, add its length to the shift value */
239 if (len < 0)
240 {
241 shift += len;
242 len = -len;
243 continue;
244 }
245 /* attempt to fill any hole */
246 if (-ctx->first_free_block->val > block->val)
247 {
248 intptr_t size = ctx->first_free_block->val;
249 if (move_block(ctx, block, ctx->first_free_block - block))
250 {
251 /* moving was successful. Mark the next block as the new
252 * first_free_block and merge it with the free space
253 * that the move created */
254 ctx->first_free_block += block->val;
255 ctx->first_free_block->val = size + block->val;
256 continue;
257 }
258 }
259 /* attempt to move the allocation by shift */
260 if (shift)
261 {
262 /* failing to move creates a hole, therefore mark this
263 * block as not allocated anymore and move first_free_block up */
264 if (!move_block(ctx, block, shift))
265 {
266 union buflib_data* hole = block + shift;
267 hole->val = shift;
268 if (ctx->first_free_block > hole)
269 ctx->first_free_block = hole;
270 shift = 0;
271 }
272 /* if the move was successful, the moved block may now sit where the
273 * first free one was, so move first_free_block up accordingly */
274 else if (ctx->first_free_block == block+shift)
275 {
276 ctx->first_free_block += ctx->first_free_block->val;
277 ctx->first_free_block->val = shift;
278 }
279 }
280 }
281 /* Move the end-of-allocation mark, and return true if any new space has
282 * been freed.
283 */
284 ctx->alloc_end += shift;
285 /* only move first_free_block up if it wasn't already moved up by a hole */
286 if (ctx->first_free_block > ctx->alloc_end)
287 ctx->first_free_block = ctx->alloc_end;
288 ctx->compact = true;
289 return ret || shift;
290}
291
292/* Compact the buffer by trying both shrinking and moving.
293 *
294 * Try to move first. If unsuccessful, try to shrink. If that was successful
295 * try to move once more as there might be more room now.
296 */
297static bool
298buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
299{
300 bool result = false;
301 /* if the buffer is already compact, compacting again brings no gain */
302 if (!ctx->compact)
303 result = buflib_compact(ctx);
304 if (!result)
305 {
306 union buflib_data* this;
307 for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
308 {
309 if (this->val > 0 && this[2].ops
310 && this[2].ops->shrink_callback)
311 {
312 int ret;
313 int handle = ctx->handle_table - this[1].handle;
314 char* data = this[1].handle->alloc;
315 ret = this[2].ops->shrink_callback(handle, shrink_hints,
316 data, (char*)(this+this->val)-data);
317 result |= (ret == BUFLIB_CB_OK);
318 /* this might have changed in the callback (if
319 * it shrank from the top), so get it again */
320 this = handle_to_block(ctx, handle);
321 }
322 }
323 /* shrinking was successful at least once, try compaction again */
324 if (result)
325 result |= buflib_compact(ctx);
326 }
327
328 return result;
329}
330
331/* Shift buffered items by size units, and update handle pointers. The shift
332 * value must be determined to be safe *before* calling.
333 */
334static void
335buflib_buffer_shift(struct buflib_context *ctx, int shift)
336{
337 memmove(ctx->buf_start + shift, ctx->buf_start,
338 (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
339 union buflib_data *handle;
340 for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
341 if (handle->alloc)
342 handle->alloc += shift;
343 ctx->first_free_block += shift;
344 ctx->buf_start += shift;
345 ctx->alloc_end += shift;
346}
347
348/* Shift buffered items up by size bytes, or as many as possible if size == 0.
349 * Set size to the number of bytes freed.
350 */
351void*
352buflib_buffer_out(struct buflib_context *ctx, size_t *size)
353{
354 if (!ctx->compact)
355 buflib_compact(ctx);
356 size_t avail = ctx->last_handle - ctx->alloc_end;
357 size_t avail_b = avail * sizeof(union buflib_data);
358 if (*size && *size < avail_b)
359 {
360 avail = (*size + sizeof(union buflib_data) - 1)
361 / sizeof(union buflib_data);
362 avail_b = avail * sizeof(union buflib_data);
363 }
364 *size = avail_b;
365 void *ret = ctx->buf_start;
366 buflib_buffer_shift(ctx, avail);
367 return ret;
368}
369
370/* Shift buffered items down by size bytes */
371void
372buflib_buffer_in(struct buflib_context *ctx, int size)
373{
374 size /= sizeof(union buflib_data);
375 buflib_buffer_shift(ctx, -size);
376}
377
378/* Allocate a buffer of size bytes, returning a handle for it */
379int
380buflib_alloc(struct buflib_context *ctx, size_t size)
381{
382 return buflib_alloc_ex(ctx, size, "<anonymous>", NULL);
383}
384
385/* Allocate a buffer of size bytes, returning a handle for it.
386 *
387 * The additional name parameter gives the allocation a human-readable name,
388 * the ops parameter points to caller-implemented callbacks for moving and
389 * shrinking. NULL for default callbacks (which do nothing but don't
390 * prevent moving or shrinking)
391 */
392
393int
394buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
395 struct buflib_callbacks *ops)
396{
397 union buflib_data *handle, *block;
398 size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
399 bool last;
400 /* This really is assigned a value before use */
401 int block_len;
402 size += name_len;
403 size = (size + sizeof(union buflib_data) - 1) /
404 sizeof(union buflib_data)
405 /* add 4 objects for alloc len, pointer to handle table entry and
406 * name length, and the ops pointer */
407 + 4;
408handle_alloc:
409 handle = handle_alloc(ctx);
410 if (!handle)
411 {
412 /* If allocation has failed, and compaction has succeeded, it may be
413 * possible to get a handle by trying again.
414 */
415 if (!ctx->compact && buflib_compact(ctx))
416 goto handle_alloc;
417 else
418 { /* first try to shrink the alloc before the handle table
419 * to make room for new handles */
420 int handle = ctx->handle_table - ctx->last_handle;
421 union buflib_data* last_block = handle_to_block(ctx, handle);
422 struct buflib_callbacks* ops = last_block[2].ops;
423 if (ops && ops->shrink_callback)
424 {
425 char *data = buflib_get_data(ctx, handle);
426 unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
427 if (ops->shrink_callback(handle, hint, data,
428 (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
429 { /* retry one more time */
430 goto handle_alloc;
431 }
432 }
433 return 0;
434 }
435 }
436
437buffer_alloc:
438 /* need to re-evaluate last before the loop because the last allocation
439 * possibly made room in its front to fit this, so last would be wrong */
440 last = false;
441 for (block = ctx->first_free_block;;block += block_len)
442 {
443 /* If the last used block extends all the way to the handle table, the
444 * block "after" it doesn't have a header. Because of this, it's easier
445 * to always find the end of allocation by saving a pointer, and always
446 * calculate the free space at the end by comparing it to the
447 * last_handle pointer.
448 */
449 if(block == ctx->alloc_end)
450 {
451 last = true;
452 block_len = ctx->last_handle - block;
453 if ((size_t)block_len < size)
454 block = NULL;
455 break;
456 }
457 block_len = block->val;
458 /* blocks with positive length are already allocated. */
459 if(block_len > 0)
460 continue;
461 block_len = -block_len;
462 /* The search is first-fit, any fragmentation this causes will be
463 * handled at compaction.
464 */
465 if ((size_t)block_len >= size)
466 break;
467 }
468 if (!block)
469 {
470 /* Try compacting if allocation failed */
471 unsigned hint = BUFLIB_SHRINK_POS_FRONT |
472 ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
473 if (buflib_compact_and_shrink(ctx, hint))
474 {
475 goto buffer_alloc;
476 } else {
477 handle->val=1;
478 handle_free(ctx, handle);
479 return 0;
480 }
481 }
482
483 /* Set up the allocated block, by marking the size allocated, and storing
484 * a pointer to the handle.
485 */
486 union buflib_data *name_len_slot;
487 block->val = size;
488 block[1].handle = handle;
489 block[2].ops = ops;
490 strcpy(block[3].name, name);
491 name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
492 name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
493 handle->alloc = (char*)(name_len_slot + 1);
494 /* If we have just taken the first free block, the next allocation search
495 * can save some time by starting after this block.
496 */
497 if (block == ctx->first_free_block)
498 ctx->first_free_block += size;
499 block += size;
500 /* alloc_end must be kept current if we're taking the last block. */
501 if (last)
502 ctx->alloc_end = block;
503 /* Only free blocks *before* alloc_end have tagged length. */
504 else if ((size_t)block_len > size)
505 block->val = size - block_len;
506 /* Return the handle index as a positive integer. */
507 return ctx->handle_table - handle;
508}
509
510/* Find the block immediately before block; return it if it is free, NULL otherwise */
511static union buflib_data*
512find_free_block_before(struct buflib_context *ctx, union buflib_data* block)
513{
514 union buflib_data *ret = ctx->first_free_block,
515 *next_block = ret;
516
517 /* find the block that's before the current one */
518 while (next_block < block)
519 {
520 ret = next_block;
521 next_block += abs(ret->val);
522 }
523
524 /* If next_block == block, the loop stopped right at block, so ret is the
525 * block immediately before it; if that block is free, it is the wanted one
526 */
527 if (next_block == block && ret < block && ret->val < 0)
528 return ret;
529 /* otherwise, e.g. if ret > block, or if the buffer is compact,
530 * there's no free block before */
531 return NULL;
532}
533
534/* Free the buffer associated with handle_num. */
535int
536buflib_free(struct buflib_context *ctx, int handle_num)
537{
538 union buflib_data *handle = ctx->handle_table - handle_num,
539 *freed_block = handle_to_block(ctx, handle_num),
540 *block, *next_block;
541 /* We need to find the block before the current one, to see if it is free
542 * and can be merged with this one.
543 */
544 block = find_free_block_before(ctx, freed_block);
545 if (block)
546 {
547 block->val -= freed_block->val;
548 }
549 else
550 {
551 /* Otherwise, set block to the newly-freed block, and mark it free, before
552 * continuing on, since the code below expects block to point to a free
553 * block which may have free space after it.
554 */
555 block = freed_block;
556 block->val = -block->val;
557 }
558 next_block = block - block->val;
559 /* Check if we are merging with the free space at alloc_end. */
560 if (next_block == ctx->alloc_end)
561 ctx->alloc_end = block;
562 /* Otherwise, the next block might still be a "normal" free block, and the
563 * mid-allocation free means that the buffer is no longer compact.
564 */
565 else {
566 ctx->compact = false;
567 if (next_block->val < 0)
568 block->val += next_block->val;
569 }
570 handle_free(ctx, handle);
571 handle->alloc = NULL;
572 /* If this block is before first_free_block, it becomes the new starting
573 * point for free-block search.
574 */
575 if (block < ctx->first_free_block)
576 ctx->first_free_block = block;
577
578 return 0; /* unconditionally */
579}
580
581/* Return the maximum allocatable memory in bytes */
582size_t
583buflib_available(struct buflib_context* ctx)
584{
585 /* subtract 5 elements for
586 * val, handle, name_len, ops and the handle table entry*/
587 ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5);
588 diff -= 16; /* space for future handles */
589 diff *= sizeof(union buflib_data); /* make it bytes */
590 diff -= 16; /* reserve 16 for the name */
591
592 if (diff > 0)
593 return diff;
594 else
595 return 0;
596}
597
598/*
599 * Allocate all available (as returned by buflib_available()) memory and return
600 * a handle to it
601 *
602 * This grabs a lock which can only be unlocked by buflib_free() or
603 * buflib_shrink(), to protect from further allocations (which couldn't be
604 * serviced anyway).
605 */
606int
607buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size, struct buflib_callbacks *ops)
608{
609 /* limit name to 16 bytes since that's what buflib_available() accounts for */
610 char buf[16];
611 *size = buflib_available(ctx);
612 strlcpy(buf, name, sizeof(buf));
613
614 return buflib_alloc_ex(ctx, *size, buf, ops);
615}
616
617/* Shrink the allocation indicated by the handle according to new_start and
618 * new_size. Grow is not possible, therefore new_start and new_start + new_size
619 * must be within the original allocation
620 */
621bool
622buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
623{
624 char* oldstart = buflib_get_data(ctx, handle);
625 char* newstart = new_start;
626 char* newend = newstart + new_size;
627
628 /* newstart must be higher and new_size not "negative" */
629 if (newstart < oldstart || newend < newstart)
630 return false;
631 union buflib_data *block = handle_to_block(ctx, handle),
632 *old_next_block = block + block->val,
633 /* newstart isn't necessarily properly aligned but it
634 * needn't be since it's only dereferenced by the user code */
635 *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
636 *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
637 *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
638 *new_block, metadata_size;
639
640 /* growing is not supported */
641 if (new_next_block > old_next_block)
642 return false;
643
644 metadata_size.val = aligned_oldstart - block;
645 /* update val and the handle table entry */
646 new_block = aligned_newstart - metadata_size.val;
647 block[0].val = new_next_block - new_block;
648
649 block[1].handle->alloc = newstart;
650 if (block != new_block)
651 {
652 /* move metadata over, i.e. pointer to handle table entry and name
653 * This is actually the point of no return. Data in the allocation is
654 * being modified, and therefore we must successfully finish the shrink
655 * operation */
656 memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
657 /* mark the old block unallocated */
658 block->val = block - new_block;
659 /* find the block before in order to merge with the new free space */
660 union buflib_data *free_before = find_free_block_before(ctx, block);
661 if (free_before)
662 free_before->val += block->val;
663 else if (ctx->first_free_block > block)
664 ctx->first_free_block = block;
665
666 /* We didn't handle size changes yet, so assign block to the new one;
667 * the code below wants block whether it changed or not */
668 block = new_block;
669 }
670
671 /* Now deal with size changes that create free blocks after the allocation */
672 if (old_next_block != new_next_block)
673 {
674 if (ctx->alloc_end == old_next_block)
675 ctx->alloc_end = new_next_block;
676 else if (old_next_block->val < 0)
677 { /* enlarge next block by moving it up */
678 new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
679 }
680 else if (old_next_block != new_next_block)
681 { /* creating a hole */
682 /* must be negative to indicate being unallocated */
683 new_next_block->val = new_next_block - old_next_block;
684 }
685 /* update first_free_block for the newly created free space */
686 if (ctx->first_free_block > new_next_block)
687 ctx->first_free_block = new_next_block;
688 }
689
690 return true;
691}
692
693const char* buflib_get_name(struct buflib_context *ctx, int handle)
694{
695 union buflib_data *data = (union buflib_data*)ALIGN_DOWN((intptr_t)buflib_get_data(ctx, handle), sizeof (*data));
696 size_t len = data[-1].val;
697 if (len <= 1)
698 return NULL;
699 return data[-len].name;
700}
701
702#ifdef BUFLIB_DEBUG_BLOCKS
703void buflib_print_allocs(struct buflib_context *ctx,
704 void (*print)(int, const char*))
705{
706 union buflib_data *this, *end = ctx->handle_table;
707 char buf[128];
708 for(this = end - 1; this >= ctx->last_handle; this--)
709 {
710 if (!this->alloc) continue;
711
712 int handle_num;
713 const char *name;
714 union buflib_data *block_start, *alloc_start;
715 intptr_t alloc_len;
716
717 handle_num = end - this;
718 alloc_start = buflib_get_data(ctx, handle_num);
719 name = buflib_get_name(ctx, handle_num);
720 block_start = (union buflib_data*)name - 3;
721 alloc_len = block_start->val * sizeof(union buflib_data);
722
723 snprintf(buf, sizeof(buf),
724 "%s(%d):\t%p\n"
725 " \t%p\n"
726 " \t%ld\n",
727 name?:"(null)", handle_num, block_start, alloc_start, alloc_len);
728 /* handle_num is 1-based */
729 print(handle_num - 1, buf);
730 }
731}
732
733void buflib_print_blocks(struct buflib_context *ctx,
734 void (*print)(int, const char*))
735{
736 char buf[128];
737 int i = 0;
738 for(union buflib_data* this = ctx->buf_start;
739 this < ctx->alloc_end;
740 this += abs(this->val))
741 {
742 snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
743 this, this->val,
744 this->val > 0? this[3].name:"<unallocated>");
745 print(i++, buf);
746 }
747}
748#endif
749
750#ifdef BUFLIB_DEBUG_BLOCK_SINGLE
751int buflib_get_num_blocks(struct buflib_context *ctx)
752{
753 int i = 0;
754 for(union buflib_data* this = ctx->buf_start;
755 this < ctx->alloc_end;
756 this += abs(this->val))
757 {
758 i++;
759 }
760 return i;
761}
762
763void buflib_print_block_at(struct buflib_context *ctx, int block_num,
764 char* buf, size_t bufsize)
765{
766 union buflib_data* this = ctx->buf_start;
767 while(block_num > 0 && this < ctx->alloc_end)
768 {
769 this += abs(this->val);
770 block_num -= 1;
771 }
772 snprintf(buf, bufsize, "%8p: val: %4ld (%s)",
773 this, this->val,
774 this->val > 0? this[3].name:"<unallocated>");
775}
776
777#endif
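To make the move-callback contract used by move_block() above concrete, here is a hedged, caller-side sketch (not part of this file; all names are illustrative) of an allocation that keeps a cached pointer into its own buffer:

    #include "buflib.h"

    static char *cached; /* points somewhere into the allocation's payload */

    static int example_move_callback(int handle, void *current, void *new)
    {
        (void)handle;
        /* the payload is about to be memmove()'d from current to new;
         * relocate cached pointers by the same distance */
        cached += (char*)new - (char*)current;
        return BUFLIB_CB_OK; /* or BUFLIB_CB_CANNOT_MOVE to veto this move */
    }

    static struct buflib_callbacks example_ops = {
        .move_callback = example_move_callback,
        .shrink_callback = NULL, /* NULL: this allocation cannot be shrunk */
    };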
diff --git a/firmware/common/dircache.c b/firmware/common/dircache.c
index f47e65e428..334801ce57 100644
--- a/firmware/common/dircache.c
+++ b/firmware/common/dircache.c
@@ -38,7 +38,7 @@
38#include "kernel.h" 38#include "kernel.h"
39#include "usb.h" 39#include "usb.h"
40#include "file.h" 40#include "file.h"
41#include "buffer.h" 41#include "core_alloc.h"
42#include "dir.h" 42#include "dir.h"
43#include "storage.h" 43#include "storage.h"
44#if CONFIG_RTC 44#if CONFIG_RTC
@@ -57,6 +57,8 @@
57#else 57#else
58#define MAX_OPEN_DIRS 8 58#define MAX_OPEN_DIRS 8
59#endif 59#endif
60static DIR_CACHED opendirs[MAX_OPEN_DIRS];
61static char opendir_dnames[MAX_OPEN_DIRS][MAX_PATH];
60 62
61#define MAX_PENDING_BINDINGS 2 63#define MAX_PENDING_BINDINGS 2
62struct fdbind_queue { 64struct fdbind_queue {
@@ -571,7 +573,8 @@ int dircache_load(void)
571 } 573 }
572 574
573 allocated_size = maindata.size + DIRCACHE_RESERVE; 575 allocated_size = maindata.size + DIRCACHE_RESERVE;
574 dircache_root = buffer_alloc(allocated_size); 576 int handle = core_alloc("dircache", allocated_size);
577 dircache_root = core_get_data(handle);
575 /* needs to be struct-size aligned so that the pointer arithmetic below works */ 578 /* needs to be struct-size aligned so that the pointer arithmetic below works */
576 ALIGN_BUFFER(dircache_root, allocated_size, sizeof(struct dircache_entry)); 579 ALIGN_BUFFER(dircache_root, allocated_size, sizeof(struct dircache_entry));
577 entry_count = maindata.entry_count; 580 entry_count = maindata.entry_count;
@@ -814,6 +817,7 @@ static void generate_dot_d_names(void)
814 strcpy(dot, "."); 817 strcpy(dot, ".");
815 strcpy(dotdot, ".."); 818 strcpy(dotdot, "..");
816} 819}
820
817/** 821/**
818 * Start scanning the disk to build the dircache. 822 * Start scanning the disk to build the dircache.
819 * Either transparent or non-transparent build method is used. 823 * Either transparent or non-transparent build method is used.
@@ -841,11 +845,13 @@ int dircache_build(int last_size)
841 queue_post(&dircache_queue, DIRCACHE_BUILD, 0); 845 queue_post(&dircache_queue, DIRCACHE_BUILD, 0);
842 return 2; 846 return 2;
843 } 847 }
844 848
845 if (last_size > DIRCACHE_RESERVE && last_size < DIRCACHE_LIMIT ) 849 if (last_size > DIRCACHE_RESERVE && last_size < DIRCACHE_LIMIT )
846 { 850 {
851 int handle;
847 allocated_size = last_size + DIRCACHE_RESERVE; 852 allocated_size = last_size + DIRCACHE_RESERVE;
848 dircache_root = buffer_alloc(allocated_size); 853 handle = core_alloc("dircache", allocated_size);
854 dircache_root = core_get_data(handle);
849 ALIGN_BUFFER(dircache_root, allocated_size, sizeof(struct dircache_entry)); 855 ALIGN_BUFFER(dircache_root, allocated_size, sizeof(struct dircache_entry));
850 d_names_start = d_names_end = ((char*)dircache_root)+allocated_size-1; 856 d_names_start = d_names_end = ((char*)dircache_root)+allocated_size-1;
851 dircache_size = 0; 857 dircache_size = 0;
@@ -863,7 +869,8 @@ int dircache_build(int last_size)
863 * after generation the buffer will be compacted with DIRCACHE_RESERVE 869 * after generation the buffer will be compacted with DIRCACHE_RESERVE
864 * free bytes inbetween */ 870 * free bytes inbetween */
865 size_t got_size; 871 size_t got_size;
866 char* buf = buffer_get_buffer(&got_size); 872 int handle = core_alloc_maximum("dircache", &got_size, NULL);
873 char* buf = core_get_data(handle);
867 dircache_root = (struct dircache_entry*)ALIGN_UP(buf, 874 dircache_root = (struct dircache_entry*)ALIGN_UP(buf,
868 sizeof(struct dircache_entry)); 875 sizeof(struct dircache_entry));
869 d_names_start = d_names_end = buf + got_size - 1; 876 d_names_start = d_names_end = buf + got_size - 1;
@@ -902,11 +909,11 @@ int dircache_build(int last_size)
902 allocated_size = (d_names_end - buf); 909 allocated_size = (d_names_end - buf);
903 reserve_used = 0; 910 reserve_used = 0;
904 911
905 buffer_release_buffer(allocated_size); 912 core_shrink(handle, dircache_root, allocated_size);
906 return res; 913 return res;
907fail: 914fail:
908 dircache_disable(); 915 dircache_disable();
909 buffer_release_buffer(0); 916 core_free(handle);
910 return res; 917 return res;
911} 918}
912 919
@@ -942,7 +949,7 @@ void dircache_init(void)
942 memset(opendirs, 0, sizeof(opendirs)); 949 memset(opendirs, 0, sizeof(opendirs));
943 for (i = 0; i < MAX_OPEN_DIRS; i++) 950 for (i = 0; i < MAX_OPEN_DIRS; i++)
944 { 951 {
945 opendirs[i].theent.d_name = buffer_alloc(MAX_PATH); 952 opendirs[i].theent.d_name = opendir_dnames[i];
946 } 953 }
947 954
948 queue_init(&dircache_queue, true); 955 queue_init(&dircache_queue, true);
diff --git a/firmware/core_alloc.c b/firmware/core_alloc.c
new file mode 100644
index 0000000000..75dfc75b86
--- /dev/null
+++ b/firmware/core_alloc.c
@@ -0,0 +1,57 @@
1
2#include <string.h>
3#include "core_alloc.h"
4#include "buflib.h"
5#include "buffer.h"
6
7/* not static so it can be discovered by core_get_data() */
8struct buflib_context core_ctx;
9
10void core_allocator_init(void)
11{
12 buffer_init();
13 size_t size;
14 void *start = buffer_get_buffer(&size);
15 buflib_init(&core_ctx, start, size);
16 buffer_release_buffer(size);
17}
18
19int core_alloc(const char* name, size_t size)
20{
21 return buflib_alloc_ex(&core_ctx, size, name, NULL);
22}
23
24int core_alloc_ex(const char* name, size_t size, struct buflib_callbacks *ops)
25{
26 return buflib_alloc_ex(&core_ctx, size, name, ops);
27}
28
29size_t core_available(void)
30{
31 return buflib_available(&core_ctx);
32}
33
34int core_free(int handle)
35{
36 return buflib_free(&core_ctx, handle);
37}
38
39int core_alloc_maximum(const char* name, size_t *size, struct buflib_callbacks *ops)
40{
41 return buflib_alloc_maximum(&core_ctx, name, size, ops);
42}
43
44bool core_shrink(int handle, void* new_start, size_t new_size)
45{
46 return buflib_shrink(&core_ctx, handle, new_start, new_size);
47}
48
49int core_get_num_blocks(void)
50{
51 return buflib_get_num_blocks(&core_ctx);
52}
53
54void core_print_block_at(int block_num, char* buf, size_t bufsize)
55{
56 buflib_print_block_at(&core_ctx, block_num, buf, bufsize);
57}
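core_allocator_init() hands the whole buffer_get_buffer() pool over to buflib, so it must run before anything uses the core_* wrappers. A sketch of the assumed boot ordering (the surrounding function is illustrative):

    void system_startup(void)
    {
        /* ... low-level hardware init ... */
        core_allocator_init(); /* buffer_init(), then buflib_init() over the
                                  entire remaining pool */
        /* from here on, core_alloc()/core_get_data()/core_free() are usable;
         * direct buffer_alloc() call sites are being migrated away */
    }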
diff --git a/firmware/export/audio.h b/firmware/export/audio.h
index 2835d8f4c4..57f3c24aae 100644
--- a/firmware/export/audio.h
+++ b/firmware/export/audio.h
@@ -58,6 +58,7 @@ void audio_resume(void);
58void audio_next(void); 58void audio_next(void);
59void audio_prev(void); 59void audio_prev(void);
60int audio_status(void); 60int audio_status(void);
61size_t audio_buffer_available(void);
61void audio_ff_rewind(long newpos); 62void audio_ff_rewind(long newpos);
62void audio_flush_and_reload_tracks(void); 63void audio_flush_and_reload_tracks(void);
63struct mp3entry* audio_current_track(void); 64struct mp3entry* audio_current_track(void);
diff --git a/firmware/include/buflib.h b/firmware/include/buflib.h
new file mode 100644
index 0000000000..db7b5ec50a
--- /dev/null
+++ b/firmware/include/buflib.h
@@ -0,0 +1,319 @@
1/***************************************************************************
2* __________ __ ___.
3* Open \______ \ ____ ____ | | _\_ |__ _______ ___
4* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7* \/ \/ \/ \/ \/
8* $Id$
9*
10* This is a memory allocator designed to provide reasonable management of free
11* space and fast access to allocated data. More than one allocator can be used
12* at a time by initializing multiple contexts.
13*
14* Copyright (C) 2009 Andrew Mahone
15* Copyright (C) 2011 Thomas Martitz
16*
17* This program is free software; you can redistribute it and/or
18* modify it under the terms of the GNU General Public License
19* as published by the Free Software Foundation; either version 2
20* of the License, or (at your option) any later version.
21*
22* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
23* KIND, either express or implied.
24*
25****************************************************************************/
26
27#ifndef _BUFLIB_H_
28#define _BUFLIB_H_
29#include <stdint.h>
30#include <stdbool.h>
31#include <string.h>
32
33/* enable single block debugging */
34#define BUFLIB_DEBUG_BLOCK_SINGLE
35
36union buflib_data
37{
38 intptr_t val;
39 char name[1]; /* actually a variable sized string */
40 struct buflib_callbacks* ops;
41 char* alloc;
42 union buflib_data *handle;
43};
44
45struct buflib_context
46{
47 union buflib_data *handle_table;
48 union buflib_data *first_free_handle;
49 union buflib_data *last_handle;
50 union buflib_data *first_free_block;
51 union buflib_data *buf_start;
52 union buflib_data *alloc_end;
53 bool compact;
54};
55
56/**
57 * Callbacks used by the buflib to inform an allocation that compaction
58 * is happening (before data is moved)
59 *
60 * Note that buflib tries to move to satisfy new allocations before shrinking.
61 * So if you have something to resize try to do it outside of the callback.
62 *
63 * Regardless of the above, if the allocation is SHRINKABLE but not
64 * MUST_NOT_MOVE, buflib will move the allocation before even attempting to
65 * shrink.
66 */
67struct buflib_callbacks {
68 /**
69 * This is called before data is moved. Use this to fix up any cached
70 * pointers pointing to inside the allocation. The size is unchanged.
71 *
72 * This is not needed if you don't cache the data pointer (but always
73 * call buflib_get_data()) and don't pass pointers to the data to yielding
74 * functions.
75 *
76 * handle: The corresponding handle
77 * current: The current start of the allocation
78 * new: The new start of the allocation, after data movement
79 *
80 * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_MOVE if movement
81 * is impossible at this moment.
82 *
83 * If NULL: this allocation must not be moved around by the buflib when
84 * compaction occurs
85 */
86 int (*move_callback)(int handle, void* current, void* new);
87 /**
88 * This is called when the buflib desires to shrink a buffer
89 * in order to satisfy new allocation. This happens when buflib runs
90 * out of memory, e.g. because buflib_alloc_maximum() was called.
91 * Move data around as you need to make space and call core_shrink() as
92 * appropriate from within the callback to complete the shrink operation.
93 * buflib will not move data as part of shrinking.
94 *
95 * hint: bit mask containing hints on how shrinking is desired (see below)
96 * handle: The corresponding handle
97 * start: The old start of the allocation
98 *
99 * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_SHRINK if shrinking
100 * is impossible at this moment.
101 *
102 * if NULL: this allocation cannot be resized.
103 * It is recommended that allocations that must not move are
104 * at least shrinkable
105 */
106 int (*shrink_callback)(int handle, unsigned hints, void* start, size_t old_size);
107};
108
109#define BUFLIB_SHRINK_POS_MASK ((1<<0|1<<1)<<30)
110#define BUFLIB_SHRINK_SIZE_MASK (~BUFLIB_SHRINK_POS_MASK)
111#define BUFLIB_SHRINK_POS_FRONT (1u<<31)
112#define BUFLIB_SHRINK_POS_BACK (1u<<30)
113
114/**
115 * Possible return values for the callbacks, some of them can cause
116 * compaction to fail and therefore new allocations to fail
117 */
118/* Everything alright */
119#define BUFLIB_CB_OK 0
120/* Tell buflib that moving failed. Buflib may retry to move at any point */
121#define BUFLIB_CB_CANNOT_MOVE 1
122/* Tell buflib that resizing failed, possibly making future allocations fail */
123#define BUFLIB_CB_CANNOT_SHRINK 1
124
125/**
126 * Initializes buflib with a caller allocated context instance and memory pool.
127 *
128 * The buflib_context instance needs to be passed to every other buflib
129 * function. It should be considered opaque, even though it is not yet
130 * (that's to make inlining core_get_data() possible). The documentation
131 * of the other functions will not describe the context
132 * instance parameter further as it's obligatory.
133 *
134 * context: The new buflib instance to be initialized, allocated by the caller
135 * buf: The memory pool, allocated by the caller; size: The size of the pool
136 */
137void buflib_init(struct buflib_context *context, void *buf, size_t size);
138
139
140/**
141 * Returns how many bytes the buflib has left to satisfy allocations.
142 *
143 * This function does not yet consider possible compaction so there might
144 * be more space left. This may change in the future.
145 *
146 * Returns: The number of bytes left in the memory pool.
147 */
148size_t buflib_available(struct buflib_context *ctx);
149
150
151/**
152 * Allocates memory from buflib's memory pool
153 *
154 * size: How many bytes to allocate
155 *
156 * Returns: An integer handle identifying this allocation
157 */
158int buflib_alloc(struct buflib_context *context, size_t size);
159
160
161/**
162 * Allocates memory from the buflib's memory pool with additional callbacks
163 * and flags
164 *
165 * name: A string identifier giving this allocation a name
166 * size: How many bytes to allocate
167 * ops: a struct with pointers to callback functions (see above)
168 *
169 * Returns: An integer handle identifying this allocation
170 */
171int buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
172 struct buflib_callbacks *ops);
173
174
175/**
176 * Gets all available memory from buflib, for temporary use.
177 *
178 * Since this effectively makes all future allocations fail (unless
179 * another allocation is freed in the meantime), you should definitely provide
180 * a shrink callback if you plan to hold the buffer for a longer period. This
181 * will allow buflib to permit allocations by shrinking the buffer returned by
182 * this function.
183 *
184 * Note that this currently gives whatever buflib_available() returns. However,
185 * do not depend on this behavior, it may change.
186 *
187 * name: A string identifier giving this allocation a name
188 * size: The actual size will be returned into size
189 * ops: a struct with pointers to callback functions
190 *
191 * Returns: An integer handle identifying this allocation
192 */
193int buflib_alloc_maximum(struct buflib_context* ctx, const char* name,
194 size_t *size, struct buflib_callbacks *ops);
195
196/**
197 * Queries the data pointer for the given handle. It's actually a cheap
198 * operation, so don't hesitate to use it extensively.
199 *
200 * Notice that you need to re-query after every direct or indirect yield(),
201 * because compaction can be triggered by other threads, which may move
202 * your data around (or you can get notified about changes by callbacks,
203 * see further above).
204 *
205 * handle: The handle corresponding to the allocation
206 *
207 * Returns: The start pointer of the allocation
208 */
209static inline void* buflib_get_data(struct buflib_context *context, int handle)
210{
211 return (void*)(context->handle_table[-handle].alloc);
212}
213
214/**
215 * Shrink the memory allocation associated with the given handle
216 * Mainly intended to be used with the shrink callback, but it can also
217 * be called outside as well, e.g. to give back buffer space allocated
218 * with buflib_alloc_maximum().
219 *
220 * Note that you must move/copy data around yourself before calling this,
221 * buflib will not do this as part of shrinking.
222 *
223 * handle: The handle identifying this allocation
224 * new_start: the new start of the allocation
225 * new_size: the new size of the allocation
226 *
227 * Returns: true if shrinking was successful. Otherwise it returns false,
228 * without having modified memory.
229 *
230 */
231bool buflib_shrink(struct buflib_context *ctx, int handle, void* newstart, size_t new_size);
232
233/**
234 * Frees memory associated with the given handle
235 *
236 * Returns: 0 (to invalidate handles in one line)
237 */
238int buflib_free(struct buflib_context *context, int handle);
239
240/**
241 * Moves the underlying buflib buffer up by size bytes (as much as
242 * possible for size == 0) without moving the end. This effectively
243 * reduces the available space by taking away manageable space from the
244 * front. This space is not available for new allocations anymore.
245 *
246 * To make space available in the front, everything is moved up.
247 * It does _NOT_ call the move callbacks
248 *
249 *
250 * size: size in bytes to move the buffer up (take away). The actual
251 * number of bytes moved is returned in this
252 * Returns: The new start of the underlying buflib buffer
253 */
254void* buflib_buffer_out(struct buflib_context *ctx, size_t *size);
255
256/**
257 * Moves the underlying buflib buffer down by size bytes without
258 * moving the end. This grows the buflib buffer by adding space to the front.
259 * The new bytes are available for new allocations.
260 *
261 * Everything is moved down, and the new free space will be in the middle.
262 * It does _NOT_ call the move callbacks.
263 *
264 * size: size in bytes to move the buffer down (new free space)
265 */
266void buflib_buffer_in(struct buflib_context *ctx, int size);
267
268/* debugging */
269
270/**
271 * Returns the name, as given to core_alloc() and core_alloc_ex(), of the
272 * allocation associated with the given handle
273 *
274 * handle: The handle indicating the allocation
275 *
276 * Returns: A pointer to the string identifier of the allocation
277 */
278const char* buflib_get_name(struct buflib_context *ctx, int handle);
279
280/**
281 * Prints an overview of all current allocations with the help
282 * of the passed printer helper
283 *
284 * This walks only the handle table and prints only valid allocations
285 *
286 * Only available if BUFLIB_DEBUG_BLOCKS is defined
287 */
288void buflib_print_allocs(struct buflib_context *ctx, void (*print)(int, const char*));
289
290/**
291 * Prints an overview of all blocks in the buflib buffer, allocated
292 * or unallocated, with the help of the passed printer helper
293 *
294 * This walks the entire buffer and prints unallocated space also.
295 * The output is also different from buflib_print_allocs().
296 *
297 * Only available if BUFLIB_DEBUG_BLOCKS is defined
298 */
299void buflib_print_blocks(struct buflib_context *ctx, void (*print)(int, const char*));
300
301/**
302 * Gets the number of blocks in the entire buffer, allocated or unallocated
303 *
304 * Only available if BUFLIB_DEBUG_BLOCK_SINGLE is defined
305 */
306int buflib_get_num_blocks(struct buflib_context *ctx);
307
308/**
309 * Print information about a single block as indicated by block_num
310 * into buf
311 *
312 * Call buflib_get_num_blocks() beforehand to get the total number of blocks,
313 * as passing a block_num higher than that is undefined
314 *
315 * Only available if BUFLIB_DEBUG_BLOCK_SINGLE is defined
316 */
317void buflib_print_block_at(struct buflib_context *ctx, int block_num,
318 char* buf, size_t bufsize);
319#endif
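A hedged sketch of a shrink callback honoring the hint bits defined in this header (the size handling and all names are illustrative; note the callback itself calls core_shrink(), since buflib does not move data as part of shrinking):

    #include "core_alloc.h"

    static int example_shrink_callback(int handle, unsigned hints,
                                       void *start, size_t old_size)
    {
        size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
        if (wanted == 0 || wanted > old_size)
            return BUFLIB_CB_CANNOT_SHRINK;
        if (hints & BUFLIB_SHRINK_POS_FRONT)
        {
            /* give up 'wanted' bytes at the front of the allocation */
            if (!core_shrink(handle, (char*)start + wanted, old_size - wanted))
                return BUFLIB_CB_CANNOT_SHRINK;
        }
        else
        {
            /* give up bytes at the back; the start stays in place */
            if (!core_shrink(handle, start, old_size - wanted))
                return BUFLIB_CB_CANNOT_SHRINK;
        }
        return BUFLIB_CB_OK;
    }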
diff --git a/firmware/include/core_alloc.h b/firmware/include/core_alloc.h
new file mode 100644
index 0000000000..f5206c9db9
--- /dev/null
+++ b/firmware/include/core_alloc.h
@@ -0,0 +1,36 @@
1
2#ifndef __CORE_ALLOC_H__
3#define __CORE_ALLOC_H__
4#include <string.h>
5#include <stdbool.h>
6#include "buflib.h"
7
8/* All functions below are wrappers for functions in buflib.h, except
9 * they have a predefined context
10 */
11void core_allocator_init(void);
12int core_alloc(const char* name, size_t size);
13int core_alloc_ex(const char* name, size_t size, struct buflib_callbacks *ops);
14int core_alloc_maximum(const char* name, size_t *size, struct buflib_callbacks *ops);
15bool core_shrink(int handle, void* new_start, size_t new_size);
16int core_free(int handle);
17size_t core_available(void);
18
19/* DO NOT ADD wrappers for buflib_buffer_out/in. They do not call
20 * the move callbacks and are therefore unsafe in the core */
21
22#ifdef BUFLIB_DEBUG_BLOCKS
23void core_print_allocs(void (*print)(const char*));
24void core_print_blocks(void (*print)(const char*));
25#endif
26#ifdef BUFLIB_DEBUG_BLOCK_SINGLE
27int core_get_num_blocks(void);
28void core_print_block_at(int block_num, char* buf, size_t bufsize);
29#endif
30
31static inline void* core_get_data(int handle)
32{
33 extern struct buflib_context core_ctx;
34 return buflib_get_data(&core_ctx, handle);
35}
36#endif /* __CORE_ALLOC_H__ */
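The dircache and rolo changes in this commit follow one idiom with these wrappers: grab everything, use part of it, then shrink to the used size, or free on error. A condensed sketch (used_bytes is illustrative):

    size_t size;
    int handle = core_alloc_maximum("scratch", &size, NULL);
    char *buf = core_get_data(handle);
    /* ... fill buf, ending up using used_bytes of it ... */
    core_shrink(handle, buf, used_bytes); /* give the rest back */
    /* or, on error: */
    handle = core_free(handle); /* returns 0, invalidating the handle */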
diff --git a/firmware/rolo.c b/firmware/rolo.c
index 9b6f4fec4a..283779d7ee 100644
--- a/firmware/rolo.c
+++ b/firmware/rolo.c
@@ -31,7 +31,7 @@
31#include "i2c.h" 31#include "i2c.h"
32#include "adc.h" 32#include "adc.h"
33#include "string.h" 33#include "string.h"
34#include "buffer.h" 34#include "core_alloc.h"
35#include "storage.h" 35#include "storage.h"
36#include "rolo.h" 36#include "rolo.h"
37 37
@@ -48,6 +48,7 @@
48 48
49#define IRQ0_EDGE_TRIGGER 0x80 49#define IRQ0_EDGE_TRIGGER 0x80
50 50
51static int rolo_handle;
51#ifdef CPU_PP 52#ifdef CPU_PP
52/* Handle the COP properly - it needs to jump to a function outside SDRAM while 53/* Handle the COP properly - it needs to jump to a function outside SDRAM while
53 * the new firmware is being loaded, and then jump to the start of SDRAM 54 * the new firmware is being loaded, and then jump to the start of SDRAM
@@ -99,7 +100,7 @@ void rolo_restart_cop(void)
99 100
100static void rolo_error(const char *text) 101static void rolo_error(const char *text)
101{ 102{
102 buffer_release_buffer(0); 103 rolo_handle = core_free(rolo_handle);
103 lcd_clear_display(); 104 lcd_clear_display();
104 lcd_puts(0, 0, "ROLO error:"); 105 lcd_puts(0, 0, "ROLO error:");
105 lcd_puts_scroll(0, 1, text); 106 lcd_puts_scroll(0, 1, text);
@@ -240,7 +241,8 @@ int rolo_load(const char* filename)
240 241
241 /* get the system buffer. release only in case of error, otherwise 242 /* get the system buffer. release only in case of error, otherwise
242 * we don't return anyway */ 243 * we don't return anyway */
243 filebuf = buffer_get_buffer(&filebuf_size); 244 rolo_handle = core_alloc_maximum("rolo", &filebuf_size, NULL);
245 filebuf = core_get_data(rolo_handle);
244 246
245#if CONFIG_CPU != SH7034 247#if CONFIG_CPU != SH7034
246 /* Read and save checksum */ 248 /* Read and save checksum */
diff --git a/firmware/target/arm/ata-nand-telechips.c b/firmware/target/arm/ata-nand-telechips.c
index 81dde33938..2ae425f4c6 100644
--- a/firmware/target/arm/ata-nand-telechips.c
+++ b/firmware/target/arm/ata-nand-telechips.c
@@ -26,7 +26,6 @@
26#include "panic.h" 26#include "panic.h"
27#include "nand_id.h" 27#include "nand_id.h"
28#include "storage.h" 28#include "storage.h"
29#include "buffer.h"
30 29
31#define SECTOR_SIZE 512 30#define SECTOR_SIZE 512
32 31
@@ -122,8 +121,9 @@ struct lpt_entry
122#ifdef BOOTLOADER 121#ifdef BOOTLOADER
123static struct lpt_entry lpt_lookup[MAX_SEGMENTS]; 122static struct lpt_entry lpt_lookup[MAX_SEGMENTS];
124#else 123#else
125/* buffer_alloc'd in nand_init() when the correct size has been determined */ 124/* core_alloc()'d in nand_init() when the correct size has been determined */
126static struct lpt_entry* lpt_lookup = NULL; 125#include "core_alloc.h"
126static int lpt_handle;
127#endif 127#endif
128 128
129/* Write Caches */ 129/* Write Caches */
@@ -607,6 +607,9 @@ static bool nand_read_sector_of_logical_segment(int log_segment, int sector,
607 int page_in_segment = sector / sectors_per_page; 607 int page_in_segment = sector / sectors_per_page;
608 int sector_in_page = sector % sectors_per_page; 608 int sector_in_page = sector % sectors_per_page;
609 609
610#ifndef BOOTLOADER
611 struct lpt_entry* lpt_lookup = core_get_data(lpt_handle);
612#endif
610 int bank = lpt_lookup[log_segment].bank; 613 int bank = lpt_lookup[log_segment].bank;
611 int phys_segment = lpt_lookup[log_segment].phys_segment; 614 int phys_segment = lpt_lookup[log_segment].phys_segment;
612 615
@@ -918,7 +921,8 @@ int nand_init(void)
918#ifndef BOOTLOADER 921#ifndef BOOTLOADER
919 /* Use chip info to allocate the correct size LPT buffer */ 922 /* Use chip info to allocate the correct size LPT buffer */
920 lptbuf_size = sizeof(struct lpt_entry) * segments_per_bank * total_banks; 923 lptbuf_size = sizeof(struct lpt_entry) * segments_per_bank * total_banks;
921 lpt_lookup = buffer_alloc(lptbuf_size); 924 lpt_handle = core_alloc("lpt lookup", lptbuf_size);
925 struct lpt_entry* lpt_lookup = core_get_data(lpt_handle);
922#else 926#else
923 /* Use a static array in the bootloader */ 927 /* Use a static array in the bootloader */
924 lptbuf_size = sizeof(lpt_lookup); 928 lptbuf_size = sizeof(lpt_lookup);
@@ -968,6 +972,9 @@ int nand_init(void)
968 972
969 if (log_segment < segments_per_bank * total_banks) 973 if (log_segment < segments_per_bank * total_banks)
970 { 974 {
975#ifndef BOOTLOADER
976 lpt_lookup = core_get_data(lpt_handle);
977#endif
971 if (lpt_lookup[log_segment].bank == -1 || 978 if (lpt_lookup[log_segment].bank == -1 ||
972 lpt_lookup[log_segment].phys_segment == -1) 979 lpt_lookup[log_segment].phys_segment == -1)
973 { 980 {
diff --git a/firmware/target/arm/tms320dm320/creative-zvm/ata-creativezvm.c b/firmware/target/arm/tms320dm320/creative-zvm/ata-creativezvm.c
index afb8d5cf62..ad10502f2d 100644
--- a/firmware/target/arm/tms320dm320/creative-zvm/ata-creativezvm.c
+++ b/firmware/target/arm/tms320dm320/creative-zvm/ata-creativezvm.c
@@ -30,7 +30,7 @@
30#include "dm320.h" 30#include "dm320.h"
31#include "ata.h" 31#include "ata.h"
32#include "string.h" 32#include "string.h"
33#include "buffer.h" 33#include "core_alloc.h"
34#include "logf.h" 34#include "logf.h"
35#include "ata-defines.h" 35#include "ata-defines.h"
36 36
@@ -202,7 +202,11 @@ struct cfs_direntry_item
202 202
203static bool cfs_inited = false; 203static bool cfs_inited = false;
204static unsigned long cfs_start; 204static unsigned long cfs_start;
205#ifdef BOOTLOADER
205static unsigned long *sectors; 206static unsigned long *sectors;
207#else
208static int sectors_handle;
209#endif
206 210
207#define CFS_START ( ((hdr->partitions[1].start*hdr->sector_size) & ~0xFFFF) + 0x10000 ) 211#define CFS_START ( ((hdr->partitions[1].start*hdr->sector_size) & ~0xFFFF) + 0x10000 )
208#define CFS_CLUSTER2CLUSTER(x) ( (CFS_START/512)+((x)-1)*64 ) 212#define CFS_CLUSTER2CLUSTER(x) ( (CFS_START/512)+((x)-1)*64 )
@@ -299,7 +303,8 @@ static void cfs_init(void)
299 _ata_read_sectors(CFS_CLUSTER2CLUSTER(vfat_inodes_nr[1]), 1, &sector); 303 _ata_read_sectors(CFS_CLUSTER2CLUSTER(vfat_inodes_nr[1]), 1, &sector);
300 inode = (struct cfs_inode*)&sector; 304 inode = (struct cfs_inode*)&sector;
301#ifndef BOOTLOADER 305#ifndef BOOTLOADER
302 sectors = (unsigned long*)buffer_alloc(VFAT_SECTOR_SIZE(inode->filesize)); 306 sectors_handle = core_alloc("ata sectors", VFAT_SECTOR_SIZE(inode->filesize));
307 unsigned long *sectors = core_get_data(sectors_handle);
303#else 308#else
304 static unsigned long _sector[VFAT_SECTOR_SIZE(1024*1024*1024)]; /* 1GB guess */ 309 static unsigned long _sector[VFAT_SECTOR_SIZE(1024*1024*1024)]; /* 1GB guess */
305 sectors = _sector; 310 sectors = _sector;
@@ -322,6 +327,9 @@ static void cfs_init(void)
322 _ata_read_sectors(CFS_CLUSTER2CLUSTER(inode->second_class_chain_second_cluster), 64, &vfat_data[1]); 327 _ata_read_sectors(CFS_CLUSTER2CLUSTER(inode->second_class_chain_second_cluster), 64, &vfat_data[1]);
323 328
324 /* First class chain */ 329 /* First class chain */
330#ifndef BOOTLOADER
331 sectors = core_get_data(sectors_handle);
332#endif
325 for(j=0; j<12; j++) 333 for(j=0; j<12; j++)
326 { 334 {
327 if( (inode->first_class_chain[j] & 0xFFFF) != 0xFFFF && 335 if( (inode->first_class_chain[j] & 0xFFFF) != 0xFFFF &&
@@ -331,6 +339,9 @@ static void cfs_init(void)
331 } 339 }
332 340
333 /* Second class chain */ 341 /* Second class chain */
342#ifndef BOOTLOADER
343 sectors = core_get_data(sectors_handle);
344#endif
334 for(j=0; j<0x8000/4; j++) 345 for(j=0; j<0x8000/4; j++)
335 { 346 {
336 if( (vfat_data[0][j] & 0xFFFF) != 0xFFFF && 347 if( (vfat_data[0][j] & 0xFFFF) != 0xFFFF &&
@@ -351,6 +362,9 @@ static void cfs_init(void)
351 /* Read third class subchain(s) */ 362 /* Read third class subchain(s) */
352 _ata_read_sectors(CFS_CLUSTER2CLUSTER(vfat_data[1][j]), 64, &vfat_data[0]); 363 _ata_read_sectors(CFS_CLUSTER2CLUSTER(vfat_data[1][j]), 64, &vfat_data[0]);
353 364
365#ifndef BOOTLOADER
366 sectors = core_get_data(sectors_handle);
367#endif
354 for(k=0; k<0x8000/4; k++) 368 for(k=0; k<0x8000/4; k++)
355 { 369 {
356 if( (vfat_data[0][k] & 0xFFFF) != 0xFFFF && 370 if( (vfat_data[0][k] & 0xFFFF) != 0xFFFF &&
@@ -376,6 +390,9 @@ static inline unsigned long map_sector(unsigned long sector)
376 * Sector mapping: start of CFS + FAT_SECTOR2CFS_SECTOR(sector) + missing part 390 * Sector mapping: start of CFS + FAT_SECTOR2CFS_SECTOR(sector) + missing part
377 * FAT works with sectors of 0x200 bytes, CFS with sectors of 0x8000 bytes. 391 * FAT works with sectors of 0x200 bytes, CFS with sectors of 0x8000 bytes.
378 */ 392 */
393#ifndef BOOTLOADER
394 unsigned long *sectors = core_get_data(sectors_handle);
395#endif
379 return cfs_start+sectors[sector/64]*64+sector%64; 396 return cfs_start+sectors[sector/64]*64+sector%64;
380} 397}
381 398
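The repeated core_get_data() calls added in this file spell out the discipline the handle-based API imposes: any call that may yield (here _ata_read_sectors()) can let another thread trigger compaction, so the pointer is re-fetched through the handle before each further use. Compaction is not enabled by this commit yet, but the call sites already follow the rule. Schematically (use(), cluster, count and dest are placeholders):

    unsigned long *sectors = core_get_data(sectors_handle);
    use(sectors);                             /* safe: no yield since fetch */
    _ata_read_sectors(cluster, count, dest);  /* may yield */
    sectors = core_get_data(sectors_handle);  /* re-fetch before next use */
    use(sectors);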