From 680261fbb7c3c0a4cde4321c35ce3fe0e6b0cce8 Mon Sep 17 00:00:00 2001
From: Aidan MacDonald <amachronic@protonmail.com>
Date: Mon, 2 Jan 2023 19:45:59 +0000
Subject: buflib: Prep for multiple backend support, rename to buflib_mempool

Rename the current buflib implementation to buflib_mempool.

Change-Id: Iefdf74be1f7d8fcd19e6ce2289c3d1459b54d013
---
 apps/plugin.h                     |    2 +-
 firmware/SOURCES                  |    2 +-
 firmware/buflib.c                 | 1113 ------------------------------------
 firmware/buflib_mempool.c         | 1113 ++++++++++++++++++++++++++++++++++++
 firmware/core_alloc.c             |    2 +-
 firmware/include/buflib.h         |  381 ------------
 firmware/include/buflib_mempool.h |  381 ++++++++++++
 firmware/include/chunk_alloc.h    |    2 +-
 firmware/include/core_alloc.h     |    2 +-
 lib/rbcodec/test/SOURCES          |    2 +-
 10 files changed, 1500 insertions(+), 1500 deletions(-)
 delete mode 100644 firmware/buflib.c
 create mode 100644 firmware/buflib_mempool.c
 delete mode 100644 firmware/include/buflib.h
 create mode 100644 firmware/include/buflib_mempool.h

diff --git a/apps/plugin.h b/apps/plugin.h
index 850e7484d9..ea69cbe2b2 100644
--- a/apps/plugin.h
+++ b/apps/plugin.h
@@ -100,7 +100,7 @@ int plugin_open(const char *plugin, const char *parameter);
 #include "list.h"
 #include "tree.h"
 #include "color_picker.h"
-#include "buflib.h"
+#include "buflib_mempool.h"
 #include "buffering.h"
 #include "tagcache.h"
 #include "viewport.h"
diff --git a/firmware/SOURCES b/firmware/SOURCES
index fca6fdf573..c8789756aa 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -4,7 +4,7 @@
 ata_idle_notify.c
 events.c
 backlight.c
-buflib.c
+buflib_mempool.c
 core_alloc.c
 general.c
 powermgmt.c
diff --git a/firmware/buflib.c b/firmware/buflib.c
deleted file mode 100644
index cb35290c03..0000000000
--- a/firmware/buflib.c
+++ /dev/null
@@ -1,1113 +0,0 @@
-/***************************************************************************
-*             __________               __   ___.
-*   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
-*   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
-*   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
-*   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
-*                     \/            \/     \/    \/            \/
-* $Id$
-*
-* This is a memory allocator designed to provide reasonable management of free
-* space and fast access to allocated data. More than one allocator can be used
-* at a time by initializing multiple contexts.
-*
-* Copyright (C) 2009 Andrew Mahone
-* Copyright (C) 2011 Thomas Martitz
-*
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version 2
-* of the License, or (at your option) any later version.
-*
-* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
-* KIND, either express or implied.
-*
-****************************************************************************/
-
-#include <stdarg.h>
-#include <stdlib.h> /* for abs() */
-#include <stdio.h> /* for snprintf() */
-#include <stddef.h> /* for ptrdiff_t */
-#include "buflib.h"
-#include "string-extra.h" /* strmemccpy() */
-#include "debug.h"
-#include "panic.h"
-#include "system.h" /* for ALIGN_*() */
-
-/* FIXME: This comment is pretty out of date now and wrong in some details.
- *
- * The main goal of this design is fast fetching of the pointer for a handle.
- * For that reason, the handles are stored in a table at the end of the buffer
- * with a fixed address, so that returning the pointer for a handle is a simple
- * table lookup. To reduce the frequency with which allocated blocks will need
- * to be moved to free space, allocations grow up in address from the start of
- * the buffer. The buffer is treated as an array of union buflib_data. Blocks
- * start with a length marker, which is included in their length. Free blocks
- * are marked by negative length. Allocated blocks have a positive length
- * marker, followed by additional metadata: first a pointer
- * (union buflib_data*) to the corresponding handle table entry, so that it can
- * be quickly found and updated during compaction. After that follows the
- * pointer to the struct buflib_callbacks associated with this allocation
- * (may be NULL). That pointer is followed by a variable length character array
- * containing the nul-terminated string identifier of the allocation. After this
- * array there's a length marker for the length of the character array including
- * this length marker (counted in n*sizeof(union buflib_data)), which makes it
- * possible to find the start of the character array (and therefore the start
- * of the entire block) when only the handle or payload start is known.
- *
- * UPDATE BUFLIB_ALLOC_OVERHEAD (buflib.h) WHEN THE METADATA CHANGES!
- *
- * Example:
- * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
- * |L|H|C|cccc|L2|crc|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|crc|XXXXXXXXXXXXX|AAA|
- *
- * L   - length marker (negative if block unallocated)
- * H   - handle table entry pointer
- * C   - pointer to struct buflib_callbacks
- * c   - variable sized string identifier
- * L2  - length of the metadata
- * crc - crc32 protecting buflib metadata integrity
- * X   - actual payload
- * Y   - unallocated space
- *
- * A   - pointer to start of payload (first X) in the handle table (may be null)
- *
- * The blocks can be walked by jumping the abs() of the L length marker, i.e.
- *   union buflib_data* L;
- *   for(L = start; L < end; L += abs(L->val)) { .... }
- *
- *
- * The allocator functions are passed a context struct so that two allocators
- * can be run, for example, one per core may be used, with convenience wrappers
- * for the single-allocator case that use a predefined context.
- */
-
-#define B_ALIGN_DOWN(x) \
-    ALIGN_DOWN(x, sizeof(union buflib_data))
-
-#define B_ALIGN_UP(x) \
-    ALIGN_UP(x, sizeof(union buflib_data))
-
-#ifdef DEBUG
-    #include <stdio.h>
-    #define BDEBUGF DEBUGF
-#else
-    #define BDEBUGF(...) do { } while(0)
-#endif
-
-#define BPANICF panicf
-
-/* Available paranoia checks */
-#define PARANOIA_CHECK_LENGTH       (1 << 0)
-#define PARANOIA_CHECK_BLOCK_HANDLE (1 << 1)
-#define PARANOIA_CHECK_PINNING      (1 << 2)
-/* Bitmask of enabled paranoia checks */
-#define BUFLIB_PARANOIA \
-    (PARANOIA_CHECK_LENGTH | \
-     PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING)
-
-/* Indices used to access block fields as block[idx_XXX] */
-enum {
-    idx_LEN,    /* length of the block, must come first */
-    idx_HANDLE, /* pointer to entry in the handle table */
-    idx_OPS,    /* pointer to an ops struct */
-    idx_PIN,    /* pin count */
-    BUFLIB_NUM_FIELDS,
-};
-
-struct buflib_callbacks buflib_ops_locked = {
-    .move_callback = NULL,
-    .shrink_callback = NULL,
-    .sync_callback = NULL,
-};
-
-#define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback)
-
-static union buflib_data* find_first_free(struct buflib_context *ctx);
-static union buflib_data* find_block_before(struct buflib_context *ctx,
-                                            union buflib_data* block,
-                                            bool is_free);
-
-/* Check the length of a block to ensure it does not go beyond the end
- * of the allocated area. The block can be either allocated or free.
- *
- * This verifies that it is safe to iterate to the next block in a loop.
- */
-static void check_block_length(struct buflib_context *ctx,
-                               union buflib_data *block);
-
-/* Check a block's handle pointer to ensure it is within the handle
- * table, and that the user pointer is pointing within the block.
- *
- * This verifies that it is safe to dereference the entry and ensures
- * that the pointer in the handle table points within the block, as
- * determined by the length field at the start of the block.
- */
-static void check_block_handle(struct buflib_context *ctx,
-                               union buflib_data *block);
-
-/* Initialize buffer manager */
-void
-buflib_init(struct buflib_context *ctx, void *buf, size_t size)
-{
-    union buflib_data *bd_buf = buf;
-    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
-            (unsigned long)size / 1024, ((unsigned long)size%1000)/10);
-
-    /* Align on sizeof(buflib_data), to prevent unaligned access */
-    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
-    size /= sizeof(union buflib_data);
-    /* The handle table is initialized with no entries */
-    ctx->handle_table = bd_buf + size;
-    ctx->last_handle = bd_buf + size;
-    ctx->first_free_handle = bd_buf + size - 1;
-    ctx->buf_start = bd_buf;
-    /* A marker is needed for the end of allocated data, to make sure that it
-     * does not collide with the handle table, and to detect end-of-buffer.
- */ - ctx->alloc_end = bd_buf; - ctx->compact = true; - - if (size == 0) - { - BPANICF("buflib_init error (CTX:%p, %zd bytes):\n", ctx, - (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data)); - } -} - -bool buflib_context_relocate(struct buflib_context *ctx, void *buf) -{ - union buflib_data *handle, *bd_buf = buf; - ptrdiff_t diff = bd_buf - ctx->buf_start; - - /* cannot continue if the buffer is not aligned, since we would need - * to reduce the size of the buffer for aligning */ - if (!IS_ALIGNED((uintptr_t)buf, sizeof(union buflib_data))) - return false; - - /* relocate the handle table entries */ - for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) - { - if (handle->alloc) - handle->alloc += diff * sizeof(union buflib_data); - } - /* relocate the pointers in the context */ - ctx->handle_table += diff; - ctx->last_handle += diff; - ctx->first_free_handle += diff; - ctx->buf_start += diff; - ctx->alloc_end += diff; - - return true; -} - -static void buflib_panic(struct buflib_context *ctx, const char *message, ...) -{ - char buf[128]; - va_list ap; - - va_start(ap, message); - vsnprintf(buf, sizeof(buf), message, ap); - va_end(ap); - - BPANICF("buflib error (CTX:%p, %zd bytes):\n%s", ctx, - (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data), buf); -} - -/* Allocate a new handle, returning 0 on failure */ -static inline -union buflib_data* handle_alloc(struct buflib_context *ctx) -{ - union buflib_data *handle; - /* first_free_handle is a lower bound on free handles, work through the - * table from there until a handle containing NULL is found, or the end - * of the table is reached. - */ - for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--) - if (!handle->alloc) - break; - /* If the search went past the end of the table, it means we need to extend - * the table to get a new handle. - */ - if (handle < ctx->last_handle) - { - if (handle >= ctx->alloc_end) - ctx->last_handle--; - else - { - /* We know the table is full, so update first_free_handle */ - ctx->first_free_handle = ctx->last_handle - 1; - return NULL; - } - } - - /* We know there are no free handles between the old first_free_handle - * and the found handle, therefore we can update first_free_handle */ - ctx->first_free_handle = handle - 1; - - /* We need to set the table entry to a non-NULL value to ensure that - * compactions triggered by an allocation do not compact the handle - * table and delete this handle. */ - handle->val = -1; - - return handle; -} - -/* Free one handle, shrinking the handle table if it's the last one */ -static inline -void handle_free(struct buflib_context *ctx, union buflib_data *handle) -{ - handle->alloc = 0; - /* Update free handle lower bound if this handle has a lower index than the - * old one. 
- */ - if (handle > ctx->first_free_handle) - ctx->first_free_handle = handle; - if (handle == ctx->last_handle) - ctx->last_handle++; - else - ctx->compact = false; -} - -/* Get the start block of an allocation */ -static inline -union buflib_data* handle_to_block(struct buflib_context* ctx, int handle) -{ - void *ptr = buflib_get_data(ctx, handle); - - /* this is a valid case for shrinking if handle - * was freed by the shrink callback */ - if (!ptr) - return NULL; - - union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data)); - return data - BUFLIB_NUM_FIELDS; -} - -/* Shrink the handle table, returning true if its size was reduced, false if - * not - */ -static inline bool handle_table_shrink(struct buflib_context *ctx) -{ - union buflib_data *handle; - union buflib_data *old_last = ctx->last_handle; - - for (handle = ctx->last_handle; handle != ctx->handle_table; ++handle) - if (handle->alloc) - break; - - if (handle > ctx->first_free_handle) - ctx->first_free_handle = handle - 1; - - ctx->last_handle = handle; - return handle != old_last; -} - -/* If shift is non-zero, it represents the number of places to move - * blocks in memory. Calculate the new address for this block, - * update its entry in the handle table, and then move its contents. - * - * Returns false if moving was unsucessful - * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned) - */ -static bool -move_block(struct buflib_context* ctx, union buflib_data* block, int shift) -{ - char* new_start; - union buflib_data *new_block; - - check_block_handle(ctx, block); - union buflib_data *h_entry = block[idx_HANDLE].handle; - - if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0) - return false; - - int handle = ctx->handle_table - h_entry; - BDEBUGF("%s(): moving id=%d by %d(%d)\n", __func__, - handle, shift, shift*(int)sizeof(union buflib_data)); - new_block = block + shift; - new_start = h_entry->alloc + shift*sizeof(union buflib_data); - - struct buflib_callbacks *ops = block[idx_OPS].ops; - - /* If move must be synchronized with use, user should have specified a - callback that handles this */ - if (ops && ops->sync_callback) - ops->sync_callback(handle, true); - - bool retval = false; - if (!ops || ops->move_callback(handle, h_entry->alloc, new_start) - != BUFLIB_CB_CANNOT_MOVE) - { - h_entry->alloc = new_start; /* update handle table */ - memmove(new_block, block, block->val * sizeof(union buflib_data)); - retval = true; - } - - if (ops && ops->sync_callback) - ops->sync_callback(handle, false); - - return retval; -} - -/* Compact allocations and handle table, adjusting handle pointers as needed. - * Return true if any space was freed or consolidated, false otherwise. - */ -static bool -buflib_compact(struct buflib_context *ctx) -{ - BDEBUGF("%s(): Compacting!\n", __func__); - union buflib_data *block, - *hole = NULL; - int shift = 0, len; - /* Store the results of attempting to shrink the handle table */ - bool ret = handle_table_shrink(ctx); - /* compaction has basically two modes of operation: - * 1) the buffer is nicely movable: In this mode, blocks can be simply - * moved towards the beginning. Free blocks add to a shift value, - * which is the amount to move. - * 2) the buffer contains unmovable blocks: unmovable blocks create - * holes and reset shift. Once a hole is found, we're trying to fill - * holes first, moving by shift is the fallback. As the shift is reset, - * this effectively splits the buffer into portions of movable blocks. 
- * This mode cannot be used if no holes are found yet as it only works - * when it moves blocks across the portions. On the other side, - * moving by shift only works within the same portion - * For simplicity only 1 hole at a time is considered */ - for(block = find_first_free(ctx); block < ctx->alloc_end; block += len) - { - check_block_length(ctx, block); - - bool movable = true; /* cache result to avoid 2nd call to move_block */ - len = block->val; - /* This block is free, add its length to the shift value */ - if (len < 0) - { - shift += len; - len = -len; - continue; - } - /* attempt to fill any hole */ - if (hole && -hole->val >= len) - { - intptr_t hlen = -hole->val; - if ((movable = move_block(ctx, block, hole - block))) - { - ret = true; - /* Move was successful. The memory at block is now free */ - block->val = -len; - - /* add its length to shift */ - shift += -len; - /* Reduce the size of the hole accordingly - * but be careful to not overwrite an existing block */ - if (hlen != len) - { - hole += len; - hole->val = len - hlen; /* negative */ - } - else /* hole closed */ - hole = NULL; - continue; - } - } - /* attempt move the allocation by shift */ - if (shift) - { - union buflib_data* target_block = block + shift; - if (!movable || !move_block(ctx, block, shift)) - { - /* free space before an unmovable block becomes a hole, - * therefore mark this block free and track the hole */ - target_block->val = shift; - hole = target_block; - shift = 0; - } - else - ret = true; - } - } - /* Move the end-of-allocation mark, and return true if any new space has - * been freed. - */ - ctx->alloc_end += shift; - ctx->compact = true; - return ret || shift; -} - -/* Compact the buffer by trying both shrinking and moving. - * - * Try to move first. If unsuccesfull, try to shrink. If that was successful - * try to move once more as there might be more room now. - */ -static bool -buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints) -{ - bool result = false; - /* if something compacted before already there will be no further gain */ - if (!ctx->compact) - result = buflib_compact(ctx); - if (!result) - { - union buflib_data *this, *before; - for(this = ctx->buf_start, before = this; - this < ctx->alloc_end; - before = this, this += abs(this->val)) - { - check_block_length(ctx, this); - if (this->val < 0) - continue; - - struct buflib_callbacks *ops = this[idx_OPS].ops; - if (!ops || !ops->shrink_callback) - continue; - - check_block_handle(ctx, this); - union buflib_data* h_entry = this[idx_HANDLE].handle; - int handle = ctx->handle_table - h_entry; - - unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; - /* adjust what we ask for if there's free space in the front - * this isn't too unlikely assuming this block is - * shrinkable but not movable */ - if (pos_hints == BUFLIB_SHRINK_POS_FRONT && - before != this && before->val < 0) - { - size_t free_space = (-before->val) * sizeof(union buflib_data); - size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK; - if (wanted < free_space) /* no shrink needed? 
*/ - continue; - wanted -= free_space; - shrink_hints = pos_hints | wanted; - } - - char* data = h_entry->alloc; - char* data_end = (char*)(this + this->val); - bool last = (data_end == (char*)ctx->alloc_end); - - int ret = ops->shrink_callback(handle, shrink_hints, - data, data_end - data); - result |= (ret == BUFLIB_CB_OK); - - /* 'this' might have changed in the callback (if it shrinked - * from the top or even freed the handle), get it again */ - this = handle_to_block(ctx, handle); - - /* The handle was possibly be freed in the callback, - * re-run the loop with the handle before */ - if (!this) - this = before; - /* could also change with shrinking from back */ - else if (last) - ctx->alloc_end = this + this->val; - } - /* shrinking was successful at least once, try compaction again */ - if (result) - result |= buflib_compact(ctx); - } - - return result; -} - -/* Shift buffered items by size units, and update handle pointers. The shift - * value must be determined to be safe *before* calling. - */ -static void -buflib_buffer_shift(struct buflib_context *ctx, int shift) -{ - memmove(ctx->buf_start + shift, ctx->buf_start, - (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data)); - ctx->buf_start += shift; - ctx->alloc_end += shift; - shift *= sizeof(union buflib_data); - union buflib_data *handle; - for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) - if (handle->alloc) - handle->alloc += shift; -} - -/* Shift buffered items up by size bytes, or as many as possible if size == 0. - * Set size to the number of bytes freed. - */ -void* -buflib_buffer_out(struct buflib_context *ctx, size_t *size) -{ - if (!ctx->compact) - buflib_compact(ctx); - size_t avail = ctx->last_handle - ctx->alloc_end; - size_t avail_b = avail * sizeof(union buflib_data); - if (*size && *size < avail_b) - { - avail = (*size + sizeof(union buflib_data) - 1) - / sizeof(union buflib_data); - avail_b = avail * sizeof(union buflib_data); - } - *size = avail_b; - void *ret = ctx->buf_start; - buflib_buffer_shift(ctx, avail); - return ret; -} - -/* Shift buffered items down by size bytes */ -void -buflib_buffer_in(struct buflib_context *ctx, int size) -{ - size /= sizeof(union buflib_data); - buflib_buffer_shift(ctx, -size); -} - -/* Allocate a buffer of size bytes, returning a handle for it. - * Note: Buffers are movable since NULL is passed for "ops". - Don't pass them to functions that call yield() */ -int -buflib_alloc(struct buflib_context *ctx, size_t size) -{ - return buflib_alloc_ex(ctx, size, NULL); -} - -/* Allocate a buffer of size bytes, returning a handle for it. - * - * The ops parameter points to caller-implemented callbacks for moving and - * shrinking. - * - * If you pass NULL for "ops", buffers are movable by default. - * Don't pass them to functions that call yield() like I/O. - * Buffers are only shrinkable when a shrink callback is given. - */ - -int -buflib_alloc_ex(struct buflib_context *ctx, size_t size, - struct buflib_callbacks *ops) -{ - union buflib_data *handle, *block; - bool last; - /* This really is assigned a value before use */ - int block_len; - size = (size + sizeof(union buflib_data) - 1) / - sizeof(union buflib_data) - + BUFLIB_NUM_FIELDS; -handle_alloc: - handle = handle_alloc(ctx); - if (!handle) - { - /* If allocation has failed, and compaction has succeded, it may be - * possible to get a handle by trying again. 
- */ - union buflib_data* last_block = find_block_before(ctx, - ctx->alloc_end, false); - struct buflib_callbacks* ops = last_block[idx_OPS].ops; - unsigned hints = 0; - if (!ops || !ops->shrink_callback) - { /* the last one isn't shrinkable - * make room in front of a shrinkable and move this alloc */ - hints = BUFLIB_SHRINK_POS_FRONT; - hints |= last_block->val * sizeof(union buflib_data); - } - else if (ops && ops->shrink_callback) - { /* the last is shrinkable, make room for handles directly */ - hints = BUFLIB_SHRINK_POS_BACK; - hints |= 16*sizeof(union buflib_data); - } - /* buflib_compact_and_shrink() will compact and move last_block() - * if possible */ - if (buflib_compact_and_shrink(ctx, hints)) - goto handle_alloc; - return -1; - } - -buffer_alloc: - /* need to re-evaluate last before the loop because the last allocation - * possibly made room in its front to fit this, so last would be wrong */ - last = false; - for (block = find_first_free(ctx);; block += block_len) - { - /* If the last used block extends all the way to the handle table, the - * block "after" it doesn't have a header. Because of this, it's easier - * to always find the end of allocation by saving a pointer, and always - * calculate the free space at the end by comparing it to the - * last_handle pointer. - */ - if(block == ctx->alloc_end) - { - last = true; - block_len = ctx->last_handle - block; - if ((size_t)block_len < size) - block = NULL; - break; - } - - check_block_length(ctx, block); - block_len = block->val; - /* blocks with positive length are already allocated. */ - if(block_len > 0) - continue; - block_len = -block_len; - /* The search is first-fit, any fragmentation this causes will be - * handled at compaction. - */ - if ((size_t)block_len >= size) - break; - } - if (!block) - { - /* Try compacting if allocation failed */ - unsigned hint = BUFLIB_SHRINK_POS_FRONT | - ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK); - if (buflib_compact_and_shrink(ctx, hint)) - { - goto buffer_alloc; - } else { - handle_free(ctx, handle); - return -2; - } - } - - /* Set up the allocated block, by marking the size allocated, and storing - * a pointer to the handle. - */ - block[idx_LEN].val = size; - block[idx_HANDLE].handle = handle; - block[idx_OPS].ops = ops; - block[idx_PIN].pincount = 0; - - handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; - - BDEBUGF("buflib_alloc_ex: size=%d handle=%p clb=%p\n", - (unsigned int)size, (void *)handle, (void *)ops); - - block += size; - /* alloc_end must be kept current if we're taking the last block. */ - if (last) - ctx->alloc_end = block; - /* Only free blocks *before* alloc_end have tagged length. */ - else if ((size_t)block_len > size) - block->val = size - block_len; - /* Return the handle index as a positive integer. 
*/ - return ctx->handle_table - handle; -} - -static union buflib_data* -find_first_free(struct buflib_context *ctx) -{ - union buflib_data *ret; - for(ret = ctx->buf_start; ret < ctx->alloc_end; ret += ret->val) - { - check_block_length(ctx, ret); - if (ret->val < 0) - break; - } - - /* ret is now either a free block or the same as alloc_end, both is fine */ - return ret; -} - -/* Finds the free block before block, and returns NULL if it's not free */ -static union buflib_data* -find_block_before(struct buflib_context *ctx, union buflib_data* block, - bool is_free) -{ - union buflib_data *ret = ctx->buf_start, - *next_block = ret; - - /* no previous block */ - if (next_block == block) - return NULL; - - /* find the block that's before the current one */ - while (next_block != block) - { - check_block_length(ctx, ret); - ret = next_block; - next_block += abs(ret->val); - } - - /* don't return it if the found block isn't free */ - if (is_free && ret->val >= 0) - return NULL; - - return ret; -} - -/* Free the buffer associated with handle_num. */ -int -buflib_free(struct buflib_context *ctx, int handle_num) -{ - if (handle_num <= 0) /* invalid or already free */ - return handle_num; - union buflib_data *handle = ctx->handle_table - handle_num, - *freed_block = handle_to_block(ctx, handle_num), - *block, *next_block; - /* We need to find the block before the current one, to see if it is free - * and can be merged with this one. - */ - block = find_block_before(ctx, freed_block, true); - if (block) - { - block->val -= freed_block->val; - } - else - { - /* Otherwise, set block to the newly-freed block, and mark it free, - * before continuing on, since the code below expects block to point - * to a free block which may have free space after it. */ - block = freed_block; - block->val = -block->val; - } - - next_block = block - block->val; - /* Check if we are merging with the free space at alloc_end. */ - if (next_block == ctx->alloc_end) - ctx->alloc_end = block; - /* Otherwise, the next block might still be a "normal" free block, and the - * mid-allocation free means that the buffer is no longer compact. 
- */ - else { - ctx->compact = false; - if (next_block->val < 0) - block->val += next_block->val; - } - handle_free(ctx, handle); - handle->alloc = NULL; - - return 0; /* unconditionally */ -} - -static size_t -free_space_at_end(struct buflib_context* ctx) -{ - /* subtract 5 elements for - * val, handle, meta_len, ops and the handle table entry*/ - ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - BUFLIB_NUM_FIELDS); - diff -= 16; /* space for future handles */ - diff *= sizeof(union buflib_data); /* make it bytes */ - - if (diff > 0) - return diff; - else - return 0; -} - -/* Return the maximum allocatable contiguous memory in bytes */ -size_t -buflib_allocatable(struct buflib_context* ctx) -{ - size_t free_space = 0, max_free_space = 0; - intptr_t block_len; - - /* make sure buffer is as contiguous as possible */ - if (!ctx->compact) - buflib_compact(ctx); - - /* now look if there's free in holes */ - for(union buflib_data *block = find_first_free(ctx); - block < ctx->alloc_end; - block += block_len) - { - check_block_length(ctx, block); - block_len = block->val; - - if (block_len < 0) - { - block_len = -block_len; - free_space += block_len; - continue; - } - - /* an unmovable section resets the count as free space - * can't be contigous */ - if (!IS_MOVABLE(block)) - { - if (max_free_space < free_space) - max_free_space = free_space; - free_space = 0; - } - } - - /* select the best */ - max_free_space = MAX(max_free_space, free_space); - max_free_space *= sizeof(union buflib_data); - max_free_space = MAX(max_free_space, free_space_at_end(ctx)); - - if (max_free_space > 0) - return max_free_space; - else - return 0; -} - -/* Return the amount of unallocated memory in bytes (even if not contiguous) */ -size_t -buflib_available(struct buflib_context* ctx) -{ - size_t free_space = 0; - - /* add up all holes */ - for(union buflib_data *block = find_first_free(ctx); - block < ctx->alloc_end; - block += abs(block->val)) - { - check_block_length(ctx, block); - if (block->val < 0) - free_space += -block->val; - } - - free_space *= sizeof(union buflib_data); /* make it bytes */ - free_space += free_space_at_end(ctx); - - return free_space; -} - -/* - * Allocate all available (as returned by buflib_available()) memory and return - * a handle to it - * - * This grabs a lock which can only be unlocked by buflib_free() or - * buflib_shrink(), to protect from further allocations (which couldn't be - * serviced anyway). - */ -int -buflib_alloc_maximum(struct buflib_context* ctx, size_t *size, struct buflib_callbacks *ops) -{ - /* ignore ctx->compact because it's true if all movable blocks are contiguous - * even if the buffer has holes due to unmovable allocations */ - unsigned hints; - size_t bufsize = ctx->handle_table - ctx->buf_start; - bufsize = MIN(BUFLIB_SHRINK_SIZE_MASK, bufsize*sizeof(union buflib_data)); /* make it bytes */ - /* try as hard as possible to free up space. allocations are - * welcome to give up some or all of their memory */ - hints = BUFLIB_SHRINK_POS_BACK | BUFLIB_SHRINK_POS_FRONT | bufsize; - /* compact until no space can be gained anymore */ - while (buflib_compact_and_shrink(ctx, hints)); - - *size = buflib_allocatable(ctx); - if (*size <= 0) /* OOM */ - return -1; - - return buflib_alloc_ex(ctx, *size, ops); -} - -/* Shrink the allocation indicated by the handle according to new_start and - * new_size. 
Grow is not possible, therefore new_start and new_start + new_size - * must be within the original allocation - */ -bool -buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size) -{ - char* oldstart = buflib_get_data(ctx, handle); - char* newstart = new_start != NULL ? new_start : oldstart; - char* newend = newstart + new_size; - - /* newstart must be higher and new_size not "negative" */ - if (newstart < oldstart || newend < newstart) - return false; - union buflib_data *block = handle_to_block(ctx, handle), - *old_next_block = block + block->val, - /* newstart isn't necessarily properly aligned but it - * needn't be since it's only dereferenced by the user code */ - *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart), - *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart), - *new_next_block = (union buflib_data*)B_ALIGN_UP(newend), - *new_block, metadata_size; - - /* growing is not supported */ - if (new_next_block > old_next_block) - return false; - - metadata_size.val = aligned_oldstart - block; - /* update val and the handle table entry */ - new_block = aligned_newstart - metadata_size.val; - block[idx_LEN].val = new_next_block - new_block; - - check_block_handle(ctx, block); - block[idx_HANDLE].handle->alloc = newstart; - if (block != new_block) - { - /* move metadata over, i.e. pointer to handle table entry and name - * This is actually the point of no return. Data in the allocation is - * being modified, and therefore we must successfully finish the shrink - * operation */ - memmove(new_block, block, metadata_size.val*sizeof(metadata_size)); - /* mark the old block unallocated */ - block->val = block - new_block; - /* find the block before in order to merge with the new free space */ - union buflib_data *free_before = find_block_before(ctx, block, true); - if (free_before) - free_before->val += block->val; - - /* We didn't handle size changes yet, assign block to the new one - * the code below the wants block whether it changed or not */ - block = new_block; - } - - /* Now deal with size changes that create free blocks after the allocation */ - if (old_next_block != new_next_block) - { - if (ctx->alloc_end == old_next_block) - ctx->alloc_end = new_next_block; - else if (old_next_block->val < 0) - { /* enlarge next block by moving it up */ - new_next_block->val = old_next_block->val - (old_next_block - new_next_block); - } - else if (old_next_block != new_next_block) - { /* creating a hole */ - /* must be negative to indicate being unallocated */ - new_next_block->val = new_next_block - old_next_block; - } - } - - return true; -} - -void buflib_pin(struct buflib_context *ctx, int handle) -{ - if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) - buflib_panic(ctx, "invalid handle pin: %d", handle); - - union buflib_data *data = handle_to_block(ctx, handle); - data[idx_PIN].pincount++; -} - -void buflib_unpin(struct buflib_context *ctx, int handle) -{ - if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) - buflib_panic(ctx, "invalid handle unpin: %d", handle); - - union buflib_data *data = handle_to_block(ctx, handle); - if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) - { - if (data[idx_PIN].pincount == 0) - buflib_panic(ctx, "handle pin underflow: %d", handle); - } - - data[idx_PIN].pincount--; -} - -unsigned buflib_pin_count(struct buflib_context *ctx, int handle) -{ - if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) - buflib_panic(ctx, "invalid handle: %d", handle); - - union buflib_data *data = 
handle_to_block(ctx, handle); - return data[idx_PIN].pincount; -} - -#ifdef BUFLIB_DEBUG_GET_DATA -void *buflib_get_data(struct buflib_context *ctx, int handle) -{ - if (handle <= 0) - buflib_panic(ctx, "invalid handle access: %d", handle); - - return (void*)(ctx->handle_table[-handle].alloc); -} -#endif - -#ifdef BUFLIB_DEBUG_CHECK_VALID -void buflib_check_valid(struct buflib_context *ctx) -{ - for(union buflib_data *block = ctx->buf_start; - block < ctx->alloc_end; - block += abs(block->val)) - { - check_block_length(ctx, block); - if (block->val < 0) - continue; - - check_block_handle(ctx, block); - } -} -#endif - -#ifdef BUFLIB_DEBUG_PRINT -int buflib_get_num_blocks(struct buflib_context *ctx) -{ - int i = 0; - - for(union buflib_data *block = ctx->buf_start; - block < ctx->alloc_end; - block += abs(block->val)) - { - check_block_length(ctx, block); - ++i; - } - - return i; -} - -bool buflib_print_block_at(struct buflib_context *ctx, int block_num, - char *buf, size_t bufsize) -{ - for(union buflib_data *block = ctx->buf_start; - block < ctx->alloc_end; - block += abs(block->val)) - { - check_block_length(ctx, block); - - if (block_num-- == 0) - { - snprintf(buf, bufsize, "%8p: val: %4ld (%sallocated)", - block, (long)block->val, - block->val > 0 ? "" : "un"); - return true; - } - } - - if (bufsize > 0) - *buf = '\0'; - return false; -} -#endif - -static void check_block_length(struct buflib_context *ctx, - union buflib_data *block) -{ - if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) - { - intptr_t length = block[idx_LEN].val; - - /* Check the block length does not pass beyond the end */ - if (length == 0 || block > ctx->alloc_end - abs(length)) - { - buflib_panic(ctx, "block len wacky [%p]=%ld", - (void*)&block[idx_LEN], (long)length); - } - } -} - -static void check_block_handle(struct buflib_context *ctx, - union buflib_data *block) -{ - if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) - { - intptr_t length = block[idx_LEN].val; - union buflib_data *h_entry = block[idx_HANDLE].handle; - - /* Check the handle pointer is properly aligned */ - /* TODO: Can we ensure the compiler doesn't optimize this out? - * I dunno, maybe the compiler can assume the pointer is always - * properly aligned due to some C standard voodoo?? */ - if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) - { - buflib_panic(ctx, "handle unaligned [%p]=%p", - &block[idx_HANDLE], h_entry); - } - - /* Check the pointer is actually inside the handle table */ - if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) - { - buflib_panic(ctx, "handle out of bounds [%p]=%p", - &block[idx_HANDLE], h_entry); - } - - /* Now check the allocation is within the block. - * This is stricter than check_handle(). */ - void *alloc = h_entry->alloc; - void *alloc_begin = block; - void *alloc_end = block + length; - /* buflib allows zero length allocations, so alloc_end is inclusive */ - if (alloc < alloc_begin || alloc > alloc_end) - { - buflib_panic(ctx, "alloc outside block [%p]=%p, %p-%p", - h_entry, alloc, alloc_begin, alloc_end); - } - } -} diff --git a/firmware/buflib_mempool.c b/firmware/buflib_mempool.c new file mode 100644 index 0000000000..18e4f118ff --- /dev/null +++ b/firmware/buflib_mempool.c @@ -0,0 +1,1113 @@ +/*************************************************************************** +* __________ __ ___. 
+*   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+*   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+*   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+*   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+*                     \/            \/     \/    \/            \/
+* $Id$
+*
+* This is a memory allocator designed to provide reasonable management of free
+* space and fast access to allocated data. More than one allocator can be used
+* at a time by initializing multiple contexts.
+*
+* Copyright (C) 2009 Andrew Mahone
+* Copyright (C) 2011 Thomas Martitz
+*
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version 2
+* of the License, or (at your option) any later version.
+*
+* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+* KIND, either express or implied.
+*
+****************************************************************************/
+
+#include <stdarg.h>
+#include <stdlib.h> /* for abs() */
+#include <stdio.h> /* for snprintf() */
+#include <stddef.h> /* for ptrdiff_t */
+#include "buflib_mempool.h"
+#include "string-extra.h" /* strmemccpy() */
+#include "debug.h"
+#include "panic.h"
+#include "system.h" /* for ALIGN_*() */
+
+/* FIXME: This comment is pretty out of date now and wrong in some details.
+ *
+ * The main goal of this design is fast fetching of the pointer for a handle.
+ * For that reason, the handles are stored in a table at the end of the buffer
+ * with a fixed address, so that returning the pointer for a handle is a simple
+ * table lookup. To reduce the frequency with which allocated blocks will need
+ * to be moved to free space, allocations grow up in address from the start of
+ * the buffer. The buffer is treated as an array of union buflib_data. Blocks
+ * start with a length marker, which is included in their length. Free blocks
+ * are marked by negative length. Allocated blocks have a positive length
+ * marker, followed by additional metadata: first a pointer
+ * (union buflib_data*) to the corresponding handle table entry, so that it can
+ * be quickly found and updated during compaction. After that follows the
+ * pointer to the struct buflib_callbacks associated with this allocation
+ * (may be NULL). That pointer is followed by a variable length character array
+ * containing the nul-terminated string identifier of the allocation. After this
+ * array there's a length marker for the length of the character array including
+ * this length marker (counted in n*sizeof(union buflib_data)), which makes it
+ * possible to find the start of the character array (and therefore the start
+ * of the entire block) when only the handle or payload start is known.
+ *
+ * UPDATE BUFLIB_ALLOC_OVERHEAD (buflib.h) WHEN THE METADATA CHANGES!
+ *
+ * Example:
+ * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
+ * |L|H|C|cccc|L2|crc|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|crc|XXXXXXXXXXXXX|AAA|
+ *
+ * L   - length marker (negative if block unallocated)
+ * H   - handle table entry pointer
+ * C   - pointer to struct buflib_callbacks
+ * c   - variable sized string identifier
+ * L2  - length of the metadata
+ * crc - crc32 protecting buflib metadata integrity
+ * X   - actual payload
+ * Y   - unallocated space
+ *
+ * A   - pointer to start of payload (first X) in the handle table (may be null)
+ *
+ * The blocks can be walked by jumping the abs() of the L length marker, i.e.
+ *   union buflib_data* L;
+ *   for(L = start; L < end; L += abs(L->val)) { .... }
+ *
+ *
+ * The allocator functions are passed a context struct so that two allocators
+ * can be run, for example, one per core may be used, with convenience wrappers
+ * for the single-allocator case that use a predefined context.
+ */
+
+#define B_ALIGN_DOWN(x) \
+    ALIGN_DOWN(x, sizeof(union buflib_data))
+
+#define B_ALIGN_UP(x) \
+    ALIGN_UP(x, sizeof(union buflib_data))
+
+#ifdef DEBUG
+    #include <stdio.h>
+    #define BDEBUGF DEBUGF
+#else
+    #define BDEBUGF(...) do { } while(0)
+#endif
+
+#define BPANICF panicf
+
+/* Available paranoia checks */
+#define PARANOIA_CHECK_LENGTH       (1 << 0)
+#define PARANOIA_CHECK_BLOCK_HANDLE (1 << 1)
+#define PARANOIA_CHECK_PINNING      (1 << 2)
+/* Bitmask of enabled paranoia checks */
+#define BUFLIB_PARANOIA \
+    (PARANOIA_CHECK_LENGTH | \
+     PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING)
+
+/* Indices used to access block fields as block[idx_XXX] */
+enum {
+    idx_LEN,    /* length of the block, must come first */
+    idx_HANDLE, /* pointer to entry in the handle table */
+    idx_OPS,    /* pointer to an ops struct */
+    idx_PIN,    /* pin count */
+    BUFLIB_NUM_FIELDS,
+};
+
+struct buflib_callbacks buflib_ops_locked = {
+    .move_callback = NULL,
+    .shrink_callback = NULL,
+    .sync_callback = NULL,
+};
+
+#define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback)
+
+static union buflib_data* find_first_free(struct buflib_context *ctx);
+static union buflib_data* find_block_before(struct buflib_context *ctx,
+                                            union buflib_data* block,
+                                            bool is_free);
+
+/* Check the length of a block to ensure it does not go beyond the end
+ * of the allocated area. The block can be either allocated or free.
+ *
+ * This verifies that it is safe to iterate to the next block in a loop.
+ */
+static void check_block_length(struct buflib_context *ctx,
+                               union buflib_data *block);
+
+/* Check a block's handle pointer to ensure it is within the handle
+ * table, and that the user pointer is pointing within the block.
+ *
+ * This verifies that it is safe to dereference the entry and ensures
+ * that the pointer in the handle table points within the block, as
+ * determined by the length field at the start of the block.
+ */
+static void check_block_handle(struct buflib_context *ctx,
+                               union buflib_data *block);
+
+/* Initialize buffer manager */
+void
+buflib_init(struct buflib_context *ctx, void *buf, size_t size)
+{
+    union buflib_data *bd_buf = buf;
+    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
+            (unsigned long)size / 1024, ((unsigned long)size%1000)/10);
+
+    /* Align on sizeof(buflib_data), to prevent unaligned access */
+    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
+    size /= sizeof(union buflib_data);
+    /* The handle table is initialized with no entries */
+    ctx->handle_table = bd_buf + size;
+    ctx->last_handle = bd_buf + size;
+    ctx->first_free_handle = bd_buf + size - 1;
+    ctx->buf_start = bd_buf;
+    /* A marker is needed for the end of allocated data, to make sure that it
+     * does not collide with the handle table, and to detect end-of-buffer.
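+     * (The free gap at the back is thus last_handle - alloc_end units; it is
+     * what buflib_buffer_out() reports and what buflib_alloc_ex() probes as
+     * the final candidate block when walking the buffer.)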
+ */ + ctx->alloc_end = bd_buf; + ctx->compact = true; + + if (size == 0) + { + BPANICF("buflib_init error (CTX:%p, %zd bytes):\n", ctx, + (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data)); + } +} + +bool buflib_context_relocate(struct buflib_context *ctx, void *buf) +{ + union buflib_data *handle, *bd_buf = buf; + ptrdiff_t diff = bd_buf - ctx->buf_start; + + /* cannot continue if the buffer is not aligned, since we would need + * to reduce the size of the buffer for aligning */ + if (!IS_ALIGNED((uintptr_t)buf, sizeof(union buflib_data))) + return false; + + /* relocate the handle table entries */ + for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) + { + if (handle->alloc) + handle->alloc += diff * sizeof(union buflib_data); + } + /* relocate the pointers in the context */ + ctx->handle_table += diff; + ctx->last_handle += diff; + ctx->first_free_handle += diff; + ctx->buf_start += diff; + ctx->alloc_end += diff; + + return true; +} + +static void buflib_panic(struct buflib_context *ctx, const char *message, ...) +{ + char buf[128]; + va_list ap; + + va_start(ap, message); + vsnprintf(buf, sizeof(buf), message, ap); + va_end(ap); + + BPANICF("buflib error (CTX:%p, %zd bytes):\n%s", ctx, + (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data), buf); +} + +/* Allocate a new handle, returning 0 on failure */ +static inline +union buflib_data* handle_alloc(struct buflib_context *ctx) +{ + union buflib_data *handle; + /* first_free_handle is a lower bound on free handles, work through the + * table from there until a handle containing NULL is found, or the end + * of the table is reached. + */ + for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--) + if (!handle->alloc) + break; + /* If the search went past the end of the table, it means we need to extend + * the table to get a new handle. + */ + if (handle < ctx->last_handle) + { + if (handle >= ctx->alloc_end) + ctx->last_handle--; + else + { + /* We know the table is full, so update first_free_handle */ + ctx->first_free_handle = ctx->last_handle - 1; + return NULL; + } + } + + /* We know there are no free handles between the old first_free_handle + * and the found handle, therefore we can update first_free_handle */ + ctx->first_free_handle = handle - 1; + + /* We need to set the table entry to a non-NULL value to ensure that + * compactions triggered by an allocation do not compact the handle + * table and delete this handle. */ + handle->val = -1; + + return handle; +} + +/* Free one handle, shrinking the handle table if it's the last one */ +static inline +void handle_free(struct buflib_context *ctx, union buflib_data *handle) +{ + handle->alloc = 0; + /* Update free handle lower bound if this handle has a lower index than the + * old one. 
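+     * (first_free_handle is only a conservative search hint: handle_alloc
+     * starts its downward scan there instead of at the top of the table.)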
+ */ + if (handle > ctx->first_free_handle) + ctx->first_free_handle = handle; + if (handle == ctx->last_handle) + ctx->last_handle++; + else + ctx->compact = false; +} + +/* Get the start block of an allocation */ +static inline +union buflib_data* handle_to_block(struct buflib_context* ctx, int handle) +{ + void *ptr = buflib_get_data(ctx, handle); + + /* this is a valid case for shrinking if handle + * was freed by the shrink callback */ + if (!ptr) + return NULL; + + union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data)); + return data - BUFLIB_NUM_FIELDS; +} + +/* Shrink the handle table, returning true if its size was reduced, false if + * not + */ +static inline bool handle_table_shrink(struct buflib_context *ctx) +{ + union buflib_data *handle; + union buflib_data *old_last = ctx->last_handle; + + for (handle = ctx->last_handle; handle != ctx->handle_table; ++handle) + if (handle->alloc) + break; + + if (handle > ctx->first_free_handle) + ctx->first_free_handle = handle - 1; + + ctx->last_handle = handle; + return handle != old_last; +} + +/* If shift is non-zero, it represents the number of places to move + * blocks in memory. Calculate the new address for this block, + * update its entry in the handle table, and then move its contents. + * + * Returns false if moving was unsucessful + * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned) + */ +static bool +move_block(struct buflib_context* ctx, union buflib_data* block, int shift) +{ + char* new_start; + union buflib_data *new_block; + + check_block_handle(ctx, block); + union buflib_data *h_entry = block[idx_HANDLE].handle; + + if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0) + return false; + + int handle = ctx->handle_table - h_entry; + BDEBUGF("%s(): moving id=%d by %d(%d)\n", __func__, + handle, shift, shift*(int)sizeof(union buflib_data)); + new_block = block + shift; + new_start = h_entry->alloc + shift*sizeof(union buflib_data); + + struct buflib_callbacks *ops = block[idx_OPS].ops; + + /* If move must be synchronized with use, user should have specified a + callback that handles this */ + if (ops && ops->sync_callback) + ops->sync_callback(handle, true); + + bool retval = false; + if (!ops || ops->move_callback(handle, h_entry->alloc, new_start) + != BUFLIB_CB_CANNOT_MOVE) + { + h_entry->alloc = new_start; /* update handle table */ + memmove(new_block, block, block->val * sizeof(union buflib_data)); + retval = true; + } + + if (ops && ops->sync_callback) + ops->sync_callback(handle, false); + + return retval; +} + +/* Compact allocations and handle table, adjusting handle pointers as needed. + * Return true if any space was freed or consolidated, false otherwise. + */ +static bool +buflib_compact(struct buflib_context *ctx) +{ + BDEBUGF("%s(): Compacting!\n", __func__); + union buflib_data *block, + *hole = NULL; + int shift = 0, len; + /* Store the results of attempting to shrink the handle table */ + bool ret = handle_table_shrink(ctx); + /* compaction has basically two modes of operation: + * 1) the buffer is nicely movable: In this mode, blocks can be simply + * moved towards the beginning. Free blocks add to a shift value, + * which is the amount to move. + * 2) the buffer contains unmovable blocks: unmovable blocks create + * holes and reset shift. Once a hole is found, we're trying to fill + * holes first, moving by shift is the fallback. As the shift is reset, + * this effectively splits the buffer into portions of movable blocks. 
+ * This mode cannot be used if no holes are found yet as it only works + * when it moves blocks across the portions. On the other side, + * moving by shift only works within the same portion + * For simplicity only 1 hole at a time is considered */ + for(block = find_first_free(ctx); block < ctx->alloc_end; block += len) + { + check_block_length(ctx, block); + + bool movable = true; /* cache result to avoid 2nd call to move_block */ + len = block->val; + /* This block is free, add its length to the shift value */ + if (len < 0) + { + shift += len; + len = -len; + continue; + } + /* attempt to fill any hole */ + if (hole && -hole->val >= len) + { + intptr_t hlen = -hole->val; + if ((movable = move_block(ctx, block, hole - block))) + { + ret = true; + /* Move was successful. The memory at block is now free */ + block->val = -len; + + /* add its length to shift */ + shift += -len; + /* Reduce the size of the hole accordingly + * but be careful to not overwrite an existing block */ + if (hlen != len) + { + hole += len; + hole->val = len - hlen; /* negative */ + } + else /* hole closed */ + hole = NULL; + continue; + } + } + /* attempt move the allocation by shift */ + if (shift) + { + union buflib_data* target_block = block + shift; + if (!movable || !move_block(ctx, block, shift)) + { + /* free space before an unmovable block becomes a hole, + * therefore mark this block free and track the hole */ + target_block->val = shift; + hole = target_block; + shift = 0; + } + else + ret = true; + } + } + /* Move the end-of-allocation mark, and return true if any new space has + * been freed. + */ + ctx->alloc_end += shift; + ctx->compact = true; + return ret || shift; +} + +/* Compact the buffer by trying both shrinking and moving. + * + * Try to move first. If unsuccesfull, try to shrink. If that was successful + * try to move once more as there might be more room now. + */ +static bool +buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints) +{ + bool result = false; + /* if something compacted before already there will be no further gain */ + if (!ctx->compact) + result = buflib_compact(ctx); + if (!result) + { + union buflib_data *this, *before; + for(this = ctx->buf_start, before = this; + this < ctx->alloc_end; + before = this, this += abs(this->val)) + { + check_block_length(ctx, this); + if (this->val < 0) + continue; + + struct buflib_callbacks *ops = this[idx_OPS].ops; + if (!ops || !ops->shrink_callback) + continue; + + check_block_handle(ctx, this); + union buflib_data* h_entry = this[idx_HANDLE].handle; + int handle = ctx->handle_table - h_entry; + + unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; + /* adjust what we ask for if there's free space in the front + * this isn't too unlikely assuming this block is + * shrinkable but not movable */ + if (pos_hints == BUFLIB_SHRINK_POS_FRONT && + before != this && before->val < 0) + { + size_t free_space = (-before->val) * sizeof(union buflib_data); + size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK; + if (wanted < free_space) /* no shrink needed? 
*/ + continue; + wanted -= free_space; + shrink_hints = pos_hints | wanted; + } + + char* data = h_entry->alloc; + char* data_end = (char*)(this + this->val); + bool last = (data_end == (char*)ctx->alloc_end); + + int ret = ops->shrink_callback(handle, shrink_hints, + data, data_end - data); + result |= (ret == BUFLIB_CB_OK); + + /* 'this' might have changed in the callback (if it shrinked + * from the top or even freed the handle), get it again */ + this = handle_to_block(ctx, handle); + + /* The handle was possibly be freed in the callback, + * re-run the loop with the handle before */ + if (!this) + this = before; + /* could also change with shrinking from back */ + else if (last) + ctx->alloc_end = this + this->val; + } + /* shrinking was successful at least once, try compaction again */ + if (result) + result |= buflib_compact(ctx); + } + + return result; +} + +/* Shift buffered items by size units, and update handle pointers. The shift + * value must be determined to be safe *before* calling. + */ +static void +buflib_buffer_shift(struct buflib_context *ctx, int shift) +{ + memmove(ctx->buf_start + shift, ctx->buf_start, + (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data)); + ctx->buf_start += shift; + ctx->alloc_end += shift; + shift *= sizeof(union buflib_data); + union buflib_data *handle; + for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) + if (handle->alloc) + handle->alloc += shift; +} + +/* Shift buffered items up by size bytes, or as many as possible if size == 0. + * Set size to the number of bytes freed. + */ +void* +buflib_buffer_out(struct buflib_context *ctx, size_t *size) +{ + if (!ctx->compact) + buflib_compact(ctx); + size_t avail = ctx->last_handle - ctx->alloc_end; + size_t avail_b = avail * sizeof(union buflib_data); + if (*size && *size < avail_b) + { + avail = (*size + sizeof(union buflib_data) - 1) + / sizeof(union buflib_data); + avail_b = avail * sizeof(union buflib_data); + } + *size = avail_b; + void *ret = ctx->buf_start; + buflib_buffer_shift(ctx, avail); + return ret; +} + +/* Shift buffered items down by size bytes */ +void +buflib_buffer_in(struct buflib_context *ctx, int size) +{ + size /= sizeof(union buflib_data); + buflib_buffer_shift(ctx, -size); +} + +/* Allocate a buffer of size bytes, returning a handle for it. + * Note: Buffers are movable since NULL is passed for "ops". + Don't pass them to functions that call yield() */ +int +buflib_alloc(struct buflib_context *ctx, size_t size) +{ + return buflib_alloc_ex(ctx, size, NULL); +} + +/* Allocate a buffer of size bytes, returning a handle for it. + * + * The ops parameter points to caller-implemented callbacks for moving and + * shrinking. + * + * If you pass NULL for "ops", buffers are movable by default. + * Don't pass them to functions that call yield() like I/O. + * Buffers are only shrinkable when a shrink callback is given. + */ + +int +buflib_alloc_ex(struct buflib_context *ctx, size_t size, + struct buflib_callbacks *ops) +{ + union buflib_data *handle, *block; + bool last; + /* This really is assigned a value before use */ + int block_len; + size = (size + sizeof(union buflib_data) - 1) / + sizeof(union buflib_data) + + BUFLIB_NUM_FIELDS; +handle_alloc: + handle = handle_alloc(ctx); + if (!handle) + { + /* If allocation has failed, and compaction has succeded, it may be + * possible to get a handle by trying again. 
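+         * (The hint word packs a BUFLIB_SHRINK_POS_* flag together with a
+         * byte count: the flag selects which end of the allocation should
+         * give up space, the remaining bits say how much, as set up below.)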
+ */ + union buflib_data* last_block = find_block_before(ctx, + ctx->alloc_end, false); + struct buflib_callbacks* ops = last_block[idx_OPS].ops; + unsigned hints = 0; + if (!ops || !ops->shrink_callback) + { /* the last one isn't shrinkable + * make room in front of a shrinkable and move this alloc */ + hints = BUFLIB_SHRINK_POS_FRONT; + hints |= last_block->val * sizeof(union buflib_data); + } + else if (ops && ops->shrink_callback) + { /* the last is shrinkable, make room for handles directly */ + hints = BUFLIB_SHRINK_POS_BACK; + hints |= 16*sizeof(union buflib_data); + } + /* buflib_compact_and_shrink() will compact and move last_block() + * if possible */ + if (buflib_compact_and_shrink(ctx, hints)) + goto handle_alloc; + return -1; + } + +buffer_alloc: + /* need to re-evaluate last before the loop because the last allocation + * possibly made room in its front to fit this, so last would be wrong */ + last = false; + for (block = find_first_free(ctx);; block += block_len) + { + /* If the last used block extends all the way to the handle table, the + * block "after" it doesn't have a header. Because of this, it's easier + * to always find the end of allocation by saving a pointer, and always + * calculate the free space at the end by comparing it to the + * last_handle pointer. + */ + if(block == ctx->alloc_end) + { + last = true; + block_len = ctx->last_handle - block; + if ((size_t)block_len < size) + block = NULL; + break; + } + + check_block_length(ctx, block); + block_len = block->val; + /* blocks with positive length are already allocated. */ + if(block_len > 0) + continue; + block_len = -block_len; + /* The search is first-fit, any fragmentation this causes will be + * handled at compaction. + */ + if ((size_t)block_len >= size) + break; + } + if (!block) + { + /* Try compacting if allocation failed */ + unsigned hint = BUFLIB_SHRINK_POS_FRONT | + ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK); + if (buflib_compact_and_shrink(ctx, hint)) + { + goto buffer_alloc; + } else { + handle_free(ctx, handle); + return -2; + } + } + + /* Set up the allocated block, by marking the size allocated, and storing + * a pointer to the handle. + */ + block[idx_LEN].val = size; + block[idx_HANDLE].handle = handle; + block[idx_OPS].ops = ops; + block[idx_PIN].pincount = 0; + + handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; + + BDEBUGF("buflib_alloc_ex: size=%d handle=%p clb=%p\n", + (unsigned int)size, (void *)handle, (void *)ops); + + block += size; + /* alloc_end must be kept current if we're taking the last block. */ + if (last) + ctx->alloc_end = block; + /* Only free blocks *before* alloc_end have tagged length. */ + else if ((size_t)block_len > size) + block->val = size - block_len; + /* Return the handle index as a positive integer. 
+     */
+    return ctx->handle_table - handle;
+}
+
+static union buflib_data*
+find_first_free(struct buflib_context *ctx)
+{
+    union buflib_data *ret;
+    for(ret = ctx->buf_start; ret < ctx->alloc_end; ret += ret->val)
+    {
+        check_block_length(ctx, ret);
+        if (ret->val < 0)
+            break;
+    }
+
+    /* ret is now either a free block or the same as alloc_end; either is fine */
+    return ret;
+}
+
+/* Find the block before 'block'; returns NULL if there is no previous
+ * block, or (when is_free is set) if that block is not free */
+static union buflib_data*
+find_block_before(struct buflib_context *ctx, union buflib_data* block,
+                  bool is_free)
+{
+    union buflib_data *ret = ctx->buf_start,
+                      *next_block = ret;
+
+    /* no previous block */
+    if (next_block == block)
+        return NULL;
+
+    /* find the block that's before the current one */
+    while (next_block != block)
+    {
+        check_block_length(ctx, ret);
+        ret = next_block;
+        next_block += abs(ret->val);
+    }
+
+    /* don't return it if the found block isn't free */
+    if (is_free && ret->val >= 0)
+        return NULL;
+
+    return ret;
+}
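+
+/* Worked example of the sign convention used below: freeing a 7-unit
+ * block that directly follows a free block with val == -5 computes
+ * (-5) - 7 == -12, i.e. a single free block spanning 12 units. */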
+/* Free the buffer associated with handle_num. */
+int
+buflib_free(struct buflib_context *ctx, int handle_num)
+{
+    if (handle_num <= 0) /* invalid or already free */
+        return handle_num;
+    union buflib_data *handle = ctx->handle_table - handle_num,
+                      *freed_block = handle_to_block(ctx, handle_num),
+                      *block, *next_block;
+    /* We need to find the block before the current one, to see if it is free
+     * and can be merged with this one.
+     */
+    block = find_block_before(ctx, freed_block, true);
+    if (block)
+    {
+        block->val -= freed_block->val;
+    }
+    else
+    {
+        /* Otherwise, set block to the newly-freed block, and mark it free,
+         * before continuing on, since the code below expects block to point
+         * to a free block which may have free space after it. */
+        block = freed_block;
+        block->val = -block->val;
+    }
+
+    next_block = block - block->val;
+    /* Check if we are merging with the free space at alloc_end. */
+    if (next_block == ctx->alloc_end)
+        ctx->alloc_end = block;
+    /* Otherwise, the next block might still be a "normal" free block, and the
+     * mid-allocation free means that the buffer is no longer compact.
+     */
+    else {
+        ctx->compact = false;
+        if (next_block->val < 0)
+            block->val += next_block->val;
+    }
+    handle_free(ctx, handle);
+    handle->alloc = NULL;
+
+    return 0; /* unconditionally */
+}
+
+static size_t
+free_space_at_end(struct buflib_context* ctx)
+{
+    /* subtract the block header fields (len, handle, ops, pincount)
+     * required by any new allocation */
+    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - BUFLIB_NUM_FIELDS);
+    diff -= 16; /* space for future handles */
+    diff *= sizeof(union buflib_data); /* make it bytes */
+
+    if (diff > 0)
+        return diff;
+    else
+        return 0;
+}
+
+/* Return the maximum allocatable contiguous memory in bytes */
+size_t
+buflib_allocatable(struct buflib_context* ctx)
+{
+    size_t free_space = 0, max_free_space = 0;
+    intptr_t block_len;
+
+    /* make sure buffer is as contiguous as possible */
+    if (!ctx->compact)
+        buflib_compact(ctx);
+
+    /* now look for free space in holes */
+    for(union buflib_data *block = find_first_free(ctx);
+        block < ctx->alloc_end;
+        block += block_len)
+    {
+        check_block_length(ctx, block);
+        block_len = block->val;
+
+        if (block_len < 0)
+        {
+            block_len = -block_len;
+            free_space += block_len;
+            continue;
+        }
+
+        /* an unmovable section resets the count as free space
+         * can't be contiguous */
+        if (!IS_MOVABLE(block))
+        {
+            if (max_free_space < free_space)
+                max_free_space = free_space;
+            free_space = 0;
+        }
+    }
+
+    /* select the best */
+    max_free_space = MAX(max_free_space, free_space);
+    max_free_space *= sizeof(union buflib_data);
+    max_free_space = MAX(max_free_space, free_space_at_end(ctx));
+
+    if (max_free_space > 0)
+        return max_free_space;
+    else
+        return 0;
+}
+
+/* Return the amount of unallocated memory in bytes (even if not contiguous) */
+size_t
+buflib_available(struct buflib_context* ctx)
+{
+    size_t free_space = 0;
+
+    /* add up all holes */
+    for(union buflib_data *block = find_first_free(ctx);
+        block < ctx->alloc_end;
+        block += abs(block->val))
+    {
+        check_block_length(ctx, block);
+        if (block->val < 0)
+            free_space += -block->val;
+    }
+
+    free_space *= sizeof(union buflib_data); /* make it bytes */
+    free_space += free_space_at_end(ctx);
+
+    return free_space;
+}
+
+/*
+ * Allocate all available (as returned by buflib_available()) memory and return
+ * a handle to it
+ *
+ * This grabs a lock which can only be unlocked by buflib_free() or
+ * buflib_shrink(), to protect from further allocations (which couldn't be
+ * serviced anyway).
+ */
+int
+buflib_alloc_maximum(struct buflib_context* ctx, size_t *size, struct buflib_callbacks *ops)
+{
+    /* ignore ctx->compact because it's true if all movable blocks are contiguous
+     * even if the buffer has holes due to unmovable allocations */
+    unsigned hints;
+    size_t bufsize = ctx->handle_table - ctx->buf_start;
+    bufsize = MIN(BUFLIB_SHRINK_SIZE_MASK, bufsize*sizeof(union buflib_data)); /* make it bytes */
+    /* try as hard as possible to free up space. allocations are
+     * welcome to give up some or all of their memory */
+    hints = BUFLIB_SHRINK_POS_BACK | BUFLIB_SHRINK_POS_FRONT | bufsize;
+    /* compact until no space can be gained anymore */
+    while (buflib_compact_and_shrink(ctx, hints));
+
+    *size = buflib_allocatable(ctx);
+    if (*size <= 0) /* OOM */
+        return -1;
+
+    return buflib_alloc_ex(ctx, *size, ops);
+}
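+
+/* Sketch of the grab-everything pattern the function above enables
+ * (illustrative only; my_shrinkable_ops and process() are hypothetical,
+ * and the shrink_callback is what lets other allocations reclaim space):
+ *
+ *     size_t size;
+ *     int h = buflib_alloc_maximum(ctx, &size, &my_shrinkable_ops);
+ *     if (h > 0)
+ *     {
+ *         process(buflib_get_data(ctx, h), size);
+ *         buflib_free(ctx, h);
+ *     }
+ */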
+/* Shrink the allocation indicated by the handle according to new_start and
+ * new_size. Growing is not possible; therefore, new_start and
+ * new_start + new_size must lie within the original allocation
+ */
+bool
+buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
+{
+    char* oldstart = buflib_get_data(ctx, handle);
+    char* newstart = new_start != NULL ? new_start : oldstart;
+    char* newend = newstart + new_size;
+
+    /* newstart must not be below oldstart and new_size not "negative" */
+    if (newstart < oldstart || newend < newstart)
+        return false;
+    union buflib_data *block = handle_to_block(ctx, handle),
+                      *old_next_block = block + block->val,
+                      /* newstart isn't necessarily properly aligned but it
+                       * needn't be since it's only dereferenced by the user code */
+                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
+                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
+                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
+                      *new_block, metadata_size;
+
+    /* growing is not supported */
+    if (new_next_block > old_next_block)
+        return false;
+
+    metadata_size.val = aligned_oldstart - block;
+    /* update val and the handle table entry */
+    new_block = aligned_newstart - metadata_size.val;
+    block[idx_LEN].val = new_next_block - new_block;
+
+    check_block_handle(ctx, block);
+    block[idx_HANDLE].handle->alloc = newstart;
+    if (block != new_block)
+    {
+        /* move metadata over, i.e. pointer to handle table entry and name.
+         * This is actually the point of no return. Data in the allocation is
+         * being modified, and therefore we must successfully finish the shrink
+         * operation */
+        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
+        /* mark the old block unallocated */
+        block->val = block - new_block;
+        /* find the block before in order to merge with the new free space */
+        union buflib_data *free_before = find_block_before(ctx, block, true);
+        if (free_before)
+            free_before->val += block->val;
+
+        /* Size changes are not handled yet; assign block to the new one.
+         * The code below wants block, whether it changed or not */
+        block = new_block;
+    }
+
+    /* Now deal with size changes that create free blocks after the allocation */
+    if (old_next_block != new_next_block)
+    {
+        if (ctx->alloc_end == old_next_block)
+            ctx->alloc_end = new_next_block;
+        else if (old_next_block->val < 0)
+        {   /* enlarge next block by moving it up */
+            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
+        }
+        else if (old_next_block != new_next_block)
+        {   /* creating a hole */
+            /* must be negative to indicate being unallocated */
+            new_next_block->val = new_next_block - old_next_block;
+        }
+    }
+
+    return true;
+}
+
+void buflib_pin(struct buflib_context *ctx, int handle)
+{
+    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
+        buflib_panic(ctx, "invalid handle pin: %d", handle);
+
+    union buflib_data *data = handle_to_block(ctx, handle);
+    data[idx_PIN].pincount++;
+}
+
+void buflib_unpin(struct buflib_context *ctx, int handle)
+{
+    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
+        buflib_panic(ctx, "invalid handle unpin: %d", handle);
+
+    union buflib_data *data = handle_to_block(ctx, handle);
+    if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING)
+    {
+        if (data[idx_PIN].pincount == 0)
+            buflib_panic(ctx, "handle pin underflow: %d", handle);
+    }
+
+    data[idx_PIN].pincount--;
+}
+
+unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
+{
+    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
+        buflib_panic(ctx, "invalid handle: %d", handle);
+
+    union buflib_data *data =
handle_to_block(ctx, handle); + return data[idx_PIN].pincount; +} + +#ifdef BUFLIB_DEBUG_GET_DATA +void *buflib_get_data(struct buflib_context *ctx, int handle) +{ + if (handle <= 0) + buflib_panic(ctx, "invalid handle access: %d", handle); + + return (void*)(ctx->handle_table[-handle].alloc); +} +#endif + +#ifdef BUFLIB_DEBUG_CHECK_VALID +void buflib_check_valid(struct buflib_context *ctx) +{ + for(union buflib_data *block = ctx->buf_start; + block < ctx->alloc_end; + block += abs(block->val)) + { + check_block_length(ctx, block); + if (block->val < 0) + continue; + + check_block_handle(ctx, block); + } +} +#endif + +#ifdef BUFLIB_DEBUG_PRINT +int buflib_get_num_blocks(struct buflib_context *ctx) +{ + int i = 0; + + for(union buflib_data *block = ctx->buf_start; + block < ctx->alloc_end; + block += abs(block->val)) + { + check_block_length(ctx, block); + ++i; + } + + return i; +} + +bool buflib_print_block_at(struct buflib_context *ctx, int block_num, + char *buf, size_t bufsize) +{ + for(union buflib_data *block = ctx->buf_start; + block < ctx->alloc_end; + block += abs(block->val)) + { + check_block_length(ctx, block); + + if (block_num-- == 0) + { + snprintf(buf, bufsize, "%8p: val: %4ld (%sallocated)", + block, (long)block->val, + block->val > 0 ? "" : "un"); + return true; + } + } + + if (bufsize > 0) + *buf = '\0'; + return false; +} +#endif + +static void check_block_length(struct buflib_context *ctx, + union buflib_data *block) +{ + if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) + { + intptr_t length = block[idx_LEN].val; + + /* Check the block length does not pass beyond the end */ + if (length == 0 || block > ctx->alloc_end - abs(length)) + { + buflib_panic(ctx, "block len wacky [%p]=%ld", + (void*)&block[idx_LEN], (long)length); + } + } +} + +static void check_block_handle(struct buflib_context *ctx, + union buflib_data *block) +{ + if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) + { + intptr_t length = block[idx_LEN].val; + union buflib_data *h_entry = block[idx_HANDLE].handle; + + /* Check the handle pointer is properly aligned */ + /* TODO: Can we ensure the compiler doesn't optimize this out? + * I dunno, maybe the compiler can assume the pointer is always + * properly aligned due to some C standard voodoo?? */ + if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) + { + buflib_panic(ctx, "handle unaligned [%p]=%p", + &block[idx_HANDLE], h_entry); + } + + /* Check the pointer is actually inside the handle table */ + if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) + { + buflib_panic(ctx, "handle out of bounds [%p]=%p", + &block[idx_HANDLE], h_entry); + } + + /* Now check the allocation is within the block. + * This is stricter than check_handle(). 
*/ + void *alloc = h_entry->alloc; + void *alloc_begin = block; + void *alloc_end = block + length; + /* buflib allows zero length allocations, so alloc_end is inclusive */ + if (alloc < alloc_begin || alloc > alloc_end) + { + buflib_panic(ctx, "alloc outside block [%p]=%p, %p-%p", + h_entry, alloc, alloc_begin, alloc_end); + } + } +} diff --git a/firmware/core_alloc.c b/firmware/core_alloc.c index 948911b973..6f6c385597 100644 --- a/firmware/core_alloc.c +++ b/firmware/core_alloc.c @@ -3,7 +3,7 @@ #include #include "system.h" #include "core_alloc.h" -#include "buflib.h" +#include "buflib_mempool.h" /* not static so it can be discovered by core_get_data() */ struct buflib_context core_ctx; diff --git a/firmware/include/buflib.h b/firmware/include/buflib.h deleted file mode 100644 index 49e4db11c0..0000000000 --- a/firmware/include/buflib.h +++ /dev/null @@ -1,381 +0,0 @@ -/*************************************************************************** -* __________ __ ___. -* Open \______ \ ____ ____ | | _\_ |__ _______ ___ -* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / -* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < -* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ -* \/ \/ \/ \/ \/ -* $Id$ -* -* This is a memory allocator designed to provide reasonable management of free -* space and fast access to allocated data. More than one allocator can be used -* at a time by initializing multiple contexts. -* -* Copyright (C) 2009 Andrew Mahone -* Copyright (C) 2011 Thomas Martitz -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version 2 -* of the License, or (at your option) any later version. -* -* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY -* KIND, either express or implied. -* -****************************************************************************/ -#ifndef _BUFLIB_H_ -#define _BUFLIB_H_ - -#include -#include -#include - -/* add extra checks to buflib_get_data to catch bad handles */ -//#define BUFLIB_DEBUG_GET_DATA - -/* support integrity check */ -//#define BUFLIB_DEBUG_CHECK_VALID - -/* support debug printing of memory blocks */ -//#define BUFLIB_DEBUG_PRINT - -union buflib_data -{ - intptr_t val; /* length of the block in n*sizeof(union buflib_data). - Includes buflib metadata overhead. A negative value - indicates block is unallocated */ - volatile unsigned pincount; /* number of pins */ - struct buflib_callbacks* ops; /* callback functions for move and shrink. Can be NULL */ - char* alloc; /* start of allocated memory area */ - union buflib_data *handle; /* pointer to entry in the handle table. - Used during compaction for fast lookup */ -}; - -struct buflib_context -{ - union buflib_data *handle_table; - union buflib_data *first_free_handle; - union buflib_data *last_handle; - union buflib_data *buf_start; - union buflib_data *alloc_end; - bool compact; -}; - -/** - * This declares the minimal overhead that is required per alloc. These - * are bytes that are allocated from the context's pool in addition - * to the actually requested number of bytes. 
- * - * The total number of bytes consumed by an allocation is - * BUFLIB_ALLOC_OVERHEAD + requested bytes + pad to pointer size - */ -#define BUFLIB_ALLOC_OVERHEAD (4*sizeof(union buflib_data)) - -/** - * Callbacks used by the buflib to inform allocation that compaction - * is happening (before data is moved) - * - * Note that buflib tries to move to satisfy new allocations before shrinking. - * So if you have something to resize try to do it outside of the callback. - * - * Regardless of the above, if the allocation is SHRINKABLE, but not - * MUST_NOT_MOVE buflib will move the allocation before even attempting to - * shrink. - */ -struct buflib_callbacks { - /** - * This is called before data is moved. Use this to fix up any cached - * pointers pointing to inside the allocation. The size is unchanged. - * - * This is not needed if you don't cache the data pointer (but always - * call buflib_get_data()) and don't pass pointer to the data to yielding - * functions. - * - * handle: The corresponding handle - * current: The current start of the allocation - * new: The new start of the allocation, after data movement - * - * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_MOVE if movement - * is impossible at this moment. - * - * If NULL: this allocation must not be moved around - * by the buflib when compaction occurs. Attention: Don't confuse - * that with passing NULL for the whole callback structure - * to buflib_alloc_ex(). This would enable moving buffers by default. - * You have to pass NULL inside the "struct buflib_callbacks" structure. - */ - int (*move_callback)(int handle, void* current, void* new); - /** - * This is called when the buflib desires to shrink a buffer - * in order to satisfy new allocation. This happens when buflib runs - * out of memory, e.g. because buflib_alloc_maximum() was called. - * Move data around as you need to make space and call core_shrink() as - * appropriate from within the callback to complete the shrink operation. - * buflib will not move data as part of shrinking. - * - * hint: bit mask containing hints on how shrinking is desired (see below) - * handle: The corresponding handle - * start: The old start of the allocation - * - * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_SHRINK if shirinking - * is impossible at this moment. - * - * if NULL: this allocation cannot be resized. - * It is recommended that allocation that must not move are - * at least shrinkable - */ - int (*shrink_callback)(int handle, unsigned hints, void* start, size_t old_size); - /** - * This is called when special steps must be taken for synchronization - * both before the move_callback is called and after the data has been - * moved. - */ - void (*sync_callback)(int handle, bool sync_on); -}; - -/** A set of all NULL callbacks for use with allocations that need to stay - * locked in RAM and not moved or shrunk. These type of allocations should - * be avoided as much as possible to avoid memory fragmentation but it can - * suitable for short-lived allocations. */ -extern struct buflib_callbacks buflib_ops_locked; - -#define BUFLIB_SHRINK_SIZE_MASK (~BUFLIB_SHRINK_POS_MASK) -#define BUFLIB_SHRINK_POS_FRONT (1u<<31) -#define BUFLIB_SHRINK_POS_BACK (1u<<30) -#define BUFLIB_SHRINK_POS_MASK (BUFLIB_SHRINK_POS_FRONT|BUFLIB_SHRINK_POS_BACK) - -/** - * Possible return values for the callbacks, some of them can cause - * compaction to fail and therefore new allocations to fail - */ -/* Everything alright */ -#define BUFLIB_CB_OK 0 -/* Tell buflib that moving failed. 
Buflib may retry to move at any point */ -#define BUFLIB_CB_CANNOT_MOVE 1 -/* Tell buflib that resizing failed, possibly future making allocations fail */ -#define BUFLIB_CB_CANNOT_SHRINK 1 - -/** - * Initializes buflib with a caller allocated context instance and memory pool. - * - * The buflib_context instance needs to be passed to every other buflib - * function. It's should be considered opaque, even though it is not yet - * (that's to make inlining core_get_data() possible). The documentation - * of the other functions will not describe the context - * instance parameter further as it's obligatory. - * - * context: The new buflib instance to be initialized, allocated by the caller - * size: The size of the memory pool - */ -void buflib_init(struct buflib_context *context, void *buf, size_t size); - - -/** - * Returns the amount of unallocated bytes. It does not mean this amount - * can be actually allocated because they might not be contiguous. - * - * Returns: The number of unallocated bytes in the memory pool. - */ -size_t buflib_available(struct buflib_context *ctx); - -/** - * Returns the biggest possible allocation that can be determined to succeed. - * - * Returns: The amount of bytes of the biggest unallocated, contiguous region. - */ -size_t buflib_allocatable(struct buflib_context *ctx); - -/** - * Relocates the fields in *ctx to the new buffer position pointed to by buf. - * This does _not_ move any data but updates the pointers. The data has - * to be moved afterwards manually and only if this function returned true. - * - * This is intended to be called from within a move_callback(), for - * buflib-on-buflib scenarios (i.e. a new buflib instance backed by a buffer - * that was allocated by another buflib instance). Be aware that if the parent - * move_callback() moves the underlying buffer _no_ move_callback() of the - * underlying buffer are called. - * - * Returns true of the relocation was successful. If it returns false no - * change to *ctx was made. - */ -bool buflib_context_relocate(struct buflib_context *ctx, void *buf); - -/** - * Allocates memory from buflib's memory pool - * - * size: How many bytes to allocate - * - * This function passes NULL for the callback structure "ops", so buffers - * are movable. Don't pass them to functions that yield(). - * - * Returns: A positive integer handle identifying this allocation, or - * a negative value on error (0 is also not a valid handle) - */ -int buflib_alloc(struct buflib_context *context, size_t size); - - -/** - * Allocates memory from the buflib's memory pool with additional callbacks - * and flags - * - * size: How many bytes to allocate - * ops: a struct with pointers to callback functions (see above). - * if "ops" is NULL: Buffer is movable. - * - * Returns: A positive integer handle identifying this allocation, or - * a negative value on error (0 is also not a valid handle) - */ -int buflib_alloc_ex(struct buflib_context *ctx, size_t size, - struct buflib_callbacks *ops); - - -/** - * Gets all available memory from buflib, for temporary use. - * - * Since this effectively makes all future allocations fail (unless - * another allocation is freed in the meantime), you should definitely provide - * a shrink callback if you plan to hold the buffer for a longer period. This - * will allow buflib to permit allocations by shrinking the buffer returned by - * this function. 
- * - * Note that this might return many more bytes than buflib_available() or - * buflib_allocatable() return, because it aggressively compacts the pool - * and even shrinks other allocations. However, do not depend on this behavior, - * it may change. - * - * size: The actual size will be returned into size - * ops: a struct with pointers to callback functions - * - * Returns: A positive integer handle identifying this allocation, or - * a negative value on error (0 is also not a valid handle) - */ -int buflib_alloc_maximum(struct buflib_context* ctx, - size_t *size, struct buflib_callbacks *ops); - -/** - * Queries the data pointer for the given handle. It's actually a cheap - * operation, don't hesitate using it extensively. - * - * Notice that you need to re-query after every direct or indirect yield(), - * because compaction can happen by other threads which may get your data - * moved around (or you can get notified about changes by callbacks, - * see further above). - * - * handle: The handle corresponding to the allocation - * - * Returns: The start pointer of the allocation - */ -#ifdef BUFLIB_DEBUG_GET_DATA -void *buflib_get_data(struct buflib_context *ctx, int handle); -#else -static inline void *buflib_get_data(struct buflib_context *ctx, int handle) -{ - return (void *)ctx->handle_table[-handle].alloc; -} -#endif - -/** - * Shrink the memory allocation associated with the given handle - * Mainly intended to be used with the shrink callback, but it can also - * be called outside as well, e.g. to give back buffer space allocated - * with buflib_alloc_maximum(). - * - * Note that you must move/copy data around yourself before calling this, - * buflib will not do this as part of shrinking. - * - * handle: The handle identifying this allocation - * new_start: the new start of the allocation - * new_size: the new size of the allocation - * - * Returns: true if shrinking was successful. Otherwise it returns false, - * without having modified memory. - * - */ -bool buflib_shrink(struct buflib_context *ctx, int handle, void* newstart, size_t new_size); - -/** - * Increment the pin count for a handle. When pinned the handle will not - * be moved and move callbacks will not be triggered, allowing a pointer - * to the buffer to be kept across yields or used for I/O. - * - * Note that shrink callbacks can still be invoked for pinned handles. - */ -void buflib_pin(struct buflib_context *ctx, int handle); - -/** - * Decrement the pin count for a handle. - */ -void buflib_unpin(struct buflib_context *ctx, int handle); - -/** - * Get the current pin count of a handle. Zero means the handle is not pinned. - */ -unsigned buflib_pin_count(struct buflib_context *ctx, int handle); - -/** - * Frees memory associated with the given handle - * - * Returns: 0 (to invalidate handles in one line, 0 is not a valid handle) - */ -int buflib_free(struct buflib_context *context, int handle); - -/** - * Moves the underlying buflib buffer up by size bytes (as much as - * possible for size == 0) without moving the end. This effectively - * reduces the available space by taking away manageable space from the - * front. This space is not available for new allocations anymore. - * - * To make space available in the front, everything is moved up. - * It does _NOT_ call the move callbacks - * - * - * size: size in bytes to move the buffer up (take away). 
The actual - * bytes moved is returned in this - * Returns: The new start of the underlying buflib buffer - */ -void* buflib_buffer_out(struct buflib_context *ctx, size_t *size); - -/** - * Moves the underlying buflib buffer down by size bytes without - * moving the end. This grows the buflib buffer by adding space to the front. - * The new bytes are available for new allocations. - * - * Everything is moved down, and the new free space will be in the middle. - * It does _NOT_ call the move callbacks. - * - * size: size in bytes to move the buffer down (new free space) - */ -void buflib_buffer_in(struct buflib_context *ctx, int size); - -#ifdef BUFLIB_DEBUG_PRINT -/** - * Return the number of blocks in the buffer, allocated or unallocated. - * - * Only available if BUFLIB_DEBUG_PRINT is defined. - */ -int buflib_get_num_blocks(struct buflib_context *ctx); - -/** - * Write a string describing the block at index block_num to the - * provided buffer. The buffer will always be null terminated and - * there is no provision to detect truncation. (A 40-byte buffer - * is enough to contain any returned string.) - * - * Returns false if the block index is out of bounds, and writes - * an empty string. - * - * Only available if BUFLIB_DEBUG_PRINT is defined. - */ -bool buflib_print_block_at(struct buflib_context *ctx, int block_num, - char *buf, size_t bufsize); -#endif - -#ifdef BUFLIB_DEBUG_CHECK_VALID -/** - * Check integrity of given buflib context - */ -void buflib_check_valid(struct buflib_context *ctx); -#endif - -#endif /* _BUFLIB_H_ */ diff --git a/firmware/include/buflib_mempool.h b/firmware/include/buflib_mempool.h new file mode 100644 index 0000000000..61fe2168b0 --- /dev/null +++ b/firmware/include/buflib_mempool.h @@ -0,0 +1,381 @@ +/*************************************************************************** +* __________ __ ___. +* Open \______ \ ____ ____ | | _\_ |__ _______ ___ +* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / +* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < +* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ +* \/ \/ \/ \/ \/ +* $Id$ +* +* This is a memory allocator designed to provide reasonable management of free +* space and fast access to allocated data. More than one allocator can be used +* at a time by initializing multiple contexts. +* +* Copyright (C) 2009 Andrew Mahone +* Copyright (C) 2011 Thomas Martitz +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +* KIND, either express or implied. +* +****************************************************************************/ +#ifndef _BUFLIB_MEMPOOL_H_ +#define _BUFLIB_MEMPOOL_H_ + +#include +#include +#include + +/* add extra checks to buflib_get_data to catch bad handles */ +//#define BUFLIB_DEBUG_GET_DATA + +/* support integrity check */ +//#define BUFLIB_DEBUG_CHECK_VALID + +/* support debug printing of memory blocks */ +//#define BUFLIB_DEBUG_PRINT + +union buflib_data +{ + intptr_t val; /* length of the block in n*sizeof(union buflib_data). + Includes buflib metadata overhead. A negative value + indicates block is unallocated */ + volatile unsigned pincount; /* number of pins */ + struct buflib_callbacks* ops; /* callback functions for move and shrink. 
Can be NULL */
+    char* alloc;                   /* start of allocated memory area */
+    union buflib_data *handle;     /* pointer to entry in the handle table.
+                                      Used during compaction for fast lookup */
+};
+
+struct buflib_context
+{
+    union buflib_data *handle_table;
+    union buflib_data *first_free_handle;
+    union buflib_data *last_handle;
+    union buflib_data *buf_start;
+    union buflib_data *alloc_end;
+    bool compact;
+};
+
+/**
+ * This declares the minimal overhead that is required per alloc. These
+ * are bytes that are allocated from the context's pool in addition
+ * to the actually requested number of bytes.
+ *
+ * The total number of bytes consumed by an allocation is
+ * BUFLIB_ALLOC_OVERHEAD + requested bytes + pad to pointer size
+ */
+#define BUFLIB_ALLOC_OVERHEAD (4*sizeof(union buflib_data))
+
+/**
+ * Callbacks used by the buflib to inform allocations that compaction
+ * is happening (before data is moved)
+ *
+ * Note that buflib tries to move to satisfy new allocations before shrinking.
+ * So if you have something to resize, try to do it outside of the callback.
+ *
+ * Regardless of the above, if the allocation is SHRINKABLE, but not
+ * MUST_NOT_MOVE, buflib will move the allocation before even attempting to
+ * shrink.
+ */
+struct buflib_callbacks {
+    /**
+     * This is called before data is moved. Use this to fix up any cached
+     * pointers pointing to inside the allocation. The size is unchanged.
+     *
+     * This is not needed if you don't cache the data pointer (but always
+     * call buflib_get_data()) and don't pass pointers to the data to yielding
+     * functions.
+     *
+     * handle: The corresponding handle
+     * current: The current start of the allocation
+     * new: The new start of the allocation, after data movement
+     *
+     * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_MOVE if movement
+     * is impossible at this moment.
+     *
+     * If NULL: this allocation must not be moved around
+     * by the buflib when compaction occurs. Attention: Don't confuse
+     * that with passing NULL for the whole callback structure
+     * to buflib_alloc_ex(). This would enable moving buffers by default.
+     * You have to pass NULL inside the "struct buflib_callbacks" structure.
+     */
+    int (*move_callback)(int handle, void* current, void* new);
+    /**
+     * This is called when the buflib desires to shrink a buffer
+     * in order to satisfy a new allocation. This happens when buflib runs
+     * out of memory, e.g. because buflib_alloc_maximum() was called.
+     * Move data around as you need to make space and call core_shrink() as
+     * appropriate from within the callback to complete the shrink operation.
+     * buflib will not move data as part of shrinking.
+     *
+     * hint: bit mask containing hints on how shrinking is desired (see below)
+     * handle: The corresponding handle
+     * start: The old start of the allocation
+     *
+     * Return: Return BUFLIB_CB_OK, or BUFLIB_CB_CANNOT_SHRINK if shrinking
+     * is impossible at this moment.
+     *
+     * if NULL: this allocation cannot be resized.
+     * It is recommended that allocations that must not move are
+     * at least shrinkable
+     */
+    int (*shrink_callback)(int handle, unsigned hints, void* start, size_t old_size);
+    /**
+     * This is called when special steps must be taken for synchronization
+     * both before the move_callback is called and after the data has been
+     * moved.
+     */
+    void (*sync_callback)(int handle, bool sync_on);
+};
+
+/** A set of all NULL callbacks for use with allocations that need to stay
+ * locked in RAM and not moved or shrunk. These types of allocations should
+ * be avoided as much as possible to avoid memory fragmentation, but they can
+ * be suitable for short-lived allocations. */
+extern struct buflib_callbacks buflib_ops_locked;
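+
+/* Sketch of a typical move_callback (illustrative only; cached_ptr is a
+ * hypothetical pointer into the allocation that has to be fixed up when
+ * buflib relocates the data):
+ *
+ *     static char *cached_ptr;
+ *
+ *     static int my_move_callback(int handle, void *current, void *new)
+ *     {
+ *         (void)handle;
+ *         cached_ptr = cached_ptr - (char *)current + (char *)new;
+ *         return BUFLIB_CB_OK;
+ *     }
+ */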
+
+#define BUFLIB_SHRINK_SIZE_MASK (~BUFLIB_SHRINK_POS_MASK)
+#define BUFLIB_SHRINK_POS_FRONT (1u<<31)
+#define BUFLIB_SHRINK_POS_BACK  (1u<<30)
+#define BUFLIB_SHRINK_POS_MASK  (BUFLIB_SHRINK_POS_FRONT|BUFLIB_SHRINK_POS_BACK)
+
+/**
+ * Possible return values for the callbacks; some of them can cause
+ * compaction to fail and therefore new allocations to fail
+ */
+/* Everything alright */
+#define BUFLIB_CB_OK 0
+/* Tell buflib that moving failed. Buflib may retry to move at any point */
+#define BUFLIB_CB_CANNOT_MOVE 1
+/* Tell buflib that resizing failed, possibly making future allocations fail */
+#define BUFLIB_CB_CANNOT_SHRINK 1
+
+/**
+ * Initializes buflib with a caller allocated context instance and memory pool.
+ *
+ * The buflib_context instance needs to be passed to every other buflib
+ * function. It should be considered opaque, even though it is not yet
+ * (that's to make inlining core_get_data() possible). The documentation
+ * of the other functions will not describe the context
+ * instance parameter further as it's obligatory.
+ *
+ * context: The new buflib instance to be initialized, allocated by the caller
+ * buf: The memory pool backing this instance
+ * size: The size of the memory pool
+ */
+void buflib_init(struct buflib_context *context, void *buf, size_t size);
+
+
+/**
+ * Returns the amount of unallocated bytes. It does not mean this amount
+ * can be actually allocated because they might not be contiguous.
+ *
+ * Returns: The number of unallocated bytes in the memory pool.
+ */
+size_t buflib_available(struct buflib_context *ctx);
+
+/**
+ * Returns the biggest possible allocation that can be determined to succeed.
+ *
+ * Returns: The amount of bytes of the biggest unallocated, contiguous region.
+ */
+size_t buflib_allocatable(struct buflib_context *ctx);
+
+/**
+ * Relocates the fields in *ctx to the new buffer position pointed to by buf.
+ * This does _not_ move any data but updates the pointers. The data has
+ * to be moved afterwards manually and only if this function returned true.
+ *
+ * This is intended to be called from within a move_callback(), for
+ * buflib-on-buflib scenarios (i.e. a new buflib instance backed by a buffer
+ * that was allocated by another buflib instance). Be aware that if the parent
+ * move_callback() moves the underlying buffer, _no_ move_callback() of the
+ * underlying buffer is called.
+ *
+ * Returns true if the relocation was successful. If it returns false no
+ * change to *ctx was made.
+ */
+bool buflib_context_relocate(struct buflib_context *ctx, void *buf);
+
+/**
+ * Allocates memory from buflib's memory pool
+ *
+ * size: How many bytes to allocate
+ *
+ * This function passes NULL for the callback structure "ops", so buffers
+ * are movable. Don't pass them to functions that yield().
+ *
+ * Returns: A positive integer handle identifying this allocation, or
+ * a negative value on error (0 is also not a valid handle)
+ */
+int buflib_alloc(struct buflib_context *context, size_t size);
+
+
+/**
+ * Allocates memory from the buflib's memory pool with additional callbacks
+ * and flags
+ *
+ * size: How many bytes to allocate
+ * ops: a struct with pointers to callback functions (see above).
+ *      if "ops" is NULL: Buffer is movable.
+ *
+ * Returns: A positive integer handle identifying this allocation, or
+ * a negative value on error (0 is also not a valid handle)
+ */
+int buflib_alloc_ex(struct buflib_context *ctx, size_t size,
+                    struct buflib_callbacks *ops);
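+
+/* Usage sketch (illustrative; my_move_callback is a hypothetical callback
+ * like the one outlined after buflib_ops_locked above). Leaving the
+ * shrink_callback unset makes the buffer movable but not shrinkable:
+ *
+ *     static struct buflib_callbacks my_ops = {
+ *         .move_callback = my_move_callback,
+ *     };
+ *
+ *     int handle = buflib_alloc_ex(ctx, 1024, &my_ops);
+ *     if (handle > 0)
+ *         memset(buflib_get_data(ctx, handle), 0, 1024);
+ */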
+
+
+/**
+ * Gets all available memory from buflib, for temporary use.
+ *
+ * Since this effectively makes all future allocations fail (unless
+ * another allocation is freed in the meantime), you should definitely provide
+ * a shrink callback if you plan to hold the buffer for a longer period. This
+ * will allow buflib to permit allocations by shrinking the buffer returned by
+ * this function.
+ *
+ * Note that this might return many more bytes than buflib_available() or
+ * buflib_allocatable() return, because it aggressively compacts the pool
+ * and even shrinks other allocations. However, do not depend on this behavior,
+ * it may change.
+ *
+ * size: The actual size will be returned into size
+ * ops: a struct with pointers to callback functions
+ *
+ * Returns: A positive integer handle identifying this allocation, or
+ * a negative value on error (0 is also not a valid handle)
+ */
+int buflib_alloc_maximum(struct buflib_context* ctx,
+                         size_t *size, struct buflib_callbacks *ops);
+
+/**
+ * Queries the data pointer for the given handle. It's actually a cheap
+ * operation; don't hesitate to use it extensively.
+ *
+ * Notice that you need to re-query after every direct or indirect yield(),
+ * because compaction can be triggered by other threads, which may move
+ * your data around (or you can get notified about changes by callbacks,
+ * see further above).
+ *
+ * handle: The handle corresponding to the allocation
+ *
+ * Returns: The start pointer of the allocation
+ */
+#ifdef BUFLIB_DEBUG_GET_DATA
+void *buflib_get_data(struct buflib_context *ctx, int handle);
+#else
+static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
+{
+    return (void *)ctx->handle_table[-handle].alloc;
+}
+#endif
+
+/**
+ * Shrink the memory allocation associated with the given handle.
+ * Mainly intended to be used with the shrink callback, but it can also
+ * be called from outside the callback, e.g. to give back buffer space
+ * allocated with buflib_alloc_maximum().
+ *
+ * Note that you must move/copy data around yourself before calling this;
+ * buflib will not do this as part of shrinking.
+ *
+ * handle: The handle identifying this allocation
+ * new_start: the new start of the allocation
+ * new_size: the new size of the allocation
+ *
+ * Returns: true if shrinking was successful. Otherwise it returns false,
+ * without having modified memory.
+ *
+ */
+bool buflib_shrink(struct buflib_context *ctx, int handle, void* newstart, size_t new_size);
+
+/**
+ * Increment the pin count for a handle. When pinned the handle will not
+ * be moved and move callbacks will not be triggered, allowing a pointer
+ * to the buffer to be kept across yields or used for I/O.
+ *
+ * Note that shrink callbacks can still be invoked for pinned handles.
+ */
+void buflib_pin(struct buflib_context *ctx, int handle);
+
+/**
+ * Decrement the pin count for a handle.
+ */
+void buflib_unpin(struct buflib_context *ctx, int handle);
+
+/**
+ * Get the current pin count of a handle. Zero means the handle is not pinned.
+ */
+unsigned buflib_pin_count(struct buflib_context *ctx, int handle);
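+
+/* Pinning sketch (read() stands in for any blocking call that needs the
+ * data pointer to stay valid across a yield):
+ *
+ *     buflib_pin(ctx, handle);
+ *     read(fd, buflib_get_data(ctx, handle), size);
+ *     buflib_unpin(ctx, handle);
+ */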
+ */ +unsigned buflib_pin_count(struct buflib_context *ctx, int handle); + +/** + * Frees memory associated with the given handle + * + * Returns: 0 (to invalidate handles in one line, 0 is not a valid handle) + */ +int buflib_free(struct buflib_context *context, int handle); + +/** + * Moves the underlying buflib buffer up by size bytes (as much as + * possible for size == 0) without moving the end. This effectively + * reduces the available space by taking away manageable space from the + * front. This space is not available for new allocations anymore. + * + * To make space available in the front, everything is moved up. + * It does _NOT_ call the move callbacks + * + * + * size: size in bytes to move the buffer up (take away). The actual + * bytes moved is returned in this + * Returns: The new start of the underlying buflib buffer + */ +void* buflib_buffer_out(struct buflib_context *ctx, size_t *size); + +/** + * Moves the underlying buflib buffer down by size bytes without + * moving the end. This grows the buflib buffer by adding space to the front. + * The new bytes are available for new allocations. + * + * Everything is moved down, and the new free space will be in the middle. + * It does _NOT_ call the move callbacks. + * + * size: size in bytes to move the buffer down (new free space) + */ +void buflib_buffer_in(struct buflib_context *ctx, int size); + +#ifdef BUFLIB_DEBUG_PRINT +/** + * Return the number of blocks in the buffer, allocated or unallocated. + * + * Only available if BUFLIB_DEBUG_PRINT is defined. + */ +int buflib_get_num_blocks(struct buflib_context *ctx); + +/** + * Write a string describing the block at index block_num to the + * provided buffer. The buffer will always be null terminated and + * there is no provision to detect truncation. (A 40-byte buffer + * is enough to contain any returned string.) + * + * Returns false if the block index is out of bounds, and writes + * an empty string. + * + * Only available if BUFLIB_DEBUG_PRINT is defined. + */ +bool buflib_print_block_at(struct buflib_context *ctx, int block_num, + char *buf, size_t bufsize); +#endif + +#ifdef BUFLIB_DEBUG_CHECK_VALID +/** + * Check integrity of given buflib context + */ +void buflib_check_valid(struct buflib_context *ctx); +#endif + +#endif /* _BUFLIB_MEMPOOL_H_ */ diff --git a/firmware/include/chunk_alloc.h b/firmware/include/chunk_alloc.h index 7d64d4b591..f589cc0870 100644 --- a/firmware/include/chunk_alloc.h +++ b/firmware/include/chunk_alloc.h @@ -24,7 +24,7 @@ #include #include #include "config.h" -#include "buflib.h" +#include "buflib_mempool.h" #define CHUNK_ALLOC_INVALID ((size_t)-1) diff --git a/firmware/include/core_alloc.h b/firmware/include/core_alloc.h index 22cc1988da..382200dd75 100644 --- a/firmware/include/core_alloc.h +++ b/firmware/include/core_alloc.h @@ -4,7 +4,7 @@ #include #include #include "config.h" -#include "buflib.h" +#include "buflib_mempool.h" #include "chunk_alloc.h" /* All functions below are wrappers for functions in buflib.h, except diff --git a/lib/rbcodec/test/SOURCES b/lib/rbcodec/test/SOURCES index 416bbed266..c2a3914a5e 100644 --- a/lib/rbcodec/test/SOURCES +++ b/lib/rbcodec/test/SOURCES @@ -5,5 +5,5 @@ warble.c ../../../firmware/common/structec.c ../../../firmware/common/pathfuncs.c ../../../firmware/common/crc32.c -../../../firmware/buflib.c +../../../firmware/buflib_mempool.c ../../../firmware/core_alloc.c -- cgit v1.2.3