path: root/apps/plugins/lib/buflib.c
Diffstat (limited to 'apps/plugins/lib/buflib.c')
-rw-r--r--  apps/plugins/lib/buflib.c  | 349
1 file changed, 0 insertions(+), 349 deletions(-)
diff --git a/apps/plugins/lib/buflib.c b/apps/plugins/lib/buflib.c
deleted file mode 100644
index 930e49d02e..0000000000
--- a/apps/plugins/lib/buflib.c
+++ /dev/null
@@ -1,349 +0,0 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h> /* for abs() */
#include "buflib.h"
/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by negative length, allocated ones use the second buflib_data in
 * the block to store a pointer to their handle table entry, so that it can be
 * quickly found and updated during compaction. The allocator functions are
 * passed a context struct so that two allocators can be run, for example, one
 * per core may be used, with convenience wrappers for the single-allocator
 * case that use a predefined context.
 */
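
/* Illustrative sketch, not part of the original file: the "simple table
 * lookup" described above. buflib_alloc() below hands out the handle as
 * (ctx->handle_table - handle), so a hypothetical accessor (whether buflib.h
 * provides one is not assumed here) could resolve a handle by indexing
 * backwards from the table base. Compaction keeps this entry up to date, so
 * the pointer must be re-fetched after any call that may move blocks.
 *
 *     static inline void*
 *     example_get_data(struct buflib_context *ctx, int handle_num)
 *     {
 *         return ctx->handle_table[-handle_num].ptr;
 *     }
 */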

/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->first_free_block = bd_buf;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;
}

/* Allocate a new handle, returning 0 on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->ptr)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
            return NULL;
    }
    handle->val = -1;
    return handle;
}

/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->ptr = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}

/* Shrink the handle table, returning true if its size was reduced, false if
 * not
 */
static inline
bool
handle_table_shrink(struct buflib_context *ctx)
{
    bool rv;
    union buflib_data *handle;
    for (handle = ctx->last_handle; !(handle->ptr); handle++);
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;
    /* True only if free entries could be dropped from the end of the table */
    rv = handle != ctx->last_handle;
    ctx->last_handle = handle;
    return rv;
}

/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    union buflib_data *block = ctx->first_free_block, *new_block;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* If shift is non-zero, it represents the number of places to move
         * blocks down in memory. Calculate the new address for this block,
         * update its entry in the handle table, and then move its contents.
         */
        if (shift)
        {
            new_block = block + shift;
            block[1].ptr->ptr = new_block + 2;
            rb->memmove(new_block, block, len * sizeof(union buflib_data));
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    return ret || shift;
}
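
/* Worked example, added for illustration (lengths in units of union
 * buflib_data): starting at first_free_block, suppose a free block of length
 * 3 is followed by an allocated block of length 5. The free block sets shift
 * to -3; the allocated block is then copied 3 units lower, its handle table
 * entry is repointed at new_block + 2, and alloc_end finally drops by 3,
 * leaving the reclaimed 3 units as free space below the handle table.
 *
 *     before:  [ free 3 ][ used 5 ]<alloc_end>
 *     after:   [ used 5 ]<alloc_end>[ reclaimed 3 ]
 */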

/* Shift buffered items by shift units of buflib_data, and update handle
 * pointers. The shift value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    rb->memmove(ctx->buf_start + shift, ctx->buf_start,
        (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    union buflib_data *ptr;
    for (ptr = ctx->last_handle; ptr < ctx->handle_table; ptr++)
        if (ptr->ptr)
            ptr->ptr += shift;
    ctx->first_free_block += shift;
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
}

/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
            / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}

/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
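
/* Illustrative usage sketch (hypothetical caller, not part of the original
 * file): lend space from the start of the managed buffer to code that wants a
 * plain pointer, then hand it back. Passing *size == 0 asks for as much as
 * possible; on return *size holds the number of bytes actually freed, which
 * is also the amount that must later be given back to buflib_buffer_in().
 *
 *     size_t lent = 4096;
 *     void *scratch = buflib_buffer_out(&ctx, &lent);
 *     use_scratch_space(scratch, lent);    // hypothetical consumer
 *     buflib_buffer_in(&ctx, lent);
 */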

/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    union buflib_data *handle, *block;
    bool last = false;
    /* This really is assigned a value before use */
    int block_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data) + 2;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeeded, it may be
         * possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
            return 0;
    }

buffer_alloc:
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed, but only if the handle
         * allocation did not trigger compaction already, since there will
         * be no further gain.
         */
        if (!ctx->compact && buflib_compact(ctx))
        {
            goto buffer_alloc;
        } else {
            handle->val = 1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    block->val = size;
    block[1].ptr = handle;
    handle->ptr = block + 2;
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
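
/* Layout note, added for clarity: an allocated block built by the code above
 * looks like this, in units of union buflib_data:
 *
 *     block[0].val   length of the whole block, header included (positive = in use)
 *     block[1].ptr   back-pointer to this block's handle table entry
 *     block + 2      start of user data; this is what the handle entry points to
 *
 * The rounding at the top of buflib_alloc() accounts for the two header
 * cells: assuming 4-byte buflib_data units, a 10-byte request becomes 3 data
 * units plus 2 header units, i.e. a block of 5 units (20 bytes).
 */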

/* Free the buffer associated with handle_num. */
void
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle->ptr - 2,
                      *block = ctx->first_free_block,
                      *next_block = block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    while (next_block < freed_block)
    {
        block = next_block;
        next_block += abs(block->val);
    }
    /* If next_block == block, the above loop didn't go anywhere. If it did,
     * and the block before this one is empty, we can combine them.
     */
    if (next_block == freed_block && next_block != block && block->val < 0)
        block->val -= freed_block->val;
    /* Otherwise, set block to the newly-freed block, and mark it free, before
     * continuing on, since the code below expects block to point to a free
     * block which may have free space after it.
     */
    else
    {
        block = freed_block;
        block->val = -block->val;
    }
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->ptr = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search.
     */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;
}
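
/* Illustrative end-to-end sketch (hypothetical plugin code, not part of the
 * original file): run a private allocator over a static buffer. Long-lived
 * references should be stored as handles rather than raw pointers, since
 * compaction and buffer shifting can move blocks; the data pointer is
 * re-fetched from the handle table when needed.
 *
 *     static union buflib_data pool[2048];
 *     struct buflib_context ctx;
 *
 *     buflib_init(&ctx, pool, sizeof(pool));
 *     int h = buflib_alloc(&ctx, 512);          // 0 means the allocation failed
 *     if (h > 0)
 *     {
 *         void *p = ctx.handle_table[-h].ptr;   // or a get-data helper, if buflib.h has one
 *         rb->memset(p, 0, 512);
 *         buflib_free(&ctx, h);
 *     }
 */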