diff options
Diffstat (limited to 'firmware/buflib.c')
-rw-r--r-- | firmware/buflib.c | 777 |
1 files changed, 777 insertions, 0 deletions
diff --git a/firmware/buflib.c b/firmware/buflib.c new file mode 100644 index 0000000000..51cf86bf5b --- /dev/null +++ b/firmware/buflib.c | |||
@@ -0,0 +1,777 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * This is a memory allocator designed to provide reasonable management of free | ||
11 | * space and fast access to allocated data. More than one allocator can be used | ||
12 | * at a time by initializing multiple contexts. | ||
13 | * | ||
14 | * Copyright (C) 2009 Andrew Mahone | ||
15 | * Copyright (C) 2011 Thomas Martitz | ||
16 | * | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version 2 | ||
21 | * of the License, or (at your option) any later version. | ||
22 | * | ||
23 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
24 | * KIND, either express or implied. | ||
25 | * | ||
26 | ****************************************************************************/ | ||
27 | |||
28 | #include <stdlib.h> /* for abs() */ | ||
29 | #include <stdio.h> /* for snprintf() */ | ||
30 | #include "buflib.h" | ||
31 | #include "string-extra.h" /* strlcpy() */ | ||
32 | #include "debug.h" | ||
33 | #include "buffer.h" | ||
34 | #include "system.h" /* for ALIGN_*() */ | ||
35 | |||
36 | /* The main goal of this design is fast fetching of the pointer for a handle. | ||
37 | * For that reason, the handles are stored in a table at the end of the buffer | ||
38 | * with a fixed address, so that returning the pointer for a handle is a simple | ||
39 | * table lookup. To reduce the frequency with which allocated blocks will need | ||
40 | * to be moved to free space, allocations grow up in address from the start of | ||
41 | * the buffer. The buffer is treated as an array of union buflib_data. Blocks | ||
42 | * start with a length marker, which is included in their length. Free blocks | ||
 * are marked by negative length. Allocated blocks have a positive length
 * marker, and additional metadata following that: a pointer
 * (union buflib_data*) to the corresponding handle table entry, so that it can
46 | * be quickly found and updated during compaction. After that follows | ||
47 | * the pointer to the struct buflib_callbacks associated with this allocation | ||
48 | * (may be NULL). That pointer follows a variable length character array | ||
49 | * containing the nul-terminated string identifier of the allocation. After this | ||
50 | * array there's a length marker for the length of the character array including | ||
51 | * this length marker (counted in n*sizeof(union buflib_data)), which allows | ||
52 | * to find the start of the character array (and therefore the start of the | ||
53 | * entire block) when only the handle or payload start is known. | ||
54 | * | ||
55 | * Example: | ||
56 | * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->| | ||
57 | * |L|H|C|cccc|L2|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|XXXXXXXXXXXXX|AAA| | ||
58 | * | ||
59 | * L - length marker (negative if block unallocated) | ||
 * H - handle table entry pointer
61 | * C - pointer to struct buflib_callbacks | ||
62 | * c - variable sized string identifier | ||
63 | * L2 - second length marker for string identifier | ||
64 | * X - actual payload | ||
65 | * Y - unallocated space | ||
66 | * | ||
67 | * A - pointer to start of payload (first X) in the handle table (may be null) | ||
68 | * | ||
69 | * The blocks can be walked by jumping the abs() of the L length marker, i.e. | ||
70 | * union buflib_data* L; | ||
71 | * for(L = start; L < end; L += abs(L->val)) { .... } | ||
72 | * | ||
73 | * | ||
74 | * The allocator functions are passed a context struct so that two allocators | ||
75 | * can be run, for example, one per core may be used, with convenience wrappers | ||
76 | * for the single-allocator case that use a predefined context. | ||
77 | */ | ||
78 | |||
/* Round x down/up to a multiple of sizeof(union buflib_data) so that block
 * boundaries always stay aligned for union buflib_data access. */
#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

#ifdef DEBUG
#include <stdio.h>
#define BDEBUGF DEBUGF
#else
/* compiles to nothing in non-debug builds; arguments are not evaluated */
#define BDEBUGF(...) do { } while(0)
#endif
91 | |||
92 | /* Initialize buffer manager */ | ||
93 | void | ||
94 | buflib_init(struct buflib_context *ctx, void *buf, size_t size) | ||
95 | { | ||
96 | union buflib_data *bd_buf = buf; | ||
97 | |||
98 | /* Align on sizeof(buflib_data), to prevent unaligned access */ | ||
99 | ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data)); | ||
100 | size /= sizeof(union buflib_data); | ||
101 | /* The handle table is initialized with no entries */ | ||
102 | ctx->handle_table = bd_buf + size; | ||
103 | ctx->last_handle = bd_buf + size; | ||
104 | ctx->first_free_handle = bd_buf + size - 1; | ||
105 | ctx->first_free_block = bd_buf; | ||
106 | ctx->buf_start = bd_buf; | ||
107 | /* A marker is needed for the end of allocated data, to make sure that it | ||
108 | * does not collide with the handle table, and to detect end-of-buffer. | ||
109 | */ | ||
110 | ctx->alloc_end = bd_buf; | ||
111 | ctx->compact = true; | ||
112 | |||
113 | BDEBUGF("buflib initialized with %d.%2d kiB", size / 1024, (size%1000)/10); | ||
114 | } | ||
115 | |||
116 | /* Allocate a new handle, returning 0 on failure */ | ||
117 | static inline | ||
118 | union buflib_data* handle_alloc(struct buflib_context *ctx) | ||
119 | { | ||
120 | union buflib_data *handle; | ||
121 | /* first_free_handle is a lower bound on free handles, work through the | ||
122 | * table from there until a handle containing NULL is found, or the end | ||
123 | * of the table is reached. | ||
124 | */ | ||
125 | for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--) | ||
126 | if (!handle->alloc) | ||
127 | break; | ||
128 | /* If the search went past the end of the table, it means we need to extend | ||
129 | * the table to get a new handle. | ||
130 | */ | ||
131 | if (handle < ctx->last_handle) | ||
132 | { | ||
133 | if (handle >= ctx->alloc_end) | ||
134 | ctx->last_handle--; | ||
135 | else | ||
136 | return NULL; | ||
137 | } | ||
138 | handle->val = -1; | ||
139 | return handle; | ||
140 | } | ||
141 | |||
142 | /* Free one handle, shrinking the handle table if it's the last one */ | ||
143 | static inline | ||
144 | void handle_free(struct buflib_context *ctx, union buflib_data *handle) | ||
145 | { | ||
146 | handle->alloc = 0; | ||
147 | /* Update free handle lower bound if this handle has a lower index than the | ||
148 | * old one. | ||
149 | */ | ||
150 | if (handle > ctx->first_free_handle) | ||
151 | ctx->first_free_handle = handle; | ||
152 | if (handle == ctx->last_handle) | ||
153 | ctx->last_handle++; | ||
154 | else | ||
155 | ctx->compact = false; | ||
156 | } | ||
157 | |||
158 | /* Get the start block of an allocation */ | ||
159 | static union buflib_data* handle_to_block(struct buflib_context* ctx, int handle) | ||
160 | { | ||
161 | union buflib_data* name_field = | ||
162 | (union buflib_data*)buflib_get_name(ctx, handle); | ||
163 | |||
164 | return name_field - 3; | ||
165 | } | ||
166 | |||
167 | /* Shrink the handle table, returning true if its size was reduced, false if | ||
168 | * not | ||
169 | */ | ||
170 | static inline | ||
171 | bool | ||
172 | handle_table_shrink(struct buflib_context *ctx) | ||
173 | { | ||
174 | bool rv; | ||
175 | union buflib_data *handle; | ||
176 | for (handle = ctx->last_handle; !(handle->alloc); handle++); | ||
177 | if (handle > ctx->first_free_handle) | ||
178 | ctx->first_free_handle = handle - 1; | ||
179 | rv = handle == ctx->last_handle; | ||
180 | ctx->last_handle = handle; | ||
181 | return rv; | ||
182 | } | ||
183 | |||
184 | |||
/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if moving was unsuccessful
 * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned)
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
#if 1 /* moving temporarily disabled */
    /* Stubbed out: every block is reported as unmovable, so compaction can
     * only merge adjacent free space, never relocate allocations. */
    (void)ctx;(void)block;(void)shift;
    return false;
#else
    char* new_start;
    /* block[1] holds the handle-table entry pointer, block[2] the callbacks */
    union buflib_data *new_block, *tmp = block[1].handle;
    struct buflib_callbacks *ops = block[2].ops;
    /* an ops struct without a move callback forbids moving; a NULL ops
     * pointer means default behavior (movable) */
    if (ops && !ops->move_callback)
        return false;

    /* handle numbers count down from the top of the handle table */
    int handle = ctx->handle_table - tmp;
    BDEBUGF("%s(): moving \"%s\"(id=%d) by %d(%d)\n", __func__, block[3].name,
            handle, shift, shift*sizeof(union buflib_data));
    new_block = block + shift;
    /* shift is in buflib_data units; convert to bytes for the char* alloc */
    new_start = tmp->alloc + shift*sizeof(union buflib_data);
    /* call the callback before moving */
    if (ops)
    {
        if (ops->move_callback(handle, tmp->alloc, new_start)
                == BUFLIB_CB_CANNOT_MOVE)
            return false;
    }
    tmp->alloc = new_start; /* update handle table */
    /* source and destination may overlap, hence memmove */
    memmove(new_block, block, block->val * sizeof(union buflib_data));

    return true;
#endif
}
223 | |||
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block;
    /* shift accumulates (negative) free space seen so far; each allocated
     * block encountered is slid down by that amount when possible */
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    for(block = ctx->first_free_block; block != ctx->alloc_end; block += len)
    {
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole: if this allocated block fits inside the
         * first free block, move it all the way down there */
        if (-ctx->first_free_block->val > block->val)
        {
            intptr_t size = ctx->first_free_block->val;
            if (move_block(ctx, block, ctx->first_free_block - block))
            {
                /* moving was successful. Mark the next block as the new
                 * first_free_block and merge it with the free space
                 * that the move created */
                ctx->first_free_block += block->val;
                ctx->first_free_block->val = size + block->val;
                continue;
            }
        }
        /* attempt move the allocation by shift */
        if (shift)
        {
            /* failing to move creates a hole, therefore mark this
             * block as not allocated anymore and move first_free_block up */
            if (!move_block(ctx, block, shift))
            {
                /* hole->val = shift is negative, marking the gap free */
                union buflib_data* hole = block + shift;
                hole->val = shift;
                if (ctx->first_free_block > hole)
                    ctx->first_free_block = hole;
                shift = 0;
            }
            /* if move was successful, the just moved block is now
             * possibly in place of the first free one, so move this thing up */
            else if (ctx->first_free_block == block+shift)
            {
                ctx->first_free_block += ctx->first_free_block->val;
                ctx->first_free_block->val = shift;
            }
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    /* only move first_free_block up if it wasn't already by a hole */
    if (ctx->first_free_block > ctx->alloc_end)
        ctx->first_free_block = ctx->alloc_end;
    ctx->compact = true;
    return ret || shift;
}
291 | |||
/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if something compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        /* walk every block and ask allocated ones (positive length, with a
         * shrink callback) to give up space voluntarily */
        union buflib_data* this;
        for(this = ctx->buf_start; this < ctx->alloc_end; this += abs(this->val))
        {
            if (this->val > 0 && this[2].ops
                              && this[2].ops->shrink_callback)
            {
                int ret;
                int handle = ctx->handle_table - this[1].handle;
                char* data = this[1].handle->alloc;
                ret = this[2].ops->shrink_callback(handle, shrink_hints,
                                            data, (char*)(this+this->val)-data);
                result |= (ret == BUFLIB_CB_OK);
                /* this might have changed in the callback (if
                 * it shrinked from the top), get it again */
                this = handle_to_block(ctx, handle);
            }
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }

    return result;
}
330 | |||
331 | /* Shift buffered items by size units, and update handle pointers. The shift | ||
332 | * value must be determined to be safe *before* calling. | ||
333 | */ | ||
334 | static void | ||
335 | buflib_buffer_shift(struct buflib_context *ctx, int shift) | ||
336 | { | ||
337 | memmove(ctx->buf_start + shift, ctx->buf_start, | ||
338 | (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data)); | ||
339 | union buflib_data *handle; | ||
340 | for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) | ||
341 | if (handle->alloc) | ||
342 | handle->alloc += shift; | ||
343 | ctx->first_free_block += shift; | ||
344 | ctx->buf_start += shift; | ||
345 | ctx->alloc_end += shift; | ||
346 | } | ||
347 | |||
348 | /* Shift buffered items up by size bytes, or as many as possible if size == 0. | ||
349 | * Set size to the number of bytes freed. | ||
350 | */ | ||
351 | void* | ||
352 | buflib_buffer_out(struct buflib_context *ctx, size_t *size) | ||
353 | { | ||
354 | if (!ctx->compact) | ||
355 | buflib_compact(ctx); | ||
356 | size_t avail = ctx->last_handle - ctx->alloc_end; | ||
357 | size_t avail_b = avail * sizeof(union buflib_data); | ||
358 | if (*size && *size < avail_b) | ||
359 | { | ||
360 | avail = (*size + sizeof(union buflib_data) - 1) | ||
361 | / sizeof(union buflib_data); | ||
362 | avail_b = avail * sizeof(union buflib_data); | ||
363 | } | ||
364 | *size = avail_b; | ||
365 | void *ret = ctx->buf_start; | ||
366 | buflib_buffer_shift(ctx, avail); | ||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | /* Shift buffered items down by size bytes */ | ||
371 | void | ||
372 | buflib_buffer_in(struct buflib_context *ctx, int size) | ||
373 | { | ||
374 | size /= sizeof(union buflib_data); | ||
375 | buflib_buffer_shift(ctx, -size); | ||
376 | } | ||
377 | |||
/* Allocate a buffer of size bytes, returning a handle for it */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    /* anonymous allocation: placeholder name, no move/shrink callbacks */
    int handle = buflib_alloc_ex(ctx, size, "<anonymous>", NULL);
    return handle;
}
384 | |||
/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The additional name parameter gives the allocation a human-readable name,
 * the ops parameter points to caller-implemented callbacks for moving and
 * shrinking. NULL for default callbacks (which do nothing but don't
 * prevent moving or shrinking)
 *
 * Returns a positive handle number, or 0 on failure.
 * NOTE(review): name is passed straight to strcpy() below, so callers must
 * not pass NULL for name — confirm against callers.
 */

int
buflib_alloc_ex(struct buflib_context *ctx, size_t size, const char *name,
                struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    /* name storage is rounded up to whole buflib_data units, incl. the NUL */
    size_t name_len = name ? B_ALIGN_UP(strlen(name)+1) : 0;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size += name_len;
    /* convert the byte count to buflib_data units, rounding up */
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           /* add 4 objects for alloc len, pointer to handle table entry and
            * name length, and the ops pointer */
           + 4;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If allocation has failed, and compaction has succeded, it may be
         * possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
        {   /* first try to shrink the alloc before the handle table
             * to make room for new handles */
            int handle = ctx->handle_table - ctx->last_handle;
            union buflib_data* last_block = handle_to_block(ctx, handle);
            struct buflib_callbacks* ops = last_block[2].ops;
            if (ops && ops->shrink_callback)
            {
                char *data = buflib_get_data(ctx, handle);
                /* ask for 10 units off the back, enough for a few handles */
                unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
                if (ops->shrink_callback(handle, hint, data,
                        (char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
                {   /* retry one more time */
                    goto handle_alloc;
                }
            }
            return 0;
        }
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = ctx->first_free_block;;block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        } else {
            /* give the handle a dummy value so handle_free can reclaim it */
            handle->val=1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    union buflib_data *name_len_slot;
    block->val = size;
    block[1].handle = handle;
    block[2].ops = ops;
    strcpy(block[3].name, name);
    /* the slot directly after the name stores the name field's length in
     * units (including itself) so the block start can be found from the
     * payload — see buflib_get_name()/handle_to_block() */
    name_len_slot = (union buflib_data*)B_ALIGN_UP(block[3].name + name_len);
    name_len_slot->val = 1 + name_len/sizeof(union buflib_data);
    handle->alloc = (char*)(name_len_slot + 1);
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
509 | |||
510 | /* Finds the free block before block, and returns NULL if it's not free */ | ||
511 | static union buflib_data* | ||
512 | find_free_block_before(struct buflib_context *ctx, union buflib_data* block) | ||
513 | { | ||
514 | union buflib_data *ret = ctx->first_free_block, | ||
515 | *next_block = ret; | ||
516 | |||
517 | /* find the block that's before the current one */ | ||
518 | while (next_block < block) | ||
519 | { | ||
520 | ret = next_block; | ||
521 | next_block += abs(ret->val); | ||
522 | } | ||
523 | |||
524 | /* If next_block == block, the above loop didn't go anywhere. If it did, | ||
525 | * and the block before this one is empty, that is the wanted one | ||
526 | */ | ||
527 | if (next_block == block && ret < block && ret->val < 0) | ||
528 | return ret; | ||
529 | /* otherwise, e.g. if ret > block, or if the buffer is compact, | ||
530 | * there's no free block before */ | ||
531 | return NULL; | ||
532 | } | ||
533 | |||
/* Free the buffer associated with handle_num.
 *
 * Merges the freed block with any adjacent free space (before and after),
 * releases the handle table entry, and always returns 0. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    block = find_free_block_before(ctx, freed_block);
    if (block)
    {
        /* grow the preceding free block by this block's (positive) length;
         * free lengths are negative, hence the subtraction */
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free, before
         * continuing on, since the code below expects block to point to a free
         * block which may have free space after it.
         */
        block = freed_block;
        block->val = -block->val;
    }
    /* block->val is now negative, so this steps forward by |val| units */
    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;
    /* If this block is before first_free_block, it becomes the new starting
     * point for free-block search.
     */
    if (block < ctx->first_free_block)
        ctx->first_free_block = block;

    return 0; /* unconditionally */
}
580 | |||
581 | /* Return the maximum allocatable memory in bytes */ | ||
582 | size_t | ||
583 | buflib_available(struct buflib_context* ctx) | ||
584 | { | ||
585 | /* subtract 5 elements for | ||
586 | * val, handle, name_len, ops and the handle table entry*/ | ||
587 | ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - 5); | ||
588 | diff -= 16; /* space for future handles */ | ||
589 | diff *= sizeof(union buflib_data); /* make it bytes */ | ||
590 | diff -= 16; /* reserve 16 for the name */ | ||
591 | |||
592 | if (diff > 0) | ||
593 | return diff; | ||
594 | else | ||
595 | return 0; | ||
596 | } | ||
597 | |||
/*
 * Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * serviced anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, const char* name, size_t *size, struct buflib_callbacks *ops)
{
    /* buflib_available() budgets only 16 bytes for the name, so clamp
     * the caller's name to fit that reservation */
    char namebuf[16];
    strlcpy(namebuf, name, sizeof(namebuf));
    *size = buflib_available(ctx);

    return buflib_alloc_ex(ctx, *size, namebuf, ops);
}
616 | |||
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Grow is not possible, therefore new_start and new_start + new_size
 * must be within the original allocation
 *
 * Returns false if the requested region is invalid or would grow the
 * allocation; true once the shrink has been carried out. */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                /* newstart isn't necessarily properly aligned but it
                 * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;

    /* number of units of metadata (length marker through name length) that
     * sit between the block start and the payload */
    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[0].val = new_next_block - new_block;

    block[1].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move metadata over, i.e. pointer to handle table entry and name
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated (new_block > block, so this is
         * a negative length marker) */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_free_block_before(ctx, block);
        if (free_before)
            free_before->val += block->val;
        else if (ctx->first_free_block > block)
            ctx->first_free_block = block;

        /* We didn't handle size changes yet, assign block to the new one
         * the code below the wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else if (old_next_block != new_next_block)
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
        /* update first_free_block for the newly created free space */
        if (ctx->first_free_block > new_next_block)
            ctx->first_free_block = new_next_block;
    }

    return true;
}
692 | |||
693 | const char* buflib_get_name(struct buflib_context *ctx, int handle) | ||
694 | { | ||
695 | union buflib_data *data = (union buflib_data*)ALIGN_DOWN((intptr_t)buflib_get_data(ctx, handle), sizeof (*data)); | ||
696 | size_t len = data[-1].val; | ||
697 | if (len <= 1) | ||
698 | return NULL; | ||
699 | return data[-len].name; | ||
700 | } | ||
701 | |||
702 | #ifdef BUFLIB_DEBUG_BLOCKS | ||
/* Debug helper: walk the handle table and emit one formatted line per live
 * allocation via the supplied print callback. */
void buflib_print_allocs(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    union buflib_data *this, *end = ctx->handle_table;
    char buf[128];
    /* handles live at the top of the buffer; iterate top-down */
    for(this = end - 1; this >= ctx->last_handle; this--)
    {
        if (!this->alloc) continue;

        int handle_num;
        const char *name;
        union buflib_data *block_start, *alloc_start;
        intptr_t alloc_len;

        handle_num = end - this;
        alloc_start = buflib_get_data(ctx, handle_num);
        name = buflib_get_name(ctx, handle_num);
        /* block header is 3 units before the name (len, handle, ops) */
        block_start = (union buflib_data*)name - 3;
        alloc_len = block_start->val * sizeof(union buflib_data);

        /* NOTE(review): "?:" is a GNU extension, and %ld assumes intptr_t
         * is long — both hold for the toolchains this targets, presumably;
         * verify if porting. */
        snprintf(buf, sizeof(buf),
                "%s(%d):\t%p\n"
                " \t%p\n"
                " \t%ld\n",
                name?:"(null)", handle_num, block_start, alloc_start, alloc_len);
        /* handle_num is 1-based */
        print(handle_num - 1, buf);
    }
}
732 | |||
/* Debug helper: walk the block chain from buf_start to alloc_end and emit
 * one formatted line per block (allocated or free) via print. */
void buflib_print_blocks(struct buflib_context *ctx,
                         void (*print)(int, const char*))
{
    char buf[128];
    int i = 0;
    /* |val| is each block's length in buflib_data units */
    for(union buflib_data* this = ctx->buf_start;
                           this < ctx->alloc_end;
                           this += abs(this->val))
    {
        snprintf(buf, sizeof(buf), "%8p: val: %4ld (%s)",
                                   this, this->val,
                                   this->val > 0? this[3].name:"<unallocated>");
        print(i++, buf);
    }
}
748 | #endif | ||
749 | |||
750 | #ifdef BUFLIB_DEBUG_BLOCK_SINGLE | ||
751 | int buflib_get_num_blocks(struct buflib_context *ctx) | ||
752 | { | ||
753 | int i = 0; | ||
754 | for(union buflib_data* this = ctx->buf_start; | ||
755 | this < ctx->alloc_end; | ||
756 | this += abs(this->val)) | ||
757 | { | ||
758 | i++; | ||
759 | } | ||
760 | return i; | ||
761 | } | ||
762 | |||
/* Debug helper: format a one-line description of the block_num-th block
 * (0-based) into buf.
 * NOTE(review): if block_num exceeds the block count, the loop stops at
 * alloc_end and this dereferences the end marker — callers presumably pass
 * an index obtained from buflib_get_num_blocks(); confirm. */
void buflib_print_block_at(struct buflib_context *ctx, int block_num,
                                char* buf, size_t bufsize)
{
    union buflib_data* this = ctx->buf_start;
    /* skip forward block by block until the requested index is reached */
    while(block_num > 0 && this < ctx->alloc_end)
    {
        this += abs(this->val);
        block_num -= 1;
    }
    snprintf(buf, bufsize, "%8p: val: %4ld (%s)",
                                this, this->val,
                                this->val > 0? this[3].name:"<unallocated>");
}
776 | |||
777 | #endif | ||