Diffstat (limited to 'firmware/buflib_mempool.c')
-rw-r--r-- | firmware/buflib_mempool.c | 1113 |
1 file changed, 1113 insertions, 0 deletions
diff --git a/firmware/buflib_mempool.c b/firmware/buflib_mempool.c
new file mode 100644
index 0000000000..18e4f118ff
--- /dev/null
+++ b/firmware/buflib_mempool.c
@@ -0,0 +1,1113 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * This is a memory allocator designed to provide reasonable management of free | ||
11 | * space and fast access to allocated data. More than one allocator can be used | ||
12 | * at a time by initializing multiple contexts. | ||
13 | * | ||
14 | * Copyright (C) 2009 Andrew Mahone | ||
15 | * Copyright (C) 2011 Thomas Martitz | ||
16 | * | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version 2 | ||
21 | * of the License, or (at your option) any later version. | ||
22 | * | ||
23 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
24 | * KIND, either express or implied. | ||
25 | * | ||
26 | ****************************************************************************/ | ||
27 | |||
28 | #include <stdarg.h> | ||
29 | #include <stdlib.h> /* for abs() */ | ||
30 | #include <stdio.h> /* for snprintf() */ | ||
31 | #include <stddef.h> /* for ptrdiff_t */ | ||
32 | #include "buflib_mempool.h" | ||
33 | #include "string-extra.h" /* strmemccpy() */ | ||
34 | #include "debug.h" | ||
35 | #include "panic.h" | ||
36 | #include "system.h" /* for ALIGN_*() */ | ||
37 | |||
38 | /* FIXME: This comment is pretty out of date now and wrong in some details. | ||
39 | * | ||
40 | * The main goal of this design is fast fetching of the pointer for a handle. | ||
41 | * For that reason, the handles are stored in a table at the end of the buffer | ||
42 | * with a fixed address, so that returning the pointer for a handle is a simple | ||
43 | * table lookup. To reduce the frequency with which allocated blocks will need | ||
44 | * to be moved to free space, allocations grow up in address from the start of | ||
45 | * the buffer. The buffer is treated as an array of union buflib_data. Blocks | ||
46 | * start with a length marker, which is included in their length. Free blocks | ||
47 | * are marked by negative length. Allocated blocks have a positive length marker, | ||
48 | * followed by additional metadata: first a pointer | ||
49 | * (union buflib_data*) to the corresponding handle table entry, so that it can | ||
50 | * be quickly found and updated during compaction. After that follows | ||
51 | * the pointer to the struct buflib_callbacks associated with this allocation | ||
52 | * (may be NULL). That pointer is followed by a variable-length character array | ||
53 | * containing the nul-terminated string identifier of the allocation. After this | ||
54 | * array there's a length marker for the length of the character array including | ||
55 | * this length marker (counted in n*sizeof(union buflib_data)), which makes it | ||
56 | * possible to find the start of the character array (and therefore the start | ||
57 | * of the entire block) when only the handle or payload start is known. | ||
58 | * | ||
59 | * UPDATE BUFLIB_ALLOC_OVERHEAD (buflib.h) WHEN THE METADATA CHANGES! | ||
60 | * | ||
61 | * Example: | ||
62 | * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->| | ||
63 | * |L|H|C|cccc|L2|crc|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|crc|XXXXXXXXXXXXX|AAA| | ||
64 | * | ||
65 | * L - length marker (negative if block unallocated) | ||
66 | * H - handle table entry pointer | ||
67 | * C - pointer to struct buflib_callbacks | ||
68 | * c - variable sized string identifier | ||
69 | * L2 - length of the metadata | ||
70 | * crc - crc32 protecting buflib metadata integrity | ||
71 | * X - actual payload | ||
72 | * Y - unallocated space | ||
73 | * | ||
74 | * A - pointer to start of payload (first X) in the handle table (may be null) | ||
75 | * | ||
76 | * The blocks can be walked by jumping the abs() of the L length marker, i.e. | ||
77 | * union buflib_data* L; | ||
78 | * for(L = start; L < end; L += abs(L->val)) { .... } | ||
79 | * | ||
80 | * | ||
81 | * The allocator functions are passed a context struct so that multiple | ||
82 | * allocators can be run; for example, one per core. Convenience wrappers | ||
83 | * for the single-allocator case use a predefined context. | ||
84 | */ | ||
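/*
 * A minimal usage sketch of the public API (illustrative only; the pool
 * name and sizes below are made up, not part of this file):
 *
 *     static char pool[32*1024];
 *     struct buflib_context ctx;
 *
 *     buflib_init(&ctx, pool, sizeof(pool));
 *     int handle = buflib_alloc(&ctx, 1024);
 *     if (handle > 0)
 *     {
 *         char *data = buflib_get_data(&ctx, handle);
 *         // use data, but re-fetch it after anything that may yield(),
 *         // since compaction may have moved the allocation
 *         buflib_free(&ctx, handle);
 *     }
 */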
85 | |||
86 | #define B_ALIGN_DOWN(x) \ | ||
87 | ALIGN_DOWN(x, sizeof(union buflib_data)) | ||
88 | |||
89 | #define B_ALIGN_UP(x) \ | ||
90 | ALIGN_UP(x, sizeof(union buflib_data)) | ||
91 | |||
92 | #ifdef DEBUG | ||
93 | #include <stdio.h> | ||
94 | #define BDEBUGF DEBUGF | ||
95 | #else | ||
96 | #define BDEBUGF(...) do { } while(0) | ||
97 | #endif | ||
98 | |||
99 | #define BPANICF panicf | ||
100 | |||
101 | /* Available paranoia checks */ | ||
102 | #define PARANOIA_CHECK_LENGTH (1 << 0) | ||
103 | #define PARANOIA_CHECK_BLOCK_HANDLE (1 << 1) | ||
104 | #define PARANOIA_CHECK_PINNING (1 << 2) | ||
105 | /* Bitmask of enabled paranoia checks */ | ||
106 | #define BUFLIB_PARANOIA \ | ||
107 | (PARANOIA_CHECK_LENGTH | \ | ||
108 | PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING) | ||
109 | |||
110 | /* Indices used to access block fields as block[idx_XXX] */ | ||
111 | enum { | ||
112 | idx_LEN, /* length of the block, must come first */ | ||
113 | idx_HANDLE, /* pointer to entry in the handle table */ | ||
114 | idx_OPS, /* pointer to an ops struct */ | ||
115 | idx_PIN, /* pin count */ | ||
116 | BUFLIB_NUM_FIELDS, | ||
117 | }; | ||
118 | |||
119 | struct buflib_callbacks buflib_ops_locked = { | ||
120 | .move_callback = NULL, | ||
121 | .shrink_callback = NULL, | ||
122 | .sync_callback = NULL, | ||
123 | }; | ||
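/*
 * Sketch of a callback set for a movable allocation whose owner caches a
 * pointer to its data (hypothetical example; the demo_* names are made up).
 * buflib itself performs the memmove; the move callback only has to fix up
 * cached pointers, or veto the move:
 *
 *     static void *cached_ptr;
 *
 *     static int demo_move_cb(int handle, void *current, void *new)
 *     {
 *         (void)handle;
 *         if (cached_ptr == current)
 *             cached_ptr = new;
 *         return BUFLIB_CB_OK;   // or BUFLIB_CB_CANNOT_MOVE to refuse
 *     }
 *
 *     static struct buflib_callbacks demo_ops = {
 *         .move_callback = demo_move_cb,
 *     };
 */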
124 | |||
125 | #define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback) | ||
126 | |||
127 | static union buflib_data* find_first_free(struct buflib_context *ctx); | ||
128 | static union buflib_data* find_block_before(struct buflib_context *ctx, | ||
129 | union buflib_data* block, | ||
130 | bool is_free); | ||
131 | |||
132 | /* Check the length of a block to ensure it does not go beyond the end | ||
133 | * of the allocated area. The block can be either allocated or free. | ||
134 | * | ||
135 | * This verifies that it is safe to iterate to the next block in a loop. | ||
136 | */ | ||
137 | static void check_block_length(struct buflib_context *ctx, | ||
138 | union buflib_data *block); | ||
139 | |||
140 | /* Check a block's handle pointer to ensure it is within the handle | ||
141 | * table, and that the user pointer is pointing within the block. | ||
142 | * | ||
143 | * This verifies that it is safe to dereference the entry and ensures | ||
144 | * that the pointer in the handle table points within the block, as | ||
145 | * determined by the length field at the start of the block. | ||
146 | */ | ||
147 | static void check_block_handle(struct buflib_context *ctx, | ||
148 | union buflib_data *block); | ||
149 | |||
150 | /* Initialize buffer manager */ | ||
151 | void | ||
152 | buflib_init(struct buflib_context *ctx, void *buf, size_t size) | ||
153 | { | ||
154 | union buflib_data *bd_buf = buf; | ||
155 | BDEBUGF("buflib initialized with %lu.%02lu kiB\n", | ||
156 | (unsigned long)size / 1024, ((unsigned long)size % 1024) * 100 / 1024); | ||
157 | |||
158 | /* Align on sizeof(buflib_data), to prevent unaligned access */ | ||
159 | ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data)); | ||
160 | size /= sizeof(union buflib_data); | ||
161 | /* The handle table is initialized with no entries */ | ||
162 | ctx->handle_table = bd_buf + size; | ||
163 | ctx->last_handle = bd_buf + size; | ||
164 | ctx->first_free_handle = bd_buf + size - 1; | ||
165 | ctx->buf_start = bd_buf; | ||
166 | /* A marker is needed for the end of allocated data, to make sure that it | ||
167 | * does not collide with the handle table, and to detect end-of-buffer. | ||
168 | */ | ||
169 | ctx->alloc_end = bd_buf; | ||
170 | ctx->compact = true; | ||
171 | |||
172 | if (size == 0) | ||
173 | { | ||
174 | BPANICF("buflib_init error (CTX:%p, %zd bytes):\n", ctx, | ||
175 | (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data)); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | bool buflib_context_relocate(struct buflib_context *ctx, void *buf) | ||
180 | { | ||
181 | union buflib_data *handle, *bd_buf = buf; | ||
182 | ptrdiff_t diff = bd_buf - ctx->buf_start; | ||
183 | |||
184 | /* cannot continue if the buffer is not aligned, since we would need | ||
185 | * to reduce the size of the buffer for aligning */ | ||
186 | if (!IS_ALIGNED((uintptr_t)buf, sizeof(union buflib_data))) | ||
187 | return false; | ||
188 | |||
189 | /* relocate the handle table entries */ | ||
190 | for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) | ||
191 | { | ||
192 | if (handle->alloc) | ||
193 | handle->alloc += diff * sizeof(union buflib_data); | ||
194 | } | ||
195 | /* relocate the pointers in the context */ | ||
196 | ctx->handle_table += diff; | ||
197 | ctx->last_handle += diff; | ||
198 | ctx->first_free_handle += diff; | ||
199 | ctx->buf_start += diff; | ||
200 | ctx->alloc_end += diff; | ||
201 | |||
202 | return true; | ||
203 | } | ||
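/*
 * Sketch of intended use (hypothetical caller): relocation only fixes up
 * buflib's internal pointers after the caller has already moved the pool:
 *
 *     memmove(new_buf, old_buf, pool_size);
 *     if (!buflib_context_relocate(&ctx, new_buf))
 *         panicf("relocation failed");  // new_buf insufficiently aligned
 */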
204 | |||
205 | static void buflib_panic(struct buflib_context *ctx, const char *message, ...) | ||
206 | { | ||
207 | char buf[128]; | ||
208 | va_list ap; | ||
209 | |||
210 | va_start(ap, message); | ||
211 | vsnprintf(buf, sizeof(buf), message, ap); | ||
212 | va_end(ap); | ||
213 | |||
214 | BPANICF("buflib error (CTX:%p, %zd bytes):\n%s", ctx, | ||
215 | (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data), buf); | ||
216 | } | ||
217 | |||
218 | /* Allocate a new handle, returning 0 on failure */ | ||
219 | static inline | ||
220 | union buflib_data* handle_alloc(struct buflib_context *ctx) | ||
221 | { | ||
222 | union buflib_data *handle; | ||
223 | /* first_free_handle is a lower bound on free handles, work through the | ||
224 | * table from there until a handle containing NULL is found, or the end | ||
225 | * of the table is reached. | ||
226 | */ | ||
227 | for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--) | ||
228 | if (!handle->alloc) | ||
229 | break; | ||
230 | /* If the search went past the end of the table, it means we need to extend | ||
231 | * the table to get a new handle. | ||
232 | */ | ||
233 | if (handle < ctx->last_handle) | ||
234 | { | ||
235 | if (handle >= ctx->alloc_end) | ||
236 | ctx->last_handle--; | ||
237 | else | ||
238 | { | ||
239 | /* We know the table is full, so update first_free_handle */ | ||
240 | ctx->first_free_handle = ctx->last_handle - 1; | ||
241 | return NULL; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | /* We know there are no free handles between the old first_free_handle | ||
246 | * and the found handle, therefore we can update first_free_handle */ | ||
247 | ctx->first_free_handle = handle - 1; | ||
248 | |||
249 | /* We need to set the table entry to a non-NULL value to ensure that | ||
250 | * compactions triggered by an allocation do not compact the handle | ||
251 | * table and delete this handle. */ | ||
252 | handle->val = -1; | ||
253 | |||
254 | return handle; | ||
255 | } | ||
256 | |||
257 | /* Free one handle, shrinking the handle table if it's the last one */ | ||
258 | static inline | ||
259 | void handle_free(struct buflib_context *ctx, union buflib_data *handle) | ||
260 | { | ||
261 | handle->alloc = 0; | ||
262 | /* Update free handle lower bound if this handle has a lower index than the | ||
263 | * old one. | ||
264 | */ | ||
265 | if (handle > ctx->first_free_handle) | ||
266 | ctx->first_free_handle = handle; | ||
267 | if (handle == ctx->last_handle) | ||
268 | ctx->last_handle++; | ||
269 | else | ||
270 | ctx->compact = false; | ||
271 | } | ||
272 | |||
273 | /* Get the start block of an allocation */ | ||
274 | static inline | ||
275 | union buflib_data* handle_to_block(struct buflib_context* ctx, int handle) | ||
276 | { | ||
277 | void *ptr = buflib_get_data(ctx, handle); | ||
278 | |||
279 | /* this can legitimately happen during shrinking, if the handle | ||
280 | * was freed by the shrink callback */ | ||
281 | if (!ptr) | ||
282 | return NULL; | ||
283 | |||
284 | union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data)); | ||
285 | return data - BUFLIB_NUM_FIELDS; | ||
286 | } | ||
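/*
 * Note on the arithmetic above: a public handle number is the (positive)
 * distance of its table entry from the table top, so the two conversions
 * used throughout this file are
 *
 *     int handle = ctx->handle_table - h_entry;
 *     union buflib_data *h_entry = ctx->handle_table - handle;
 *
 * Valid entries live below handle_table, so valid handles are >= 1 and
 * zero/negative values can signal errors.
 */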
287 | |||
288 | /* Shrink the handle table, returning true if its size was reduced, false if | ||
289 | * not | ||
290 | */ | ||
291 | static inline bool handle_table_shrink(struct buflib_context *ctx) | ||
292 | { | ||
293 | union buflib_data *handle; | ||
294 | union buflib_data *old_last = ctx->last_handle; | ||
295 | |||
296 | for (handle = ctx->last_handle; handle != ctx->handle_table; ++handle) | ||
297 | if (handle->alloc) | ||
298 | break; | ||
299 | |||
300 | if (handle > ctx->first_free_handle) | ||
301 | ctx->first_free_handle = handle - 1; | ||
302 | |||
303 | ctx->last_handle = handle; | ||
304 | return handle != old_last; | ||
305 | } | ||
306 | |||
307 | /* If shift is non-zero, it represents the number of places to move | ||
308 | * blocks in memory. Calculate the new address for this block, | ||
309 | * update its entry in the handle table, and then move its contents. | ||
310 | * | ||
311 | * Returns false if moving was unsuccessful | ||
312 | * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned) | ||
313 | */ | ||
314 | static bool | ||
315 | move_block(struct buflib_context* ctx, union buflib_data* block, int shift) | ||
316 | { | ||
317 | char* new_start; | ||
318 | union buflib_data *new_block; | ||
319 | |||
320 | check_block_handle(ctx, block); | ||
321 | union buflib_data *h_entry = block[idx_HANDLE].handle; | ||
322 | |||
323 | if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0) | ||
324 | return false; | ||
325 | |||
326 | int handle = ctx->handle_table - h_entry; | ||
327 | BDEBUGF("%s(): moving id=%d by %d(%d)\n", __func__, | ||
328 | handle, shift, shift*(int)sizeof(union buflib_data)); | ||
329 | new_block = block + shift; | ||
330 | new_start = h_entry->alloc + shift*sizeof(union buflib_data); | ||
331 | |||
332 | struct buflib_callbacks *ops = block[idx_OPS].ops; | ||
333 | |||
334 | /* If the move must be synchronized with use, the user should have | ||
335 | specified a sync callback that handles this */ | ||
336 | if (ops && ops->sync_callback) | ||
337 | ops->sync_callback(handle, true); | ||
338 | |||
339 | bool retval = false; | ||
340 | if (!ops || ops->move_callback(handle, h_entry->alloc, new_start) | ||
341 | != BUFLIB_CB_CANNOT_MOVE) | ||
342 | { | ||
343 | h_entry->alloc = new_start; /* update handle table */ | ||
344 | memmove(new_block, block, block->val * sizeof(union buflib_data)); | ||
345 | retval = true; | ||
346 | } | ||
347 | |||
348 | if (ops && ops->sync_callback) | ||
349 | ops->sync_callback(handle, false); | ||
350 | |||
351 | return retval; | ||
352 | } | ||
353 | |||
354 | /* Compact allocations and handle table, adjusting handle pointers as needed. | ||
355 | * Return true if any space was freed or consolidated, false otherwise. | ||
356 | */ | ||
357 | static bool | ||
358 | buflib_compact(struct buflib_context *ctx) | ||
359 | { | ||
360 | BDEBUGF("%s(): Compacting!\n", __func__); | ||
361 | union buflib_data *block, | ||
362 | *hole = NULL; | ||
363 | int shift = 0, len; | ||
364 | /* Store the results of attempting to shrink the handle table */ | ||
365 | bool ret = handle_table_shrink(ctx); | ||
366 | /* compaction has basically two modes of operation: | ||
367 | * 1) the buffer is nicely movable: In this mode, blocks can be simply | ||
368 | * moved towards the beginning. Free blocks add to a shift value, | ||
369 | * which is the amount to move. | ||
370 | * 2) the buffer contains unmovable blocks: unmovable blocks create | ||
371 | * holes and reset shift. Once a hole is found, we're trying to fill | ||
372 | * holes first, moving by shift is the fallback. As the shift is reset, | ||
373 | * this effectively splits the buffer into portions of movable blocks. | ||
374 | * This mode cannot be used until a hole has been found, as it only | ||
375 | * works by moving blocks across the portions. On the other hand, | ||
376 | * moving by shift only works within the same portion. | ||
377 | * For simplicity only 1 hole at a time is considered */ | ||
378 | for(block = find_first_free(ctx); block < ctx->alloc_end; block += len) | ||
379 | { | ||
380 | check_block_length(ctx, block); | ||
381 | |||
382 | bool movable = true; /* cache result to avoid 2nd call to move_block */ | ||
383 | len = block->val; | ||
384 | /* This block is free, add its length to the shift value */ | ||
385 | if (len < 0) | ||
386 | { | ||
387 | shift += len; | ||
388 | len = -len; | ||
389 | continue; | ||
390 | } | ||
391 | /* attempt to fill any hole */ | ||
392 | if (hole && -hole->val >= len) | ||
393 | { | ||
394 | intptr_t hlen = -hole->val; | ||
395 | if ((movable = move_block(ctx, block, hole - block))) | ||
396 | { | ||
397 | ret = true; | ||
398 | /* Move was successful. The memory at block is now free */ | ||
399 | block->val = -len; | ||
400 | |||
401 | /* add its length to shift */ | ||
402 | shift += -len; | ||
403 | /* Reduce the size of the hole accordingly | ||
404 | * but be careful to not overwrite an existing block */ | ||
405 | if (hlen != len) | ||
406 | { | ||
407 | hole += len; | ||
408 | hole->val = len - hlen; /* negative */ | ||
409 | } | ||
410 | else /* hole closed */ | ||
411 | hole = NULL; | ||
412 | continue; | ||
413 | } | ||
414 | } | ||
415 | /* attempt to move the allocation by shift */ | ||
416 | if (shift) | ||
417 | { | ||
418 | union buflib_data* target_block = block + shift; | ||
419 | if (!movable || !move_block(ctx, block, shift)) | ||
420 | { | ||
421 | /* free space before an unmovable block becomes a hole, | ||
422 | * therefore mark this block free and track the hole */ | ||
423 | target_block->val = shift; | ||
424 | hole = target_block; | ||
425 | shift = 0; | ||
426 | } | ||
427 | else | ||
428 | ret = true; | ||
429 | } | ||
430 | } | ||
431 | /* Move the end-of-allocation mark, and return true if any new space has | ||
432 | * been freed. | ||
433 | */ | ||
434 | ctx->alloc_end += shift; | ||
435 | ctx->compact = true; | ||
436 | return ret || shift; | ||
437 | } | ||
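/*
 * Worked example of the two modes, in the style of the header diagram
 * (U = unmovable, e.g. pinned or lacking a move_callback):
 *
 *   |A|--free--|B|U|-free-|C|
 *
 * B is shifted left into the free run after A (mode 1). U cannot move, so
 * the shift accumulated in front of it is left behind as a hole and shift
 * resets. C may then be moved into that hole if it fits (mode 2);
 * otherwise it can only be shifted within its own portion after U.
 */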
438 | |||
439 | /* Compact the buffer by trying both shrinking and moving. | ||
440 | * | ||
441 | * Try to move first. If unsuccessful, try to shrink. If that was successful | ||
442 | * try to move once more as there might be more room now. | ||
443 | */ | ||
444 | static bool | ||
445 | buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints) | ||
446 | { | ||
447 | bool result = false; | ||
448 | /* if a compaction already ran before, there will be no further gain */ | ||
449 | if (!ctx->compact) | ||
450 | result = buflib_compact(ctx); | ||
451 | if (!result) | ||
452 | { | ||
453 | union buflib_data *this, *before; | ||
454 | for(this = ctx->buf_start, before = this; | ||
455 | this < ctx->alloc_end; | ||
456 | before = this, this += abs(this->val)) | ||
457 | { | ||
458 | check_block_length(ctx, this); | ||
459 | if (this->val < 0) | ||
460 | continue; | ||
461 | |||
462 | struct buflib_callbacks *ops = this[idx_OPS].ops; | ||
463 | if (!ops || !ops->shrink_callback) | ||
464 | continue; | ||
465 | |||
466 | check_block_handle(ctx, this); | ||
467 | union buflib_data* h_entry = this[idx_HANDLE].handle; | ||
468 | int handle = ctx->handle_table - h_entry; | ||
469 | |||
470 | unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK; | ||
471 | /* adjust what we ask for if there's free space in the front | ||
472 | * this isn't too unlikely assuming this block is | ||
473 | * shrinkable but not movable */ | ||
474 | if (pos_hints == BUFLIB_SHRINK_POS_FRONT && | ||
475 | before != this && before->val < 0) | ||
476 | { | ||
477 | size_t free_space = (-before->val) * sizeof(union buflib_data); | ||
478 | size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK; | ||
479 | if (wanted < free_space) /* no shrink needed? */ | ||
480 | continue; | ||
481 | wanted -= free_space; | ||
482 | shrink_hints = pos_hints | wanted; | ||
483 | } | ||
484 | |||
485 | char* data = h_entry->alloc; | ||
486 | char* data_end = (char*)(this + this->val); | ||
487 | bool last = (data_end == (char*)ctx->alloc_end); | ||
488 | |||
489 | int ret = ops->shrink_callback(handle, shrink_hints, | ||
490 | data, data_end - data); | ||
491 | result |= (ret == BUFLIB_CB_OK); | ||
492 | |||
493 | /* 'this' might have changed in the callback (if it shrank | ||
494 | * at the front or even freed the handle), get it again */ | ||
495 | this = handle_to_block(ctx, handle); | ||
496 | |||
497 | /* The handle may have been freed in the callback, | ||
498 | * re-run the loop from the block before */ | ||
499 | if (!this) | ||
500 | this = before; | ||
501 | /* could also change with shrinking from back */ | ||
502 | else if (last) | ||
503 | ctx->alloc_end = this + this->val; | ||
504 | } | ||
505 | /* shrinking was successful at least once, try compaction again */ | ||
506 | if (result) | ||
507 | result |= buflib_compact(ctx); | ||
508 | } | ||
509 | |||
510 | return result; | ||
511 | } | ||
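/*
 * Sketch of how an owner's shrink callback might decode the hints built
 * above (hypothetical code; demo_ctx and demo_shrink_cb are made-up names,
 * and BUFLIB_CB_CANNOT_SHRINK is assumed to be declared in buflib.h
 * alongside BUFLIB_CB_OK). The callback gives up space by calling
 * buflib_shrink() and reports the outcome:
 *
 *     static int demo_shrink_cb(int handle, unsigned hints,
 *                               void *start, size_t old_size)
 *     {
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         if ((hints & BUFLIB_SHRINK_POS_FRONT) && wanted <= old_size)
 *         {
 *             // give up 'wanted' bytes at the front of the allocation
 *             if (buflib_shrink(&demo_ctx, handle,
 *                               (char *)start + wanted, old_size - wanted))
 *                 return BUFLIB_CB_OK;
 *         }
 *         return BUFLIB_CB_CANNOT_SHRINK;
 *     }
 */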
512 | |||
513 | /* Shift buffered items by size units, and update handle pointers. The shift | ||
514 | * value must be determined to be safe *before* calling. | ||
515 | */ | ||
516 | static void | ||
517 | buflib_buffer_shift(struct buflib_context *ctx, int shift) | ||
518 | { | ||
519 | memmove(ctx->buf_start + shift, ctx->buf_start, | ||
520 | (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data)); | ||
521 | ctx->buf_start += shift; | ||
522 | ctx->alloc_end += shift; | ||
523 | shift *= sizeof(union buflib_data); | ||
524 | union buflib_data *handle; | ||
525 | for (handle = ctx->last_handle; handle < ctx->handle_table; handle++) | ||
526 | if (handle->alloc) | ||
527 | handle->alloc += shift; | ||
528 | } | ||
529 | |||
530 | /* Shift buffered items up by size bytes, or as many as possible if size == 0. | ||
531 | * Set size to the number of bytes freed. | ||
532 | */ | ||
533 | void* | ||
534 | buflib_buffer_out(struct buflib_context *ctx, size_t *size) | ||
535 | { | ||
536 | if (!ctx->compact) | ||
537 | buflib_compact(ctx); | ||
538 | size_t avail = ctx->last_handle - ctx->alloc_end; | ||
539 | size_t avail_b = avail * sizeof(union buflib_data); | ||
540 | if (*size && *size < avail_b) | ||
541 | { | ||
542 | avail = (*size + sizeof(union buflib_data) - 1) | ||
543 | / sizeof(union buflib_data); | ||
544 | avail_b = avail * sizeof(union buflib_data); | ||
545 | } | ||
546 | *size = avail_b; | ||
547 | void *ret = ctx->buf_start; | ||
548 | buflib_buffer_shift(ctx, avail); | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | /* Shift buffered items down by size bytes */ | ||
553 | void | ||
554 | buflib_buffer_in(struct buflib_context *ctx, int size) | ||
555 | { | ||
556 | size /= sizeof(union buflib_data); | ||
557 | buflib_buffer_shift(ctx, -size); | ||
558 | } | ||
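/*
 * Sketch of the intended pairing (hypothetical caller): borrow space from
 * the front of the pool, use it outside buflib's control, then return it:
 *
 *     size_t size = 4096;   // 0 would request as much as possible
 *     void *borrowed = buflib_buffer_out(&ctx, &size);
 *     // size now holds the number of bytes actually handed out
 *     // ... use borrowed[0..size) ...
 *     buflib_buffer_in(&ctx, size);
 */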
559 | |||
560 | /* Allocate a buffer of size bytes, returning a handle for it. | ||
561 | * Note: Buffers are movable since NULL is passed for "ops". | ||
562 | * Don't pass their data pointers to functions that may call yield() */ | ||
563 | int | ||
564 | buflib_alloc(struct buflib_context *ctx, size_t size) | ||
565 | { | ||
566 | return buflib_alloc_ex(ctx, size, NULL); | ||
567 | } | ||
568 | |||
569 | /* Allocate a buffer of size bytes, returning a handle for it. | ||
570 | * | ||
571 | * The ops parameter points to caller-implemented callbacks for moving and | ||
572 | * shrinking. | ||
573 | * | ||
574 | * If you pass NULL for "ops", buffers are movable by default. | ||
575 | * Don't pass their data pointers to functions that may call yield(), like I/O. | ||
576 | * Buffers are only shrinkable when a shrink callback is given. | ||
577 | */ | ||
578 | |||
579 | int | ||
580 | buflib_alloc_ex(struct buflib_context *ctx, size_t size, | ||
581 | struct buflib_callbacks *ops) | ||
582 | { | ||
583 | union buflib_data *handle, *block; | ||
584 | bool last; | ||
585 | /* This really is assigned a value before use */ | ||
586 | int block_len; | ||
587 | size = (size + sizeof(union buflib_data) - 1) / | ||
588 | sizeof(union buflib_data) | ||
589 | + BUFLIB_NUM_FIELDS; | ||
590 | handle_alloc: | ||
591 | handle = handle_alloc(ctx); | ||
592 | if (!handle) | ||
593 | { | ||
594 | /* If allocation has failed, and compaction has succeeded, it may be | ||
595 | * possible to get a handle by trying again. | ||
596 | */ | ||
597 | union buflib_data* last_block = find_block_before(ctx, | ||
598 | ctx->alloc_end, false); | ||
599 | struct buflib_callbacks* ops = last_block[idx_OPS].ops; | ||
600 | unsigned hints = 0; | ||
601 | if (!ops || !ops->shrink_callback) | ||
602 | { /* the last one isn't shrinkable | ||
603 | * make room in front of a shrinkable and move this alloc */ | ||
604 | hints = BUFLIB_SHRINK_POS_FRONT; | ||
605 | hints |= last_block->val * sizeof(union buflib_data); | ||
606 | } | ||
607 | else | ||
608 | { /* the last is shrinkable, make room for handles directly */ | ||
609 | hints = BUFLIB_SHRINK_POS_BACK; | ||
610 | hints |= 16*sizeof(union buflib_data); | ||
611 | } | ||
612 | /* buflib_compact_and_shrink() will compact and move last_block | ||
613 | * if possible */ | ||
614 | if (buflib_compact_and_shrink(ctx, hints)) | ||
615 | goto handle_alloc; | ||
616 | return -1; | ||
617 | } | ||
618 | |||
619 | buffer_alloc: | ||
620 | /* need to re-evaluate last before the loop because the previous allocation | ||
621 | * may have made room in front of itself to fit this, so last would be stale */ | ||
622 | last = false; | ||
623 | for (block = find_first_free(ctx);; block += block_len) | ||
624 | { | ||
625 | /* If the last used block extends all the way to the handle table, the | ||
626 | * block "after" it doesn't have a header. Because of this, it's easier | ||
627 | * to always find the end of allocation by saving a pointer, and always | ||
628 | * calculate the free space at the end by comparing it to the | ||
629 | * last_handle pointer. | ||
630 | */ | ||
631 | if(block == ctx->alloc_end) | ||
632 | { | ||
633 | last = true; | ||
634 | block_len = ctx->last_handle - block; | ||
635 | if ((size_t)block_len < size) | ||
636 | block = NULL; | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | check_block_length(ctx, block); | ||
641 | block_len = block->val; | ||
642 | /* blocks with positive length are already allocated. */ | ||
643 | if(block_len > 0) | ||
644 | continue; | ||
645 | block_len = -block_len; | ||
646 | /* The search is first-fit, any fragmentation this causes will be | ||
647 | * handled at compaction. | ||
648 | */ | ||
649 | if ((size_t)block_len >= size) | ||
650 | break; | ||
651 | } | ||
652 | if (!block) | ||
653 | { | ||
654 | /* Try compacting if allocation failed */ | ||
655 | unsigned hint = BUFLIB_SHRINK_POS_FRONT | | ||
656 | ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK); | ||
657 | if (buflib_compact_and_shrink(ctx, hint)) | ||
658 | { | ||
659 | goto buffer_alloc; | ||
660 | } else { | ||
661 | handle_free(ctx, handle); | ||
662 | return -2; | ||
663 | } | ||
664 | } | ||
665 | |||
666 | /* Set up the allocated block, by marking the size allocated, and storing | ||
667 | * a pointer to the handle. | ||
668 | */ | ||
669 | block[idx_LEN].val = size; | ||
670 | block[idx_HANDLE].handle = handle; | ||
671 | block[idx_OPS].ops = ops; | ||
672 | block[idx_PIN].pincount = 0; | ||
673 | |||
674 | handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS]; | ||
675 | |||
676 | BDEBUGF("buflib_alloc_ex: size=%d handle=%p clb=%p\n", | ||
677 | (unsigned int)size, (void *)handle, (void *)ops); | ||
678 | |||
679 | block += size; | ||
680 | /* alloc_end must be kept current if we're taking the last block. */ | ||
681 | if (last) | ||
682 | ctx->alloc_end = block; | ||
683 | /* Only free blocks *before* alloc_end have tagged length. */ | ||
684 | else if ((size_t)block_len > size) | ||
685 | block->val = size - block_len; | ||
686 | /* Return the handle index as a positive integer. */ | ||
687 | return ctx->handle_table - handle; | ||
688 | } | ||
689 | |||
690 | static union buflib_data* | ||
691 | find_first_free(struct buflib_context *ctx) | ||
692 | { | ||
693 | union buflib_data *ret; | ||
694 | for(ret = ctx->buf_start; ret < ctx->alloc_end; ret += ret->val) | ||
695 | { | ||
696 | check_block_length(ctx, ret); | ||
697 | if (ret->val < 0) | ||
698 | break; | ||
699 | } | ||
700 | |||
701 | /* ret is now either a free block or the same as alloc_end, both are fine */ | ||
702 | return ret; | ||
703 | } | ||
704 | |||
705 | /* Find the block before 'block'; if is_free is set, return NULL unless it is free */ | ||
706 | static union buflib_data* | ||
707 | find_block_before(struct buflib_context *ctx, union buflib_data* block, | ||
708 | bool is_free) | ||
709 | { | ||
710 | union buflib_data *ret = ctx->buf_start, | ||
711 | *next_block = ret; | ||
712 | |||
713 | /* no previous block */ | ||
714 | if (next_block == block) | ||
715 | return NULL; | ||
716 | |||
717 | /* find the block that's before the current one */ | ||
718 | while (next_block != block) | ||
719 | { | ||
720 | check_block_length(ctx, ret); | ||
721 | ret = next_block; | ||
722 | next_block += abs(ret->val); | ||
723 | } | ||
724 | |||
725 | /* don't return it if the found block isn't free */ | ||
726 | if (is_free && ret->val >= 0) | ||
727 | return NULL; | ||
728 | |||
729 | return ret; | ||
730 | } | ||
731 | |||
732 | /* Free the buffer associated with handle_num. */ | ||
733 | int | ||
734 | buflib_free(struct buflib_context *ctx, int handle_num) | ||
735 | { | ||
736 | if (handle_num <= 0) /* invalid or already free */ | ||
737 | return handle_num; | ||
738 | union buflib_data *handle = ctx->handle_table - handle_num, | ||
739 | *freed_block = handle_to_block(ctx, handle_num), | ||
740 | *block, *next_block; | ||
741 | /* We need to find the block before the current one, to see if it is free | ||
742 | * and can be merged with this one. | ||
743 | */ | ||
744 | block = find_block_before(ctx, freed_block, true); | ||
745 | if (block) | ||
746 | { | ||
747 | block->val -= freed_block->val; | ||
748 | } | ||
749 | else | ||
750 | { | ||
751 | /* Otherwise, set block to the newly-freed block, and mark it free, | ||
752 | * before continuing on, since the code below expects block to point | ||
753 | * to a free block which may have free space after it. */ | ||
754 | block = freed_block; | ||
755 | block->val = -block->val; | ||
756 | } | ||
757 | |||
758 | next_block = block - block->val; | ||
759 | /* Check if we are merging with the free space at alloc_end. */ | ||
760 | if (next_block == ctx->alloc_end) | ||
761 | ctx->alloc_end = block; | ||
762 | /* Otherwise, the next block might still be a "normal" free block, and the | ||
763 | * mid-allocation free means that the buffer is no longer compact. | ||
764 | */ | ||
765 | else { | ||
766 | ctx->compact = false; | ||
767 | if (next_block->val < 0) | ||
768 | block->val += next_block->val; | ||
769 | } | ||
770 | handle_free(ctx, handle); | ||
771 | handle->alloc = NULL; | ||
772 | |||
773 | return 0; /* unconditionally */ | ||
774 | } | ||
775 | |||
776 | static size_t | ||
777 | free_space_at_end(struct buflib_context* ctx) | ||
778 | { | ||
779 | /* subtract the BUFLIB_NUM_FIELDS header elements a new block would need: | ||
780 | * length, handle pointer, ops pointer and pin count */ | ||
781 | ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - BUFLIB_NUM_FIELDS); | ||
782 | diff -= 16; /* space for future handles */ | ||
783 | diff *= sizeof(union buflib_data); /* make it bytes */ | ||
784 | |||
785 | if (diff > 0) | ||
786 | return diff; | ||
787 | else | ||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | /* Return the maximum allocatable contiguous memory in bytes */ | ||
792 | size_t | ||
793 | buflib_allocatable(struct buflib_context* ctx) | ||
794 | { | ||
795 | size_t free_space = 0, max_free_space = 0; | ||
796 | intptr_t block_len; | ||
797 | |||
798 | /* make sure buffer is as contiguous as possible */ | ||
799 | if (!ctx->compact) | ||
800 | buflib_compact(ctx); | ||
801 | |||
802 | /* now check for free space in holes */ | ||
803 | for(union buflib_data *block = find_first_free(ctx); | ||
804 | block < ctx->alloc_end; | ||
805 | block += block_len) | ||
806 | { | ||
807 | check_block_length(ctx, block); | ||
808 | block_len = block->val; | ||
809 | |||
810 | if (block_len < 0) | ||
811 | { | ||
812 | block_len = -block_len; | ||
813 | free_space += block_len; | ||
814 | continue; | ||
815 | } | ||
816 | |||
817 | /* an unmovable section resets the count as free space | ||
818 | * can't be contiguous across it */ | ||
819 | if (!IS_MOVABLE(block)) | ||
820 | { | ||
821 | if (max_free_space < free_space) | ||
822 | max_free_space = free_space; | ||
823 | free_space = 0; | ||
824 | } | ||
825 | } | ||
826 | |||
827 | /* select the best */ | ||
828 | max_free_space = MAX(max_free_space, free_space); | ||
829 | max_free_space *= sizeof(union buflib_data); | ||
830 | max_free_space = MAX(max_free_space, free_space_at_end(ctx)); | ||
831 | |||
832 | if (max_free_space > 0) | ||
833 | return max_free_space; | ||
834 | else | ||
835 | return 0; | ||
836 | } | ||
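/*
 * For example, two 1 KiB holes separated by an unmovable block count as
 * 2 KiB for buflib_available() below, but only 1 KiB for
 * buflib_allocatable() above, since the holes cannot be merged into one
 * contiguous allocation.
 */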
837 | |||
838 | /* Return the amount of unallocated memory in bytes (even if not contiguous) */ | ||
839 | size_t | ||
840 | buflib_available(struct buflib_context* ctx) | ||
841 | { | ||
842 | size_t free_space = 0; | ||
843 | |||
844 | /* add up all holes */ | ||
845 | for(union buflib_data *block = find_first_free(ctx); | ||
846 | block < ctx->alloc_end; | ||
847 | block += abs(block->val)) | ||
848 | { | ||
849 | check_block_length(ctx, block); | ||
850 | if (block->val < 0) | ||
851 | free_space += -block->val; | ||
852 | } | ||
853 | |||
854 | free_space *= sizeof(union buflib_data); /* make it bytes */ | ||
855 | free_space += free_space_at_end(ctx); | ||
856 | |||
857 | return free_space; | ||
858 | } | ||
859 | |||
860 | /* | ||
861 | * Allocate all available (as returned by buflib_available()) memory and return | ||
862 | * a handle to it | ||
863 | * | ||
864 | * This grabs a lock which can only be unlocked by buflib_free() or | ||
865 | * buflib_shrink(), to protect from further allocations (which couldn't be | ||
866 | * serviced anyway). | ||
867 | */ | ||
868 | int | ||
869 | buflib_alloc_maximum(struct buflib_context* ctx, size_t *size, struct buflib_callbacks *ops) | ||
870 | { | ||
871 | /* ignore ctx->compact because it's true if all movable blocks are contiguous | ||
872 | * even if the buffer has holes due to unmovable allocations */ | ||
873 | unsigned hints; | ||
874 | size_t bufsize = ctx->handle_table - ctx->buf_start; | ||
875 | bufsize = MIN(BUFLIB_SHRINK_SIZE_MASK, bufsize*sizeof(union buflib_data)); /* make it bytes */ | ||
876 | /* try as hard as possible to free up space. allocations are | ||
877 | * welcome to give up some or all of their memory */ | ||
878 | hints = BUFLIB_SHRINK_POS_BACK | BUFLIB_SHRINK_POS_FRONT | bufsize; | ||
879 | /* compact until no space can be gained anymore */ | ||
880 | while (buflib_compact_and_shrink(ctx, hints)); | ||
881 | |||
882 | *size = buflib_allocatable(ctx); | ||
883 | if (*size == 0) /* OOM */ | ||
884 | return -1; | ||
885 | |||
886 | return buflib_alloc_ex(ctx, *size, ops); | ||
887 | } | ||
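/*
 * Sketch of typical use (hypothetical caller): claim everything as a
 * temporary scratch buffer, locked against moving and shrinking via
 * buflib_ops_locked, then release it again:
 *
 *     size_t size;
 *     int h = buflib_alloc_maximum(&ctx, &size, &buflib_ops_locked);
 *     if (h > 0)
 *     {
 *         // ... use up to 'size' bytes at buflib_get_data(&ctx, h) ...
 *         buflib_free(&ctx, h);   // unlocks further allocations
 *     }
 */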
888 | |||
889 | /* Shrink the allocation indicated by the handle according to new_start and | ||
890 | * new_size. Growing is not possible, therefore new_start and new_start + new_size | ||
891 | * must be within the original allocation | ||
892 | */ | ||
893 | bool | ||
894 | buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size) | ||
895 | { | ||
896 | char* oldstart = buflib_get_data(ctx, handle); | ||
897 | char* newstart = new_start != NULL ? new_start : oldstart; | ||
898 | char* newend = newstart + new_size; | ||
899 | |||
900 | /* newstart must be higher and new_size not "negative" */ | ||
901 | if (newstart < oldstart || newend < newstart) | ||
902 | return false; | ||
903 | union buflib_data *block = handle_to_block(ctx, handle), | ||
904 | *old_next_block = block + block->val, | ||
905 | /* newstart isn't necessarily properly aligned but it | ||
906 | * needn't be since it's only dereferenced by the user code */ | ||
907 | *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart), | ||
908 | *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart), | ||
909 | *new_next_block = (union buflib_data*)B_ALIGN_UP(newend), | ||
910 | *new_block, metadata_size; | ||
911 | |||
912 | /* growing is not supported */ | ||
913 | if (new_next_block > old_next_block) | ||
914 | return false; | ||
915 | |||
916 | metadata_size.val = aligned_oldstart - block; | ||
917 | /* update val and the handle table entry */ | ||
918 | new_block = aligned_newstart - metadata_size.val; | ||
919 | block[idx_LEN].val = new_next_block - new_block; | ||
920 | |||
921 | check_block_handle(ctx, block); | ||
922 | block[idx_HANDLE].handle->alloc = newstart; | ||
923 | if (block != new_block) | ||
924 | { | ||
925 | /* move the metadata over, i.e. the block header fields. | ||
926 | * This is actually the point of no return. Data in the allocation is | ||
927 | * being modified, and therefore we must successfully finish the shrink | ||
928 | * operation */ | ||
929 | memmove(new_block, block, metadata_size.val*sizeof(metadata_size)); | ||
930 | /* mark the old block unallocated */ | ||
931 | block->val = block - new_block; | ||
932 | /* find the block before in order to merge with the new free space */ | ||
933 | union buflib_data *free_before = find_block_before(ctx, block, true); | ||
934 | if (free_before) | ||
935 | free_before->val += block->val; | ||
936 | |||
937 | /* We didn't handle size changes yet, assign block to the new one; | ||
938 | * the code below wants block whether it changed or not */ | ||
939 | block = new_block; | ||
940 | } | ||
941 | |||
942 | /* Now deal with size changes that create free blocks after the allocation */ | ||
943 | if (old_next_block != new_next_block) | ||
944 | { | ||
945 | if (ctx->alloc_end == old_next_block) | ||
946 | ctx->alloc_end = new_next_block; | ||
947 | else if (old_next_block->val < 0) | ||
948 | { /* enlarge next block by moving it up */ | ||
949 | new_next_block->val = old_next_block->val - (old_next_block - new_next_block); | ||
950 | } | ||
951 | else | ||
952 | { /* creating a hole */ | ||
953 | /* must be negative to indicate being unallocated */ | ||
954 | new_next_block->val = new_next_block - old_next_block; | ||
955 | } | ||
956 | } | ||
957 | |||
958 | return true; | ||
959 | } | ||
960 | |||
961 | void buflib_pin(struct buflib_context *ctx, int handle) | ||
962 | { | ||
963 | if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) | ||
964 | buflib_panic(ctx, "invalid handle pin: %d", handle); | ||
965 | |||
966 | union buflib_data *data = handle_to_block(ctx, handle); | ||
967 | data[idx_PIN].pincount++; | ||
968 | } | ||
969 | |||
970 | void buflib_unpin(struct buflib_context *ctx, int handle) | ||
971 | { | ||
972 | if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) | ||
973 | buflib_panic(ctx, "invalid handle unpin: %d", handle); | ||
974 | |||
975 | union buflib_data *data = handle_to_block(ctx, handle); | ||
976 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) | ||
977 | { | ||
978 | if (data[idx_PIN].pincount == 0) | ||
979 | buflib_panic(ctx, "handle pin underflow: %d", handle); | ||
980 | } | ||
981 | |||
982 | data[idx_PIN].pincount--; | ||
983 | } | ||
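/*
 * Sketch of the intended pinning pattern (hypothetical caller): a pinned
 * block is never moved by compaction, so its data pointer stays valid
 * across operations that may yield():
 *
 *     buflib_pin(&ctx, handle);
 *     read(fd, buflib_get_data(&ctx, handle), length);  // may yield
 *     buflib_unpin(&ctx, handle);
 */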
984 | |||
985 | unsigned buflib_pin_count(struct buflib_context *ctx, int handle) | ||
986 | { | ||
987 | if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0) | ||
988 | buflib_panic(ctx, "invalid handle: %d", handle); | ||
989 | |||
990 | union buflib_data *data = handle_to_block(ctx, handle); | ||
991 | return data[idx_PIN].pincount; | ||
992 | } | ||
993 | |||
994 | #ifdef BUFLIB_DEBUG_GET_DATA | ||
995 | void *buflib_get_data(struct buflib_context *ctx, int handle) | ||
996 | { | ||
997 | if (handle <= 0) | ||
998 | buflib_panic(ctx, "invalid handle access: %d", handle); | ||
999 | |||
1000 | return (void*)(ctx->handle_table[-handle].alloc); | ||
1001 | } | ||
1002 | #endif | ||
1003 | |||
1004 | #ifdef BUFLIB_DEBUG_CHECK_VALID | ||
1005 | void buflib_check_valid(struct buflib_context *ctx) | ||
1006 | { | ||
1007 | for(union buflib_data *block = ctx->buf_start; | ||
1008 | block < ctx->alloc_end; | ||
1009 | block += abs(block->val)) | ||
1010 | { | ||
1011 | check_block_length(ctx, block); | ||
1012 | if (block->val < 0) | ||
1013 | continue; | ||
1014 | |||
1015 | check_block_handle(ctx, block); | ||
1016 | } | ||
1017 | } | ||
1018 | #endif | ||
1019 | |||
1020 | #ifdef BUFLIB_DEBUG_PRINT | ||
1021 | int buflib_get_num_blocks(struct buflib_context *ctx) | ||
1022 | { | ||
1023 | int i = 0; | ||
1024 | |||
1025 | for(union buflib_data *block = ctx->buf_start; | ||
1026 | block < ctx->alloc_end; | ||
1027 | block += abs(block->val)) | ||
1028 | { | ||
1029 | check_block_length(ctx, block); | ||
1030 | ++i; | ||
1031 | } | ||
1032 | |||
1033 | return i; | ||
1034 | } | ||
1035 | |||
1036 | bool buflib_print_block_at(struct buflib_context *ctx, int block_num, | ||
1037 | char *buf, size_t bufsize) | ||
1038 | { | ||
1039 | for(union buflib_data *block = ctx->buf_start; | ||
1040 | block < ctx->alloc_end; | ||
1041 | block += abs(block->val)) | ||
1042 | { | ||
1043 | check_block_length(ctx, block); | ||
1044 | |||
1045 | if (block_num-- == 0) | ||
1046 | { | ||
1047 | snprintf(buf, bufsize, "%8p: val: %4ld (%sallocated)", | ||
1048 | block, (long)block->val, | ||
1049 | block->val > 0 ? "" : "un"); | ||
1050 | return true; | ||
1051 | } | ||
1052 | } | ||
1053 | |||
1054 | if (bufsize > 0) | ||
1055 | *buf = '\0'; | ||
1056 | return false; | ||
1057 | } | ||
1058 | #endif | ||
1059 | |||
1060 | static void check_block_length(struct buflib_context *ctx, | ||
1061 | union buflib_data *block) | ||
1062 | { | ||
1063 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH) | ||
1064 | { | ||
1065 | intptr_t length = block[idx_LEN].val; | ||
1066 | |||
1067 | /* Check the block length does not pass beyond the end */ | ||
1068 | if (length == 0 || block > ctx->alloc_end - abs(length)) | ||
1069 | { | ||
1070 | buflib_panic(ctx, "block len wacky [%p]=%ld", | ||
1071 | (void*)&block[idx_LEN], (long)length); | ||
1072 | } | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | static void check_block_handle(struct buflib_context *ctx, | ||
1077 | union buflib_data *block) | ||
1078 | { | ||
1079 | if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE) | ||
1080 | { | ||
1081 | intptr_t length = block[idx_LEN].val; | ||
1082 | union buflib_data *h_entry = block[idx_HANDLE].handle; | ||
1083 | |||
1084 | /* Check the handle pointer is properly aligned */ | ||
1085 | /* TODO: Can we ensure the compiler doesn't optimize this out? | ||
1086 | * I dunno, maybe the compiler can assume the pointer is always | ||
1087 | * properly aligned due to some C standard voodoo?? */ | ||
1088 | if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry))) | ||
1089 | { | ||
1090 | buflib_panic(ctx, "handle unaligned [%p]=%p", | ||
1091 | &block[idx_HANDLE], h_entry); | ||
1092 | } | ||
1093 | |||
1094 | /* Check the pointer is actually inside the handle table */ | ||
1095 | if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table) | ||
1096 | { | ||
1097 | buflib_panic(ctx, "handle out of bounds [%p]=%p", | ||
1098 | &block[idx_HANDLE], h_entry); | ||
1099 | } | ||
1100 | |||
1101 | /* Now check the allocation is within the block. | ||
1102 | * This is stricter than check_handle(). */ | ||
1103 | void *alloc = h_entry->alloc; | ||
1104 | void *alloc_begin = block; | ||
1105 | void *alloc_end = block + length; | ||
1106 | /* buflib allows zero length allocations, so alloc_end is inclusive */ | ||
1107 | if (alloc < alloc_begin || alloc > alloc_end) | ||
1108 | { | ||
1109 | buflib_panic(ctx, "alloc outside block [%p]=%p, %p-%p", | ||
1110 | h_entry, alloc, alloc_begin, alloc_end); | ||
1111 | } | ||
1112 | } | ||
1113 | } | ||