author     Andrew Mahone <andrew.mahone@gmail.com>    2009-03-04 21:11:12 +0000
committer  Andrew Mahone <andrew.mahone@gmail.com>    2009-03-04 21:11:12 +0000
commit     b727de604ddab15fe809303181ba18e53197708d (patch)
tree       a3d6880b77982fc417e7ff9b714ced4a91dc425c /apps/plugins/lib/buflib.c
parent     e54c1cc9a2f9053245feddf70243aa357f18870d (diff)
download   rockbox-b727de604ddab15fe809303181ba18e53197708d.tar.gz
           rockbox-b727de604ddab15fe809303181ba18e53197708d.zip
FS#9916 buflib plugin memory allocator
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@20202 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps/plugins/lib/buflib.c')
-rw-r--r--  apps/plugins/lib/buflib.c  308
1 file changed, 308 insertions, 0 deletions
diff --git a/apps/plugins/lib/buflib.c b/apps/plugins/lib/buflib.c
new file mode 100644
index 0000000000..514479600e
--- /dev/null
+++ b/apps/plugins/lib/buflib.c
@@ -0,0 +1,308 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "buflib.h"

/* The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow upward from the start of the
 * buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by a negative length; allocated ones use the second buflib_data
 * in the block to store a pointer to their handle table entry, so that it can
 * be quickly found and updated during compaction. The allocator functions are
 * passed a context struct so that two allocators can be run; for example, one
 * per core may be used, with convenience wrappers for the single-allocator
 * case that use a predefined context.
 */
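
/* Illustrative layout sketch, assuming two live allocations and one free
 * block:
 *
 *   buf                                                      buf + size
 *   [len|hptr|data...][len|hptr|data...][-len|...gap...][handle table]
 *    allocated         allocated         free            grows downward
 *
 * Each allocated block stores its total length (in buflib_data units) in its
 * first unit and a pointer to its handle table entry in its second; the
 * handle entry points back at the data, which begins at the block's third
 * unit.
 */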
42 | |||
43 | #define ABS(x) \ | ||
44 | ({ \ | ||
45 | typeof(x) xtmp_abs_ = x; \ | ||
46 | xtmp_abs_ = xtmp_abs_ < 0 ? -xtmp_abs_ : xtmp_abs_; \ | ||
47 | xtmp_abs_; \ | ||
48 | }) | ||
49 | |||
50 | /* Initialize buffer manager */ | ||
51 | void | ||
52 | buflib_init(struct buflib_context *ctx, void *buf, size_t size) | ||
53 | { | ||
54 | union buflib_data *bd_buf = buf; | ||
55 | |||
56 | /* Align on sizeof(buflib_data), to prevent unaligned access */ | ||
57 | ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data)); | ||
58 | size /= sizeof(union buflib_data); | ||
59 | /* The handle table is initialized with no entries */ | ||
60 | ctx->handle_table = bd_buf + size; | ||
61 | ctx->last_handle = bd_buf + size; | ||
62 | ctx->first_free_handle = bd_buf + size - 1; | ||
63 | ctx->first_free_block = bd_buf; | ||
64 | /* A marker is needed for the end of allocated data, to make sure that it | ||
65 | * does not collide with the handle table, and to detect end-of-buffer. | ||
66 | */ | ||
67 | ctx->alloc_end = bd_buf; | ||
68 | ctx->compact = true; | ||
69 | } | ||
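
/* After initialization, alloc_end == first_free_block == bd_buf and
 * last_handle == handle_table == bd_buf + size: the handle table is empty
 * and the whole buffer is the free gap between alloc_end and last_handle. */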
70 | |||
71 | /* Allocate a new handle, returning 0 on failure */ | ||
72 | static inline | ||
73 | union buflib_data* handle_alloc(struct buflib_context *ctx) | ||
74 | { | ||
75 | union buflib_data *handle; | ||
76 | /* first_free_handle is a lower bound on free handles, work through the | ||
77 | * table from there until a handle containing NULL is found, or the end | ||
78 | * of the table is reached. | ||
79 | */ | ||
80 | for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--) | ||
81 | if (!handle->ptr) | ||
82 | break; | ||
83 | /* If the search went past the end of the table, it means we need to extend | ||
84 | * the table to get a new handle. | ||
85 | */ | ||
86 | if (handle < ctx->last_handle) | ||
87 | { | ||
88 | if (handle >= ctx->alloc_end) | ||
89 | ctx->last_handle--; | ||
90 | else | ||
91 | return NULL; | ||
92 | } | ||
93 | handle->val = -1; | ||
94 | return handle; | ||
95 | } | ||
96 | |||
97 | /* Free one handle, shrinking the handle table if it's the last one */ | ||
98 | static inline | ||
99 | void handle_free(struct buflib_context *ctx, union buflib_data *handle) | ||
100 | { | ||
101 | handle->ptr = 0; | ||
102 | /* Update free handle lower bound if this handle has a lower index than the | ||
103 | * old one. | ||
104 | */ | ||
105 | if (handle > ctx->first_free_handle) | ||
106 | ctx->first_free_handle = handle; | ||
107 | if (handle == ctx->last_handle) | ||
108 | ctx->last_handle++; | ||
109 | else | ||
110 | ctx->compact = false; | ||
111 | } | ||
112 | |||
113 | /* Shrink the handle table, returning true if its size was reduced, false if | ||
114 | * not | ||
115 | */ | ||
116 | static inline | ||
117 | bool | ||
118 | handle_table_shrink(struct buflib_context *ctx) | ||
119 | { | ||
120 | bool rv; | ||
121 | union buflib_data *handle; | ||
122 | for (handle = ctx->last_handle; !(handle->ptr); handle++); | ||
123 | if (handle > ctx->first_free_handle) | ||
124 | ctx->first_free_handle = handle - 1; | ||
125 | rv = handle == ctx->last_handle; | ||
126 | ctx->last_handle = handle; | ||
127 | return rv; | ||
128 | } | ||
129 | |||
130 | /* Compact allocations and handle table, adjusting handle pointers as needed. | ||
131 | * Return true if any space was freed or consolidated, false otherwise. | ||
132 | */ | ||
133 | static bool | ||
134 | buflib_compact(struct buflib_context *ctx) | ||
135 | { | ||
136 | union buflib_data *block = ctx->first_free_block, *new_block; | ||
137 | int shift = 0, len; | ||
138 | /* Store the results of attempting to shrink the handle table */ | ||
139 | bool ret = handle_table_shrink(ctx); | ||
140 | for(; block != ctx->alloc_end; block += len) | ||
141 | { | ||
142 | len = block->val; | ||
143 | /* This block is free, add its length to the shift value */ | ||
144 | if (len < 0) | ||
145 | { | ||
146 | shift += len; | ||
147 | len = -len; | ||
148 | continue; | ||
149 | } | ||
150 | /* If shift is non-zero, it represents the number of places to move | ||
151 | * blocks down in memory. Calculate the new address for this block, | ||
152 | * update its entry in the handle table, and then move its contents. | ||
153 | */ | ||
154 | if (shift) | ||
155 | { | ||
156 | new_block = block + shift; | ||
157 | block[1].ptr->ptr = new_block + 2; | ||
158 | rb->memmove(new_block, block, len * sizeof(union buflib_data)); | ||
159 | } | ||
160 | } | ||
161 | /* Move the end-of-allocation mark, and return true if any new space has | ||
162 | * been freed. | ||
163 | */ | ||
164 | ctx->alloc_end += shift; | ||
165 | ctx->first_free_block = ctx->alloc_end; | ||
166 | ctx->compact = true; | ||
167 | return ret || shift; | ||
168 | } | ||
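
/* Worked example: with blocks [A][free:4][B][C], scanning the free block
 * sets shift = -4, so B and C are each moved four units toward the start of
 * the buffer (their handle entries updated first), and alloc_end drops by
 * four, leaving [A][B][C] with all free space at the end. */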
169 | |||
170 | /* Allocate a buffer of size bytes, returning a handle for it */ | ||
171 | int | ||
172 | buflib_alloc(struct buflib_context *ctx, size_t size) | ||
173 | { | ||
174 | union buflib_data *handle, *block; | ||
175 | bool last = false; | ||
176 | /* This really is assigned a value before use */ | ||
177 | int block_len; | ||
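    /* Convert the request from bytes to buflib_data units, rounding up, then
     * add two units for the block header (length marker and handle pointer).
     * For example, with 4-byte units a 10-byte request becomes
     * (10 + 3) / 4 + 2 = 5 units. */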
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data) + 2;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If handle allocation has failed and compaction has succeeded, it
         * may be possible to get a handle by trying again.
         */
        if (!ctx->compact && buflib_compact(ctx))
            goto handle_alloc;
        else
            return 0;
    }

buffer_alloc:
    for (block = ctx->first_free_block;; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and to
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if (block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }
        block_len = block->val;
        /* Blocks with positive length are already allocated. */
        if (block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit; any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed, but only if the handle
         * allocation did not trigger compaction already, since there will
         * be no further gain.
         */
        if (!ctx->compact && buflib_compact(ctx))
        {
            goto buffer_alloc;
        }
        else
        {
            handle->val = 1;
            handle_free(ctx, handle);
            return 0;
        }
    }

    /* Set up the allocated block by marking its size allocated and storing
     * a pointer to the handle.
     */
    block->val = size;
    block[1].ptr = handle;
    handle->ptr = block + 2;
    /* If we have just taken the first free block, the next allocation search
     * can save some time by starting after this block.
     */
    if (block == ctx->first_free_block)
        ctx->first_free_block += size;
    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
258 | |||
259 | /* Free the buffer associated with handle_num. */ | ||
260 | void | ||
261 | buflib_free(struct buflib_context *ctx, int handle_num) | ||
262 | { | ||
263 | union buflib_data *handle = ctx->handle_table - handle_num, | ||
264 | *freed_block = handle->ptr - 2, | ||
265 | *block = ctx->first_free_block, | ||
266 | *next_block = block; | ||
267 | /* We need to find the block before the current one, to see if it is free | ||
268 | * and can be merged with this one. | ||
269 | */ | ||
270 | while (next_block < freed_block) | ||
271 | { | ||
272 | block = next_block; | ||
273 | next_block += ABS(block->val); | ||
274 | } | ||
275 | /* If next_block == block, the above loop didn't go anywhere. If it did, | ||
276 | * and the block before this one is empty, we can combine them. | ||
277 | */ | ||
278 | if (next_block == freed_block && next_block != block && block->val < 0) | ||
279 | block->val -= freed_block->val; | ||
280 | /* Otherwise, set block to the newly-freed block, and mark it free, before | ||
281 | * continuing on, since the code below exects block to point to a free | ||
282 | * block which may have free space after it. | ||
283 | */ | ||
284 | else | ||
285 | { | ||
286 | block = freed_block; | ||
287 | block->val = -block->val; | ||
288 | } | ||
289 | next_block = block - block->val; | ||
290 | /* Check if we are merging with the free space at alloc_end. */ | ||
291 | if (next_block == ctx->alloc_end) | ||
292 | ctx->alloc_end = block; | ||
293 | /* Otherwise, the next block might still be a "normal" free block, and the | ||
294 | * mid-allocation free means that the buffer is no longer compact. | ||
295 | */ | ||
296 | else { | ||
297 | ctx->compact = false; | ||
298 | if (next_block->val < 0) | ||
299 | block->val += next_block->val; | ||
300 | } | ||
301 | handle_free(ctx, handle); | ||
302 | handle->ptr = NULL; | ||
303 | /* If this block is before first_free_block, it becomes the new starting | ||
304 | * point for free-block search. | ||
305 | */ | ||
306 | if (block < ctx->first_free_block) | ||
307 | ctx->first_free_block = block; | ||
308 | } | ||
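
A minimal usage sketch, not part of this commit: it assumes a plugin with the
usual rb plugin API pointer and a hypothetical get_data() helper built from
the table lookup described in the design comment. Note that a pointer
obtained for a handle may be invalidated by compaction during a later
buflib_alloc(), so it should be re-fetched after each allocation.

    /* Hypothetical accessor: handle_num indexes the table downward from
     * handle_table, and each entry's ptr field points at the block data. */
    static inline void *get_data(struct buflib_context *ctx, int handle_num)
    {
        return ctx->handle_table[-handle_num].ptr;
    }

    static union buflib_data backing[1024]; /* hypothetical backing store */

    static void example(void)
    {
        struct buflib_context ctx;
        buflib_init(&ctx, backing, sizeof(backing));

        int h = buflib_alloc(&ctx, 100);    /* size is in bytes */
        if (h > 0)
        {
            char *p = get_data(&ctx, h);    /* valid until the next alloc */
            rb->memset(p, 0, 100);
            buflib_free(&ctx, h);
        }
    }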