author    Thomas Martitz <kugel@rockbox.org>  2011-09-09 15:35:14 +0000
committer Thomas Martitz <kugel@rockbox.org>  2011-09-09 15:35:14 +0000
commit    f7cff8bd69adf6f6345fc1d7742f62d5a919ecde (patch)
tree      f02905ff8d40cf2eef2f15fafba5f3e20d7ed6d1 /firmware/buflib.c
parent    0dcbc6cd5d4d40b37f7dbc38a944bfe28901f6e0 (diff)
Buflib: Stop caching the first unallocated block. It has little benefit but is complicated to keep up-to-date.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@30487 a1c6a512-1295-4272-9138-f99709370657
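
The change drops the cached ctx->first_free_block pointer and instead recomputes the first free block on demand by walking the block headers (see find_first_free() in the diff below). As a minimal standalone sketch of that walk, assuming only the header convention visible in the diff (a positive val is an allocated block's length in words, a negative val marks a free block); the reduced union, the toy layout, and the main() driver are illustrative, not the Rockbox sources:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reduced model of buflib's block header: the first word of every
     * block holds its length in words; the sign says allocated vs free. */
    union buflib_data
    {
        intptr_t val; /* > 0: allocated block length, < 0: free block */
    };

    /* Walk from the start of the buffer, skipping allocated blocks, until
     * a free block or the end of allocated data is reached. This is the
     * linear scan that replaces the cached first_free_block pointer. */
    static union buflib_data*
    find_first_free(union buflib_data *buf_start, union buflib_data *alloc_end)
    {
        union buflib_data *ret = buf_start;
        while (ret < alloc_end)
        {
            if (ret->val < 0)
                break;           /* negative header: found a free block */
            ret += ret->val;     /* positive header: skip this allocation */
        }
        return ret;              /* a free block, or alloc_end; both are fine */
    }

    int main(void)
    {
        /* Toy layout: 3 allocated words, a 2-word hole, 4 allocated words. */
        union buflib_data buf[9];
        buf[0].val = 3;
        buf[3].val = -2;
        buf[5].val = 4;

        union buflib_data *first_free = find_first_free(buf, buf + 9);
        printf("first free block at word %td\n", first_free - buf); /* 3 */
        return 0;
    }

The tradeoff the commit message alludes to: each lookup now pays an O(n) walk over the block headers, but free, shrink, shift, and compaction (all the hunks below) no longer need to keep the cached pointer consistent.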
Diffstat (limited to 'firmware/buflib.c')
-rw-r--r--  firmware/buflib.c  85
1 file changed, 42 insertions, 43 deletions
diff --git a/firmware/buflib.c b/firmware/buflib.c
index ac8cdc864c..0dfbdf6f49 100644
--- a/firmware/buflib.c
+++ b/firmware/buflib.c
@@ -89,9 +89,10 @@
 #define BDEBUGF(...) do { } while(0)
 #endif
 
-static union buflib_data*
-find_block_before(struct buflib_context *ctx, union buflib_data* block,
-                  bool is_free);
+static union buflib_data* find_first_free(struct buflib_context *ctx);
+static union buflib_data* find_block_before(struct buflib_context *ctx,
+                                            union buflib_data* block,
+                                            bool is_free);
 /* Initialize buffer manager */
 void
 buflib_init(struct buflib_context *ctx, void *buf, size_t size)
@@ -105,7 +106,6 @@ buflib_init(struct buflib_context *ctx, void *buf, size_t size)
     ctx->handle_table = bd_buf + size;
     ctx->last_handle = bd_buf + size;
     ctx->first_free_handle = bd_buf + size - 1;
-    ctx->first_free_block = bd_buf;
     ctx->buf_start = bd_buf;
     /* A marker is needed for the end of allocated data, to make sure that it
      * does not collide with the handle table, and to detect end-of-buffer.
@@ -226,11 +226,12 @@ static bool
 buflib_compact(struct buflib_context *ctx)
 {
     BDEBUGF("%s(): Compacting!\n", __func__);
-    union buflib_data *block;
+    union buflib_data *block,
+                      *first_free = find_first_free(ctx);
     int shift = 0, len;
     /* Store the results of attempting to shrink the handle table */
     bool ret = handle_table_shrink(ctx);
-    for(block = ctx->first_free_block; block < ctx->alloc_end; block += len)
+    for(block = first_free; block < ctx->alloc_end; block += len)
     {
         len = block->val;
         /* This block is free, add its length to the shift value */
@@ -241,41 +242,41 @@ buflib_compact(struct buflib_context *ctx)
             continue;
         }
         /* attempt to fill any hole */
-        if (-ctx->first_free_block->val > block->val)
+        if (-first_free->val >= block->val)
        {
-            intptr_t size = ctx->first_free_block->val;
+            intptr_t size = -first_free->val;
             union buflib_data* next_block = block + block->val;
-            if (move_block(ctx, block, ctx->first_free_block - block))
+            if (move_block(ctx, block, first_free - block))
             {
-                /* moving was successful. Mark the next block as the new
-                 * first_free_block and merge it with the free space
-                 * that the move created */
+                /* moving was successful. Move alloc_end down if necessary */
                 if (ctx->alloc_end == next_block)
                     ctx->alloc_end = block;
-                ctx->first_free_block += block->val;
-                ctx->first_free_block->val = size + block->val;
+                /* Mark the block behind the just moved as free
+                 * be careful to not overwrite an existing block */
+                if (size != block->val)
+                {
+                    first_free += block->val;
+                    first_free->val = block->val - size; /* negative */
+                }
                 continue;
             }
         }
         /* attempt move the allocation by shift */
         if (shift)
         {
-            /* failing to move creates a hole, therefore mark this
-             * block as not allocated anymore and move first_free_block up */
+            /* failing to move creates a hole,
+             * therefore mark this block as not allocated */
+            union buflib_data* target_block = block + shift;
             if (!move_block(ctx, block, shift))
             {
-                union buflib_data* hole = block + shift;
-                hole->val = shift;
-                if (ctx->first_free_block > hole)
-                    ctx->first_free_block = hole;
+                target_block->val = shift; /* this is a hole */
                 shift = 0;
             }
-            /* if move was successful, the just moved block is now
-             * possibly in place of the first free one, so move this thing up */
-            else if (ctx->first_free_block == block+shift)
-            {
-                ctx->first_free_block += ctx->first_free_block->val;
-                ctx->first_free_block->val = shift;
-            }
+            else
+            {   /* need to update the next free block, since the above hole
+                 * handling might make shift 0 before alloc_end is reached */
+                union buflib_data* new_free = target_block + target_block->val;
+                new_free->val = shift;
+            }
         }
     }
@@ -283,9 +284,6 @@ buflib_compact(struct buflib_context *ctx)
      * been freed.
      */
     ctx->alloc_end += shift;
-    /* only move first_free_block up if it wasn't already by a hole */
-    if (ctx->first_free_block > ctx->alloc_end)
-        ctx->first_free_block = ctx->alloc_end;
     ctx->compact = true;
     return ret || shift;
 }
@@ -345,7 +343,6 @@ buflib_buffer_shift(struct buflib_context *ctx, int shift)
     for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
         if (handle->alloc)
             handle->alloc += shift;
-    ctx->first_free_block += shift;
     ctx->buf_start += shift;
     ctx->alloc_end += shift;
 }
@@ -443,7 +440,7 @@ buffer_alloc:
     /* need to re-evaluate last before the loop because the last allocation
      * possibly made room in its front to fit this, so last would be wrong */
     last = false;
-    for (block = ctx->first_free_block;;block += block_len)
+    for (block = find_first_free(ctx);;block += block_len)
     {
         /* If the last used block extends all the way to the handle table, the
          * block "after" it doesn't have a header. Because of this, it's easier
@@ -499,8 +496,6 @@ buffer_alloc:
     /* If we have just taken the first free block, the next allocation search
      * can save some time by starting after this block.
      */
-    if (block == ctx->first_free_block)
-        ctx->first_free_block += size;
     block += size;
     /* alloc_end must be kept current if we're taking the last block. */
     if (last)
@@ -512,6 +507,20 @@ buffer_alloc:
     return ctx->handle_table - handle;
 }
 
+static union buflib_data*
+find_first_free(struct buflib_context *ctx)
+{
+    union buflib_data* ret = ctx->buf_start;
+    while(ret < ctx->alloc_end)
+    {
+        if (ret->val < 0)
+            break;
+        ret += ret->val;
+    }
+    /* ret is now either a free block or the same as alloc_end, both is fine */
+    return ret;
+}
+
 /* Finds the free block before block, and returns NULL if it's not free */
 static union buflib_data*
 find_block_before(struct buflib_context *ctx, union buflib_data* block,
@@ -577,11 +586,6 @@ buflib_free(struct buflib_context *ctx, int handle_num)
     }
     handle_free(ctx, handle);
     handle->alloc = NULL;
-    /* If this block is before first_free_block, it becomes the new starting
-     * point for free-block search.
-     */
-    if (block < ctx->first_free_block)
-        ctx->first_free_block = block;
 
     return 0; /* unconditionally */
 }
@@ -668,8 +672,6 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
         union buflib_data *free_before = find_block_before(ctx, block, true);
         if (free_before)
             free_before->val += block->val;
-        else if (ctx->first_free_block > block)
-            ctx->first_free_block = block;
 
         /* We didn't handle size changes yet, assign block to the new one
         * the code below the wants block whether it changed or not */
@@ -690,9 +692,6 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
             /* must be negative to indicate being unallocated */
             new_next_block->val = new_next_block - old_next_block;
         }
-        /* update first_free_block for the newly created free space */
-        if (ctx->first_free_block > new_next_block)
-            ctx->first_free_block = new_next_block;
     }
 
     return true;