diff options
author | Brandon Low <lostlogic@rockbox.org> | 2007-10-29 16:48:16 +0000 |
---|---|---|
committer | Brandon Low <lostlogic@rockbox.org> | 2007-10-29 16:48:16 +0000 |
commit | 483dca99c4e19f2041ab0785772146cbe90c544c (patch) | |
tree | 568e0516d59b70656df3fd02c3cacc09fb8b10d2 | |
parent | 30d3d36513864048ea3a7fe2519c18d7f52b5b2f (diff) | |
download | rockbox-483dca99c4e19f2041ab0785772146cbe90c544c.tar.gz rockbox-483dca99c4e19f2041ab0785772146cbe90c544c.zip |
Remove can_add_handle; its safety and benefit are both questionable. Make shrink_buffer a function to reduce code duplication. Change move_handle semantics so that the caller never loses track of the current position of h, even if the move fails.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15362 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r-- | apps/buffering.c | 112 |
1 files changed, 43 insertions, 69 deletions
diff --git a/apps/buffering.c b/apps/buffering.c index 0600202717..13efe51974 100644 --- a/apps/buffering.c +++ b/apps/buffering.c | |||
@@ -386,30 +386,31 @@ static struct memory_handle *find_handle(const unsigned int handle_id) | |||
386 | a memory_handle after correcting for wraps or if the handle is not | 386 | a memory_handle after correcting for wraps or if the handle is not |
387 | found in the linked list for adjustment. This function has no side | 387 | found in the linked list for adjustment. This function has no side |
388 | effects if NULL is returned. */ | 388 | effects if NULL is returned. */ |
389 | static struct memory_handle *move_handle(const struct memory_handle *h, | 389 | static bool move_handle(struct memory_handle const **h, |
390 | size_t *delta, const size_t data_size) | 390 | size_t *delta, const size_t data_size) |
391 | { | 391 | { |
392 | struct memory_handle *dest; | 392 | struct memory_handle *dest; |
393 | const struct memory_handle *src; | ||
393 | size_t newpos; | 394 | size_t newpos; |
394 | size_t size_to_move; | 395 | size_t size_to_move; |
395 | size_t new_delta = *delta; | 396 | size_t final_delta = *delta; |
396 | int overlap; | 397 | int overlap; |
397 | 398 | ||
398 | if (h == NULL) | 399 | if (h == NULL || (src = *h) == NULL) |
399 | return NULL; | 400 | return false; |
400 | 401 | ||
401 | size_to_move = sizeof(struct memory_handle) + data_size; | 402 | size_to_move = sizeof(struct memory_handle) + data_size; |
402 | 403 | ||
403 | /* Align to four bytes, down */ | 404 | /* Align to four bytes, down */ |
404 | new_delta &= ~3; | 405 | final_delta &= ~3; |
405 | if (new_delta < sizeof(struct memory_handle)) { | 406 | if (final_delta < sizeof(struct memory_handle)) { |
406 | /* It's not legal to move less than the size of the struct */ | 407 | /* It's not legal to move less than the size of the struct */ |
407 | return NULL; | 408 | return false; |
408 | } | 409 | } |
409 | 410 | ||
410 | mutex_lock(&llist_mutex); | 411 | mutex_lock(&llist_mutex); |
411 | 412 | ||
412 | newpos = RINGBUF_ADD((void *)h - (void *)buffer, new_delta); | 413 | newpos = RINGBUF_ADD((void *)src - (void *)buffer, final_delta); |
413 | overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1); | 414 | overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1); |
414 | 415 | ||
415 | if (overlap > 0) { | 416 | if (overlap > 0) { |
@@ -428,54 +429,55 @@ static struct memory_handle *move_handle(const struct memory_handle *h, | |||
428 | /* Align correction to four bytes, up */ | 429 | /* Align correction to four bytes, up */ |
429 | correction = (correction+3) & ~3; | 430 | correction = (correction+3) & ~3; |
430 | } | 431 | } |
431 | if (new_delta < correction + sizeof(struct memory_handle)) { | 432 | if (final_delta < correction + sizeof(struct memory_handle)) { |
432 | /* Delta cannot end up less than the size of the struct */ | 433 | /* Delta cannot end up less than the size of the struct */ |
433 | mutex_unlock(&llist_mutex); | 434 | mutex_unlock(&llist_mutex); |
434 | return NULL; | 435 | return false; |
435 | } | 436 | } |
436 | 437 | ||
437 | newpos -= correction; | 438 | newpos -= correction; |
438 | overlap -= correction; /* Used below to know how to split the data */ | 439 | overlap -= correction; /* Used below to know how to split the data */ |
439 | new_delta -= correction; | 440 | final_delta -= correction; |
440 | } | 441 | } |
441 | 442 | ||
442 | dest = (struct memory_handle *)(&buffer[newpos]); | 443 | dest = (struct memory_handle *)(&buffer[newpos]); |
443 | 444 | ||
444 | if (h == first_handle) { | 445 | if (src == first_handle) { |
445 | first_handle = dest; | 446 | first_handle = dest; |
446 | buf_ridx = newpos; | 447 | buf_ridx = newpos; |
447 | } else { | 448 | } else { |
448 | struct memory_handle *m = first_handle; | 449 | struct memory_handle *m = first_handle; |
449 | while (m && m->next != h) { | 450 | while (m && m->next != src) { |
450 | m = m->next; | 451 | m = m->next; |
451 | } | 452 | } |
452 | if (m && m->next == h) { | 453 | if (m && m->next == src) { |
453 | m->next = dest; | 454 | m->next = dest; |
454 | } else { | 455 | } else { |
455 | mutex_unlock(&llist_mutex); | 456 | mutex_unlock(&llist_mutex); |
456 | return NULL; | 457 | return false; |
457 | } | 458 | } |
458 | } | 459 | } |
459 | 460 | ||
460 | /* All checks pass, update the caller with how far we're moving */ | ||
461 | *delta = new_delta; | ||
462 | 461 | ||
463 | /* Update the cache to prevent it from keeping the old location of h */ | 462 | /* Update the cache to prevent it from keeping the old location of h */ |
464 | if (h == cached_handle) | 463 | if (src == cached_handle) |
465 | cached_handle = dest; | 464 | cached_handle = dest; |
466 | 465 | ||
467 | /* the cur_handle pointer might need updating */ | 466 | /* the cur_handle pointer might need updating */ |
468 | if (h == cur_handle) | 467 | if (src == cur_handle) |
469 | cur_handle = dest; | 468 | cur_handle = dest; |
470 | 469 | ||
471 | if (overlap > 0) { | 470 | if (overlap > 0) { |
472 | size_t first_part = size_to_move - overlap; | 471 | size_t first_part = size_to_move - overlap; |
473 | memmove(dest, h, first_part); | 472 | memmove(dest, src, first_part); |
474 | memmove(buffer, (char *)h + first_part, overlap); | 473 | memmove(buffer, (char *)src + first_part, overlap); |
475 | } else { | 474 | } else { |
476 | memmove(dest, h, size_to_move); | 475 | memmove(dest, src, size_to_move); |
477 | } | 476 | } |
478 | 477 | ||
478 | /* Update the caller with the new location of h and the distance moved */ | ||
479 | *h = dest; | ||
480 | *delta = final_delta; | ||
479 | mutex_unlock(&llist_mutex); | 481 | mutex_unlock(&llist_mutex); |
480 | return dest; | 482 | return true; |
481 | } | 483 | } |
@@ -683,8 +685,8 @@ static void shrink_handle(int handle_id) | |||
683 | delta = handle_distance - h->available; | 685 | delta = handle_distance - h->available; |
684 | 686 | ||
685 | /* The value of delta might change for alignment reasons */ | 687 | /* The value of delta might change for alignment reasons */ |
686 | h = move_handle(h, &delta, h->available); | 688 | if (!move_handle(&h, &delta, h->available)) |
687 | if (!h) return; | 689 | return; |
688 | 690 | ||
689 | size_t olddata = h->data; | 691 | size_t olddata = h->data; |
690 | h->data = RINGBUF_ADD(h->data, delta); | 692 | h->data = RINGBUF_ADD(h->data, delta); |
@@ -702,8 +704,8 @@ static void shrink_handle(int handle_id) | |||
702 | { | 704 | { |
703 | /* only move the handle struct */ | 705 | /* only move the handle struct */ |
704 | delta = RINGBUF_SUB(h->ridx, h->data); | 706 | delta = RINGBUF_SUB(h->ridx, h->data); |
705 | h = move_handle(h, &delta, 0); | 707 | if (!move_handle(&h, &delta, 0)) |
706 | if (!h) return; | 708 | return; |
707 | 709 | ||
708 | h->data = RINGBUF_ADD(h->data, delta); | 710 | h->data = RINGBUF_ADD(h->data, delta); |
709 | h->available -= delta; | 711 | h->available -= delta; |
@@ -733,26 +735,6 @@ static void fill_buffer(void) | |||
733 | #endif | 735 | #endif |
734 | } | 736 | } |
735 | 737 | ||
736 | /* Check whether it's safe to add a new handle and reserve space to let the | ||
737 | current one finish buffering its data. Used by bufopen and bufalloc as | ||
738 | a preliminary check before even trying to physically add the handle. | ||
739 | Returns true if it's ok to add a new handle, false if not. | ||
740 | */ | ||
741 | static bool can_add_handle(void) | ||
742 | { | ||
743 | /* the current handle hasn't finished buffering. We can only add | ||
744 | a new one if there is already enough free space to finish | ||
745 | the buffering. */ | ||
746 | if (cur_handle && cur_handle->filerem > 0) { | ||
747 | size_t minimum_space = | ||
748 | cur_handle->filerem + sizeof(struct memory_handle ); | ||
749 | if (RINGBUF_ADD_CROSS(cur_handle->widx, minimum_space, buf_ridx) >= 0) | ||
750 | return false; | ||
751 | } | ||
752 | |||
753 | return true; | ||
754 | } | ||
755 | |||
756 | void update_data_counters(void) | 738 | void update_data_counters(void) |
757 | { | 739 | { |
758 | struct memory_handle *m = find_handle(base_handle_id); | 740 | struct memory_handle *m = find_handle(base_handle_id); |
@@ -802,9 +784,6 @@ management functions for all the actual handle management work. | |||
802 | */ | 784 | */ |
803 | int bufopen(const char *file, size_t offset, enum data_type type) | 785 | int bufopen(const char *file, size_t offset, enum data_type type) |
804 | { | 786 | { |
805 | if (!can_add_handle()) | ||
806 | return ERR_BUFFER_FULL; | ||
807 | |||
808 | int fd = open(file, O_RDONLY); | 787 | int fd = open(file, O_RDONLY); |
809 | if (fd < 0) | 788 | if (fd < 0) |
810 | return ERR_FILE_ERROR; | 789 | return ERR_FILE_ERROR; |
@@ -853,9 +832,6 @@ int bufopen(const char *file, size_t offset, enum data_type type) | |||
853 | */ | 832 | */ |
854 | int bufalloc(const void *src, size_t size, enum data_type type) | 833 | int bufalloc(const void *src, size_t size, enum data_type type) |
855 | { | 834 | { |
856 | if (!can_add_handle()) | ||
857 | return ERR_BUFFER_FULL; | ||
858 | |||
859 | struct memory_handle *h = add_handle(size, false, true); | 835 | struct memory_handle *h = add_handle(size, false, true); |
860 | 836 | ||
861 | if (!h) | 837 | if (!h) |
@@ -1141,6 +1117,16 @@ static void call_buffer_low_callbacks(void) | |||
1141 | } | 1117 | } |
1142 | } | 1118 | } |
1143 | 1119 | ||
1120 | static void shrink_buffer(bool audio, bool other) { | ||
1121 | /* shrink selected buffers */ | ||
1122 | struct memory_handle *m = first_handle; | ||
1123 | while (m) { | ||
1124 | if ((m->type==TYPE_AUDIO && audio) || (m->type!=TYPE_AUDIO && other)) | ||
1125 | shrink_handle(m->id); | ||
1126 | m = m->next; | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1144 | void buffering_thread(void) | 1130 | void buffering_thread(void) |
1145 | { | 1131 | { |
1146 | struct queue_event ev; | 1132 | struct queue_event ev; |
@@ -1231,22 +1217,10 @@ void buffering_thread(void) | |||
1231 | if (data_counters.remaining > 0 && | 1217 | if (data_counters.remaining > 0 && |
1232 | data_counters.wasted > data_counters.buffered/2) | 1218 | data_counters.wasted > data_counters.buffered/2) |
1233 | { | 1219 | { |
1234 | /* free buffer from outdated audio data */ | 1220 | /* First work forward, shrinking any unmoveable handles */ |
1235 | struct memory_handle *m = first_handle; | 1221 | shrink_buffer(true,false); |
1236 | while (m) { | 1222 | /* Then work forward following those up with moveable handles */ |
1237 | if (m->type == TYPE_AUDIO) | 1223 | shrink_buffer(false,true); |
1238 | shrink_handle(m->id); | ||
1239 | m = m->next; | ||
1240 | } | ||
1241 | |||
1242 | /* free buffer by moving metadata */ | ||
1243 | m = first_handle; | ||
1244 | while (m) { | ||
1245 | if (m->type != TYPE_AUDIO) | ||
1246 | shrink_handle(m->id); | ||
1247 | m = m->next; | ||
1248 | } | ||
1249 | |||
1250 | update_data_counters(); | 1224 | update_data_counters(); |
1251 | } | 1225 | } |
1252 | 1226 | ||