author    Michael Sevakis <jethead71@rockbox.org>  2011-02-14 02:14:26 +0000
committer Michael Sevakis <jethead71@rockbox.org>  2011-02-14 02:14:26 +0000
commit    0fde635fb0c00641f372dfce14aff29c40e4398e (patch)
tree      ba935ef9d2a346e8391752c4b3be3b647356b430 /apps
parent    f8a4fbc8433bc8e9bdba535082dfbf05fb49e304 (diff)
Leave a gap between all handles, because ringbuf_add_cross interprets equal pointers as an empty buffer: the corruption guard could fail to detect an overlap if buffering ran right up to the next handle and was then asked to buffer again before the following handles were freed (costs one byte of padding on average). Storage alignment on handle reset must now sometimes forego the alignment increment: after a stopped rebuffer, the handle may have been shrunk too close to the next handle, or to the reading position left by a prior rebuffer.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29302 a1c6a512-1295-4272-9138-f99709370657
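
To make the first point of the commit message concrete: below is a minimal,
self-contained sketch of the crossing check involved. The helper is a toy
re-implementation of ringbuf_add_cross() that matches the behaviour the
message describes (equal pointers read as a fully empty gap); the buffer
size and indices are invented for illustration.

    #include <stdio.h>
    #include <stddef.h>

    #define BUFFER_LEN 16 /* toy ring size, not the real audio buffer */

    /* How far past p2 does p1+v land on the ring? >= 0 means it crossed.
       When p1 >= p2 the gap is measured the long way around, so equal
       pointers count as a completely empty buffer - the blind spot the
       commit works around. */
    static ptrdiff_t ringbuf_add_cross(size_t p1, size_t v, size_t p2)
    {
        ptrdiff_t res = (ptrdiff_t)(p1 + v) - (ptrdiff_t)p2;
        if (p1 >= p2)
            res -= BUFFER_LEN;
        return res;
    }

    int main(void)
    {
        /* A handle buffered right up to the next handle (p1 == p2):
           the guard reports a large negative value, i.e. no overlap,
           even though any further write lands on the next handle. */
        printf("%td\n", ringbuf_add_cross(5, 4, 5)); /* -12: missed */

        /* With the mandatory one-byte gap the write index stops at 5
           while the next handle sits at 6, and the same situation is
           caught as a crossing. */
        printf("%td\n", ringbuf_add_cross(5, 4, 6)); /*  3: caught */
        return 0;
    }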
Diffstat (limited to 'apps')
-rw-r--r--  apps/buffering.c | 40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/apps/buffering.c b/apps/buffering.c
index 85028dc8e6..f4eaf8a051 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -266,9 +266,9 @@ static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
         /* the current handle hasn't finished buffering. We can only add
            a new one if there is already enough free space to finish
            the buffering. */
-        size_t req = cur_handle->filerem + sizeof(struct memory_handle);
+        size_t req = cur_handle->filerem;
         if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
-            /* Not enough space */
+            /* Not enough space to finish allocation */
             mutex_unlock(&llist_mod_mutex);
             mutex_unlock(&llist_mutex);
             return NULL;
@@ -278,8 +278,8 @@ static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
         }
     }
 
-    /* align to 4 bytes up */
-    new_widx = ringbuf_add(widx, 3) & ~3;
+    /* align to 4 bytes up always leaving a gap */
+    new_widx = ringbuf_add(widx, 4) & ~3;
 
     len = data_size + sizeof(struct memory_handle);
 
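
Why the hunk above guarantees a gap: rounding widx+3 down to a 4-byte
boundary is a no-op when widx is already aligned, while widx+4 always
advances by one to four bytes - one extra byte on average, as the commit
message notes. A standalone illustration (it ignores the ring wrap that
ringbuf_add() performs):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        for (size_t widx = 8; widx <= 11; widx++) {
            size_t old_w = (widx + 3) & ~(size_t)3; /* old: gap may be 0 */
            size_t new_w = (widx + 4) & ~(size_t)3; /* new: gap is 1..4  */
            printf("widx=%2zu  old=%2zu (gap %zu)  new=%2zu (gap %zu)\n",
                   widx, old_w, old_w - widx, new_w, new_w - widx);
        }
        return 0;
    }

Only the already-aligned case (widx = 8) changes: its gap grows from 0 to
4 bytes, which removes the equal-pointer case without disturbing the
4-byte alignment of the handle that follows.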
@@ -681,15 +681,12 @@ static bool buffer_handle(int handle_id)
         ssize_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                               buffer_len - h->widx);
         uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
-        ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset);
+        ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1;
 
-        if (!h->next)
-            overlap++; /* sub one more below to avoid buffer overflow */
-
         if (overlap > 0)
         {
             /* read only up to available space and stop if it would overwrite
-               the reading position or the next handle */
+               or be on top of the reading position or the next handle */
             stop = true;
             copy_n -= overlap;
         }
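
A worked example of the new clamp, using the toy ringbuf_add_cross() from
the first sketch (all numbers invented): with the next handle at offset
100, the write index at 90 and a 15-byte chunk pending, the crossing is
90+15-100 = 5; the +1 makes overlap 6, so copy_n shrinks to 9 and the
write ends at byte 99, one byte short of the next handle. The old code
could end the write exactly at offset 100, creating the equal-pointer
blind spot.

    #include <stdio.h>
    #include <stddef.h>

    #define BUFFER_LEN 1024 /* toy size */

    static ptrdiff_t ringbuf_add_cross(size_t p1, size_t v, size_t p2)
    {
        ptrdiff_t res = (ptrdiff_t)(p1 + v) - (ptrdiff_t)p2;
        if (p1 >= p2)
            res -= BUFFER_LEN;
        return res;
    }

    int main(void)
    {
        size_t widx = 90, offset = 100; /* next handle starts at 100 */
        ptrdiff_t copy_n = 15;

        /* the +1 keeps the write one byte short of 'offset' */
        ptrdiff_t overlap = ringbuf_add_cross(widx, (size_t)copy_n, offset) + 1;
        if (overlap > 0)
            copy_n -= overlap;

        printf("copy %td bytes, ends at %zu\n", copy_n, widx + (size_t)copy_n);
        /* prints: copy 9 bytes, ends at 99 */
        return 0;
    }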
@@ -751,7 +748,7 @@ static bool buffer_handle(int handle_id)
    Use this after having set the new offset to use. */
 static void reset_handle(int handle_id)
 {
-    size_t alignment_pad;
+    size_t new_index;
 
     logf("reset_handle(%d)", handle_id);
 
@@ -759,12 +756,27 @@ static void reset_handle(int handle_id)
     if (!h)
         return;
 
-    /* Align to desired storage alignment */
-    alignment_pad = STORAGE_OVERLAP(h->offset - (size_t)(&buffer[h->start]));
-    h->ridx = h->widx = h->data = ringbuf_add(h->start, alignment_pad);
+    new_index = h->start;
+
+#ifdef STORAGE_WANTS_ALIGN
+    /* Align to desired storage alignment if space permits - handle could have
+       been shrunken too close to the following one after a previous rebuffer. */
+    size_t alignment_pad = STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index]));
+    size_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
+
+    if (ringbuf_add_cross(new_index, alignment_pad, offset) >= 0) {
+        /* Forego storage alignment this time */
+        alignment_pad = 0;
+    }
+
+    new_index = ringbuf_add(new_index, alignment_pad);
+#endif
+
+    h->ridx = h->widx = h->data = new_index;
 
     if (h == cur_handle)
-        buf_widx = h->widx;
+        buf_widx = new_index;
+
     h->available = 0;
     h->filerem = h->filesize - h->offset;
 
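
The same crossing primitive guards the alignment pad in the hunk above: if
padding the restart position for storage alignment would reach the next
handle (or the reading position), the pad is dropped instead. A toy
version of that decision, reusing the sketch helper with invented numbers:

    #include <stdio.h>
    #include <stddef.h>

    #define BUFFER_LEN 1024 /* toy size */

    static ptrdiff_t ringbuf_add_cross(size_t p1, size_t v, size_t p2)
    {
        ptrdiff_t res = (ptrdiff_t)(p1 + v) - (ptrdiff_t)p2;
        if (p1 >= p2)
            res -= BUFFER_LEN;
        return res;
    }

    int main(void)
    {
        size_t start = 100; /* handle restart position          */
        size_t next  = 105; /* next handle only 5 bytes away    */
        size_t pad   = 8;   /* pad wanted for storage alignment */

        /* 100 + 8 lands past 105: forego storage alignment this time */
        if (ringbuf_add_cross(start, pad, next) >= 0)
            pad = 0;

        printf("restart at %zu\n", start + pad); /* restart at 100 */
        return 0;
    }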