author    Brandon Low <lostlogic@rockbox.org>  2007-11-01 05:12:55 +0000
committer Brandon Low <lostlogic@rockbox.org>  2007-11-01 05:12:55 +0000
commit    ff9cdb464a5acf63f81fc5359f5218ce742e4fe4 (patch)
tree      c0fd51f92d7c0613e215a9893d5fe984cd2a06fe
parent    7127199230a1c25ff21526d4a6166d6c056aef67 (diff)
Temporary workaround for the situation where a single codec request asks for more data than a single file_chunk and that file gets stuck. This should be fixed differently, as it could leave the buffering thread unresponsive for the duration of a file load.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15390 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  apps/buffering.c  22
1 file changed, 13 insertions, 9 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index 65070a3556..800686541a 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -540,7 +540,7 @@ static bool yield_codec(void)
 
 /* Buffer data for the given handle. Return the amount of data buffered
    or -1 if the handle wasn't found */
-static ssize_t buffer_handle(int handle_id)
+static ssize_t buffer_handle(int handle_id, bool force)
 {
     logf("buffer_handle(%d)", handle_id);
     struct memory_handle *h = find_handle(handle_id);
@@ -575,13 +575,17 @@ static ssize_t buffer_handle(int handle_id)
         size_t copy_n = MIN( MIN(h->filerem, conf_filechunk),
                              buffer_len - h->widx);
 
-        /* stop copying if it would overwrite the reading position
-           or the next handle */
-        if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0 || (h->next &&
-            RINGBUF_ADD_CROSS(h->widx, copy_n, (unsigned)
-                ((void *)h->next - (void *)buffer)) > 0))
+        /* stop copying if it would overwrite the reading position */
+        if (RINGBUF_ADD_CROSS(h->widx, copy_n, buf_ridx) >= 0)
             break;
 
+        /* This would read into the next handle, this is broken */
+        if (h->next && RINGBUF_ADD_CROSS(h->widx, copy_n,
+                (unsigned)((void *)h->next - (void *)buffer)) > 0) {
+            logf("Handle allocation short");
+            break;
+        }
+
         /* rc is the actual amount read */
         int rc = read(h->fd, &buffer[h->widx], copy_n);
 
@@ -611,7 +615,7 @@ static ssize_t buffer_handle(int handle_id)
          * cpu boost for this thread. If the codec's low data
          * situation was very short lived that could leave us filling
          * w/o boost */
-        if (yield_codec())
+        if (!force && yield_codec())
             break;
     }
 
@@ -733,7 +737,7 @@ static void fill_buffer(void)
     struct memory_handle *m = first_handle;
     while (queue_empty(&buffering_queue) && m) {
         if (m->filerem > 0) {
-            buffer_handle(m->id);
+            buffer_handle(m->id, false);
         }
         m = m->next;
     }
@@ -1152,7 +1156,7 @@ void buffering_thread(void)
         case Q_BUFFER_HANDLE:
             LOGFQUEUE("buffering < Q_BUFFER_HANDLE");
             queue_reply(&buffering_queue, 1);
-            buffer_handle((int)ev.data);
+            buffer_handle((int)ev.data, true);
             break;
 
         case Q_RESET_HANDLE:
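For context, a minimal self-contained sketch of the control flow this patch changes (fill, read_chunk and codec_needs_data below are illustrative stand-ins, not the real Rockbox symbols): a background fill breaks out of its copy loop to yield whenever the codec wants CPU time, while an explicit Q_BUFFER_HANDLE request now passes force == true and keeps filling until the handle is complete, which is why a codec request larger than one file chunk can no longer stall.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the buffering thread's helpers. */
static size_t read_chunk(size_t remaining, size_t chunk_size)
{
    return remaining < chunk_size ? remaining : chunk_size; /* bytes "read" */
}

static bool codec_needs_data(int iteration)
{
    return iteration == 1; /* pretend the codec asks for CPU on the 2nd pass */
}

/* Model of buffer_handle(): copy a file into the buffer chunk by chunk.
 * With force == false the loop yields (breaks) when the codec wants data;
 * with force == true it keeps going until the whole file is buffered. */
static size_t fill(size_t filerem, size_t chunk_size, bool force)
{
    size_t buffered = 0;
    for (int i = 0; filerem > 0; i++) {
        size_t rc = read_chunk(filerem, chunk_size);
        buffered += rc;
        filerem -= rc;
        if (!force && codec_needs_data(i))
            break;               /* background fill: give the codec a turn */
    }
    return buffered;
}

int main(void)
{
    printf("background fill: %zu bytes\n", fill(8192, 1024, false));
    printf("forced fill:     %zu bytes\n", fill(8192, 1024, true));
    return 0;
}
```

As the commit message notes, the trade-off of the forced path is that the buffering thread can stay busy for the whole file load before it services the next queue event.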