diff options
author | Nicolas Pennequin <nicolas.pennequin@free.fr> | 2007-10-30 17:24:31 +0000 |
---|---|---|
committer | Nicolas Pennequin <nicolas.pennequin@free.fr> | 2007-10-30 17:24:31 +0000 |
commit | 09bce70f17614563df09dedd82cff31298fb1a09 (patch) | |
tree | a7cca91074218d2e140b528dd86e657c556aa3ec /apps | |
parent | 151b7c9038ba796cd87b6ff2904253e6a3962304 (diff) | |
download | rockbox-09bce70f17614563df09dedd82cff31298fb1a09.tar.gz rockbox-09bce70f17614563df09dedd82cff31298fb1a09.zip |
Slight rework of the buffering logic:
* Don't rely only on ata_disk_is_active, and also do buffer filling after buffer handle requests. Should fix FS#8049.
* Shrink the handles at the last possible moment. This allows more seeking without rebuffering for long tracks and minimises buffer waste.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15377 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps')
-rw-r--r-- | apps/buffering.c | 31 |
1 file changed, 14 insertions, 17 deletions
diff --git a/apps/buffering.c b/apps/buffering.c index 0325d4e4f3..65070a3556 100644 --- a/apps/buffering.c +++ b/apps/buffering.c | |||
@@ -1209,37 +1209,34 @@ void buffering_thread(void) | |||
1209 | 1209 | ||
1210 | #if MEM > 8 | 1210 | #if MEM > 8 |
1211 | /* If the disk is spinning, take advantage by filling the buffer */ | 1211 | /* If the disk is spinning, take advantage by filling the buffer */ |
1212 | if (ata_disk_is_active() && queue_empty(&buffering_queue) && | 1212 | if ((ata_disk_is_active() || ev.id == Q_BUFFER_HANDLE) && |
1213 | data_counters.remaining > 0 && | 1213 | queue_empty(&buffering_queue)) |
1214 | data_counters.buffered < high_watermark) | ||
1215 | { | 1214 | { |
1216 | fill_buffer(); | 1215 | if (data_counters.remaining > 0 && |
1217 | update_data_counters(); | 1216 | data_counters.buffered < high_watermark) |
1218 | } | 1217 | { |
1218 | fill_buffer(); | ||
1219 | update_data_counters(); | ||
1220 | } | ||
1219 | 1221 | ||
1220 | if (ata_disk_is_active() && queue_empty(&buffering_queue) && | 1222 | if (num_handles > 0 && data_counters.useful < high_watermark) |
1221 | num_handles > 0 && data_counters.useful < high_watermark) | 1223 | { |
1222 | { | 1224 | call_buffer_low_callbacks(); |
1223 | call_buffer_low_callbacks(); | 1225 | } |
1224 | } | 1226 | } |
1225 | #endif | 1227 | #endif |
1226 | 1228 | ||
1227 | if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue)) | 1229 | if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue)) |
1228 | { | 1230 | { |
1229 | if (data_counters.remaining > 0 && | 1231 | if (data_counters.remaining > 0 && |
1230 | data_counters.wasted > data_counters.buffered/2) | 1232 | data_counters.useful < conf_watermark) |
1231 | { | 1233 | { |
1232 | /* First work forward, shrinking any unmoveable handles */ | 1234 | /* First work forward, shrinking any unmoveable handles */ |
1233 | shrink_buffer(true,false); | 1235 | shrink_buffer(true,false); |
1234 | /* Then work forward following those up with moveable handles */ | 1236 | /* Then work forward following those up with moveable handles */ |
1235 | shrink_buffer(false,true); | 1237 | shrink_buffer(false,true); |
1236 | update_data_counters(); | ||
1237 | } | ||
1238 | |||
1239 | if (data_counters.remaining > 0 && | ||
1240 | data_counters.buffered < conf_watermark) | ||
1241 | { | ||
1242 | fill_buffer(); | 1238 | fill_buffer(); |
1239 | update_data_counters(); | ||
1243 | } | 1240 | } |
1244 | } | 1241 | } |
1245 | } | 1242 | } |