-rw-r--r--  apps/buffering.c  155
1 file changed, 61 insertions, 94 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index be6cf44aed..cbc47c63e7 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -145,7 +145,8 @@ static struct mutex llist_mutex SHAREDBSS_ATTR;
    This is global so that move_handle and rm_handle can invalidate it. */
 static struct memory_handle *cached_handle = NULL;
 
-static struct data_counters {
+static struct data_counters
+{
     size_t remaining;  /* Amount of data needing to be buffered */
     size_t wasted;     /* Amount of space available for freeing */
     size_t buffered;   /* Amount of data currently in the buffer */
@@ -154,7 +155,8 @@ static struct data_counters {
 
 
 /* Messages available to communicate with the buffering thread */
-enum {
+enum
+{
     Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                             used in a low buffer situation. */
     Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
@@ -286,8 +288,8 @@ static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
     /* First, will the handle wrap? */
     /* If the handle would wrap, move to the beginning of the buffer,
      * or if the data must not but would wrap, move it to the beginning */
-    if( (new_widx + sizeof(struct memory_handle) > buffer_len) ||
-        (!can_wrap && (new_widx + len > buffer_len)) ) {
+    if (new_widx + sizeof(struct memory_handle) > buffer_len ||
+        (!can_wrap && new_widx + len > buffer_len)) {
         new_widx = 0;
     }
 
@@ -622,8 +624,7 @@ static bool buffer_handle(int handle_id, size_t to_buffer)
         return true;
     }
 
-    if (h->fd < 0) /* file closed, reopen */
-    {
+    if (h->fd < 0) { /* file closed, reopen */
         if (*h->path)
             h->fd = open(h->path, O_RDONLY);
 
@@ -641,10 +642,9 @@ static bool buffer_handle(int handle_id, size_t to_buffer)
 
     trigger_cpu_boost();
 
-    if (h->type == TYPE_ID3)
-    {
-        if (!get_metadata((struct mp3entry *)(buffer + h->data), h->fd, h->path))
-        {
+    if (h->type == TYPE_ID3) {
+        if (!get_metadata((struct mp3entry *)(buffer + h->data),
+                          h->fd, h->path)) {
             /* metadata parsing failed: clear the buffer. */
             memset(buffer + h->data, 0, sizeof(struct mp3entry));
         }
@@ -665,8 +665,7 @@ static bool buffer_handle(int handle_id, size_t to_buffer)
         uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx;
         ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1;
 
-        if (overlap > 0)
-        {
+        if (overlap > 0) {
             /* read only up to available space and stop if it would overwrite
                or be on top of the reading position or the next handle */
             stop = true;
@@ -679,8 +678,7 @@ static bool buffer_handle(int handle_id, size_t to_buffer)
         /* rc is the actual amount read */
         int rc = read(h->fd, &buffer[h->widx], copy_n);
 
-        if (rc < 0)
-        {
+        if (rc < 0) {
             /* Some kind of filesystem error, maybe recoverable if not codec */
             if (h->type == TYPE_CODEC) {
                 logf("Partial codec");
@@ -703,23 +701,17 @@ static bool buffer_handle(int handle_id, size_t to_buffer)
         /* If this is a large file, see if we need to break or give the codec
          * more time */
         if (h->type == TYPE_PACKET_AUDIO &&
-            pcmbuf_is_lowdata() && !buffer_is_low())
-        {
+            pcmbuf_is_lowdata() && !buffer_is_low()) {
             sleep(1);
-        }
-        else
-        {
+        } else {
             yield();
         }
 
-        if (to_buffer == 0)
-        {
+        if (to_buffer == 0) {
             /* Normal buffering - check queue */
             if(!queue_empty(&buffering_queue))
                 break;
-        }
-        else
-        {
+        } else {
             if (to_buffer <= (size_t)rc)
                 break; /* Done */
             to_buffer -= rc;
@@ -801,9 +793,7 @@ static void shrink_handle(struct memory_handle *h)
             struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
             bmp->data = &buffer[h->data + sizeof(struct bitmap)];
         }
-    }
-    else
-    {
+    } else {
         /* only move the handle struct */
         delta = ringbuf_sub(h->ridx, h->data);
         if (!move_handle(&h, &delta, 0, true))
@@ -837,9 +827,7 @@ static bool fill_buffer(void)
 
     if (m) {
         return true;
-    }
-    else
-    {
+    } else {
         /* only spin the disk down if the filling wasn't interrupted by an
            event arriving in the queue. */
         storage_sleep();
@@ -851,7 +839,8 @@ static bool fill_buffer(void)
 /* Given a file descriptor to a bitmap file, write the bitmap data to the
    buffer, with a struct bitmap and the actual data immediately following.
    Return value is the total size (struct + data). */
-static int load_image(int fd, const char *path, struct bufopen_bitmap_data *data)
+static int load_image(int fd, const char *path,
+                      struct bufopen_bitmap_data *data)
 {
     int rc;
     struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx];
@@ -873,8 +862,7 @@ static int load_image(int fd, const char *path, struct bufopen_bitmap_data *data
                       - sizeof(struct bitmap);
 
 #ifdef HAVE_JPEG
-    if (aa != NULL)
-    {
+    if (aa != NULL) {
         lseek(fd, aa->pos, SEEK_SET);
         rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                           FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
@@ -930,14 +918,14 @@ int bufopen(const char *file, size_t offset, enum data_type type,
 
     /* No buffer refs until after the mutex_lock call! */
 
-    if (type == TYPE_ID3)
-    {
+    if (type == TYPE_ID3) {
         /* ID3 case: allocate space, init the handle and return. */
         mutex_lock(&llist_mutex);
 
-        struct memory_handle *h = add_handle(sizeof(struct mp3entry), false, true);
-        if (h)
-        {
+        struct memory_handle *h =
+            add_handle(sizeof(struct mp3entry), false, true);
+
+        if (h) {
             handle_id = h->id;
             h->fd = -1;
             h->filesize = sizeof(struct mp3entry);
@@ -973,8 +961,8 @@ int bufopen(const char *file, size_t offset, enum data_type type,
 
     size_t size = 0;
 #ifdef HAVE_ALBUMART
-    if (type == TYPE_BITMAP)
-    { /* if albumart is embedded, the complete file is not buffered,
+    if (type == TYPE_BITMAP) {
+        /* if albumart is embedded, the complete file is not buffered,
          * but only the jpeg part; filesize() would be wrong */
         struct bufopen_bitmap_data *aa = (struct bufopen_bitmap_data*)user_data;
         if (aa->embedded_albumart)
@@ -995,8 +983,7 @@ int bufopen(const char *file, size_t offset, enum data_type type,
     mutex_lock(&llist_mutex);
 
     struct memory_handle *h = add_handle(padded_size, can_wrap, false);
-    if (!h)
-    {
+    if (!h) {
         DEBUGF("%s(): failed to add handle\n", __func__);
         mutex_unlock(&llist_mutex);
         close(fd);
@@ -1011,8 +998,7 @@ int bufopen(const char *file, size_t offset, enum data_type type,
     /* Don't bother to storage align bitmaps because they are not
      * loaded directly into the buffer.
      */
-    if (type != TYPE_BITMAP)
-    {
+    if (type != TYPE_BITMAP) {
         /* Align to desired storage alignment */
         size_t alignment_pad = STORAGE_OVERLAP(adjusted_offset -
                                                (size_t)(&buffer[buf_widx]));
@@ -1028,18 +1014,14 @@ int bufopen(const char *file, size_t offset, enum data_type type,
     h->type = type;
 
 #ifdef HAVE_ALBUMART
-    if (type == TYPE_BITMAP)
-    {
+    if (type == TYPE_BITMAP) {
         /* Bitmap file: we load the data instead of the file */
         int rc;
         rc = load_image(fd, file, (struct bufopen_bitmap_data*)user_data);
-        if (rc <= 0)
-        {
+        if (rc <= 0) {
             rm_handle(h);
             handle_id = ERR_FILE_ERROR;
-        }
-        else
-        {
+        } else {
             h->filesize = rc;
             h->available = rc;
             h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
@@ -1060,19 +1042,15 @@ int bufopen(const char *file, size_t offset, enum data_type type,
 
     mutex_unlock(&llist_mutex);
 
-    if (type == TYPE_CUESHEET)
-    {
+    if (type == TYPE_CUESHEET) {
         /* Immediately start buffering those */
         LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
         queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
-    }
-    else
-    {
+    } else {
         /* Other types will get buffered in the course of normal operations */
         close(fd);
 
-        if (handle_id >= 0)
-        {
+        if (handle_id >= 0) {
             /* Inform the buffering thread that we added a handle */
             LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
             queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
@@ -1098,8 +1076,7 @@ int bufalloc(const void *src, size_t size, enum data_type type)
 
     struct memory_handle *h = add_handle(size, false, true);
 
-    if (h)
-    {
+    if (h) {
         handle_id = h->id;
 
         if (src) {
@@ -1117,7 +1094,7 @@ int bufalloc(const void *src, size_t size, enum data_type type)
         h->filesize = size;
         h->offset = 0;
         h->ridx = buf_widx;
-        h->widx = buf_widx + size; /* this is safe because the data doesn't wrap */
+        h->widx = buf_widx + size; /* safe because the data doesn't wrap */
         h->data = buf_widx;
         h->available = size;
         h->type = type;
@@ -1145,20 +1122,21 @@ bool bufclose(int handle_id)
 static void rebuffer_handle(int handle_id, size_t newpos)
 {
     struct memory_handle *h = find_handle(handle_id);
-    if (!h)
-    {
+
+    if (!h) {
         queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
         return;
     }
 
     /* When seeking foward off of the buffer, if it is a short seek attempt to
        avoid rebuffering the whole track, just read enough to satisfy */
-    if (newpos > h->offset && newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK)
-    {
+    if (newpos > h->offset &&
+        newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK) {
+
         size_t amount = newpos - h->offset;
         h->ridx = ringbuf_add(h->data, amount);
-        if (buffer_handle(handle_id, amount + 1))
-        {
+
+        if (buffer_handle(handle_id, amount + 1)) {
             queue_reply(&buffering_queue, 0);
             buffer_handle(handle_id, 0); /* Ok, try the rest */
             return;
@@ -1175,8 +1153,9 @@ static void rebuffer_handle(int handle_id, size_t newpos)
     /* Strip alignment padding then redo */
     size_t new_index = ringbuf_add(ringbuf_offset(h), sizeof (*h));
 
-    /* Align to desired storage alignment if space permits - handle could have
-       been shrunken too close to the following one after a previous rebuffer. */
+    /* Align to desired storage alignment if space permits - handle could
+       have been shrunken too close to the following one after a previous
+       rebuffer. */
     size_t alignment_pad =
         STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index]));
 
@@ -1200,8 +1179,7 @@ static void rebuffer_handle(int handle_id, size_t newpos)
     if (h->fd >= 0)
         lseek(h->fd, h->offset, SEEK_SET);
 
-    if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos)
-    {
+    if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos) {
         /* There isn't enough space to rebuffer all of the track from its new
            offset, so we ask the user to free some */
         DEBUGF("%s(): space is needed\n", __func__);
@@ -1290,8 +1268,7 @@ static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
 
     size_t avail = handle_size_available(h);
 
-    if (avail == 0 && h->filerem == 0)
-    {
+    if (avail == 0 && h->filerem == 0) {
         /* File is finished reading */
         *size = 0;
         return h;
@@ -1303,8 +1280,7 @@ static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
         realsize = avail + h->filerem;
 
     if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO
-        && realsize > GUARD_BUFSIZE)
-    {
+        && realsize > GUARD_BUFSIZE) {
         logf("data request > guardbuf");
         /* If more than the size of the guardbuf is requested and this is a
          * bufgetdata, limit to guard_bufsize over the end of the buffer */
@@ -1312,8 +1288,7 @@ static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
         /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
     }
 
-    if (h->filerem > 0 && avail < realsize)
-    {
+    if (h->filerem > 0 && avail < realsize) {
         /* Data isn't ready. Request buffering */
         buf_request_buffer_handle(handle_id);
         /* Wait for the data to be ready */
@@ -1356,15 +1331,12 @@ ssize_t bufread(int handle_id, size_t size, void *dest)
     if (!h)
         return ERR_HANDLE_NOT_FOUND;
 
-    if (h->ridx + adjusted_size > buffer_len)
-    {
+    if (h->ridx + adjusted_size > buffer_len) {
         /* the data wraps around the end of the buffer */
         size_t read = buffer_len - h->ridx;
         memcpy(dest, &buffer[h->ridx], read);
         memcpy(dest+read, buffer, adjusted_size - read);
-    }
-    else
-    {
+    } else {
         memcpy(dest, &buffer[h->ridx], adjusted_size);
     }
 
@@ -1389,12 +1361,12 @@ ssize_t bufgetdata(int handle_id, size_t size, void **data)
     if (!h)
         return ERR_HANDLE_NOT_FOUND;
 
-    if (h->ridx + adjusted_size > buffer_len)
-    {
+    if (h->ridx + adjusted_size > buffer_len) {
         /* the data wraps around the end of the buffer :
            use the guard buffer to provide the requested amount of data. */
         size_t copy_n = h->ridx + adjusted_size - buffer_len;
-        /* prep_bufdata ensures adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
+        /* prep_bufdata ensures
+           adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
            so copy_n <= GUARD_BUFSIZE */
         memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
     }
@@ -1425,8 +1397,7 @@ ssize_t bufgettail(int handle_id, size_t size, void **data)
 
     tidx = ringbuf_sub(h->widx, size);
 
-    if (tidx + size > buffer_len)
-    {
+    if (tidx + size > buffer_len) {
         size_t copy_n = tidx + size - buffer_len;
         memcpy(guard_buffer, (const unsigned char *)buffer, copy_n);
     }
@@ -1614,13 +1585,11 @@ void buffering_thread(void)
          * for simplicity until its done right */
 #if MEMORYSIZE > 8
         /* If the disk is spinning, take advantage by filling the buffer */
-        else if (storage_disk_is_active() && queue_empty(&buffering_queue))
-        {
+        else if (storage_disk_is_active() && queue_empty(&buffering_queue)) {
             if (num_handles > 0 && data_counters.useful <= high_watermark)
                 send_event(BUFFER_EVENT_BUFFER_LOW, 0);
 
-            if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
-            {
+            if (data_counters.remaining > 0 && BUF_USED <= high_watermark) {
                 /* This is a new fill, shrink the buffer up first */
                 if (!filling)
                     shrink_buffer();
@@ -1637,9 +1606,7 @@ void buffering_thread(void)
                     filling = fill_buffer();
                 else if (data_counters.remaining == 0)
                     filling = false;
-            }
-            else if (ev.id == SYS_TIMEOUT)
-            {
+            } else if (ev.id == SYS_TIMEOUT) {
                 if (data_counters.remaining > 0 &&
                     data_counters.useful <= conf_watermark) {
                     shrink_buffer();