diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2017-04-08 18:11:25 -0400 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2017-04-08 18:32:54 -0400 |
commit | eefc7c73e2495decdc6f242515696fe0e3f85609 (patch) | |
tree | 2b0d4ceb498ee39f1c81340f315292bad5b785f2 | |
parent | 5e4532c87cf747600ec1d7ae22531e89ecdce6a4 (diff) | |
download | rockbox-eefc7c73e2495decdc6f242515696fe0e3f85609.tar.gz rockbox-eefc7c73e2495decdc6f242515696fe0e3f85609.zip |
Fix some problems with playback crashing
I'm not sure all the situations it affects, to be honest. The fix
aimed to address the strange symptom here:
http://forums.rockbox.org/index.php/topic,50793.0.html
It turns out that ringbuf_add_cross was used when handles were
butted up against one another with the first parameter equal to
the last, which it interprets as being an empty case when it should
be interpreted as full in the context in which it was used. To fix this,
introduce full/empty variants of ringbuf_add_cross and ringbuf_sub
and use them at the appropriate time.
The other way to address the problem is ensure there's always at
least a space byte between the end of one handle and the start of
another, but this makes the code a bit trickier to reason about than
using additional function variants.
bufopen() may yield after creating a handle, so do some more
locking so that the buffering thread doesn't mess things up by
moving anything or not seeing the yet-to-be-linked-in allocation.
Add an alignof() macro to use the proper method to get the alignment
of struct memory_handle. That should be useful in general anyway.
It's merely defined as __alignof__ but looks nicer.
Change-Id: If21739eaa33a4f6c084a28ee5b3c8fceecfd87ce
-rw-r--r-- | apps/buffering.c | 120 | ||||
-rw-r--r-- | firmware/export/system.h | 4 |
2 files changed, 89 insertions, 35 deletions
diff --git a/apps/buffering.c b/apps/buffering.c index 3b4afac073..de180a74af 100644 --- a/apps/buffering.c +++ b/apps/buffering.c | |||
@@ -188,7 +188,8 @@ static inline uintptr_t ringbuf_add(uintptr_t p, size_t v) | |||
188 | } | 188 | } |
189 | 189 | ||
190 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ | 190 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ |
191 | static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v) | 191 | /* Interprets p == v as empty */ |
192 | static inline uintptr_t ringbuf_sub_empty(uintptr_t p, size_t v) | ||
192 | { | 193 | { |
193 | uintptr_t res = p; | 194 | uintptr_t res = p; |
194 | if (p < v) | 195 | if (p < v) |
@@ -197,8 +198,21 @@ static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v) | |||
197 | return res - v; | 198 | return res - v; |
198 | } | 199 | } |
199 | 200 | ||
201 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ | ||
202 | /* Interprets p == v as full */ | ||
203 | static inline uintptr_t ringbuf_sub_full(uintptr_t p, size_t v) | ||
204 | { | ||
205 | uintptr_t res = p; | ||
206 | if (p <= v) | ||
207 | res += buffer_len; /* wrap */ | ||
208 | |||
209 | return res - v; | ||
210 | } | ||
211 | |||
200 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ | 212 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ |
201 | static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2) | 213 | /* Interprets p1 == p2 as empty */ |
214 | static inline ssize_t ringbuf_add_cross_empty(uintptr_t p1, size_t v, | ||
215 | uintptr_t p2) | ||
202 | { | 216 | { |
203 | ssize_t res = p1 + v - p2; | 217 | ssize_t res = p1 + v - p2; |
204 | if (p1 >= p2) /* wrap if necessary */ | 218 | if (p1 >= p2) /* wrap if necessary */ |
@@ -207,9 +221,31 @@ static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2) | |||
207 | return res; | 221 | return res; |
208 | } | 222 | } |
209 | 223 | ||
224 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ | ||
225 | /* Interprets p1 == p2 as full */ | ||
226 | static inline ssize_t ringbuf_add_cross_full(uintptr_t p1, size_t v, | ||
227 | uintptr_t p2) | ||
228 | { | ||
229 | ssize_t res = p1 + v - p2; | ||
230 | if (p1 > p2) /* wrap if necessary */ | ||
231 | res -= buffer_len; | ||
232 | |||
233 | return res; | ||
234 | } | ||
235 | |||
210 | /* Real buffer watermark */ | 236 | /* Real buffer watermark */ |
211 | #define BUF_WATERMARK MIN(conf_watermark, high_watermark) | 237 | #define BUF_WATERMARK MIN(conf_watermark, high_watermark) |
212 | 238 | ||
239 | static size_t bytes_used(void) | ||
240 | { | ||
241 | struct memory_handle *first = first_handle; | ||
242 | if (!first) { | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | return ringbuf_sub_full(cur_handle->widx, ringbuf_offset(first)); | ||
247 | } | ||
248 | |||
213 | /* | 249 | /* |
214 | LINKED LIST MANAGEMENT | 250 | LINKED LIST MANAGEMENT |
215 | ====================== | 251 | ====================== |
@@ -288,7 +324,7 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
288 | /* the current handle hasn't finished buffering. We can only add | 324 | /* the current handle hasn't finished buffering. We can only add |
289 | a new one if there is already enough free space to finish | 325 | a new one if there is already enough free space to finish |
290 | the buffering. */ | 326 | the buffering. */ |
291 | if (ringbuf_add_cross(widx, cur_total, ridx) >= 0) { | 327 | if (ringbuf_add_cross_full(widx, cur_total, ridx) > 0) { |
292 | /* Not enough space to finish allocation */ | 328 | /* Not enough space to finish allocation */ |
293 | return NULL; | 329 | return NULL; |
294 | } else { | 330 | } else { |
@@ -297,8 +333,8 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
297 | } | 333 | } |
298 | } | 334 | } |
299 | 335 | ||
300 | /* Align to pointer size up */ | 336 | /* Align to align size up */ |
301 | size_t adjust = ALIGN_UP(widx, sizeof(intptr_t)) - widx; | 337 | size_t adjust = ALIGN_UP(widx, alignof(struct memory_handle)) - widx; |
302 | size_t index = ringbuf_add(widx, adjust); | 338 | size_t index = ringbuf_add(widx, adjust); |
303 | size_t len = data_size + sizeof(struct memory_handle); | 339 | size_t len = data_size + sizeof(struct memory_handle); |
304 | 340 | ||
@@ -311,12 +347,15 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
311 | } | 347 | } |
312 | 348 | ||
313 | /* How far we shifted index to align things, must be < buffer_len */ | 349 | /* How far we shifted index to align things, must be < buffer_len */ |
314 | size_t shift = ringbuf_sub(index, widx); | 350 | size_t shift = ringbuf_sub_empty(index, widx); |
315 | 351 | ||
316 | /* How much space are we short in the actual ring buffer? */ | 352 | /* How much space are we short in the actual ring buffer? */ |
317 | ssize_t overlap = ringbuf_add_cross(widx, shift + len, ridx); | 353 | ssize_t overlap = first_handle ? |
318 | if (overlap >= 0 && | 354 | ringbuf_add_cross_full(widx, shift + len, ridx) : |
319 | ((flags & H_ALLOCALL) || (size_t)overlap >= data_size)) { | 355 | ringbuf_add_cross_empty(widx, shift + len, ridx); |
356 | |||
357 | if (overlap > 0 && | ||
358 | ((flags & H_ALLOCALL) || (size_t)overlap > data_size)) { | ||
320 | /* Not enough space for required allocations */ | 359 | /* Not enough space for required allocations */ |
321 | return NULL; | 360 | return NULL; |
322 | } | 361 | } |
@@ -430,9 +469,9 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
430 | 469 | ||
431 | size_t size_to_move = sizeof(struct memory_handle) + data_size; | 470 | size_t size_to_move = sizeof(struct memory_handle) + data_size; |
432 | 471 | ||
433 | /* Align to pointer size down */ | 472 | /* Align to align size down */ |
434 | size_t final_delta = *delta; | 473 | size_t final_delta = *delta; |
435 | final_delta = ALIGN_DOWN(final_delta, sizeof(intptr_t)); | 474 | final_delta = ALIGN_DOWN(final_delta, alignof(struct memory_handle)); |
436 | if (final_delta < sizeof(struct memory_handle)) { | 475 | if (final_delta < sizeof(struct memory_handle)) { |
437 | /* It's not legal to move less than the size of the struct */ | 476 | /* It's not legal to move less than the size of the struct */ |
438 | return false; | 477 | return false; |
@@ -440,8 +479,8 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
440 | 479 | ||
441 | uintptr_t oldpos = ringbuf_offset(src); | 480 | uintptr_t oldpos = ringbuf_offset(src); |
442 | uintptr_t newpos = ringbuf_add(oldpos, final_delta); | 481 | uintptr_t newpos = ringbuf_add(oldpos, final_delta); |
443 | intptr_t overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len); | 482 | intptr_t overlap = ringbuf_add_cross_full(newpos, size_to_move, buffer_len); |
444 | intptr_t overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len); | 483 | intptr_t overlap_old = ringbuf_add_cross_full(oldpos, size_to_move, buffer_len); |
445 | 484 | ||
446 | if (overlap > 0) { | 485 | if (overlap > 0) { |
447 | /* Some part of the struct + data would wrap, maybe ok */ | 486 | /* Some part of the struct + data would wrap, maybe ok */ |
@@ -459,8 +498,8 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
459 | correction = overlap - data_size; | 498 | correction = overlap - data_size; |
460 | } | 499 | } |
461 | if (correction) { | 500 | if (correction) { |
462 | /* Align correction to pointer size up */ | 501 | /* Align correction to align size up */ |
463 | correction = ALIGN_UP(correction, sizeof(intptr_t)); | 502 | correction = ALIGN_UP(correction, alignof(struct memory_handle)); |
464 | if (final_delta < correction + sizeof(struct memory_handle)) { | 503 | if (final_delta < correction + sizeof(struct memory_handle)) { |
465 | /* Delta cannot end up less than the size of the struct */ | 504 | /* Delta cannot end up less than the size of the struct */ |
466 | return false; | 505 | return false; |
@@ -648,19 +687,19 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
648 | { | 687 | { |
649 | /* max amount to copy */ | 688 | /* max amount to copy */ |
650 | size_t widx = h->widx; | 689 | size_t widx = h->widx; |
651 | |||
652 | ssize_t copy_n = h->filesize - h->end; | 690 | ssize_t copy_n = h->filesize - h->end; |
653 | copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK); | 691 | copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK); |
654 | copy_n = MIN(copy_n, (off_t)(buffer_len - widx)); | 692 | copy_n = MIN(copy_n, (off_t)(buffer_len - widx)); |
655 | 693 | ||
656 | uintptr_t offset = ringbuf_offset(h->next ?: first_handle); | 694 | mutex_lock(&llist_mutex); |
657 | ssize_t overlap = ringbuf_add_cross(widx, copy_n, offset); | ||
658 | 695 | ||
659 | /* read only up to available space and stop if it would overwrite | 696 | /* read only up to available space and stop if it would overwrite |
660 | the next handle; stop one byte early for last handle to avoid | 697 | the next handle; stop one byte early to avoid empty/full alias |
661 | empty/full alias */ | 698 | (or else do more complicated arithmetic to differentiate) */ |
662 | if (!h->next) | 699 | size_t next = ringbuf_offset(h->next ?: first_handle); |
663 | overlap++; | 700 | ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next); |
701 | |||
702 | mutex_unlock(&llist_mutex); | ||
664 | 703 | ||
665 | if (overlap > 0) { | 704 | if (overlap > 0) { |
666 | stop = true; | 705 | stop = true; |
@@ -745,7 +784,7 @@ static void shrink_handle(struct memory_handle *h) | |||
745 | /* data is pinned by default - if we start moving packet audio, | 784 | /* data is pinned by default - if we start moving packet audio, |
746 | the semantics will determine whether or not data is movable | 785 | the semantics will determine whether or not data is movable |
747 | but the handle will remain movable in either case */ | 786 | but the handle will remain movable in either case */ |
748 | size_t delta = ringbuf_sub(h->ridx, h->data); | 787 | size_t delta = ringbuf_sub_empty(h->ridx, h->data); |
749 | 788 | ||
750 | /* The value of delta might change for alignment reasons */ | 789 | /* The value of delta might change for alignment reasons */ |
751 | if (!move_handle(&h, &delta, 0)) | 790 | if (!move_handle(&h, &delta, 0)) |
@@ -760,7 +799,7 @@ static void shrink_handle(struct memory_handle *h) | |||
760 | 799 | ||
761 | size_t data_size = h->filesize - h->start; | 800 | size_t data_size = h->filesize - h->start; |
762 | uintptr_t handle_distance = | 801 | uintptr_t handle_distance = |
763 | ringbuf_sub(ringbuf_offset(h->next), h->data); | 802 | ringbuf_sub_empty(ringbuf_offset(h->next), h->data); |
764 | size_t delta = handle_distance - data_size; | 803 | size_t delta = handle_distance - data_size; |
765 | 804 | ||
766 | /* The value of delta might change for alignment reasons */ | 805 | /* The value of delta might change for alignment reasons */ |
@@ -800,10 +839,13 @@ static void shrink_handle(struct memory_handle *h) | |||
800 | static bool fill_buffer(void) | 839 | static bool fill_buffer(void) |
801 | { | 840 | { |
802 | logf("fill_buffer()"); | 841 | logf("fill_buffer()"); |
803 | struct memory_handle *m = first_handle; | 842 | mutex_lock(&llist_mutex); |
804 | 843 | ||
844 | struct memory_handle *m = first_handle; | ||
805 | shrink_handle(m); | 845 | shrink_handle(m); |
806 | 846 | ||
847 | mutex_unlock(&llist_mutex); | ||
848 | |||
807 | while (queue_empty(&buffering_queue) && m) { | 849 | while (queue_empty(&buffering_queue) && m) { |
808 | if (m->end < m->filesize && !buffer_handle(m->id, 0)) { | 850 | if (m->end < m->filesize && !buffer_handle(m->id, 0)) { |
809 | m = NULL; | 851 | m = NULL; |
@@ -843,7 +885,7 @@ static int load_image(int fd, const char *path, | |||
843 | #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1) | 885 | #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1) |
844 | bmp->maskdata = NULL; | 886 | bmp->maskdata = NULL; |
845 | #endif | 887 | #endif |
846 | int free = (int)MIN(buffer_len - buf_used(), buffer_len - bufidx) | 888 | int free = (int)MIN(buffer_len - bytes_used(), buffer_len - bufidx) |
847 | - sizeof(struct bitmap); | 889 | - sizeof(struct bitmap); |
848 | 890 | ||
849 | #ifdef HAVE_JPEG | 891 | #ifdef HAVE_JPEG |
@@ -1158,6 +1200,8 @@ static void rebuffer_handle(int handle_id, off_t newpos) | |||
1158 | newpos = h->filesize; /* file truncation happened above */ | 1200 | newpos = h->filesize; /* file truncation happened above */ |
1159 | } | 1201 | } |
1160 | 1202 | ||
1203 | mutex_lock(&llist_mutex); | ||
1204 | |||
1161 | size_t next = ringbuf_offset(h->next ?: first_handle); | 1205 | size_t next = ringbuf_offset(h->next ?: first_handle); |
1162 | 1206 | ||
1163 | #ifdef STORAGE_WANTS_ALIGN | 1207 | #ifdef STORAGE_WANTS_ALIGN |
@@ -1170,7 +1214,7 @@ static void rebuffer_handle(int handle_id, off_t newpos) | |||
1170 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos - | 1214 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos - |
1171 | (uintptr_t)ringbuf_ptr(new_index)); | 1215 | (uintptr_t)ringbuf_ptr(new_index)); |
1172 | 1216 | ||
1173 | if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0) | 1217 | if (ringbuf_add_cross_full(new_index, alignment_pad, next) > 0) |
1174 | alignment_pad = 0; /* Forego storage alignment this time */ | 1218 | alignment_pad = 0; /* Forego storage alignment this time */ |
1175 | 1219 | ||
1176 | new_index = ringbuf_add(new_index, alignment_pad); | 1220 | new_index = ringbuf_add(new_index, alignment_pad); |
@@ -1187,7 +1231,12 @@ static void rebuffer_handle(int handle_id, off_t newpos) | |||
1187 | lseek(h->fd, newpos, SEEK_SET); | 1231 | lseek(h->fd, newpos, SEEK_SET); |
1188 | 1232 | ||
1189 | off_t filerem = h->filesize - newpos; | 1233 | off_t filerem = h->filesize - newpos; |
1190 | if (h->next && ringbuf_add_cross(new_index, filerem, next) > 0) { | 1234 | bool send = h->next && |
1235 | ringbuf_add_cross_full(new_index, filerem, next) > 0; | ||
1236 | |||
1237 | mutex_unlock(&llist_mutex); | ||
1238 | |||
1239 | if (send) { | ||
1191 | /* There isn't enough space to rebuffer all of the track from its new | 1240 | /* There isn't enough space to rebuffer all of the track from its new |
1192 | offset, so we ask the user to free some */ | 1241 | offset, so we ask the user to free some */ |
1193 | DEBUGF("%s(): space is needed\n", __func__); | 1242 | DEBUGF("%s(): space is needed\n", __func__); |
@@ -1416,7 +1465,7 @@ ssize_t bufgettail(int handle_id, size_t size, void **data) | |||
1416 | return ERR_HANDLE_NOT_FOUND; | 1465 | return ERR_HANDLE_NOT_FOUND; |
1417 | 1466 | ||
1418 | if (h->end >= h->filesize) { | 1467 | if (h->end >= h->filesize) { |
1419 | size_t tidx = ringbuf_sub(h->widx, size); | 1468 | size_t tidx = ringbuf_sub_empty(h->widx, size); |
1420 | 1469 | ||
1421 | if (tidx + size > buffer_len) { | 1470 | if (tidx + size > buffer_len) { |
1422 | size_t copy_n = tidx + size - buffer_len; | 1471 | size_t copy_n = tidx + size - buffer_len; |
@@ -1447,7 +1496,7 @@ ssize_t bufcuttail(int handle_id, size_t size) | |||
1447 | if (available < size) | 1496 | if (available < size) |
1448 | size = available; | 1497 | size = available; |
1449 | 1498 | ||
1450 | h->widx = ringbuf_sub(h->widx, size); | 1499 | h->widx = ringbuf_sub_empty(h->widx, size); |
1451 | h->filesize -= size; | 1500 | h->filesize -= size; |
1452 | h->end -= size; | 1501 | h->end -= size; |
1453 | } else { | 1502 | } else { |
@@ -1548,11 +1597,10 @@ size_t buf_length(void) | |||
1548 | /* Return the amount of buffer space used */ | 1597 | /* Return the amount of buffer space used */ |
1549 | size_t buf_used(void) | 1598 | size_t buf_used(void) |
1550 | { | 1599 | { |
1551 | struct memory_handle *first = first_handle; | 1600 | mutex_lock(&llist_mutex); |
1552 | if (!first) | 1601 | size_t used = bytes_used(); |
1553 | return 0; | 1602 | mutex_unlock(&llist_mutex); |
1554 | 1603 | return used; | |
1555 | return ringbuf_sub(cur_handle->widx, ringbuf_offset(first)); | ||
1556 | } | 1604 | } |
1557 | 1605 | ||
1558 | void buf_set_watermark(size_t bytes) | 1606 | void buf_set_watermark(size_t bytes) |
@@ -1579,7 +1627,9 @@ static void shrink_buffer_inner(struct memory_handle *h) | |||
1579 | static void shrink_buffer(void) | 1627 | static void shrink_buffer(void) |
1580 | { | 1628 | { |
1581 | logf("shrink_buffer()"); | 1629 | logf("shrink_buffer()"); |
1630 | mutex_lock(&llist_mutex); | ||
1582 | shrink_buffer_inner(first_handle); | 1631 | shrink_buffer_inner(first_handle); |
1632 | mutex_unlock(&llist_mutex); | ||
1583 | } | 1633 | } |
1584 | 1634 | ||
1585 | static void NORETURN_ATTR buffering_thread(void) | 1635 | static void NORETURN_ATTR buffering_thread(void) |
diff --git a/firmware/export/system.h b/firmware/export/system.h index 050c3074aa..62da252e80 100644 --- a/firmware/export/system.h +++ b/firmware/export/system.h | |||
@@ -135,6 +135,10 @@ int get_cpu_boost_counter(void); | |||
135 | #define PTR_ADD(ptr, x) ((typeof(ptr))((char*)(ptr) + (x))) | 135 | #define PTR_ADD(ptr, x) ((typeof(ptr))((char*)(ptr) + (x))) |
136 | #define PTR_SUB(ptr, x) ((typeof(ptr))((char*)(ptr) - (x))) | 136 | #define PTR_SUB(ptr, x) ((typeof(ptr))((char*)(ptr) - (x))) |
137 | 137 | ||
138 | #ifndef alignof | ||
139 | #define alignof __alignof__ | ||
140 | #endif | ||
141 | |||
138 | /* Get the byte offset of a type's member */ | 142 | /* Get the byte offset of a type's member */ |
139 | #ifndef offsetof | 143 | #ifndef offsetof |
140 | #define offsetof(type, member) __builtin_offsetof(type, member) | 144 | #define offsetof(type, member) __builtin_offsetof(type, member) |