-rw-r--r--  apps/buffering.c  243
1 file changed, 147 insertions(+), 96 deletions(-)
diff --git a/apps/buffering.c b/apps/buffering.c
index d632951f5a..347ad611f3 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -105,7 +105,7 @@
105 105
106/* assert(sizeof(struct memory_handle)%4==0) */ 106/* assert(sizeof(struct memory_handle)%4==0) */
107struct memory_handle { 107struct memory_handle {
108 int id; /* A unique ID for the handle */ 108 unsigned int id; /* A unique ID for the handle */
109 enum data_type type; /* Type of data buffered with this handle */ 109 enum data_type type; /* Type of data buffered with this handle */
110 char path[MAX_PATH]; /* Path if data originated in a file */ 110 char path[MAX_PATH]; /* Path if data originated in a file */
111 int fd; /* File descriptor to path (-1 if closed) */ 111 int fd; /* File descriptor to path (-1 if closed) */
@@ -146,7 +146,7 @@ static struct memory_handle *first_handle;
146 146
147static int num_handles; /* number of handles in the list */ 147static int num_handles; /* number of handles in the list */
148 148
149static int base_handle_id; 149static unsigned int base_handle_id;
150 150
151static struct mutex llist_mutex; 151static struct mutex llist_mutex;
152 152
@@ -210,60 +210,86 @@ buf_ridx == buf_widx means the buffer is empty.
210 210
211 211
212/* Add a new handle to the linked list and return it. It will have become the 212/* Add a new handle to the linked list and return it. It will have become the
213 new current handle. "data_size" must contain the size of what will be in the 213 new current handle.
214 handle. On return, it's the size available for the handle. */ 214 data_size must contain the size of what will be in the handle.
215static struct memory_handle *add_handle(size_t *data_size) 215 On return, it's the size available for the handle.
216 can_wrap tells us whether this type of data may wrap on buffer
217 alloc_all tells us if we must immediately be able to allocate data_size
218 */
219static struct memory_handle *add_handle(size_t data_size, const bool can_wrap,
220 const bool alloc_all)
216{ 221{
217 mutex_lock(&llist_mutex); 222 /* gives each handle a unique id, unsigned to handle wraps gracefully */
223 static unsigned int cur_handle_id = 1;
224 size_t shift;
225 size_t new_widx = buf_widx;
226 size_t len;
227 int overlap;
218 228
219 /* this will give each handle a unique id */ 229 mutex_lock(&llist_mutex);
220 static int cur_handle_id = 1;
221 230
222 /* make sure buf_widx is 32-bit aligned so that the handle struct is, 231 /* Allocate the remainder of the space for the current handle */
223 but before that we check we can actually align. */ 232 if (cur_handle)
224 if (RINGBUF_ADD_CROSS(buf_widx, 3, buf_ridx) >= 0) { 233 new_widx = RINGBUF_ADD(cur_handle->widx, cur_handle->filerem);
225 mutex_unlock(&llist_mutex); 234
226 return NULL; 235 /* align buf_widx to 4 bytes up */
236 new_widx = (RINGBUF_ADD(new_widx, 3)) & ~3;
237
238 len = data_size + sizeof(struct memory_handle);
239
240 /* First, will the handle wrap? */
241 overlap = RINGBUF_ADD_CROSS(new_widx, sizeof(struct memory_handle),
242 buffer_len - 1);
243 /* If the handle would wrap, move to the beginning of the buffer,
244 * otherwise check if the data can/would wrap and move it to the
245 * beginning if needed */
246 if (overlap > 0) {
247 new_widx = 0;
248 } else if (!can_wrap) {
249 overlap = RINGBUF_ADD_CROSS(new_widx, len, buffer_len - 1);
250 if (overlap > 0)
251 new_widx += data_size - overlap;
227 } 252 }
228 buf_widx = (RINGBUF_ADD(buf_widx, 3)) & ~3;
229 253
230 size_t len = (data_size ? *data_size : 0) 254 /* This is how far we shifted buf_widx to align things */
231 + sizeof(struct memory_handle); 255 shift = RINGBUF_SUB(new_widx, buf_widx);
232 256
233 /* check that we actually can add the handle and its data */ 257 /* How much space are we short in the actual ring buffer? */
234 int overlap = RINGBUF_ADD_CROSS(buf_widx, len, buf_ridx); 258 overlap = RINGBUF_ADD_CROSS(buf_widx, shift + len, buf_ridx);
235 if (overlap >= 0) { 259 if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
236 *data_size -= overlap; 260 /* Not enough space for required allocations */
237 len -= overlap;
238 }
239 if (len < sizeof(struct memory_handle)) {
240 /* There isn't even enough space to write the struct */
241 mutex_unlock(&llist_mutex); 261 mutex_unlock(&llist_mutex);
242 return NULL; 262 return NULL;
243 } 263 }
244 264
265 /* There is enough space for the required data, advance the buf_widx and
266 * initialize the struct */
267 buf_widx = new_widx;
268
245 struct memory_handle *new_handle = 269 struct memory_handle *new_handle =
246 (struct memory_handle *)(&buffer[buf_widx]); 270 (struct memory_handle *)(&buffer[buf_widx]);
247 271
248 /* only advance the buffer write index of the size of the struct */ 272 /* only advance the buffer write index of the size of the struct */
249 buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle)); 273 buf_widx = RINGBUF_ADD(buf_widx, sizeof(struct memory_handle));
250 274
251 if (!first_handle) { 275 new_handle->id = cur_handle_id;
276 /* Use += 2 instead of ++ to guarantee that the low bit is always high and
277 * prevent the assignment of a zero id when wrapping. */
278 cur_handle_id += 2;
279 new_handle->next = NULL;
280 num_handles++;
281
282 if (!first_handle)
252 /* the new handle is the first one */ 283 /* the new handle is the first one */
253 first_handle = new_handle; 284 first_handle = new_handle;
254 }
255 285
256 if (cur_handle) { 286 if (cur_handle)
257 cur_handle->next = new_handle; 287 cur_handle->next = new_handle;
258 }
259 288
260 cur_handle = new_handle; 289 cur_handle = new_handle;
261 cur_handle->id = cur_handle_id++;
262 cur_handle->next = NULL;
263 num_handles++;
264 290
265 mutex_unlock(&llist_mutex); 291 mutex_unlock(&llist_mutex);
266 return cur_handle; 292 return new_handle;
267} 293}
268 294
269/* Delete a given memory handle from the linked list 295/* Delete a given memory handle from the linked list
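Note: the rewritten add_handle() leans entirely on the ring-buffer helpers. As a reading aid, here is a rough sketch of the arithmetic RINGBUF_ADD and RINGBUF_ADD_CROSS are used for above (illustrative only; the actual macros in buffering.c may differ in detail):

    static size_t buffer_len;   /* file-scope in buffering.c */

    /* Advance a ring position by 'amount', wrapping at buffer_len. */
    static size_t ringbuf_add(size_t pos, size_t amount)
    {
        return (pos + amount < buffer_len) ? pos + amount
                                           : pos + amount - buffer_len;
    }

    /* Result is >= 0 when advancing 'pos' by 'amount' reaches or crosses
     * 'target' (for example the read index); callers only test the sign. */
    static int ringbuf_add_cross(size_t pos, size_t amount, size_t target)
    {
        if (pos < target)
            return (int)(pos + amount) - (int)target;
        return (int)(pos + amount - target) - (int)buffer_len;
    }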
@@ -314,7 +340,7 @@ static bool rm_handle(const struct memory_handle *h)
314 340
315/* Return a pointer to the memory handle of given ID. 341/* Return a pointer to the memory handle of given ID.
316 NULL if the handle wasn't found */ 342 NULL if the handle wasn't found */
317static struct memory_handle *find_handle(const int handle_id) 343static struct memory_handle *find_handle(const unsigned int handle_id)
318{ 344{
319 if (handle_id <= 0) 345 if (handle_id <= 0)
320 return NULL; 346 return NULL;
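Note: with an unsigned parameter, the handle_id <= 0 guard kept above is effectively an equality test against 0, the one value add_handle() never hands out:

    /* equivalent guard once handle_id is unsigned */
    if (handle_id == 0)
        return NULL;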
@@ -349,36 +375,59 @@ static struct memory_handle *find_handle(const int handle_id)
349} 375}
350 376
351/* Move a memory handle and data_size of its data of delta. 377/* Move a memory handle and data_size of its data of delta.
352 Return a pointer to the new location of the handle. 378 Return a pointer to the new location of the handle (null if it hasn't moved).
353 delta is the value of which to move the struct data. 379 delta is the value of which to move the struct data, modified to the actual
380 distance moved.
354 data_size is the amount of data to move along with the struct. */ 381 data_size is the amount of data to move along with the struct. */
355static struct memory_handle *move_handle(struct memory_handle *h, 382static struct memory_handle *move_handle(const struct memory_handle *h,
356 size_t *delta, size_t data_size) 383 size_t *delta, const size_t data_size)
357{ 384{
358 mutex_lock(&llist_mutex); 385 struct memory_handle *dest;
359 386 size_t newpos;
360 if (*delta < 4) { 387 size_t size_to_move;
361 /* aligning backwards would yield a negative result, 388 int overlap;
362 and moving the handle of such a small amount is a waste 389
363 of time anyway. */ 390 if (*delta < sizeof(struct memory_handle)) {
364 mutex_unlock(&llist_mutex); 391 /* It's not worth trying to move such a short distance, and it would
392 * complicate the overlap calculations below */
365 return NULL; 393 return NULL;
366 } 394 }
367 /* make sure delta is 32-bit aligned so that the handle struct is. */
368 *delta = (*delta - 3) & ~3;
369
370 size_t newpos = RINGBUF_ADD((void *)h - (void *)buffer, *delta);
371 395
372 struct memory_handle *dest = (struct memory_handle *)(&buffer[newpos]); 396 mutex_lock(&llist_mutex);
373
374 /* Invalidate the cache to prevent it from keeping the old location of h */
375 if (h == cached_handle)
376 cached_handle = NULL;
377 397
378 /* the cur_handle pointer might need updating */ 398 size_to_move = sizeof(struct memory_handle) + data_size;
379 if (h == cur_handle) { 399
380 cur_handle = dest; 400 /* Align to four bytes, down */
401 *delta &= ~3;
402 newpos = RINGBUF_ADD((void *)h - (void *)buffer, *delta);
403 overlap = RINGBUF_ADD_CROSS(newpos, size_to_move, buffer_len - 1);
404
405 /* This means that moving the data will put it on the wrap */
406 if (overlap > 0) {
407 /* This means that the memory_handle struct would wrap */
408 size_t correction;
409 /* If the overlap lands inside the memory_handle */
410 if ((unsigned)overlap > data_size) {
411 /* Correct the position and real delta to prevent the struct from
412 * wrapping, this guarantees an aligned delta, I think */
413 correction = overlap - data_size;
414 } else {
415 /* Otherwise it falls in the data area and must all be backed out */
416 correction = overlap;
417 /* Align to four bytes, up */
418 correction = (correction+3) & ~3;
419 if (*delta <= correction) {
420 /* After correcting, no movement (or, impossibly, backwards) */
421 mutex_unlock(&llist_mutex);
422 return NULL;
423 }
424 }
425 newpos -= correction;
426 overlap -= correction;
427 *delta -= correction;
381 } 428 }
429
430 dest = (struct memory_handle *)(&buffer[newpos]);
382 431
383 if (h == first_handle) { 432 if (h == first_handle) {
384 first_handle = dest; 433 first_handle = dest;
@@ -396,7 +445,21 @@ static struct memory_handle *move_handle(struct memory_handle *h,
396 } 445 }
397 } 446 }
398 447
399 memmove(dest, h, sizeof(struct memory_handle) + data_size); 448 /* Update the cache to prevent it from keeping the old location of h */
449 if (h == cached_handle)
450 cached_handle = dest;
451
452 /* the cur_handle pointer might need updating */
453 if (h == cur_handle)
454 cur_handle = dest;
455
456 if (overlap > 0) {
457 size_t first_part = size_to_move - overlap;
458 memmove(dest, h, first_part);
459 memmove(buffer, (char *)h + first_part, overlap);
460 } else {
461 memmove(dest, h, size_to_move);
462 }
400 463
401 mutex_unlock(&llist_mutex); 464 mutex_unlock(&llist_mutex);
402 return dest; 465 return dest;
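Note: when the destination of the move crosses the end of the buffer, the copy above is split into two memmove() calls. A compact sketch of that split, with illustrative names (the source region is assumed not to wrap):

    #include <string.h>

    /* Copy 'len' bytes within the ring buffer from offset 'from' to offset
     * 'to', where the destination may run past the end and wrap to 0. */
    static void ringbuf_move(char *buffer, size_t buffer_len,
                             size_t from, size_t to, size_t len)
    {
        int overlap = (int)(to + len) - (int)buffer_len;  /* bytes past the end */

        if (overlap > 0) {
            size_t first_part = len - (size_t)overlap;
            memmove(buffer + to, buffer + from, first_part);
            memmove(buffer, buffer + from + first_part, (size_t)overlap);
        } else {
            memmove(buffer + to, buffer + from, len);
        }
    }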
@@ -586,19 +649,21 @@ static bool close_handle(int handle_id)
586 part of its data buffer or by moving all the data. */ 649 part of its data buffer or by moving all the data. */
587static void shrink_handle(int handle_id) 650static void shrink_handle(int handle_id)
588{ 651{
652 size_t delta;
589 struct memory_handle *h = find_handle(handle_id); 653 struct memory_handle *h = find_handle(handle_id);
654
590 if (!h) 655 if (!h)
591 return; 656 return;
592 657
593 size_t delta;
594 /* The value of delta might change for alignment reasons */
595
596 if (h->next && (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET || 658 if (h->next && (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
597 h->type == TYPE_IMAGE) && h->filerem == 0 ) 659 h->type == TYPE_IMAGE) && h->filerem == 0 )
598 { 660 {
599 /* metadata handle: we can move all of it */ 661 /* metadata handle: we can move all of it */
600 delta = RINGBUF_SUB( (unsigned)((void *)h->next - (void *)buffer), 662 size_t handle_distance =
601 h->data) - h->available; 663 RINGBUF_SUB((unsigned)((void *)h->next - (void*)buffer), h->data);
664 delta = handle_distance - h->available;
665
666 /* The value of delta might change for alignment reasons */
602 h = move_handle(h, &delta, h->available); 667 h = move_handle(h, &delta, h->available);
603 if (!h) return; 668 if (!h) return;
604 669
@@ -620,6 +685,7 @@ static void shrink_handle(int handle_id)
620 delta = RINGBUF_SUB(h->ridx, h->data); 685 delta = RINGBUF_SUB(h->ridx, h->data);
621 h = move_handle(h, &delta, 0); 686 h = move_handle(h, &delta, 0);
622 if (!h) return; 687 if (!h) return;
688
623 h->data = RINGBUF_ADD(h->data, delta); 689 h->data = RINGBUF_ADD(h->data, delta);
624 h->available -= delta; 690 h->available -= delta;
625 h->offset += delta; 691 h->offset += delta;
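Note: for the metadata case in shrink_handle(), delta is simply the slack between the end of the handle's buffered data and the start of the next handle. A small sketch of that computation (illustrative names; offsets are ring positions inside buffer[]):

    /* Free gap a fully buffered metadata handle can slide forward into. */
    static size_t metadata_slide_delta(size_t next_handle_pos, size_t data_pos,
                                       size_t available, size_t buffer_len)
    {
        /* RINGBUF_SUB-style distance from the data start to the next handle */
        size_t handle_distance = (next_handle_pos >= data_pos)
            ? next_handle_pos - data_pos
            : next_handle_pos + buffer_len - data_pos;

        return handle_distance - available;   /* bytes of slack to move across */
    }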
@@ -655,17 +721,14 @@ static void fill_buffer(void)
655*/ 721*/
656static bool can_add_handle(void) 722static bool can_add_handle(void)
657{ 723{
724 /* the current handle hasn't finished buffering. We can only add
725 a new one if there is already enough free space to finish
726 the buffering. */
658 if (cur_handle && cur_handle->filerem > 0) { 727 if (cur_handle && cur_handle->filerem > 0) {
659 /* the current handle hasn't finished buffering. We can only add 728 size_t minimum_space =
660 a new one if there is already enough free space to finish 729 cur_handle->filerem + sizeof(struct memory_handle );
661 the buffering. */ 730 if (RINGBUF_ADD_CROSS(cur_handle->widx, minimum_space, buf_ridx) >= 0)
662 if (cur_handle->filerem < (buffer_len - BUF_USED)) {
663 /* Before adding the new handle we reserve some space for the
664 current one to finish buffering its data. */
665 buf_widx = RINGBUF_ADD(buf_widx, cur_handle->filerem);
666 } else {
667 return false; 731 return false;
668 }
669 } 732 }
670 733
671 return true; 734 return true;
@@ -727,16 +790,9 @@ int bufopen(const char *file, size_t offset, enum data_type type)
727 if (fd < 0) 790 if (fd < 0)
728 return -1; 791 return -1;
729 792
730 size_t size = filesize(fd) - offset; 793 size_t size = filesize(fd);
731 794
732 if (type != TYPE_AUDIO && 795 struct memory_handle *h = add_handle(size-offset, type==TYPE_AUDIO, false);
733 size + sizeof(struct memory_handle) > buffer_len - buf_widx)
734 {
735 /* for types other than audio, the data can't wrap, so we force it */
736 buf_widx = 0;
737 }
738
739 struct memory_handle *h = add_handle(&size);
740 if (!h) 796 if (!h)
741 { 797 {
742 DEBUGF("bufopen: failed to add handle\n"); 798 DEBUGF("bufopen: failed to add handle\n");
@@ -745,9 +801,8 @@ int bufopen(const char *file, size_t offset, enum data_type type)
745 } 801 }
746 802
747 strncpy(h->path, file, MAX_PATH); 803 strncpy(h->path, file, MAX_PATH);
748 h->fd = -1; 804 h->filesize = size;
749 h->filesize = filesize(fd); 805 h->filerem = size - offset;
750 h->filerem = h->filesize - offset;
751 h->offset = offset; 806 h->offset = offset;
752 h->ridx = buf_widx; 807 h->ridx = buf_widx;
753 h->widx = buf_widx; 808 h->widx = buf_widx;
@@ -755,12 +810,15 @@ int bufopen(const char *file, size_t offset, enum data_type type)
755 h->available = 0; 810 h->available = 0;
756 h->type = type; 811 h->type = type;
757 812
758 close(fd);
759
760 if (type == TYPE_CODEC || type == TYPE_CUESHEET || type == TYPE_IMAGE) { 813 if (type == TYPE_CODEC || type == TYPE_CUESHEET || type == TYPE_IMAGE) {
761 /* Immediately buffer those */ 814 h->fd = fd;
815 /* Immediately start buffering those */
762 LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE"); 816 LOGFQUEUE("? >| buffering Q_BUFFER_HANDLE");
763 queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id); 817 queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
818 } else {
819 /* Other types will get buffered in the course of normal operations */
820 h->fd = -1;
821 close(fd);
764 } 822 }
765 823
766 logf("bufopen: new hdl %d", h->id); 824 logf("bufopen: new hdl %d", h->id);
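Note: together with the bufalloc() change in the next hunk, the two new call sites illustrate how the extra add_handle() parameters are meant to be used (roughly, as they appear in this patch):

    /* bufopen(): only audio data may wrap, and the whole file
     * does not have to fit in the buffer right away. */
    h = add_handle(size - offset, type == TYPE_AUDIO, false);

    /* bufalloc(): the data must not wrap and must fit in full now. */
    h = add_handle(size, false, true);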
@@ -779,16 +837,9 @@ int bufalloc(const void *src, size_t size, enum data_type type)
779 if (!can_add_handle()) 837 if (!can_add_handle())
780 return -2; 838 return -2;
781 839
782 if (buf_widx + size + sizeof(struct memory_handle) > buffer_len) { 840 struct memory_handle *h = add_handle(size, false, true);
783 /* The data would need to wrap. */
784 DEBUGF("bufalloc: data wrap\n");
785 return -2;
786 }
787
788 size_t allocsize = size;
789 struct memory_handle *h = add_handle(&allocsize);
790 841
791 if (!h || allocsize != size) 842 if (!h)
792 return -2; 843 return -2;
793 844
794 if (src) { 845 if (src) {