diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2017-04-13 18:53:17 -0400 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2017-05-08 06:24:02 -0400 |
commit | 65c6a14e5fa41906176f93aa04512508e719d970 (patch) | |
tree | 84be1eb41466901ceafba0b11fdcfb4fdb7dafad /apps/buffering.c | |
parent | 1b4ee1f33a7c7a7890dd343e65ec8df1531ccabf (diff) | |
download | rockbox-65c6a14e5fa41906176f93aa04512508e719d970.tar.gz rockbox-65c6a14e5fa41906176f93aa04512508e719d970.zip |
Buffering: Switch to a more general handle caching type
The previous code only sort of implemented an MRU cache: it just kept
track of the most recent access and checked the handle after it,
otherwise searching from the beginning.
Implement a true MRU list of all open handles. Handles of the current
track will tend to stay up front for faster access.
Switch to common linked list functions
Use doubly-linked lists to get insert, remove and move_handle
operations in O(1) — no more searching to find the previous handle,
which is very often required.
Smaller code too. :)
Change-Id: I9ae8f3f96d225a5d54b94133f499268007274784
Diffstat (limited to 'apps/buffering.c')
-rw-r--r-- | apps/buffering.c | 261 |
1 file changed, 113 insertions, 148 deletions
diff --git a/apps/buffering.c b/apps/buffering.c index de180a74af..09b164ea4f 100644 --- a/apps/buffering.c +++ b/apps/buffering.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "playback.h" | 37 | #include "playback.h" |
38 | #endif | 38 | #endif |
39 | #include "buffering.h" | 39 | #include "buffering.h" |
40 | #include "linked_list.h" | ||
40 | 41 | ||
41 | /* Define LOGF_ENABLE to enable logf output in this file */ | 42 | /* Define LOGF_ENABLE to enable logf output in this file */ |
42 | /* #define LOGF_ENABLE */ | 43 | /* #define LOGF_ENABLE */ |
@@ -78,7 +79,9 @@ enum handle_flags | |||
78 | }; | 79 | }; |
79 | 80 | ||
80 | struct memory_handle { | 81 | struct memory_handle { |
81 | int id; /* A unique ID for the handle */ | 82 | struct lld_node hnode; /* Handle list node (first!) */ |
83 | struct lld_node mrunode;/* MRU list node */ | ||
84 | int id; /* A unique ID for the handle (after list!) */ | ||
82 | enum data_type type; /* Type of data buffered with this handle */ | 85 | enum data_type type; /* Type of data buffered with this handle */ |
83 | uint8_t flags; /* Handle property flags */ | 86 | uint8_t flags; /* Handle property flags */ |
84 | int8_t pinned; /* Count of pinnings */ | 87 | int8_t pinned; /* Count of pinnings */ |
@@ -92,7 +95,6 @@ struct memory_handle { | |||
92 | off_t start; /* Offset at which we started reading the file */ | 95 | off_t start; /* Offset at which we started reading the file */ |
93 | off_t pos; /* Read position in file */ | 96 | off_t pos; /* Read position in file */ |
94 | off_t volatile end; /* Offset at which we stopped reading the file */ | 97 | off_t volatile end; /* Offset at which we stopped reading the file */ |
95 | struct memory_handle *next; | ||
96 | }; | 98 | }; |
97 | 99 | ||
98 | struct buf_message_data | 100 | struct buf_message_data |
@@ -110,21 +112,29 @@ static size_t buffer_len; | |||
110 | static size_t conf_watermark = 0; /* Level to trigger filebuf fill */ | 112 | static size_t conf_watermark = 0; /* Level to trigger filebuf fill */ |
111 | static size_t high_watermark = 0; /* High watermark for rebuffer */ | 113 | static size_t high_watermark = 0; /* High watermark for rebuffer */ |
112 | 114 | ||
113 | /* current memory handle in the linked list. NULL when the list is empty. */ | 115 | static struct lld_head handle_list; /* buffer-order handle list */ |
114 | static struct memory_handle *cur_handle; | 116 | static struct lld_head mru_cache; /* MRU-ordered list of handles */ |
115 | /* first memory handle in the linked list. NULL when the list is empty. */ | 117 | static int num_handles; /* number of handles in the lists */ |
116 | static struct memory_handle *first_handle; | ||
117 | |||
118 | static int num_handles; /* number of handles in the list */ | ||
119 | |||
120 | static int base_handle_id; | 118 | static int base_handle_id; |
121 | 119 | ||
122 | /* Main lock for adding / removing handles */ | 120 | /* Main lock for adding / removing handles */ |
123 | static struct mutex llist_mutex SHAREDBSS_ATTR; | 121 | static struct mutex llist_mutex SHAREDBSS_ATTR; |
124 | 122 | ||
125 | /* Handle cache (makes find_handle faster). | 123 | #define HLIST_HANDLE(node) \ |
126 | This is global so that move_handle and rm_handle can invalidate it. */ | 124 | ({ struct lld_node *__node = (node); \ |
127 | static struct memory_handle *cached_handle = NULL; | 125 | (struct memory_handle *)__node; }) |
126 | |||
127 | #define HLIST_FIRST \ | ||
128 | HLIST_HANDLE(handle_list.head) | ||
129 | |||
130 | #define HLIST_LAST \ | ||
131 | HLIST_HANDLE(handle_list.tail) | ||
132 | |||
133 | #define HLIST_NEXT(h) \ | ||
134 | HLIST_HANDLE((h)->hnode.next) | ||
135 | |||
136 | #define MRU_HANDLE(m) \ | ||
137 | container_of((m), struct memory_handle, mrunode) | ||
128 | 138 | ||
129 | static struct data_counters | 139 | static struct data_counters |
130 | { | 140 | { |
@@ -238,33 +248,38 @@ static inline ssize_t ringbuf_add_cross_full(uintptr_t p1, size_t v, | |||
238 | 248 | ||
239 | static size_t bytes_used(void) | 249 | static size_t bytes_used(void) |
240 | { | 250 | { |
241 | struct memory_handle *first = first_handle; | 251 | struct memory_handle *first = HLIST_FIRST; |
242 | if (!first) { | 252 | if (!first) { |
243 | return 0; | 253 | return 0; |
244 | } | 254 | } |
245 | 255 | ||
246 | return ringbuf_sub_full(cur_handle->widx, ringbuf_offset(first)); | 256 | return ringbuf_sub_full(HLIST_LAST->widx, ringbuf_offset(first)); |
247 | } | 257 | } |
248 | 258 | ||
249 | /* | 259 | /* |
250 | LINKED LIST MANAGEMENT | 260 | LINKED LIST MANAGEMENT |
251 | ====================== | 261 | ====================== |
252 | 262 | ||
253 | add_handle : Add a handle to the list | 263 | add_handle : Create a new handle |
254 | rm_handle : Remove a handle from the list | 264 | link_handle : Add a handle to the list |
255 | find_handle : Get a handle pointer from an ID | 265 | unlink_handle : Remove a handle from the list |
256 | move_handle : Move a handle in the buffer (with or without its data) | 266 | find_handle : Get a handle pointer from an ID |
267 | move_handle : Move a handle in the buffer (with or without its data) | ||
257 | 268 | ||
258 | These functions only handle the linked list structure. They don't touch the | 269 | These functions only handle the linked list structure. They don't touch the |
259 | contents of the struct memory_handle headers. | 270 | contents of the struct memory_handle headers. |
260 | 271 | ||
261 | The first and current (== last) handle are kept track of. | 272 | Doubly-linked list, not circular. |
262 | A new handle is added at to the end and becomes the current one. | 273 | New handles are added at the tail. |
263 | 274 | ||
264 | num_handles = N | 275 | num_handles = N |
265 | first_handle -> h0 -> h1 -> h2 -> ... hN-1 -> NULL | 276 | NULL <- h0 <-> h1 <-> h2 -> ... <- hN-1 -> NULL |
266 | ^ | 277 | head=> --------^ ^ |
267 | cur_handle -------------------------+ | 278 | tail=> -----------------------------------+ |
279 | |||
280 | MRU cache is similar except new handles are added at the head and the most- | ||
281 | recently-accessed handle is always moved to the head (if not already there). | ||
282 | |||
268 | */ | 283 | */ |
269 | 284 | ||
270 | static int next_handle_id(void) | 285 | static int next_handle_id(void) |
@@ -281,18 +296,38 @@ static int next_handle_id(void) | |||
281 | return next_hid; | 296 | return next_hid; |
282 | } | 297 | } |
283 | 298 | ||
284 | /* adds the handle to the linked list */ | 299 | /* Adds the handle to the linked list */ |
285 | static void link_cur_handle(struct memory_handle *h) | 300 | static void link_handle(struct memory_handle *h) |
286 | { | 301 | { |
287 | h->next = NULL; | 302 | lld_insert_last(&handle_list, &h->hnode); |
303 | lld_insert_first(&mru_cache, &h->mrunode); | ||
304 | num_handles++; | ||
305 | } | ||
288 | 306 | ||
289 | if (first_handle) | 307 | /* Delete a given memory handle from the linked list */ |
290 | cur_handle->next = h; | 308 | static void unlink_handle(struct memory_handle *h) |
291 | else | 309 | { |
292 | first_handle = h; /* the first one */ | 310 | lld_remove(&handle_list, &h->hnode); |
311 | lld_remove(&mru_cache, &h->mrunode); | ||
312 | num_handles--; | ||
313 | } | ||
293 | 314 | ||
294 | cur_handle = h; | 315 | /* Adjusts handle list pointers _before_ it's actually moved */ |
295 | num_handles++; | 316 | static void adjust_handle_node(struct lld_head *list, |
317 | struct lld_node *srcnode, | ||
318 | struct lld_node *destnode) | ||
319 | { | ||
320 | if (srcnode->prev) { | ||
321 | srcnode->prev->next = destnode; | ||
322 | } else { | ||
323 | list->head = destnode; | ||
324 | } | ||
325 | |||
326 | if (srcnode->next) { | ||
327 | srcnode->next->prev = destnode; | ||
328 | } else { | ||
329 | list->tail = destnode; | ||
330 | } | ||
296 | } | 331 | } |
297 | 332 | ||
298 | /* Add a new handle to the linked list and return it. It will have become the | 333 | /* Add a new handle to the linked list and return it. It will have become the |
@@ -313,11 +348,13 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
313 | size_t ridx = 0, widx = 0; | 348 | size_t ridx = 0, widx = 0; |
314 | off_t cur_total = 0; | 349 | off_t cur_total = 0; |
315 | 350 | ||
316 | if (first_handle) { | 351 | struct memory_handle *first = HLIST_FIRST; |
352 | if (first) { | ||
317 | /* Buffer is not empty */ | 353 | /* Buffer is not empty */ |
318 | ridx = ringbuf_offset(first_handle); | 354 | struct memory_handle *last = HLIST_LAST; |
319 | widx = cur_handle->data; | 355 | ridx = ringbuf_offset(first); |
320 | cur_total = cur_handle->filesize - cur_handle->start; | 356 | widx = last->data; |
357 | cur_total = last->filesize - last->start; | ||
321 | } | 358 | } |
322 | 359 | ||
323 | if (cur_total > 0) { | 360 | if (cur_total > 0) { |
@@ -350,7 +387,7 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
350 | size_t shift = ringbuf_sub_empty(index, widx); | 387 | size_t shift = ringbuf_sub_empty(index, widx); |
351 | 388 | ||
352 | /* How much space are we short in the actual ring buffer? */ | 389 | /* How much space are we short in the actual ring buffer? */ |
353 | ssize_t overlap = first_handle ? | 390 | ssize_t overlap = first ? |
354 | ringbuf_add_cross_full(widx, shift + len, ridx) : | 391 | ringbuf_add_cross_full(widx, shift + len, ridx) : |
355 | ringbuf_add_cross_empty(widx, shift + len, ridx); | 392 | ringbuf_add_cross_empty(widx, shift + len, ridx); |
356 | 393 | ||
@@ -374,80 +411,28 @@ add_handle(unsigned int flags, size_t data_size, size_t *data_out) | |||
374 | return h; | 411 | return h; |
375 | } | 412 | } |
376 | 413 | ||
377 | /* Delete a given memory handle from the linked list | 414 | /* Return a pointer to the memory handle of given ID. |
378 | and return true for success. Nothing is actually erased from memory. */ | 415 | NULL if the handle wasn't found */ |
379 | static bool rm_handle(const struct memory_handle *h) | 416 | static struct memory_handle * find_handle(int handle_id) |
380 | { | 417 | { |
381 | if (h == NULL) | 418 | struct memory_handle *h = NULL; |
382 | return true; | 419 | struct lld_node *mru = mru_cache.head; |
383 | 420 | struct lld_node *m = mru; | |
384 | struct memory_handle *m = first_handle; | ||
385 | struct memory_handle *c = cur_handle; | ||
386 | 421 | ||
387 | if (h == m) { | 422 | while (m && MRU_HANDLE(m)->id != handle_id) { |
388 | m = m->next; | 423 | m = m->next; |
389 | first_handle = m; | ||
390 | if (!m) { | ||
391 | /* h was the first and last handle: the buffer is now empty */ | ||
392 | cur_handle = NULL; | ||
393 | } | ||
394 | } else { | ||
395 | /* Find the previous handle */ | ||
396 | while (m && m->next != h) { | ||
397 | m = m->next; | ||
398 | } | ||
399 | if (m && m->next == h) { | ||
400 | m->next = h->next; | ||
401 | if (h == c) | ||
402 | cur_handle = m; | ||
403 | } else { | ||
404 | /* If we don't find ourselves, this is a seriously incoherent | ||
405 | state with a corrupted list and severe action is needed! */ | ||
406 | panicf("rm_handle fail: %d", h->id); | ||
407 | return false; | ||
408 | } | ||
409 | } | 424 | } |
410 | 425 | ||
411 | /* Invalidate the cache to prevent it from keeping the old location of h */ | 426 | if (m) { |
412 | if (h == cached_handle) | 427 | if (m != mru) { |
413 | cached_handle = NULL; | 428 | lld_remove(&mru_cache, m); |
414 | 429 | lld_insert_first(&mru_cache, m); | |
415 | num_handles--; | ||
416 | return true; | ||
417 | } | ||
418 | |||
419 | /* Return a pointer to the memory handle of given ID. | ||
420 | NULL if the handle wasn't found */ | ||
421 | static struct memory_handle *find_handle(int handle_id) | ||
422 | { | ||
423 | if (handle_id < 0 || !first_handle) | ||
424 | return NULL; | ||
425 | |||
426 | /* Simple caching because most of the time the requested handle | ||
427 | will either be the same as the last, or the one after the last */ | ||
428 | struct memory_handle *cached = cached_handle; | ||
429 | if (cached) { | ||
430 | if (cached->id == handle_id) { | ||
431 | return cached; | ||
432 | } else { | ||
433 | cached = cached->next; | ||
434 | if (cached && cached->id == handle_id) { | ||
435 | cached_handle = cached; | ||
436 | return cached; | ||
437 | } | ||
438 | } | 430 | } |
439 | } | ||
440 | 431 | ||
441 | struct memory_handle *m = first_handle; | 432 | h = MRU_HANDLE(m); |
442 | while (m && m->id != handle_id) { | ||
443 | m = m->next; | ||
444 | } | 433 | } |
445 | 434 | ||
446 | /* This condition can only be reached with !m or m->id == handle_id */ | 435 | return h; |
447 | if (m) | ||
448 | cached_handle = m; | ||
449 | |||
450 | return m; | ||
451 | } | 436 | } |
452 | 437 | ||
453 | /* Move a memory handle and data_size of its data delta bytes along the buffer. | 438 | /* Move a memory handle and data_size of its data delta bytes along the buffer. |
@@ -462,7 +447,7 @@ static struct memory_handle *find_handle(int handle_id) | |||
462 | static bool move_handle(struct memory_handle **h, size_t *delta, | 447 | static bool move_handle(struct memory_handle **h, size_t *delta, |
463 | size_t data_size) | 448 | size_t data_size) |
464 | { | 449 | { |
465 | const struct memory_handle *src; | 450 | struct memory_handle *src; |
466 | 451 | ||
467 | if (h == NULL || (src = *h) == NULL) | 452 | if (h == NULL || (src = *h) == NULL) |
468 | return false; | 453 | return false; |
@@ -512,27 +497,9 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
512 | 497 | ||
513 | struct memory_handle *dest = ringbuf_ptr(newpos); | 498 | struct memory_handle *dest = ringbuf_ptr(newpos); |
514 | 499 | ||
515 | if (src == first_handle) { | 500 | /* Adjust list pointers */ |
516 | first_handle = dest; | 501 | adjust_handle_node(&handle_list, &src->hnode, &dest->hnode); |
517 | } else { | 502 | adjust_handle_node(&mru_cache, &src->mrunode, &dest->mrunode); |
518 | struct memory_handle *m = first_handle; | ||
519 | while (m && m->next != src) { | ||
520 | m = m->next; | ||
521 | } | ||
522 | if (m && m->next == src) { | ||
523 | m->next = dest; | ||
524 | } else { | ||
525 | return false; | ||
526 | } | ||
527 | } | ||
528 | |||
529 | /* Update the cache to prevent it from keeping the old location of h */ | ||
530 | if (src == cached_handle) | ||
531 | cached_handle = dest; | ||
532 | |||
533 | /* the cur_handle pointer might need updating */ | ||
534 | if (src == cur_handle) | ||
535 | cur_handle = dest; | ||
536 | 503 | ||
537 | /* x = handle(s) following this one... | 504 | /* x = handle(s) following this one... |
538 | * ...if last handle, unmoveable if metadata, only shrinkable if audio. | 505 | * ...if last handle, unmoveable if metadata, only shrinkable if audio. |
@@ -614,7 +581,7 @@ static int update_data_counters(struct data_counters *dc) | |||
614 | struct memory_handle *m = find_handle(base_handle_id); | 581 | struct memory_handle *m = find_handle(base_handle_id); |
615 | bool is_useful = m == NULL; | 582 | bool is_useful = m == NULL; |
616 | 583 | ||
617 | for (m = first_handle; m; m = m->next) | 584 | for (m = HLIST_FIRST; m; m = HLIST_NEXT(m)) |
618 | { | 585 | { |
619 | off_t pos = m->pos; | 586 | off_t pos = m->pos; |
620 | off_t end = m->end; | 587 | off_t end = m->end; |
@@ -696,7 +663,7 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
696 | /* read only up to available space and stop if it would overwrite | 663 | /* read only up to available space and stop if it would overwrite |
697 | the next handle; stop one byte early to avoid empty/full alias | 664 | the next handle; stop one byte early to avoid empty/full alias |
698 | (or else do more complicated arithmetic to differentiate) */ | 665 | (or else do more complicated arithmetic to differentiate) */ |
699 | size_t next = ringbuf_offset(h->next ?: first_handle); | 666 | size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST); |
700 | ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next); | 667 | ssize_t overlap = ringbuf_add_cross_full(widx, copy_n, next); |
701 | 668 | ||
702 | mutex_unlock(&llist_mutex); | 669 | mutex_unlock(&llist_mutex); |
@@ -755,21 +722,17 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
755 | /* Q_CLOSE_HANDLE */ | 722 | /* Q_CLOSE_HANDLE */ |
756 | static bool close_handle(int handle_id) | 723 | static bool close_handle(int handle_id) |
757 | { | 724 | { |
758 | bool retval = true; | ||
759 | |||
760 | mutex_lock(&llist_mutex); | 725 | mutex_lock(&llist_mutex); |
761 | struct memory_handle *h = find_handle(handle_id); | 726 | struct memory_handle *h = find_handle(handle_id); |
762 | 727 | ||
763 | /* If the handle is not found, it is closed */ | 728 | /* If the handle is not found, it is closed */ |
764 | if (h) { | 729 | if (h) { |
765 | close_fd(&h->fd); | 730 | close_fd(&h->fd); |
766 | /* rm_handle returns true unless the handle somehow persists after | 731 | unlink_handle(h); |
767 | exit */ | ||
768 | retval = rm_handle(h); | ||
769 | } | 732 | } |
770 | 733 | ||
771 | mutex_unlock(&llist_mutex); | 734 | mutex_unlock(&llist_mutex); |
772 | return retval; | 735 | return true; |
773 | } | 736 | } |
774 | 737 | ||
775 | /* Free buffer space by moving the handle struct right before the useful | 738 | /* Free buffer space by moving the handle struct right before the useful |
@@ -794,12 +757,12 @@ static void shrink_handle(struct memory_handle *h) | |||
794 | h->start += delta; | 757 | h->start += delta; |
795 | } else { | 758 | } else { |
796 | /* metadata handle: we can move all of it */ | 759 | /* metadata handle: we can move all of it */ |
797 | if (h->pinned || !h->next) | 760 | if (h->pinned || !HLIST_NEXT(h)) |
798 | return; /* Pinned, last handle */ | 761 | return; /* Pinned, last handle */ |
799 | 762 | ||
800 | size_t data_size = h->filesize - h->start; | 763 | size_t data_size = h->filesize - h->start; |
801 | uintptr_t handle_distance = | 764 | uintptr_t handle_distance = |
802 | ringbuf_sub_empty(ringbuf_offset(h->next), h->data); | 765 | ringbuf_sub_empty(ringbuf_offset(HLIST_NEXT(h)), h->data); |
803 | size_t delta = handle_distance - data_size; | 766 | size_t delta = handle_distance - data_size; |
804 | 767 | ||
805 | /* The value of delta might change for alignment reasons */ | 768 | /* The value of delta might change for alignment reasons */ |
@@ -841,7 +804,7 @@ static bool fill_buffer(void) | |||
841 | logf("fill_buffer()"); | 804 | logf("fill_buffer()"); |
842 | mutex_lock(&llist_mutex); | 805 | mutex_lock(&llist_mutex); |
843 | 806 | ||
844 | struct memory_handle *m = first_handle; | 807 | struct memory_handle *m = HLIST_FIRST; |
845 | shrink_handle(m); | 808 | shrink_handle(m); |
846 | 809 | ||
847 | mutex_unlock(&llist_mutex); | 810 | mutex_unlock(&llist_mutex); |
@@ -851,7 +814,7 @@ static bool fill_buffer(void) | |||
851 | m = NULL; | 814 | m = NULL; |
852 | break; | 815 | break; |
853 | } | 816 | } |
854 | m = m->next; | 817 | m = HLIST_NEXT(m); |
855 | } | 818 | } |
856 | 819 | ||
857 | if (m) { | 820 | if (m) { |
@@ -965,7 +928,7 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
965 | h->pos = 0; | 928 | h->pos = 0; |
966 | h->end = 0; | 929 | h->end = 0; |
967 | 930 | ||
968 | link_cur_handle(h); | 931 | link_handle(h); |
969 | 932 | ||
970 | /* Inform the buffering thread that we added a handle */ | 933 | /* Inform the buffering thread that we added a handle */ |
971 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); | 934 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); |
@@ -1067,7 +1030,7 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
1067 | h->widx = data; | 1030 | h->widx = data; |
1068 | h->filesize = size; | 1031 | h->filesize = size; |
1069 | h->end = adjusted_offset; | 1032 | h->end = adjusted_offset; |
1070 | link_cur_handle(h); | 1033 | link_handle(h); |
1071 | } | 1034 | } |
1072 | 1035 | ||
1073 | mutex_unlock(&llist_mutex); | 1036 | mutex_unlock(&llist_mutex); |
@@ -1136,7 +1099,7 @@ int bufalloc(const void *src, size_t size, enum data_type type) | |||
1136 | h->pos = 0; | 1099 | h->pos = 0; |
1137 | h->end = size; | 1100 | h->end = size; |
1138 | 1101 | ||
1139 | link_cur_handle(h); | 1102 | link_handle(h); |
1140 | } | 1103 | } |
1141 | 1104 | ||
1142 | mutex_unlock(&llist_mutex); | 1105 | mutex_unlock(&llist_mutex); |
@@ -1202,7 +1165,7 @@ static void rebuffer_handle(int handle_id, off_t newpos) | |||
1202 | 1165 | ||
1203 | mutex_lock(&llist_mutex); | 1166 | mutex_lock(&llist_mutex); |
1204 | 1167 | ||
1205 | size_t next = ringbuf_offset(h->next ?: first_handle); | 1168 | size_t next = ringbuf_offset(HLIST_NEXT(h) ?: HLIST_FIRST); |
1206 | 1169 | ||
1207 | #ifdef STORAGE_WANTS_ALIGN | 1170 | #ifdef STORAGE_WANTS_ALIGN |
1208 | /* Strip alignment padding then redo */ | 1171 | /* Strip alignment padding then redo */ |
@@ -1231,7 +1194,7 @@ static void rebuffer_handle(int handle_id, off_t newpos) | |||
1231 | lseek(h->fd, newpos, SEEK_SET); | 1194 | lseek(h->fd, newpos, SEEK_SET); |
1232 | 1195 | ||
1233 | off_t filerem = h->filesize - newpos; | 1196 | off_t filerem = h->filesize - newpos; |
1234 | bool send = h->next && | 1197 | bool send = HLIST_NEXT(h) && |
1235 | ringbuf_add_cross_full(new_index, filerem, next) > 0; | 1198 | ringbuf_add_cross_full(new_index, filerem, next) > 0; |
1236 | 1199 | ||
1237 | mutex_unlock(&llist_mutex); | 1200 | mutex_unlock(&llist_mutex); |
@@ -1619,7 +1582,7 @@ static void shrink_buffer_inner(struct memory_handle *h) | |||
1619 | if (h == NULL) | 1582 | if (h == NULL) |
1620 | return; | 1583 | return; |
1621 | 1584 | ||
1622 | shrink_buffer_inner(h->next); | 1585 | shrink_buffer_inner(HLIST_NEXT(h)); |
1623 | 1586 | ||
1624 | shrink_handle(h); | 1587 | shrink_handle(h); |
1625 | } | 1588 | } |
@@ -1628,7 +1591,7 @@ static void shrink_buffer(void) | |||
1628 | { | 1591 | { |
1629 | logf("shrink_buffer()"); | 1592 | logf("shrink_buffer()"); |
1630 | mutex_lock(&llist_mutex); | 1593 | mutex_lock(&llist_mutex); |
1631 | shrink_buffer_inner(first_handle); | 1594 | shrink_buffer_inner(HLIST_FIRST); |
1632 | mutex_unlock(&llist_mutex); | 1595 | mutex_unlock(&llist_mutex); |
1633 | } | 1596 | } |
1634 | 1597 | ||
@@ -1785,16 +1748,18 @@ bool buffering_reset(char *buf, size_t buflen) | |||
1785 | send_event(BUFFER_EVENT_BUFFER_RESET, NULL); | 1748 | send_event(BUFFER_EVENT_BUFFER_RESET, NULL); |
1786 | 1749 | ||
1787 | /* If handles weren't closed above, just do it */ | 1750 | /* If handles weren't closed above, just do it */ |
1788 | while (num_handles != 0) | 1751 | struct memory_handle *h; |
1789 | bufclose(first_handle->id); | 1752 | while ((h = HLIST_FIRST)) { |
1753 | bufclose(h->id); | ||
1754 | } | ||
1790 | 1755 | ||
1791 | buffer = buf; | 1756 | buffer = buf; |
1792 | buffer_len = buflen; | 1757 | buffer_len = buflen; |
1793 | guard_buffer = buf + buflen; | 1758 | guard_buffer = buf + buflen; |
1794 | 1759 | ||
1795 | first_handle = NULL; | 1760 | lld_init(&handle_list); |
1796 | cur_handle = NULL; | 1761 | lld_init(&mru_cache); |
1797 | cached_handle = NULL; | 1762 | |
1798 | num_handles = 0; | 1763 | num_handles = 0; |
1799 | base_handle_id = -1; | 1764 | base_handle_id = -1; |
1800 | 1765 | ||