diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2013-08-26 16:49:53 -0400 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2014-04-03 02:24:03 +0200 |
commit | 36615815bf92ec0f6d4ed067689bb72f695e3bf9 (patch) | |
tree | 20452cb218953d32de0a3213186c2dca179fd4af | |
parent | bfd0179042b0b02fb88748d54e56e7e208bb117f (diff) | |
download | rockbox-36615815bf92ec0f6d4ed067689bb72f695e3bf9.tar.gz rockbox-36615815bf92ec0f6d4ed067689bb72f695e3bf9.zip |
Buffering: Remove buf_ridx and buf_widx; these data are verbose.
It is trivial to obtain all required information from the allocated
handles without maintaining global indexes. In fact, it is less
complicated and increases general thread safety.
Other miscellaneous changes (some are nice to do at this time due to
required alterations, with some particularly more relevant than others):
* Handle value 0 will no longer be returned as a valid handle but all
failures will still return a negative value. Creates consistency with
buflib and removes the need to explicitly initialize them.
* Linking a new handle is delayed until explicitly
added by the code that called add_handle, keeping it invisible
until every operation succeeds, which is safer thread-wise. If anything
fails, the handle itself may just be abandoned rather than requiring it
be freed.
* Dump the special handling that slows buffering, by calling PCM buffer
functions, when the PCM buffer is low. It doesn't seem to help much
of anything these days and it's a bit of a nasty hack to directly
tie those bits together. It can of course be put back (again!) if
there really is a need for it.
* Make data waiters ping the buffering thread more than just once if
the request is taking too long. Somehow I figured out how the requests
could get forgotten about but can't remember why months later after
making the change in my branch. :-)
* Neaten up some code by using (inline) functions and packing down
parameters; remember handle allocation and movement attributes in the
handle itself rather than figuring it out each time they're needed.
Change-Id: Ibf863370da3dd805132fc135e0ad104953365183
Reviewed-on: http://gerrit.rockbox.org/764
Reviewed-by: Michael Sevakis <jethead71@rockbox.org>
Tested: Michael Sevakis <jethead71@rockbox.org>
-rw-r--r-- | apps/buffering.c | 930 | ||||
-rw-r--r-- | apps/buffering.h | 7 |
2 files changed, 457 insertions, 480 deletions
diff --git a/apps/buffering.c b/apps/buffering.c index 326228cbfa..abf1e4b821 100644 --- a/apps/buffering.c +++ b/apps/buffering.c | |||
@@ -18,44 +18,23 @@ | |||
18 | * KIND, either express or implied. | 18 | * KIND, either express or implied. |
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | |||
22 | #include "config.h" | 21 | #include "config.h" |
23 | #include <stdio.h> | ||
24 | #include <string.h> | ||
25 | #include <stdlib.h> | ||
26 | #include <ctype.h> | ||
27 | #include <inttypes.h> | ||
28 | #include "buffering.h" | ||
29 | |||
30 | #include "storage.h" | ||
31 | #include "system.h" | 22 | #include "system.h" |
23 | #include "storage.h" | ||
32 | #include "thread.h" | 24 | #include "thread.h" |
33 | #include "file.h" | ||
34 | #include "panic.h" | ||
35 | #include "lcd.h" | ||
36 | #include "font.h" | ||
37 | #include "button.h" | ||
38 | #include "kernel.h" | 25 | #include "kernel.h" |
39 | #include "tree.h" | 26 | #include "panic.h" |
40 | #include "debug.h" | 27 | #include "debug.h" |
41 | #include "settings.h" | 28 | #include "file.h" |
42 | #include "codecs.h" | ||
43 | #include "audio.h" | ||
44 | #include "mp3_playback.h" | ||
45 | #include "usb.h" | ||
46 | #include "screens.h" | ||
47 | #include "playlist.h" | ||
48 | #include "pcmbuf.h" | ||
49 | #include "appevents.h" | 29 | #include "appevents.h" |
50 | #include "metadata.h" | 30 | #include "metadata.h" |
31 | #include "bmp.h" | ||
51 | #ifdef HAVE_ALBUMART | 32 | #ifdef HAVE_ALBUMART |
52 | #include "albumart.h" | 33 | #include "albumart.h" |
53 | #include "jpeg_load.h" | 34 | #include "jpeg_load.h" |
54 | #include "bmp.h" | ||
55 | #include "playback.h" | 35 | #include "playback.h" |
56 | #endif | 36 | #endif |
57 | 37 | #include "buffering.h" | |
58 | #define GUARD_BUFSIZE (32*1024) | ||
59 | 38 | ||
60 | /* Define LOGF_ENABLE to enable logf output in this file */ | 39 | /* Define LOGF_ENABLE to enable logf output in this file */ |
61 | /* #define LOGF_ENABLE */ | 40 | /* #define LOGF_ENABLE */ |
@@ -82,31 +61,37 @@ | |||
82 | #define LOGFQUEUE_SYS_TIMEOUT(...) | 61 | #define LOGFQUEUE_SYS_TIMEOUT(...) |
83 | #endif | 62 | #endif |
84 | 63 | ||
64 | #define GUARD_BUFSIZE (32*1024) | ||
65 | |||
85 | /* amount of data to read in one read() call */ | 66 | /* amount of data to read in one read() call */ |
86 | #define BUFFERING_DEFAULT_FILECHUNK (1024*32) | 67 | #define BUFFERING_DEFAULT_FILECHUNK (1024*32) |
87 | 68 | ||
88 | #define BUF_HANDLE_MASK 0x7FFFFFFF | 69 | #define BUF_HANDLE_MASK 0x7FFFFFFF |
89 | 70 | ||
71 | enum handle_flags | ||
72 | { | ||
73 | H_CANWRAP = 0x1, /* Handle data may wrap in buffer */ | ||
74 | H_ALLOCALL = 0x2, /* All data must be allocated up front */ | ||
75 | H_FIXEDDATA = 0x4, /* Data is fixed in position */ | ||
76 | }; | ||
90 | 77 | ||
91 | /* assert(sizeof(struct memory_handle)%4==0) */ | ||
92 | struct memory_handle { | 78 | struct memory_handle { |
93 | int id; /* A unique ID for the handle */ | 79 | int id; /* A unique ID for the handle */ |
94 | enum data_type type; /* Type of data buffered with this handle */ | 80 | enum data_type type; /* Type of data buffered with this handle */ |
95 | int8_t pinned; /* Count of references */ | 81 | uint8_t flags; /* Handle property flags */ |
96 | int8_t signaled; /* Stop any attempt at waiting to get the data */ | 82 | int8_t pinned; /* Count of pinnings */ |
97 | char path[MAX_PATH]; /* Path if data originated in a file */ | 83 | int8_t signaled; /* Stop any attempt at waiting to get the data */ |
98 | int fd; /* File descriptor to path (-1 if closed) */ | 84 | char path[MAX_PATH]; /* Path if data originated in a file */ |
99 | size_t data; /* Start index of the handle's data buffer */ | 85 | int fd; /* File descriptor to path (-1 if closed) */ |
100 | volatile size_t ridx; /* Read pointer, relative to the main buffer */ | 86 | size_t data; /* Start index of the handle's data buffer */ |
101 | size_t widx; /* Write pointer, relative to the main buffer */ | 87 | size_t ridx; /* Read pointer, relative to the main buffer */ |
102 | size_t filesize; /* File total length */ | 88 | size_t widx; /* Write pointer, relative to the main buffer */ |
103 | size_t filerem; /* Remaining bytes of file NOT in buffer */ | 89 | ssize_t filesize; /* File total length */ |
104 | volatile size_t available; /* Available bytes to read from buffer */ | 90 | off_t start; /* Offset at which we started reading the file */ |
105 | size_t offset; /* Offset at which we started reading the file */ | 91 | off_t pos; /* Read position in file */ |
92 | off_t volatile end; /* Offset at which we stopped reading the file */ | ||
106 | struct memory_handle *next; | 93 | struct memory_handle *next; |
107 | }; | 94 | }; |
108 | /* invariant: filesize == offset + available + filerem */ | ||
109 | |||
110 | 95 | ||
111 | struct buf_message_data | 96 | struct buf_message_data |
112 | { | 97 | { |
@@ -119,10 +104,6 @@ static char *guard_buffer; | |||
119 | 104 | ||
120 | static size_t buffer_len; | 105 | static size_t buffer_len; |
121 | 106 | ||
122 | static volatile size_t buf_widx; /* current writing position */ | ||
123 | static volatile size_t buf_ridx; /* current reading position */ | ||
124 | /* buf_*idx are values relative to the buffer, not real pointers. */ | ||
125 | |||
126 | /* Configuration */ | 107 | /* Configuration */ |
127 | static size_t conf_watermark = 0; /* Level to trigger filebuf fill */ | 108 | static size_t conf_watermark = 0; /* Level to trigger filebuf fill */ |
128 | static size_t high_watermark = 0; /* High watermark for rebuffer */ | 109 | static size_t high_watermark = 0; /* High watermark for rebuffer */ |
@@ -146,7 +127,6 @@ static struct memory_handle *cached_handle = NULL; | |||
146 | static struct data_counters | 127 | static struct data_counters |
147 | { | 128 | { |
148 | size_t remaining; /* Amount of data needing to be buffered */ | 129 | size_t remaining; /* Amount of data needing to be buffered */ |
149 | size_t wasted; /* Amount of space available for freeing */ | ||
150 | size_t buffered; /* Amount of data currently in the buffer */ | 130 | size_t buffered; /* Amount of data currently in the buffer */ |
151 | size_t useful; /* Amount of data still useful to the user */ | 131 | size_t useful; /* Amount of data still useful to the user */ |
152 | } data_counters; | 132 | } data_counters; |
@@ -176,13 +156,24 @@ static unsigned int buffering_thread_id = 0; | |||
176 | static struct event_queue buffering_queue SHAREDBSS_ATTR; | 156 | static struct event_queue buffering_queue SHAREDBSS_ATTR; |
177 | static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR; | 157 | static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR; |
178 | 158 | ||
179 | 159 | static void close_fd(int *fd_p) | |
160 | { | ||
161 | int fd = *fd_p; | ||
162 | if (fd >= 0) { | ||
163 | close(fd); | ||
164 | *fd_p = -1; | ||
165 | } | ||
166 | } | ||
180 | 167 | ||
181 | /* Ring buffer helper functions */ | 168 | /* Ring buffer helper functions */ |
169 | static inline void * ringbuf_ptr(uintptr_t p) | ||
170 | { | ||
171 | return buffer + p; | ||
172 | } | ||
182 | 173 | ||
183 | static inline uintptr_t ringbuf_offset(const void *ptr) | 174 | static inline uintptr_t ringbuf_offset(const void *ptr) |
184 | { | 175 | { |
185 | return (uintptr_t)(ptr - (void*)buffer); | 176 | return (uintptr_t)(ptr - (void *)buffer); |
186 | } | 177 | } |
187 | 178 | ||
188 | /* Buffer pointer (p) plus value (v), wrapped if necessary */ | 179 | /* Buffer pointer (p) plus value (v), wrapped if necessary */ |
@@ -194,7 +185,6 @@ static inline uintptr_t ringbuf_add(uintptr_t p, size_t v) | |||
194 | return res; | 185 | return res; |
195 | } | 186 | } |
196 | 187 | ||
197 | |||
198 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ | 188 | /* Buffer pointer (p) minus value (v), wrapped if necessary */ |
199 | static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v) | 189 | static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v) |
200 | { | 190 | { |
@@ -205,7 +195,6 @@ static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v) | |||
205 | return res - v; | 195 | return res - v; |
206 | } | 196 | } |
207 | 197 | ||
208 | |||
209 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ | 198 | /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */ |
210 | static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2) | 199 | static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2) |
211 | { | 200 | { |
@@ -216,9 +205,6 @@ static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2) | |||
216 | return res; | 205 | return res; |
217 | } | 206 | } |
218 | 207 | ||
219 | /* Bytes available in the buffer */ | ||
220 | #define BUF_USED ringbuf_sub(buf_widx, buf_ridx) | ||
221 | |||
222 | /* Real buffer watermark */ | 208 | /* Real buffer watermark */ |
223 | #define BUF_WATERMARK MIN(conf_watermark, high_watermark) | 209 | #define BUF_WATERMARK MIN(conf_watermark, high_watermark) |
224 | 210 | ||
@@ -232,112 +218,119 @@ find_handle : Get a handle pointer from an ID | |||
232 | move_handle : Move a handle in the buffer (with or without its data) | 218 | move_handle : Move a handle in the buffer (with or without its data) |
233 | 219 | ||
234 | These functions only handle the linked list structure. They don't touch the | 220 | These functions only handle the linked list structure. They don't touch the |
235 | contents of the struct memory_handle headers. They also change the buf_*idx | 221 | contents of the struct memory_handle headers. |
236 | pointers when necessary and manage the handle IDs. | ||
237 | 222 | ||
238 | The first and current (== last) handle are kept track of. | 223 | The first and current (== last) handle are kept track of. |
239 | A new handle is added at buf_widx and becomes the current one. | 224 | A new handle is added at to the end and becomes the current one. |
240 | buf_widx always points to the current writing position for the current handle | 225 | |
241 | buf_ridx always points to the location of the first handle. | 226 | num_handles = N |
242 | buf_ridx == buf_widx means the buffer is empty. | 227 | first_handle -> h0 -> h1 -> h2 -> ... hN-1 -> NULL |
228 | ^ | ||
229 | cur_handle -------------------------+ | ||
243 | */ | 230 | */ |
244 | 231 | ||
232 | static int next_handle_id(void) | ||
233 | { | ||
234 | static int cur_handle_id = 0; | ||
235 | |||
236 | /* Wrap signed int is safe and 0 doesn't happen */ | ||
237 | int next_hid = (cur_handle_id + 1) & BUF_HANDLE_MASK; | ||
238 | if (next_hid == 0) | ||
239 | next_hid = 1; | ||
240 | |||
241 | cur_handle_id = next_hid; | ||
242 | |||
243 | return next_hid; | ||
244 | } | ||
245 | |||
246 | /* adds the handle to the linked list */ | ||
247 | static void link_cur_handle(struct memory_handle *h) | ||
248 | { | ||
249 | h->next = NULL; | ||
250 | |||
251 | if (first_handle) | ||
252 | cur_handle->next = h; | ||
253 | else | ||
254 | first_handle = h; /* the first one */ | ||
255 | |||
256 | cur_handle = h; | ||
257 | num_handles++; | ||
258 | } | ||
245 | 259 | ||
246 | /* Add a new handle to the linked list and return it. It will have become the | 260 | /* Add a new handle to the linked list and return it. It will have become the |
247 | new current handle. | 261 | new current handle. |
262 | flags contains information on how this may be allocated | ||
248 | data_size must contain the size of what will be in the handle. | 263 | data_size must contain the size of what will be in the handle. |
249 | can_wrap tells us whether this type of data may wrap on buffer | 264 | widx_out points to variable to receive first available byte of data area |
250 | alloc_all tells us if we must immediately be able to allocate data_size | ||
251 | returns a valid memory handle if all conditions for allocation are met. | 265 | returns a valid memory handle if all conditions for allocation are met. |
252 | NULL if there memory_handle itself cannot be allocated or if the | 266 | NULL if there memory_handle itself cannot be allocated or if the |
253 | data_size cannot be allocated and alloc_all is set. */ | 267 | data_size cannot be allocated and alloc_all is set. */ |
254 | static struct memory_handle *add_handle(size_t data_size, bool can_wrap, | 268 | static struct memory_handle * |
255 | bool alloc_all) | 269 | add_handle(unsigned int flags, size_t data_size, size_t *data_out) |
256 | { | 270 | { |
257 | /* gives each handle a unique id */ | 271 | /* Gives each handle a unique id */ |
258 | static int cur_handle_id = 0; | ||
259 | size_t shift; | ||
260 | size_t widx, new_widx; | ||
261 | size_t len; | ||
262 | ssize_t overlap; | ||
263 | |||
264 | if (num_handles >= BUF_MAX_HANDLES) | 272 | if (num_handles >= BUF_MAX_HANDLES) |
265 | return NULL; | 273 | return NULL; |
266 | 274 | ||
267 | widx = buf_widx; | 275 | size_t ridx = 0, widx = 0; |
276 | off_t cur_total = 0; | ||
277 | |||
278 | if (first_handle) { | ||
279 | /* Buffer is not empty */ | ||
280 | ridx = ringbuf_offset(first_handle); | ||
281 | widx = cur_handle->data; | ||
282 | cur_total = cur_handle->filesize - cur_handle->start; | ||
283 | } | ||
268 | 284 | ||
269 | if (cur_handle && cur_handle->filerem > 0) { | 285 | if (cur_total > 0) { |
270 | /* the current handle hasn't finished buffering. We can only add | 286 | /* the current handle hasn't finished buffering. We can only add |
271 | a new one if there is already enough free space to finish | 287 | a new one if there is already enough free space to finish |
272 | the buffering. */ | 288 | the buffering. */ |
273 | size_t req = cur_handle->filerem; | 289 | if (ringbuf_add_cross(widx, cur_total, ridx) >= 0) { |
274 | if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) { | ||
275 | /* Not enough space to finish allocation */ | 290 | /* Not enough space to finish allocation */ |
276 | return NULL; | 291 | return NULL; |
277 | } else { | 292 | } else { |
278 | /* Allocate the remainder of the space for the current handle */ | 293 | /* Apply all the needed reserve */ |
279 | widx = ringbuf_add(cur_handle->widx, cur_handle->filerem); | 294 | widx = ringbuf_add(widx, cur_total); |
280 | } | 295 | } |
281 | } | 296 | } |
282 | 297 | ||
283 | /* align to 4 bytes up always leaving a gap */ | 298 | /* Align to pointer size up */ |
284 | new_widx = ringbuf_add(widx, 4) & ~3; | 299 | size_t adjust = ALIGN_UP(widx, sizeof(intptr_t)) - widx; |
285 | 300 | size_t index = ringbuf_add(widx, adjust); | |
286 | len = data_size + sizeof(struct memory_handle); | 301 | size_t len = data_size + sizeof(struct memory_handle); |
287 | 302 | ||
288 | /* First, will the handle wrap? */ | 303 | /* First, will the handle wrap? */ |
289 | /* If the handle would wrap, move to the beginning of the buffer, | 304 | /* If the handle would wrap, move to the beginning of the buffer, |
290 | * or if the data must not but would wrap, move it to the beginning */ | 305 | * or if the data must not but would wrap, move it to the beginning */ |
291 | if (new_widx + sizeof(struct memory_handle) > buffer_len || | 306 | if (index + sizeof(struct memory_handle) > buffer_len || |
292 | (!can_wrap && new_widx + len > buffer_len)) { | 307 | (!(flags & H_CANWRAP) && index + len > buffer_len)) { |
293 | new_widx = 0; | 308 | index = 0; |
294 | } | 309 | } |
295 | 310 | ||
296 | /* How far we shifted the new_widx to align things, must be < buffer_len */ | 311 | /* How far we shifted index to align things, must be < buffer_len */ |
297 | shift = ringbuf_sub(new_widx, widx); | 312 | size_t shift = ringbuf_sub(index, widx); |
298 | 313 | ||
299 | /* How much space are we short in the actual ring buffer? */ | 314 | /* How much space are we short in the actual ring buffer? */ |
300 | overlap = ringbuf_add_cross(widx, shift + len, buf_ridx); | 315 | ssize_t overlap = ringbuf_add_cross(widx, shift + len, ridx); |
301 | if (overlap >= 0 && (alloc_all || (size_t)overlap >= data_size)) { | 316 | if (overlap >= 0 && |
317 | ((flags & H_ALLOCALL) || (size_t)overlap >= data_size)) { | ||
302 | /* Not enough space for required allocations */ | 318 | /* Not enough space for required allocations */ |
303 | return NULL; | 319 | return NULL; |
304 | } | 320 | } |
305 | 321 | ||
306 | /* There is enough space for the required data, advance the buf_widx and | 322 | /* There is enough space for the required data, initialize the struct */ |
307 | * initialize the struct */ | 323 | struct memory_handle *h = ringbuf_ptr(index); |
308 | buf_widx = new_widx; | ||
309 | |||
310 | struct memory_handle *new_handle = | ||
311 | (struct memory_handle *)(&buffer[buf_widx]); | ||
312 | |||
313 | /* Prevent buffering thread from looking at it */ | ||
314 | new_handle->filerem = 0; | ||
315 | |||
316 | /* Handle can be moved by default */ | ||
317 | new_handle->pinned = 0; | ||
318 | |||
319 | /* Handle data can be waited for by default */ | ||
320 | new_handle->signaled = 0; | ||
321 | 324 | ||
322 | /* only advance the buffer write index of the size of the struct */ | 325 | h->id = next_handle_id(); |
323 | buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle)); | 326 | h->flags = flags; |
327 | h->pinned = 0; /* Can be moved */ | ||
328 | h->signaled = 0; /* Data can be waited for */ | ||
324 | 329 | ||
325 | new_handle->id = cur_handle_id; | 330 | /* Return the start of the data area */ |
326 | /* Wrap signed int is safe and 0 doesn't happen */ | 331 | *data_out = ringbuf_add(index, sizeof (struct memory_handle)); |
327 | cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK; | ||
328 | new_handle->next = NULL; | ||
329 | num_handles++; | ||
330 | |||
331 | if (!first_handle) | ||
332 | /* the new handle is the first one */ | ||
333 | first_handle = new_handle; | ||
334 | 332 | ||
335 | if (cur_handle) | 333 | return h; |
336 | cur_handle->next = new_handle; | ||
337 | |||
338 | cur_handle = new_handle; | ||
339 | |||
340 | return new_handle; | ||
341 | } | 334 | } |
342 | 335 | ||
343 | /* Delete a given memory handle from the linked list | 336 | /* Delete a given memory handle from the linked list |
@@ -347,28 +340,25 @@ static bool rm_handle(const struct memory_handle *h) | |||
347 | if (h == NULL) | 340 | if (h == NULL) |
348 | return true; | 341 | return true; |
349 | 342 | ||
350 | if (h == first_handle) { | 343 | struct memory_handle *m = first_handle; |
351 | first_handle = h->next; | 344 | struct memory_handle *c = cur_handle; |
352 | if (h == cur_handle) { | 345 | |
346 | if (h == m) { | ||
347 | m = m->next; | ||
348 | first_handle = m; | ||
349 | if (!m) { | ||
353 | /* h was the first and last handle: the buffer is now empty */ | 350 | /* h was the first and last handle: the buffer is now empty */ |
354 | cur_handle = NULL; | 351 | cur_handle = NULL; |
355 | buf_ridx = buf_widx = 0; | ||
356 | } else { | ||
357 | /* update buf_ridx to point to the new first handle */ | ||
358 | buf_ridx = (size_t)ringbuf_offset(first_handle); | ||
359 | } | 352 | } |
360 | } else { | 353 | } else { |
361 | struct memory_handle *m = first_handle; | ||
362 | /* Find the previous handle */ | 354 | /* Find the previous handle */ |
363 | while (m && m->next != h) { | 355 | while (m && m->next != h) { |
364 | m = m->next; | 356 | m = m->next; |
365 | } | 357 | } |
366 | if (m && m->next == h) { | 358 | if (m && m->next == h) { |
367 | m->next = h->next; | 359 | m->next = h->next; |
368 | if (h == cur_handle) { | 360 | if (h == c) |
369 | cur_handle = m; | 361 | cur_handle = m; |
370 | buf_widx = cur_handle->widx; | ||
371 | } | ||
372 | } else { | 362 | } else { |
373 | /* If we don't find ourselves, this is a seriously incoherent | 363 | /* If we don't find ourselves, this is a seriously incoherent |
374 | state with a corrupted list and severe action is needed! */ | 364 | state with a corrupted list and severe action is needed! */ |
@@ -392,15 +382,18 @@ static struct memory_handle *find_handle(int handle_id) | |||
392 | if (handle_id < 0 || !first_handle) | 382 | if (handle_id < 0 || !first_handle) |
393 | return NULL; | 383 | return NULL; |
394 | 384 | ||
395 | /* simple caching because most of the time the requested handle | 385 | /* Simple caching because most of the time the requested handle |
396 | will either be the same as the last, or the one after the last */ | 386 | will either be the same as the last, or the one after the last */ |
397 | if (cached_handle) { | 387 | struct memory_handle *cached = cached_handle; |
398 | if (cached_handle->id == handle_id) { | 388 | if (cached) { |
399 | return cached_handle; | 389 | if (cached->id == handle_id) { |
400 | } else if (cached_handle->next && | 390 | return cached; |
401 | (cached_handle->next->id == handle_id)) { | 391 | } else { |
402 | cached_handle = cached_handle->next; | 392 | cached = cached->next; |
403 | return cached_handle; | 393 | if (cached && cached->id == handle_id) { |
394 | cached_handle = cached; | ||
395 | return cached; | ||
396 | } | ||
404 | } | 397 | } |
405 | } | 398 | } |
406 | 399 | ||
@@ -408,6 +401,7 @@ static struct memory_handle *find_handle(int handle_id) | |||
408 | while (m && m->id != handle_id) { | 401 | while (m && m->id != handle_id) { |
409 | m = m->next; | 402 | m = m->next; |
410 | } | 403 | } |
404 | |||
411 | /* This condition can only be reached with !m or m->id == handle_id */ | 405 | /* This condition can only be reached with !m or m->id == handle_id */ |
412 | if (m) | 406 | if (m) |
413 | cached_handle = m; | 407 | cached_handle = m; |
@@ -425,36 +419,33 @@ static struct memory_handle *find_handle(int handle_id) | |||
425 | list for adjustment. This function has no side effects if false | 419 | list for adjustment. This function has no side effects if false |
426 | is returned. */ | 420 | is returned. */ |
427 | static bool move_handle(struct memory_handle **h, size_t *delta, | 421 | static bool move_handle(struct memory_handle **h, size_t *delta, |
428 | size_t data_size, bool can_wrap) | 422 | size_t data_size) |
429 | { | 423 | { |
430 | struct memory_handle *dest; | ||
431 | const struct memory_handle *src; | 424 | const struct memory_handle *src; |
432 | size_t final_delta = *delta, size_to_move; | ||
433 | uintptr_t oldpos, newpos; | ||
434 | intptr_t overlap, overlap_old; | ||
435 | 425 | ||
436 | if (h == NULL || (src = *h) == NULL) | 426 | if (h == NULL || (src = *h) == NULL) |
437 | return false; | 427 | return false; |
438 | 428 | ||
439 | size_to_move = sizeof(struct memory_handle) + data_size; | 429 | size_t size_to_move = sizeof(struct memory_handle) + data_size; |
440 | 430 | ||
441 | /* Align to four bytes, down */ | 431 | /* Align to pointer size down */ |
442 | final_delta &= ~3; | 432 | size_t final_delta = *delta; |
433 | final_delta = ALIGN_DOWN(final_delta, sizeof(intptr_t)); | ||
443 | if (final_delta < sizeof(struct memory_handle)) { | 434 | if (final_delta < sizeof(struct memory_handle)) { |
444 | /* It's not legal to move less than the size of the struct */ | 435 | /* It's not legal to move less than the size of the struct */ |
445 | return false; | 436 | return false; |
446 | } | 437 | } |
447 | 438 | ||
448 | oldpos = ringbuf_offset(src); | 439 | uintptr_t oldpos = ringbuf_offset(src); |
449 | newpos = ringbuf_add(oldpos, final_delta); | 440 | uintptr_t newpos = ringbuf_add(oldpos, final_delta); |
450 | overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len); | 441 | intptr_t overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len); |
451 | overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len); | 442 | intptr_t overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len); |
452 | 443 | ||
453 | if (overlap > 0) { | 444 | if (overlap > 0) { |
454 | /* Some part of the struct + data would wrap, maybe ok */ | 445 | /* Some part of the struct + data would wrap, maybe ok */ |
455 | ssize_t correction = 0; | 446 | ssize_t correction = 0; |
456 | /* If the overlap lands inside the memory_handle */ | 447 | /* If the overlap lands inside the memory_handle */ |
457 | if (!can_wrap) { | 448 | if (!(src->flags & H_CANWRAP)) { |
458 | /* Otherwise the overlap falls in the data area and must all be | 449 | /* Otherwise the overlap falls in the data area and must all be |
459 | * backed out. This may become conditional if ever we move | 450 | * backed out. This may become conditional if ever we move |
460 | * data that is allowed to wrap (ie audio) */ | 451 | * data that is allowed to wrap (ie audio) */ |
@@ -466,8 +457,8 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
466 | correction = overlap - data_size; | 457 | correction = overlap - data_size; |
467 | } | 458 | } |
468 | if (correction) { | 459 | if (correction) { |
469 | /* Align correction to four bytes up */ | 460 | /* Align correction to pointer size up */ |
470 | correction = (correction + 3) & ~3; | 461 | correction = ALIGN_UP(correction, sizeof(intptr_t)); |
471 | if (final_delta < correction + sizeof(struct memory_handle)) { | 462 | if (final_delta < correction + sizeof(struct memory_handle)) { |
472 | /* Delta cannot end up less than the size of the struct */ | 463 | /* Delta cannot end up less than the size of the struct */ |
473 | return false; | 464 | return false; |
@@ -478,11 +469,10 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
478 | } | 469 | } |
479 | } | 470 | } |
480 | 471 | ||
481 | dest = (struct memory_handle *)(&buffer[newpos]); | 472 | struct memory_handle *dest = ringbuf_ptr(newpos); |
482 | 473 | ||
483 | if (src == first_handle) { | 474 | if (src == first_handle) { |
484 | first_handle = dest; | 475 | first_handle = dest; |
485 | buf_ridx = newpos; | ||
486 | } else { | 476 | } else { |
487 | struct memory_handle *m = first_handle; | 477 | struct memory_handle *m = first_handle; |
488 | while (m && m->next != src) { | 478 | while (m && m->next != src) { |
@@ -533,7 +523,7 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
533 | */ | 523 | */ |
534 | if (overlap_old > 0) { | 524 | if (overlap_old > 0) { |
535 | /* Move over already wrapped data by the final delta */ | 525 | /* Move over already wrapped data by the final delta */ |
536 | memmove(&buffer[final_delta], buffer, overlap_old); | 526 | memmove(ringbuf_ptr(final_delta), ringbuf_ptr(0), overlap_old); |
537 | if (overlap <= 0) | 527 | if (overlap <= 0) |
538 | size_to_move -= overlap_old; | 528 | size_to_move -= overlap_old; |
539 | } | 529 | } |
@@ -541,7 +531,7 @@ static bool move_handle(struct memory_handle **h, size_t *delta, | |||
541 | if (overlap > 0) { | 531 | if (overlap > 0) { |
542 | /* Move data that now wraps to the beginning */ | 532 | /* Move data that now wraps to the beginning */ |
543 | size_to_move -= overlap; | 533 | size_to_move -= overlap; |
544 | memmove(buffer, SKIPBYTES(src, size_to_move), | 534 | memmove(ringbuf_ptr(0), SKIPBYTES(src, size_to_move), |
545 | overlap_old > 0 ? final_delta : (size_t)overlap); | 535 | overlap_old > 0 ? final_delta : (size_t)overlap); |
546 | } | 536 | } |
547 | 537 | ||
@@ -568,59 +558,44 @@ fill_buffer : Call buffer_handle for all handles that have data to buffer | |||
568 | 558 | ||
569 | These functions are used by the buffering thread to manage buffer space. | 559 | These functions are used by the buffering thread to manage buffer space. |
570 | */ | 560 | */ |
571 | static size_t handle_size_available(const struct memory_handle *h) | ||
572 | { | ||
573 | /* Obtain proper distances from data start */ | ||
574 | size_t rd = ringbuf_sub(h->ridx, h->data); | ||
575 | size_t wr = ringbuf_sub(h->widx, h->data); | ||
576 | |||
577 | if (LIKELY(wr > rd)) | ||
578 | return wr - rd; | ||
579 | |||
580 | return 0; /* ridx is ahead of or equal to widx at this time */ | ||
581 | } | ||
582 | 561 | ||
583 | static void update_data_counters(struct data_counters *dc) | 562 | static int update_data_counters(struct data_counters *dc) |
584 | { | 563 | { |
585 | size_t buffered = 0; | 564 | size_t buffered = 0; |
586 | size_t wasted = 0; | ||
587 | size_t remaining = 0; | 565 | size_t remaining = 0; |
588 | size_t useful = 0; | 566 | size_t useful = 0; |
589 | |||
590 | struct memory_handle *m; | ||
591 | bool is_useful; | ||
592 | 567 | ||
593 | if (dc == NULL) | 568 | if (dc == NULL) |
594 | dc = &data_counters; | 569 | dc = &data_counters; |
595 | 570 | ||
596 | mutex_lock(&llist_mutex); | 571 | mutex_lock(&llist_mutex); |
597 | 572 | ||
598 | m = find_handle(base_handle_id); | 573 | int num = num_handles; |
599 | is_useful = m == NULL; | 574 | struct memory_handle *m = find_handle(base_handle_id); |
575 | bool is_useful = m == NULL; | ||
576 | |||
577 | for (m = first_handle; m; m = m->next) | ||
578 | { | ||
579 | off_t pos = m->pos; | ||
580 | off_t end = m->end; | ||
600 | 581 | ||
601 | m = first_handle; | 582 | buffered += end - m->start; |
602 | while (m) { | 583 | remaining += m->filesize - end; |
603 | buffered += m->available; | ||
604 | /* wasted could come out larger than the buffer size if ridx's are | ||
605 | overlapping data ahead of their handles' buffered data */ | ||
606 | wasted += ringbuf_sub(m->ridx, m->data); | ||
607 | remaining += m->filerem; | ||
608 | 584 | ||
609 | if (m->id == base_handle_id) | 585 | if (m->id == base_handle_id) |
610 | is_useful = true; | 586 | is_useful = true; |
611 | 587 | ||
612 | if (is_useful) | 588 | if (is_useful) |
613 | useful += handle_size_available(m); | 589 | useful += end - pos; |
614 | |||
615 | m = m->next; | ||
616 | } | 590 | } |
617 | 591 | ||
618 | mutex_unlock(&llist_mutex); | 592 | mutex_unlock(&llist_mutex); |
619 | 593 | ||
620 | dc->buffered = buffered; | 594 | dc->buffered = buffered; |
621 | dc->wasted = wasted; | ||
622 | dc->remaining = remaining; | 595 | dc->remaining = remaining; |
623 | dc->useful = useful; | 596 | dc->useful = useful; |
597 | |||
598 | return num; | ||
624 | } | 599 | } |
625 | 600 | ||
626 | static inline bool buffer_is_low(void) | 601 | static inline bool buffer_is_low(void) |
@@ -635,62 +610,64 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
635 | { | 610 | { |
636 | logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer); | 611 | logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer); |
637 | struct memory_handle *h = find_handle(handle_id); | 612 | struct memory_handle *h = find_handle(handle_id); |
638 | bool stop = false; | ||
639 | |||
640 | if (!h) | 613 | if (!h) |
641 | return true; | 614 | return true; |
642 | 615 | ||
643 | logf(" type: %d", (int)h->type); | 616 | logf(" type: %d", (int)h->type); |
644 | 617 | ||
645 | if (h->filerem == 0) { | 618 | if (h->end >= h->filesize) { |
646 | /* nothing left to buffer */ | 619 | /* nothing left to buffer */ |
647 | return true; | 620 | return true; |
648 | } | 621 | } |
649 | 622 | ||
650 | if (h->fd < 0) { /* file closed, reopen */ | 623 | if (h->fd < 0) { /* file closed, reopen */ |
651 | if (*h->path) | 624 | if (h->path[0] != '\0') |
652 | h->fd = open(h->path, O_RDONLY); | 625 | h->fd = open(h->path, O_RDONLY); |
653 | 626 | ||
654 | if (h->fd < 0) | 627 | if (h->fd < 0) { |
655 | { | ||
656 | /* could not open the file, truncate it where it is */ | 628 | /* could not open the file, truncate it where it is */ |
657 | h->filesize -= h->filerem; | 629 | h->filesize = h->end; |
658 | h->filerem = 0; | ||
659 | return true; | 630 | return true; |
660 | } | 631 | } |
661 | 632 | ||
662 | if (h->offset) | 633 | if (h->start) |
663 | lseek(h->fd, h->offset, SEEK_SET); | 634 | lseek(h->fd, h->start, SEEK_SET); |
664 | } | 635 | } |
665 | 636 | ||
666 | trigger_cpu_boost(); | 637 | trigger_cpu_boost(); |
667 | 638 | ||
668 | if (h->type == TYPE_ID3) { | 639 | if (h->type == TYPE_ID3) { |
669 | if (!get_metadata((struct mp3entry *)(buffer + h->data), | 640 | if (!get_metadata(ringbuf_ptr(h->data), h->fd, h->path)) { |
670 | h->fd, h->path)) { | ||
671 | /* metadata parsing failed: clear the buffer. */ | 641 | /* metadata parsing failed: clear the buffer. */ |
672 | wipe_mp3entry((struct mp3entry *)(buffer + h->data)); | 642 | wipe_mp3entry(ringbuf_ptr(h->data)); |
673 | } | 643 | } |
674 | close(h->fd); | 644 | close_fd(&h->fd); |
675 | h->fd = -1; | 645 | h->widx = ringbuf_add(h->data, h->filesize); |
676 | h->filerem = 0; | 646 | h->end = h->filesize; |
677 | h->available = sizeof(struct mp3entry); | ||
678 | h->widx = ringbuf_add(h->widx, sizeof(struct mp3entry)); | ||
679 | send_event(BUFFER_EVENT_FINISHED, &handle_id); | 647 | send_event(BUFFER_EVENT_FINISHED, &handle_id); |
680 | return true; | 648 | return true; |
681 | } | 649 | } |
682 | 650 | ||
683 | while (h->filerem > 0 && !stop) | 651 | bool stop = false; |
652 | while (h->end < h->filesize && !stop) | ||
684 | { | 653 | { |
685 | /* max amount to copy */ | 654 | /* max amount to copy */ |
686 | ssize_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK), | 655 | size_t widx = h->widx; |
687 | buffer_len - h->widx); | 656 | |
688 | uintptr_t offset = h->next ? ringbuf_offset(h->next) : buf_ridx; | 657 | ssize_t copy_n = h->filesize - h->end; |
689 | ssize_t overlap = ringbuf_add_cross(h->widx, copy_n, offset) + 1; | 658 | copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK); |
659 | copy_n = MIN(copy_n, (off_t)(buffer_len - widx)); | ||
660 | |||
661 | uintptr_t offset = ringbuf_offset(h->next ?: first_handle); | ||
662 | ssize_t overlap = ringbuf_add_cross(widx, copy_n, offset); | ||
663 | |||
664 | /* read only up to available space and stop if it would overwrite | ||
665 | the next handle; stop one byte early for last handle to avoid | ||
666 | empty/full alias */ | ||
667 | if (!h->next) | ||
668 | overlap++; | ||
690 | 669 | ||
691 | if (overlap > 0) { | 670 | if (overlap > 0) { |
692 | /* read only up to available space and stop if it would overwrite | ||
693 | or be on top of the reading position or the next handle */ | ||
694 | stop = true; | 671 | stop = true; |
695 | copy_n -= overlap; | 672 | copy_n -= overlap; |
696 | } | 673 | } |
@@ -699,7 +676,7 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
699 | return false; /* no space for read */ | 676 | return false; /* no space for read */ |
700 | 677 | ||
701 | /* rc is the actual amount read */ | 678 | /* rc is the actual amount read */ |
702 | int rc = read(h->fd, &buffer[h->widx], copy_n); | 679 | ssize_t rc = read(h->fd, ringbuf_ptr(widx), copy_n); |
703 | 680 | ||
704 | if (rc <= 0) { | 681 | if (rc <= 0) { |
705 | /* Some kind of filesystem error, maybe recoverable if not codec */ | 682 | /* Some kind of filesystem error, maybe recoverable if not codec */ |
@@ -708,31 +685,21 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
708 | break; | 685 | break; |
709 | } | 686 | } |
710 | 687 | ||
711 | logf("File ended %ld bytes early\n", (long)h->filerem); | 688 | logf("File ended %lu bytes early\n", |
712 | h->filesize -= h->filerem; | 689 | (unsigned long)(h->filesize - h->end)); |
713 | h->filerem = 0; | 690 | h->filesize = h->end; |
714 | break; | 691 | break; |
715 | } | 692 | } |
716 | 693 | ||
717 | /* Advance buffer */ | 694 | /* Advance buffer and make data available to users */ |
718 | h->widx = ringbuf_add(h->widx, rc); | 695 | h->widx = ringbuf_add(widx, rc); |
719 | if (h == cur_handle) | 696 | h->end += rc; |
720 | buf_widx = h->widx; | 697 | |
721 | h->available += rc; | 698 | yield(); |
722 | h->filerem -= rc; | ||
723 | |||
724 | /* If this is a large file, see if we need to break or give the codec | ||
725 | * more time */ | ||
726 | if (h->type == TYPE_PACKET_AUDIO && | ||
727 | pcmbuf_is_lowdata() && !buffer_is_low()) { | ||
728 | sleep(1); | ||
729 | } else { | ||
730 | yield(); | ||
731 | } | ||
732 | 699 | ||
733 | if (to_buffer == 0) { | 700 | if (to_buffer == 0) { |
734 | /* Normal buffering - check queue */ | 701 | /* Normal buffering - check queue */ |
735 | if(!queue_empty(&buffering_queue)) | 702 | if (!queue_empty(&buffering_queue)) |
736 | break; | 703 | break; |
737 | } else { | 704 | } else { |
738 | if (to_buffer <= (size_t)rc) | 705 | if (to_buffer <= (size_t)rc) |
@@ -741,10 +708,9 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
741 | } | 708 | } |
742 | } | 709 | } |
743 | 710 | ||
744 | if (h->filerem == 0) { | 711 | if (h->end >= h->filesize) { |
745 | /* finished buffering the file */ | 712 | /* finished buffering the file */ |
746 | close(h->fd); | 713 | close_fd(&h->fd); |
747 | h->fd = -1; | ||
748 | send_event(BUFFER_EVENT_FINISHED, &handle_id); | 714 | send_event(BUFFER_EVENT_FINISHED, &handle_id); |
749 | } | 715 | } |
750 | 716 | ||
@@ -752,21 +718,17 @@ static bool buffer_handle(int handle_id, size_t to_buffer) | |||
752 | } | 718 | } |
753 | 719 | ||
754 | /* Close the specified handle id and free its allocation. */ | 720 | /* Close the specified handle id and free its allocation. */ |
721 | /* Q_CLOSE_HANDLE */ | ||
755 | static bool close_handle(int handle_id) | 722 | static bool close_handle(int handle_id) |
756 | { | 723 | { |
757 | bool retval = true; | 724 | bool retval = true; |
758 | struct memory_handle *h; | ||
759 | 725 | ||
760 | mutex_lock(&llist_mutex); | 726 | mutex_lock(&llist_mutex); |
761 | h = find_handle(handle_id); | 727 | struct memory_handle *h = find_handle(handle_id); |
762 | 728 | ||
763 | /* If the handle is not found, it is closed */ | 729 | /* If the handle is not found, it is closed */ |
764 | if (h) { | 730 | if (h) { |
765 | if (h->fd >= 0) { | 731 | close_fd(&h->fd); |
766 | close(h->fd); | ||
767 | h->fd = -1; | ||
768 | } | ||
769 | |||
770 | /* rm_handle returns true unless the handle somehow persists after | 732 | /* rm_handle returns true unless the handle somehow persists after |
771 | exit */ | 733 | exit */ |
772 | retval = rm_handle(h); | 734 | retval = rm_handle(h); |
@@ -791,23 +753,23 @@ static void shrink_handle(struct memory_handle *h) | |||
791 | size_t delta = ringbuf_sub(h->ridx, h->data); | 753 | size_t delta = ringbuf_sub(h->ridx, h->data); |
792 | 754 | ||
793 | /* The value of delta might change for alignment reasons */ | 755 | /* The value of delta might change for alignment reasons */ |
794 | if (!move_handle(&h, &delta, 0, true)) | 756 | if (!move_handle(&h, &delta, 0)) |
795 | return; | 757 | return; |
796 | 758 | ||
797 | h->data = ringbuf_add(h->data, delta); | 759 | h->data = ringbuf_add(h->data, delta); |
798 | h->available -= delta; | 760 | h->start += delta; |
799 | h->offset += delta; | ||
800 | } else { | 761 | } else { |
801 | /* metadata handle: we can move all of it */ | 762 | /* metadata handle: we can move all of it */ |
802 | if (h->pinned || !h->next || h->filerem != 0) | 763 | if (h->pinned || !h->next) |
803 | return; /* Pinned, last handle or not finished loading */ | 764 | return; /* Pinned, last handle */ |
804 | 765 | ||
766 | size_t data_size = h->filesize - h->start; | ||
805 | uintptr_t handle_distance = | 767 | uintptr_t handle_distance = |
806 | ringbuf_sub(ringbuf_offset(h->next), h->data); | 768 | ringbuf_sub(ringbuf_offset(h->next), h->data); |
807 | size_t delta = handle_distance - h->available; | 769 | size_t delta = handle_distance - data_size; |
808 | 770 | ||
809 | /* The value of delta might change for alignment reasons */ | 771 | /* The value of delta might change for alignment reasons */ |
810 | if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC)) | 772 | if (!move_handle(&h, &delta, data_size)) |
811 | return; | 773 | return; |
812 | 774 | ||
813 | size_t olddata = h->data; | 775 | size_t olddata = h->data; |
@@ -815,15 +777,24 @@ static void shrink_handle(struct memory_handle *h) | |||
815 | h->ridx = ringbuf_add(h->ridx, delta); | 777 | h->ridx = ringbuf_add(h->ridx, delta); |
816 | h->widx = ringbuf_add(h->widx, delta); | 778 | h->widx = ringbuf_add(h->widx, delta); |
817 | 779 | ||
818 | if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) { | 780 | switch (h->type) |
819 | /* when moving an mp3entry we need to readjust its pointers. */ | 781 | { |
820 | adjust_mp3entry((struct mp3entry *)&buffer[h->data], | 782 | case TYPE_ID3: |
821 | (void *)&buffer[h->data], | 783 | if (h->filesize != sizeof(struct mp3entry)) |
822 | (const void *)&buffer[olddata]); | 784 | break; |
823 | } else if (h->type == TYPE_BITMAP) { | 785 | /* when moving an mp3entry we need to readjust its pointers */ |
824 | /* adjust the bitmap's pointer */ | 786 | adjust_mp3entry(ringbuf_ptr(h->data), ringbuf_ptr(h->data), |
825 | struct bitmap *bmp = (struct bitmap *)&buffer[h->data]; | 787 | ringbuf_ptr(olddata)); |
826 | bmp->data = &buffer[h->data + sizeof(struct bitmap)]; | 788 | break; |
789 | |||
790 | case TYPE_BITMAP: | ||
791 | /* adjust the bitmap's pointer */ | ||
792 | ((struct bitmap *)ringbuf_ptr(h->data))->data = | ||
793 | ringbuf_ptr(h->data + sizeof(struct bitmap)); | ||
794 | break; | ||
795 | |||
796 | default: | ||
797 | break; | ||
827 | } | 798 | } |
828 | } | 799 | } |
829 | } | 800 | } |
@@ -839,11 +810,9 @@ static bool fill_buffer(void) | |||
839 | shrink_handle(m); | 810 | shrink_handle(m); |
840 | 811 | ||
841 | while (queue_empty(&buffering_queue) && m) { | 812 | while (queue_empty(&buffering_queue) && m) { |
842 | if (m->filerem > 0) { | 813 | if (m->end < m->filesize && !buffer_handle(m->id, 0)) { |
843 | if (!buffer_handle(m->id, 0)) { | 814 | m = NULL; |
844 | m = NULL; | 815 | break; |
845 | break; | ||
846 | } | ||
847 | } | 816 | } |
848 | m = m->next; | 817 | m = m->next; |
849 | } | 818 | } |
@@ -863,26 +832,24 @@ static bool fill_buffer(void) | |||
863 | buffer, with a struct bitmap and the actual data immediately following. | 832 | buffer, with a struct bitmap and the actual data immediately following. |
864 | Return value is the total size (struct + data). */ | 833 | Return value is the total size (struct + data). */ |
865 | static int load_image(int fd, const char *path, | 834 | static int load_image(int fd, const char *path, |
866 | struct bufopen_bitmap_data *data) | 835 | struct bufopen_bitmap_data *data, |
836 | size_t bufidx) | ||
867 | { | 837 | { |
868 | int rc; | 838 | int rc; |
869 | struct bitmap *bmp = (struct bitmap *)&buffer[buf_widx]; | 839 | struct bitmap *bmp = ringbuf_ptr(bufidx); |
870 | struct dim *dim = data->dim; | 840 | struct dim *dim = data->dim; |
871 | struct mp3_albumart *aa = data->embedded_albumart; | 841 | struct mp3_albumart *aa = data->embedded_albumart; |
872 | 842 | ||
873 | /* get the desired image size */ | 843 | /* get the desired image size */ |
874 | bmp->width = dim->width, bmp->height = dim->height; | 844 | bmp->width = dim->width, bmp->height = dim->height; |
875 | /* FIXME: alignment may be needed for the data buffer. */ | 845 | /* FIXME: alignment may be needed for the data buffer. */ |
876 | bmp->data = &buffer[buf_widx + sizeof(struct bitmap)]; | 846 | bmp->data = ringbuf_ptr(bufidx + sizeof(struct bitmap)); |
877 | #ifndef HAVE_JPEG | 847 | |
878 | (void) path; | ||
879 | #endif | ||
880 | #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1) | 848 | #if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1) |
881 | bmp->maskdata = NULL; | 849 | bmp->maskdata = NULL; |
882 | #endif | 850 | #endif |
883 | 851 | int free = (int)MIN(buffer_len - buf_used(), buffer_len - bufidx) | |
884 | int free = (int)MIN(buffer_len - BUF_USED, buffer_len - buf_widx) | 852 | - sizeof(struct bitmap); |
885 | - sizeof(struct bitmap); | ||
886 | 853 | ||
887 | #ifdef HAVE_JPEG | 854 | #ifdef HAVE_JPEG |
888 | if (aa != NULL) { | 855 | if (aa != NULL) { |
@@ -892,14 +859,16 @@ static int load_image(int fd, const char *path, | |||
892 | } | 859 | } |
893 | else if (strcmp(path + strlen(path) - 4, ".bmp")) | 860 | else if (strcmp(path + strlen(path) - 4, ".bmp")) |
894 | rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER| | 861 | rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER| |
895 | FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL); | 862 | FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL); |
896 | else | 863 | else |
897 | #endif | 864 | #endif |
898 | rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER| | 865 | rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER| |
899 | FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL); | 866 | FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL); |
867 | |||
900 | return rc + (rc > 0 ? sizeof(struct bitmap) : 0); | 868 | return rc + (rc > 0 ? sizeof(struct bitmap) : 0); |
869 | (void)path; | ||
901 | } | 870 | } |
902 | #endif | 871 | #endif /* HAVE_ALBUMART */ |
903 | 872 | ||
904 | 873 | ||
905 | /* | 874 | /* |
@@ -933,11 +902,9 @@ management functions for all the actual handle management work. | |||
933 | int bufopen(const char *file, size_t offset, enum data_type type, | 902 | int bufopen(const char *file, size_t offset, enum data_type type, |
934 | void *user_data) | 903 | void *user_data) |
935 | { | 904 | { |
936 | #ifndef HAVE_ALBUMART | ||
937 | /* currently only used for aa loading */ | ||
938 | (void)user_data; | ||
939 | #endif | ||
940 | int handle_id = ERR_BUFFER_FULL; | 905 | int handle_id = ERR_BUFFER_FULL; |
906 | size_t data; | ||
907 | struct memory_handle *h; | ||
941 | 908 | ||
942 | /* No buffer refs until after the mutex_lock call! */ | 909 | /* No buffer refs until after the mutex_lock call! */ |
943 | 910 | ||
@@ -945,24 +912,23 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
945 | /* ID3 case: allocate space, init the handle and return. */ | 912 | /* ID3 case: allocate space, init the handle and return. */ |
946 | mutex_lock(&llist_mutex); | 913 | mutex_lock(&llist_mutex); |
947 | 914 | ||
948 | struct memory_handle *h = | 915 | h = add_handle(H_ALLOCALL, sizeof(struct mp3entry), &data); |
949 | add_handle(sizeof(struct mp3entry), false, true); | ||
950 | 916 | ||
951 | if (h) { | 917 | if (h) { |
952 | handle_id = h->id; | 918 | handle_id = h->id; |
953 | h->fd = -1; | ||
954 | h->filesize = sizeof(struct mp3entry); | ||
955 | h->offset = 0; | ||
956 | h->data = buf_widx; | ||
957 | h->ridx = buf_widx; | ||
958 | h->widx = buf_widx; | ||
959 | h->available = 0; | ||
960 | h->type = type; | ||
961 | strlcpy(h->path, file, MAX_PATH); | ||
962 | 919 | ||
963 | buf_widx = ringbuf_add(buf_widx, sizeof(struct mp3entry)); | 920 | h->type = type; |
921 | strlcpy(h->path, file, MAX_PATH); | ||
922 | h->fd = -1; | ||
923 | h->data = data; | ||
924 | h->ridx = data; | ||
925 | h->widx = data; | ||
926 | h->filesize = sizeof(struct mp3entry); | ||
927 | h->start = 0; | ||
928 | h->pos = 0; | ||
929 | h->end = 0; | ||
964 | 930 | ||
965 | h->filerem = sizeof(struct mp3entry); | 931 | link_cur_handle(h); |
966 | 932 | ||
967 | /* Inform the buffering thread that we added a handle */ | 933 | /* Inform the buffering thread that we added a handle */ |
968 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); | 934 | LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id); |
@@ -975,7 +941,7 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
975 | else if (type == TYPE_UNKNOWN) | 941 | else if (type == TYPE_UNKNOWN) |
976 | return ERR_UNSUPPORTED_TYPE; | 942 | return ERR_UNSUPPORTED_TYPE; |
977 | #ifdef APPLICATION | 943 | #ifdef APPLICATION |
978 | /* loading code from memory is not supported in application builds */ | 944 | /* Loading code from memory is not supported in application builds */ |
979 | else if (type == TYPE_CODEC) | 945 | else if (type == TYPE_CODEC) |
980 | return ERR_UNSUPPORTED_TYPE; | 946 | return ERR_UNSUPPORTED_TYPE; |
981 | #endif | 947 | #endif |
@@ -987,27 +953,31 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
987 | size_t size = 0; | 953 | size_t size = 0; |
988 | #ifdef HAVE_ALBUMART | 954 | #ifdef HAVE_ALBUMART |
989 | if (type == TYPE_BITMAP) { | 955 | if (type == TYPE_BITMAP) { |
990 | /* if albumart is embedded, the complete file is not buffered, | 956 | /* If albumart is embedded, the complete file is not buffered, |
991 | * but only the jpeg part; filesize() would be wrong */ | 957 | * but only the jpeg part; filesize() would be wrong */ |
992 | struct bufopen_bitmap_data *aa = (struct bufopen_bitmap_data*)user_data; | 958 | struct bufopen_bitmap_data *aa = user_data; |
993 | if (aa->embedded_albumart) | 959 | if (aa->embedded_albumart) |
994 | size = aa->embedded_albumart->size; | 960 | size = aa->embedded_albumart->size; |
995 | } | 961 | } |
996 | #endif | 962 | #endif |
963 | |||
997 | if (size == 0) | 964 | if (size == 0) |
998 | size = filesize(fd); | 965 | size = filesize(fd); |
999 | bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC; | 966 | |
967 | unsigned int hflags = 0; | ||
968 | if (type == TYPE_PACKET_AUDIO || type == TYPE_CODEC) | ||
969 | hflags = H_CANWRAP; | ||
1000 | 970 | ||
1001 | size_t adjusted_offset = offset; | 971 | size_t adjusted_offset = offset; |
1002 | if (adjusted_offset > size) | 972 | if (adjusted_offset > size) |
1003 | adjusted_offset = 0; | 973 | adjusted_offset = 0; |
1004 | 974 | ||
1005 | /* Reserve extra space because alignment can move data forward */ | 975 | /* Reserve extra space because alignment can move data forward */ |
1006 | size_t padded_size = STORAGE_PAD(size-adjusted_offset); | 976 | size_t padded_size = STORAGE_PAD(size - adjusted_offset); |
1007 | 977 | ||
1008 | mutex_lock(&llist_mutex); | 978 | mutex_lock(&llist_mutex); |
1009 | 979 | ||
1010 | struct memory_handle *h = add_handle(padded_size, can_wrap, false); | 980 | h = add_handle(hflags, padded_size, &data); |
1011 | if (!h) { | 981 | if (!h) { |
1012 | DEBUGF("%s(): failed to add handle\n", __func__); | 982 | DEBUGF("%s(): failed to add handle\n", __func__); |
1013 | mutex_unlock(&llist_mutex); | 983 | mutex_unlock(&llist_mutex); |
@@ -1016,8 +986,10 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
1016 | } | 986 | } |
1017 | 987 | ||
1018 | handle_id = h->id; | 988 | handle_id = h->id; |
989 | |||
990 | h->type = type; | ||
1019 | strlcpy(h->path, file, MAX_PATH); | 991 | strlcpy(h->path, file, MAX_PATH); |
1020 | h->offset = adjusted_offset; | 992 | h->fd = -1; |
1021 | 993 | ||
1022 | #ifdef STORAGE_WANTS_ALIGN | 994 | #ifdef STORAGE_WANTS_ALIGN |
1023 | /* Don't bother to storage align bitmaps because they are not | 995 | /* Don't bother to storage align bitmaps because they are not |
@@ -1025,44 +997,40 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
1025 | */ | 997 | */ |
1026 | if (type != TYPE_BITMAP) { | 998 | if (type != TYPE_BITMAP) { |
1027 | /* Align to desired storage alignment */ | 999 | /* Align to desired storage alignment */ |
1028 | size_t alignment_pad = STORAGE_OVERLAP(adjusted_offset - | 1000 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)adjusted_offset - |
1029 | (size_t)(&buffer[buf_widx])); | 1001 | (uintptr_t)ringbuf_ptr(data)); |
1030 | buf_widx = ringbuf_add(buf_widx, alignment_pad); | 1002 | data = ringbuf_add(data, alignment_pad); |
1031 | } | 1003 | } |
1032 | #endif /* STORAGE_WANTS_ALIGN */ | 1004 | #endif /* STORAGE_WANTS_ALIGN */ |
1033 | 1005 | ||
1034 | h->fd = -1; | 1006 | h->data = data; |
1035 | h->data = buf_widx; | 1007 | h->ridx = data; |
1036 | h->ridx = buf_widx; | 1008 | h->start = adjusted_offset; |
1037 | h->widx = buf_widx; | 1009 | h->pos = adjusted_offset; |
1038 | h->available = 0; | ||
1039 | h->type = type; | ||
1040 | 1010 | ||
1041 | #ifdef HAVE_ALBUMART | 1011 | #ifdef HAVE_ALBUMART |
1042 | if (type == TYPE_BITMAP) { | 1012 | if (type == TYPE_BITMAP) { |
1043 | /* Bitmap file: we load the data instead of the file */ | 1013 | /* Bitmap file: we load the data instead of the file */ |
1044 | int rc; | 1014 | int rc = load_image(fd, file, user_data, data); |
1045 | rc = load_image(fd, file, (struct bufopen_bitmap_data*)user_data); | ||
1046 | if (rc <= 0) { | 1015 | if (rc <= 0) { |
1047 | rm_handle(h); | ||
1048 | handle_id = ERR_FILE_ERROR; | 1016 | handle_id = ERR_FILE_ERROR; |
1049 | } else { | 1017 | } else { |
1050 | h->filesize = rc; | 1018 | data = ringbuf_add(data, rc); |
1051 | h->available = rc; | 1019 | size = rc; |
1052 | buf_widx = ringbuf_add(buf_widx, rc); | 1020 | adjusted_offset = rc; |
1053 | h->widx = buf_widx; | ||
1054 | } | 1021 | } |
1055 | } | 1022 | } |
1056 | else | 1023 | else |
1057 | #endif | 1024 | #endif |
1058 | { | 1025 | if (type == TYPE_CUESHEET) { |
1059 | if (type == TYPE_CUESHEET) | 1026 | h->fd = fd; |
1060 | h->fd = fd; | 1027 | } |
1061 | 1028 | ||
1029 | if (handle_id >= 0) { | ||
1030 | h->widx = data; | ||
1062 | h->filesize = size; | 1031 | h->filesize = size; |
1063 | h->available = 0; | 1032 | h->end = adjusted_offset; |
1064 | h->widx = buf_widx; | 1033 | link_cur_handle(h); |
1065 | h->filerem = size - adjusted_offset; | ||
1066 | } | 1034 | } |
1067 | 1035 | ||
1068 | mutex_unlock(&llist_mutex); | 1036 | mutex_unlock(&llist_mutex); |
@@ -1084,6 +1052,9 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
1084 | 1052 | ||
1085 | logf("bufopen: new hdl %d", handle_id); | 1053 | logf("bufopen: new hdl %d", handle_id); |
1086 | return handle_id; | 1054 | return handle_id; |
1055 | |||
1056 | /* Currently only used for aa loading */ | ||
1057 | (void)user_data; | ||
1087 | } | 1058 | } |
1088 | 1059 | ||
1089 | /* Open a new handle from data that needs to be copied from memory. | 1060 | /* Open a new handle from data that needs to be copied from memory. |
@@ -1095,16 +1066,15 @@ int bufopen(const char *file, size_t offset, enum data_type type, | |||
1095 | */ | 1066 | */ |
1096 | int bufalloc(const void *src, size_t size, enum data_type type) | 1067 | int bufalloc(const void *src, size_t size, enum data_type type) |
1097 | { | 1068 | { |
1098 | int handle_id; | ||
1099 | |||
1100 | if (type == TYPE_UNKNOWN) | 1069 | if (type == TYPE_UNKNOWN) |
1101 | return ERR_UNSUPPORTED_TYPE; | 1070 | return ERR_UNSUPPORTED_TYPE; |
1102 | 1071 | ||
1103 | handle_id = ERR_BUFFER_FULL; | 1072 | int handle_id = ERR_BUFFER_FULL; |
1104 | 1073 | ||
1105 | mutex_lock(&llist_mutex); | 1074 | mutex_lock(&llist_mutex); |
1106 | 1075 | ||
1107 | struct memory_handle *h = add_handle(size, false, true); | 1076 | size_t data; |
1077 | struct memory_handle *h = add_handle(H_ALLOCALL, size, &data); | ||
1108 | 1078 | ||
1109 | if (h) { | 1079 | if (h) { |
1110 | handle_id = h->id; | 1080 | handle_id = h->id; |
@@ -1112,23 +1082,24 @@ int bufalloc(const void *src, size_t size, enum data_type type) | |||
1112 | if (src) { | 1082 | if (src) { |
1113 | if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) { | 1083 | if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) { |
1114 | /* specially take care of struct mp3entry */ | 1084 | /* specially take care of struct mp3entry */ |
1115 | copy_mp3entry((struct mp3entry *)&buffer[buf_widx], | 1085 | copy_mp3entry(ringbuf_ptr(data), src); |
1116 | (const struct mp3entry *)src); | ||
1117 | } else { | 1086 | } else { |
1118 | memcpy(&buffer[buf_widx], src, size); | 1087 | memcpy(ringbuf_ptr(data), src, size); |
1119 | } | 1088 | } |
1120 | } | 1089 | } |
1121 | 1090 | ||
1122 | h->fd = -1; | 1091 | h->type = type; |
1123 | *h->path = 0; | 1092 | h->path[0] = '\0'; |
1124 | h->filesize = size; | 1093 | h->fd = -1; |
1125 | h->offset = 0; | 1094 | h->data = data; |
1126 | h->ridx = buf_widx; | 1095 | h->ridx = data; |
1127 | h->data = buf_widx; | 1096 | h->widx = ringbuf_add(data, size); |
1128 | buf_widx = ringbuf_add(buf_widx, size); | 1097 | h->filesize = size; |
1129 | h->widx = buf_widx; | 1098 | h->start = 0; |
1130 | h->available = size; | 1099 | h->pos = 0; |
1131 | h->type = type; | 1100 | h->end = size; |
1101 | |||
1102 | link_cur_handle(h); | ||
1132 | } | 1103 | } |
1133 | 1104 | ||
1134 | mutex_unlock(&llist_mutex); | 1105 | mutex_unlock(&llist_mutex); |
@@ -1155,32 +1126,36 @@ bool bufclose(int handle_id) | |||
1155 | 1126 | ||
1156 | /* Backend to bufseek and bufadvance. Call only in response to | 1127 | /* Backend to bufseek and bufadvance. Call only in response to |
1157 | Q_REBUFFER_HANDLE! */ | 1128 | Q_REBUFFER_HANDLE! */ |
1158 | static void rebuffer_handle(int handle_id, size_t newpos) | 1129 | static void rebuffer_handle(int handle_id, off_t newpos) |
1159 | { | 1130 | { |
1160 | struct memory_handle *h = find_handle(handle_id); | 1131 | struct memory_handle *h = find_handle(handle_id); |
1161 | |||
1162 | if (!h) { | 1132 | if (!h) { |
1163 | queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND); | 1133 | queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND); |
1164 | return; | 1134 | return; |
1165 | } | 1135 | } |
1166 | 1136 | ||
1137 | /* Check that we still need to do this since the request could have | ||
1138 | possibly been met by this time */ | ||
1139 | if (newpos >= h->start && newpos <= h->end) { | ||
1140 | h->ridx = ringbuf_add(h->data, newpos - h->start); | ||
1141 | h->pos = newpos; | ||
1142 | queue_reply(&buffering_queue, 0); | ||
1143 | return; | ||
1144 | } | ||
1145 | |||
1167 | /* When seeking foward off of the buffer, if it is a short seek attempt to | 1146 | /* When seeking foward off of the buffer, if it is a short seek attempt to |
1168 | avoid rebuffering the whole track, just read enough to satisfy */ | 1147 | avoid rebuffering the whole track, just read enough to satisfy */ |
1169 | if (newpos > h->offset && | 1148 | off_t amount = newpos - h->pos; |
1170 | newpos - h->offset < BUFFERING_DEFAULT_FILECHUNK) { | 1149 | |
1171 | 1150 | if (amount > 0 && amount <= BUFFERING_DEFAULT_FILECHUNK) { | |
1172 | size_t amount = newpos - h->offset; | 1151 | h->ridx = ringbuf_add(h->data, newpos - h->start); |
1173 | h->ridx = ringbuf_add(h->data, amount); | 1152 | h->pos = newpos; |
1174 | 1153 | ||
1175 | if (buffer_handle(handle_id, amount + 1)) { | 1154 | if (buffer_handle(handle_id, amount + 1) && h->end >= h->pos) { |
1176 | size_t rd = ringbuf_sub(h->ridx, h->data); | 1155 | /* It really did succeed */ |
1177 | size_t wr = ringbuf_sub(h->widx, h->data); | 1156 | queue_reply(&buffering_queue, 0); |
1178 | if (wr >= rd) { | 1157 | buffer_handle(handle_id, 0); /* Ok, try the rest */ |
1179 | /* It really did succeed */ | 1158 | return; |
1180 | queue_reply(&buffering_queue, 0); | ||
1181 | buffer_handle(handle_id, 0); /* Ok, try the rest */ | ||
1182 | return; | ||
1183 | } | ||
1184 | } | 1159 | } |
1185 | /* Data collision or other file error - must reset */ | 1160 | /* Data collision or other file error - must reset */ |
1186 | 1161 | ||
@@ -1188,10 +1163,7 @@ static void rebuffer_handle(int handle_id, size_t newpos) | |||
1188 | newpos = h->filesize; /* file truncation happened above */ | 1163 | newpos = h->filesize; /* file truncation happened above */ |
1189 | } | 1164 | } |
1190 | 1165 | ||
1191 | /* Reset the handle to its new position */ | 1166 | size_t next = ringbuf_offset(h->next ?: first_handle); |
1192 | h->offset = newpos; | ||
1193 | |||
1194 | size_t next = h->next ? ringbuf_offset(h->next) : buf_ridx; | ||
1195 | 1167 | ||
1196 | #ifdef STORAGE_WANTS_ALIGN | 1168 | #ifdef STORAGE_WANTS_ALIGN |
1197 | /* Strip alignment padding then redo */ | 1169 | /* Strip alignment padding then redo */ |
@@ -1200,8 +1172,8 @@ static void rebuffer_handle(int handle_id, size_t newpos) | |||
1200 | /* Align to desired storage alignment if space permits - handle could | 1172 | /* Align to desired storage alignment if space permits - handle could |
1201 | have been shrunken too close to the following one after a previous | 1173 | have been shrunken too close to the following one after a previous |
1202 | rebuffer. */ | 1174 | rebuffer. */ |
1203 | size_t alignment_pad = | 1175 | size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos - |
1204 | STORAGE_OVERLAP(h->offset - (size_t)(&buffer[new_index])); | 1176 | (uintptr_t)ringbuf_ptr(new_index)); |
1205 | 1177 | ||
1206 | if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0) | 1178 | if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0) |
1207 | alignment_pad = 0; /* Forego storage alignment this time */ | 1179 | alignment_pad = 0; /* Forego storage alignment this time */ |
@@ -1212,23 +1184,19 @@ static void rebuffer_handle(int handle_id, size_t newpos) | |||
1212 | size_t new_index = h->data; | 1184 | size_t new_index = h->data; |
1213 | #endif /* STORAGE_WANTS_ALIGN */ | 1185 | #endif /* STORAGE_WANTS_ALIGN */ |
1214 | 1186 | ||
1187 | /* Reset the handle to its new position */ | ||
1215 | h->ridx = h->widx = h->data = new_index; | 1188 | h->ridx = h->widx = h->data = new_index; |
1216 | 1189 | h->start = h->pos = h->end = newpos; | |
1217 | if (h == cur_handle) | ||
1218 | buf_widx = new_index; | ||
1219 | |||
1220 | h->available = 0; | ||
1221 | h->filerem = h->filesize - h->offset; | ||
1222 | 1190 | ||
1223 | if (h->fd >= 0) | 1191 | if (h->fd >= 0) |
1224 | lseek(h->fd, h->offset, SEEK_SET); | 1192 | lseek(h->fd, newpos, SEEK_SET); |
1225 | 1193 | ||
1226 | if (h->next && ringbuf_sub(next, h->data) <= h->filesize - newpos) { | 1194 | off_t filerem = h->filesize - newpos; |
1195 | if (h->next && ringbuf_add_cross(new_index, filerem, next) > 0) { | ||
1227 | /* There isn't enough space to rebuffer all of the track from its new | 1196 | /* There isn't enough space to rebuffer all of the track from its new |
1228 | offset, so we ask the user to free some */ | 1197 | offset, so we ask the user to free some */ |
1229 | DEBUGF("%s(): space is needed\n", __func__); | 1198 | DEBUGF("%s(): space is needed\n", __func__); |
1230 | int hid = handle_id; | 1199 | send_event(BUFFER_EVENT_REBUFFER, &(int){ handle_id }); |
1231 | send_event(BUFFER_EVENT_REBUFFER, &hid); | ||
1232 | } | 1200 | } |
1233 | 1201 | ||
1234 | /* Now we do the rebuffer */ | 1202 | /* Now we do the rebuffer */ |
@@ -1237,25 +1205,20 @@ static void rebuffer_handle(int handle_id, size_t newpos) | |||
1237 | } | 1205 | } |
1238 | 1206 | ||
1239 | /* Backend to bufseek and bufadvance */ | 1207 | /* Backend to bufseek and bufadvance */ |
1240 | static int seek_handle(struct memory_handle *h, size_t newpos) | 1208 | static int seek_handle(struct memory_handle *h, off_t newpos) |
1241 | { | 1209 | { |
1242 | if (newpos > h->filesize) { | 1210 | if ((newpos < h->start || newpos >= h->end) && |
1243 | /* access beyond the end of the file */ | 1211 | (newpos < h->filesize || h->end < h->filesize)) { |
1244 | return ERR_INVALID_VALUE; | ||
1245 | } | ||
1246 | else if ((newpos < h->offset || h->offset + h->available <= newpos) && | ||
1247 | (newpos < h->filesize || h->filerem > 0)) { | ||
1248 | /* access before or after buffered data and not to end of file or file | 1212 | /* access before or after buffered data and not to end of file or file |
1249 | is not buffered to the end-- a rebuffer is needed. */ | 1213 | is not buffered to the end-- a rebuffer is needed. */ |
1250 | struct buf_message_data parm = { h->id, newpos }; | ||
1251 | return queue_send(&buffering_queue, Q_REBUFFER_HANDLE, | 1214 | return queue_send(&buffering_queue, Q_REBUFFER_HANDLE, |
1252 | (intptr_t)&parm); | 1215 | (intptr_t)&(struct buf_message_data){ h->id, newpos }); |
1253 | } | 1216 | } |
1254 | else { | 1217 | else { |
1255 | h->ridx = ringbuf_add(h->data, newpos - h->offset); | 1218 | h->ridx = ringbuf_add(h->data, newpos - h->start); |
1219 | h->pos = newpos; | ||
1220 | return 0; | ||
1256 | } | 1221 | } |
1257 | |||
1258 | return 0; | ||
1259 | } | 1222 | } |
1260 | 1223 | ||
1261 | /* Set reading index in handle (relatively to the start of the file). | 1224 | /* Set reading index in handle (relatively to the start of the file). |
@@ -1271,14 +1234,17 @@ int bufseek(int handle_id, size_t newpos) | |||
1271 | if (!h) | 1234 | if (!h) |
1272 | return ERR_HANDLE_NOT_FOUND; | 1235 | return ERR_HANDLE_NOT_FOUND; |
1273 | 1236 | ||
1237 | if (newpos > (size_t)h->filesize) | ||
1238 | return ERR_INVALID_VALUE; | ||
1239 | |||
1274 | return seek_handle(h, newpos); | 1240 | return seek_handle(h, newpos); |
1275 | } | 1241 | } |
1276 | 1242 | ||
1277 | /* Advance the reading index in a handle (relatively to its current position). | 1243 | /* Advance the reading index in a handle (relatively to its current position). |
1278 | Return 0 for success and for failure: | 1244 | Return 0 for success and for failure: |
1279 | ERR_HANDLE_NOT_FOUND if the handle wasn't found | 1245 | ERR_HANDLE_NOT_FOUND if the handle wasn't found |
1280 | ERR_INVALID_VALUE if the new requested position was beyond the end of | 1246 | ERR_INVALID_VALUE if the new requested position was before the beginning |
1281 | the file | 1247 | or beyond the end of the file |
1282 | */ | 1248 | */ |
1283 | int bufadvance(int handle_id, off_t offset) | 1249 | int bufadvance(int handle_id, off_t offset) |
1284 | { | 1250 | { |
@@ -1286,8 +1252,13 @@ int bufadvance(int handle_id, off_t offset) | |||
1286 | if (!h) | 1252 | if (!h) |
1287 | return ERR_HANDLE_NOT_FOUND; | 1253 | return ERR_HANDLE_NOT_FOUND; |
1288 | 1254 | ||
1289 | size_t newpos = h->offset + ringbuf_sub(h->ridx, h->data) + offset; | 1255 | off_t pos = h->pos; |
1290 | return seek_handle(h, newpos); | 1256 | |
1257 | if ((offset < 0 && offset < -pos) || | ||
1258 | (offset >= 0 && offset > h->filesize - pos)) | ||
1259 | return ERR_INVALID_VALUE; | ||
1260 | |||
1261 | return seek_handle(h, pos + offset); | ||
1291 | } | 1262 | } |
1292 | 1263 | ||
1293 | /* Get the read position from the start of the file | 1264 | /* Get the read position from the start of the file |
@@ -1299,63 +1270,76 @@ off_t bufftell(int handle_id) | |||
1299 | const struct memory_handle *h = find_handle(handle_id); | 1270 | const struct memory_handle *h = find_handle(handle_id); |
1300 | if (!h) | 1271 | if (!h) |
1301 | return ERR_HANDLE_NOT_FOUND; | 1272 | return ERR_HANDLE_NOT_FOUND; |
1302 | return h->offset + ringbuf_sub(h->ridx, h->data); | 1273 | |
1274 | return h->pos; | ||
1303 | } | 1275 | } |
1304 | 1276 | ||
1305 | /* Used by bufread and bufgetdata to prepare the buffer and retrieve the | 1277 | /* Used by bufread and bufgetdata to prepare the buffer and retrieve the |
1306 | * actual amount of data available for reading. This function explicitly | 1278 | * actual amount of data available for reading. It does range checks on |
1307 | * does not check the validity of the input handle. It does do range checks | 1279 | * size and returns a valid (and explicit) amount of data for reading */ |
1308 | * on size and returns a valid (and explicit) amount of data for reading */ | ||
1309 | static struct memory_handle *prep_bufdata(int handle_id, size_t *size, | 1280 | static struct memory_handle *prep_bufdata(int handle_id, size_t *size, |
1310 | bool guardbuf_limit) | 1281 | bool guardbuf_limit) |
1311 | { | 1282 | { |
1312 | struct memory_handle *h = find_handle(handle_id); | 1283 | struct memory_handle *h = find_handle(handle_id); |
1313 | size_t realsize; | ||
1314 | |||
1315 | if (!h) | 1284 | if (!h) |
1316 | return NULL; | 1285 | return NULL; |
1317 | 1286 | ||
1318 | size_t avail = handle_size_available(h); | 1287 | if (h->pos >= h->filesize) { |
1319 | |||
1320 | if (avail == 0 && h->filerem == 0) { | ||
1321 | /* File is finished reading */ | 1288 | /* File is finished reading */ |
1322 | *size = 0; | 1289 | *size = 0; |
1323 | return h; | 1290 | return h; |
1324 | } | 1291 | } |
1325 | 1292 | ||
1326 | realsize = *size; | 1293 | off_t realsize = *size; |
1294 | off_t filerem = h->filesize - h->pos; | ||
1327 | 1295 | ||
1328 | if (realsize == 0 || realsize > avail + h->filerem) | 1296 | if (realsize <= 0 || realsize > filerem) |
1329 | realsize = avail + h->filerem; | 1297 | realsize = filerem; /* clip to eof */ |
1330 | 1298 | ||
1331 | if (guardbuf_limit && h->type == TYPE_PACKET_AUDIO | 1299 | if (guardbuf_limit && realsize > GUARD_BUFSIZE) { |
1332 | && realsize > GUARD_BUFSIZE) { | ||
1333 | logf("data request > guardbuf"); | 1300 | logf("data request > guardbuf"); |
1334 | /* If more than the size of the guardbuf is requested and this is a | 1301 | /* If more than the size of the guardbuf is requested and this is a |
1335 | * bufgetdata, limit to guard_bufsize over the end of the buffer */ | 1302 | * bufgetdata, limit to guard_bufsize over the end of the buffer */ |
1336 | realsize = MIN(realsize, buffer_len - h->ridx + GUARD_BUFSIZE); | 1303 | realsize = MIN((size_t)realsize, buffer_len - h->ridx + GUARD_BUFSIZE); |
1337 | /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */ | 1304 | /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */ |
1338 | } | 1305 | } |
1339 | 1306 | ||
1340 | if (h->filerem > 0 && avail < realsize) { | 1307 | off_t end = h->end; |
1341 | /* Data isn't ready. Request buffering */ | 1308 | off_t wait_end = h->pos + realsize; |
1342 | LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id); | 1309 | |
1343 | queue_send(&buffering_queue, Q_START_FILL, handle_id); | 1310 | if (end < wait_end && end < h->filesize) { |
1344 | /* Wait for the data to be ready */ | 1311 | /* Wait for the data to be ready */ |
1312 | unsigned int request = 1; | ||
1313 | |||
1345 | do | 1314 | do |
1346 | { | 1315 | { |
1316 | if (--request == 0) { | ||
1317 | request = 100; | ||
1318 | /* Data (still) isn't ready; ping buffering thread */ | ||
1319 | LOGFQUEUE("buffering >| Q_START_FILL %d",handle_id); | ||
1320 | queue_send(&buffering_queue, Q_START_FILL, handle_id); | ||
1321 | } | ||
1322 | |||
1347 | sleep(0); | 1323 | sleep(0); |
1348 | /* it is not safe for a non-buffering thread to sleep while | 1324 | /* it is not safe for a non-buffering thread to sleep while |
1349 | * holding a handle */ | 1325 | * holding a handle */ |
1350 | h = find_handle(handle_id); | 1326 | h = find_handle(handle_id); |
1351 | if (!h || h->signaled != 0) | 1327 | if (!h) |
1352 | return NULL; | 1328 | return NULL; |
1353 | avail = handle_size_available(h); | 1329 | |
1330 | if (h->signaled != 0) | ||
1331 | return NULL; /* Wait must be abandoned */ | ||
1332 | |||
1333 | end = h->end; | ||
1354 | } | 1334 | } |
1355 | while (h->filerem > 0 && avail < realsize); | 1335 | while (end < wait_end && end < h->filesize); |
1336 | |||
1337 | filerem = h->filesize - h->pos; | ||
1338 | if (realsize > filerem) | ||
1339 | realsize = filerem; | ||
1356 | } | 1340 | } |
1357 | 1341 | ||
1358 | *size = MIN(realsize, avail); | 1342 | *size = realsize; |
1359 | return h; | 1343 | return h; |
1360 | } | 1344 | } |
1361 | 1345 | ||
@@ -1374,23 +1358,21 @@ static struct memory_handle *prep_bufdata(int handle_id, size_t *size, | |||
1374 | */ | 1358 | */ |
1375 | ssize_t bufread(int handle_id, size_t size, void *dest) | 1359 | ssize_t bufread(int handle_id, size_t size, void *dest) |
1376 | { | 1360 | { |
1377 | const struct memory_handle *h; | 1361 | const struct memory_handle *h = |
1378 | size_t adjusted_size = size; | 1362 | prep_bufdata(handle_id, &size, false); |
1379 | |||
1380 | h = prep_bufdata(handle_id, &adjusted_size, false); | ||
1381 | if (!h) | 1363 | if (!h) |
1382 | return ERR_HANDLE_NOT_FOUND; | 1364 | return ERR_HANDLE_NOT_FOUND; |
1383 | 1365 | ||
1384 | if (h->ridx + adjusted_size > buffer_len) { | 1366 | if (h->ridx + size > buffer_len) { |
1385 | /* the data wraps around the end of the buffer */ | 1367 | /* the data wraps around the end of the buffer */ |
1386 | size_t read = buffer_len - h->ridx; | 1368 | size_t read = buffer_len - h->ridx; |
1387 | memcpy(dest, &buffer[h->ridx], read); | 1369 | memcpy(dest, ringbuf_ptr(h->ridx), read); |
1388 | memcpy(dest+read, buffer, adjusted_size - read); | 1370 | memcpy(dest + read, ringbuf_ptr(0), size - read); |
1389 | } else { | 1371 | } else { |
1390 | memcpy(dest, &buffer[h->ridx], adjusted_size); | 1372 | memcpy(dest, ringbuf_ptr(h->ridx), size); |
1391 | } | 1373 | } |
1392 | 1374 | ||
1393 | return adjusted_size; | 1375 | return size; |
1394 | } | 1376 | } |
1395 | 1377 | ||
1396 | /* Update the "data" pointer to make the handle's data available to the caller. | 1378 | /* Update the "data" pointer to make the handle's data available to the caller. |
@@ -1404,81 +1386,80 @@ ssize_t bufread(int handle_id, size_t size, void *dest) | |||
1404 | */ | 1386 | */ |
1405 | ssize_t bufgetdata(int handle_id, size_t size, void **data) | 1387 | ssize_t bufgetdata(int handle_id, size_t size, void **data) |
1406 | { | 1388 | { |
1407 | const struct memory_handle *h; | 1389 | struct memory_handle *h = |
1408 | size_t adjusted_size = size; | 1390 | prep_bufdata(handle_id, &size, true); |
1409 | |||
1410 | h = prep_bufdata(handle_id, &adjusted_size, true); | ||
1411 | if (!h) | 1391 | if (!h) |
1412 | return ERR_HANDLE_NOT_FOUND; | 1392 | return ERR_HANDLE_NOT_FOUND; |
1413 | 1393 | ||
1414 | if (h->ridx + adjusted_size > buffer_len) { | 1394 | if (h->ridx + size > buffer_len) { |
1415 | /* the data wraps around the end of the buffer : | 1395 | /* the data wraps around the end of the buffer : |
1416 | use the guard buffer to provide the requested amount of data. */ | 1396 | use the guard buffer to provide the requested amount of data. */ |
1417 | size_t copy_n = h->ridx + adjusted_size - buffer_len; | 1397 | size_t copy_n = h->ridx + size - buffer_len; |
1418 | /* prep_bufdata ensures | 1398 | /* prep_bufdata ensures |
1419 | adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE, | 1399 | adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE, |
1420 | so copy_n <= GUARD_BUFSIZE */ | 1400 | so copy_n <= GUARD_BUFSIZE */ |
1421 | memcpy(guard_buffer, (const unsigned char *)buffer, copy_n); | 1401 | memcpy(guard_buffer, ringbuf_ptr(0), copy_n); |
1422 | } | 1402 | } |
1423 | 1403 | ||
1424 | if (data) | 1404 | if (data) |
1425 | *data = &buffer[h->ridx]; | 1405 | *data = ringbuf_ptr(h->ridx); |
1426 | 1406 | ||
1427 | return adjusted_size; | 1407 | return size; |
1428 | } | 1408 | } |
1429 | 1409 | ||
1430 | ssize_t bufgettail(int handle_id, size_t size, void **data) | 1410 | ssize_t bufgettail(int handle_id, size_t size, void **data) |
1431 | { | 1411 | { |
1432 | size_t tidx; | 1412 | if (thread_self() != buffering_thread_id) |
1433 | 1413 | return ERR_WRONG_THREAD; /* only from buffering thread */ | |
1434 | const struct memory_handle *h; | ||
1435 | 1414 | ||
1436 | h = find_handle(handle_id); | 1415 | /* We don't support tail requests of > guardbuf_size, for simplicity */ |
1416 | if (size > GUARD_BUFSIZE) | ||
1417 | return ERR_INVALID_VALUE; | ||
1437 | 1418 | ||
1419 | const struct memory_handle *h = find_handle(handle_id); | ||
1438 | if (!h) | 1420 | if (!h) |
1439 | return ERR_HANDLE_NOT_FOUND; | 1421 | return ERR_HANDLE_NOT_FOUND; |
1440 | 1422 | ||
1441 | if (h->filerem) | 1423 | if (h->end >= h->filesize) { |
1442 | return ERR_HANDLE_NOT_DONE; | 1424 | size_t tidx = ringbuf_sub(h->widx, size); |
1443 | 1425 | ||
1444 | /* We don't support tail requests of > guardbuf_size, for simplicity */ | 1426 | if (tidx + size > buffer_len) { |
1445 | if (size > GUARD_BUFSIZE) | 1427 | size_t copy_n = tidx + size - buffer_len; |
1446 | return ERR_INVALID_VALUE; | 1428 | memcpy(guard_buffer, ringbuf_ptr(0), copy_n); |
1447 | 1429 | } | |
1448 | tidx = ringbuf_sub(h->widx, size); | ||
1449 | 1430 | ||
1450 | if (tidx + size > buffer_len) { | 1431 | *data = ringbuf_ptr(tidx); |
1451 | size_t copy_n = tidx + size - buffer_len; | 1432 | } |
1452 | memcpy(guard_buffer, (const unsigned char *)buffer, copy_n); | 1433 | else { |
1434 | size = ERR_HANDLE_NOT_DONE; | ||
1453 | } | 1435 | } |
1454 | 1436 | ||
1455 | *data = &buffer[tidx]; | ||
1456 | return size; | 1437 | return size; |
1457 | } | 1438 | } |
1458 | 1439 | ||
1459 | ssize_t bufcuttail(int handle_id, size_t size) | 1440 | ssize_t bufcuttail(int handle_id, size_t size) |
1460 | { | 1441 | { |
1461 | struct memory_handle *h; | 1442 | if (thread_self() != buffering_thread_id) |
1462 | size_t adjusted_size = size; | 1443 | return ERR_WRONG_THREAD; /* only from buffering thread */ |
1463 | |||
1464 | h = find_handle(handle_id); | ||
1465 | 1444 | ||
1445 | struct memory_handle *h = find_handle(handle_id); | ||
1466 | if (!h) | 1446 | if (!h) |
1467 | return ERR_HANDLE_NOT_FOUND; | 1447 | return ERR_HANDLE_NOT_FOUND; |
1468 | 1448 | ||
1469 | if (h->filerem) | 1449 | if (h->end >= h->filesize) { |
1470 | return ERR_HANDLE_NOT_DONE; | 1450 | /* Cannot trim to before read position */ |
1471 | 1451 | size_t available = h->end - MAX(h->start, h->pos); | |
1472 | if (h->available < adjusted_size) | 1452 | if (available < size) |
1473 | adjusted_size = h->available; | 1453 | size = available; |
1474 | 1454 | ||
1475 | h->available -= adjusted_size; | 1455 | h->widx = ringbuf_sub(h->widx, size); |
1476 | h->filesize -= adjusted_size; | 1456 | h->filesize -= size; |
1477 | h->widx = ringbuf_sub(h->widx, adjusted_size); | 1457 | h->end -= size; |
1478 | if (h == cur_handle) | 1458 | } else { |
1479 | buf_widx = h->widx; | 1459 | size = ERR_HANDLE_NOT_DONE; |
1460 | } | ||
1480 | 1461 | ||
1481 | return adjusted_size; | 1462 | return size; |
1482 | } | 1463 | } |
1483 | 1464 | ||
1484 | 1465 | ||
@@ -1507,7 +1488,7 @@ ssize_t buf_handle_offset(int handle_id) | |||
1507 | const struct memory_handle *h = find_handle(handle_id); | 1488 | const struct memory_handle *h = find_handle(handle_id); |
1508 | if (!h) | 1489 | if (!h) |
1509 | return ERR_HANDLE_NOT_FOUND; | 1490 | return ERR_HANDLE_NOT_FOUND; |
1510 | return h->offset; | 1491 | return h->start; |
1511 | } | 1492 | } |
1512 | 1493 | ||
1513 | void buf_set_base_handle(int handle_id) | 1494 | void buf_set_base_handle(int handle_id) |
@@ -1530,7 +1511,7 @@ ssize_t buf_handle_remaining(int handle_id) | |||
1530 | const struct memory_handle *h = find_handle(handle_id); | 1511 | const struct memory_handle *h = find_handle(handle_id); |
1531 | if (!h) | 1512 | if (!h) |
1532 | return ERR_HANDLE_NOT_FOUND; | 1513 | return ERR_HANDLE_NOT_FOUND; |
1533 | return h->filerem; | 1514 | return h->filesize - h->end; |
1534 | } | 1515 | } |
1535 | 1516 | ||
1536 | bool buf_is_handle(int handle_id) | 1517 | bool buf_is_handle(int handle_id) |
@@ -1572,7 +1553,11 @@ size_t buf_length(void) | |||
1572 | /* Return the amount of buffer space used */ | 1553 | /* Return the amount of buffer space used */ |
1573 | size_t buf_used(void) | 1554 | size_t buf_used(void) |
1574 | { | 1555 | { |
1575 | return BUF_USED; | 1556 | struct memory_handle *first = first_handle; |
1557 | if (!first) | ||
1558 | return 0; | ||
1559 | |||
1560 | return ringbuf_sub(cur_handle->widx, ringbuf_offset(first)); | ||
1576 | } | 1561 | } |
1577 | 1562 | ||
1578 | void buf_set_watermark(size_t bytes) | 1563 | void buf_set_watermark(size_t bytes) |
@@ -1615,7 +1600,6 @@ static void NORETURN_ATTR buffering_thread(void) | |||
1615 | { | 1600 | { |
1616 | bool filling = false; | 1601 | bool filling = false; |
1617 | struct queue_event ev; | 1602 | struct queue_event ev; |
1618 | struct buf_message_data *parm; | ||
1619 | 1603 | ||
1620 | while (true) | 1604 | while (true) |
1621 | { | 1605 | { |
@@ -1654,11 +1638,14 @@ static void NORETURN_ATTR buffering_thread(void) | |||
1654 | break; | 1638 | break; |
1655 | 1639 | ||
1656 | case Q_REBUFFER_HANDLE: | 1640 | case Q_REBUFFER_HANDLE: |
1657 | parm = (struct buf_message_data *)ev.data; | 1641 | { |
1642 | struct buf_message_data *parm = | ||
1643 | (struct buf_message_data *)ev.data; | ||
1658 | LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld", | 1644 | LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld", |
1659 | parm->handle_id, parm->data); | 1645 | parm->handle_id, parm->data); |
1660 | rebuffer_handle(parm->handle_id, parm->data); | 1646 | rebuffer_handle(parm->handle_id, parm->data); |
1661 | break; | 1647 | break; |
1648 | } | ||
1662 | 1649 | ||
1663 | case Q_CLOSE_HANDLE: | 1650 | case Q_CLOSE_HANDLE: |
1664 | LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data); | 1651 | LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data); |
@@ -1689,7 +1676,7 @@ static void NORETURN_ATTR buffering_thread(void) | |||
1689 | if (num_handles > 0 && data_counters.useful <= high_watermark) | 1676 | if (num_handles > 0 && data_counters.useful <= high_watermark) |
1690 | send_event(BUFFER_EVENT_BUFFER_LOW, 0); | 1677 | send_event(BUFFER_EVENT_BUFFER_LOW, 0); |
1691 | 1678 | ||
1692 | if (data_counters.remaining > 0 && BUF_USED <= high_watermark) { | 1679 | if (data_counters.remaining > 0 && buf_used() <= high_watermark) { |
1693 | /* This is a new fill, shrink the buffer up first */ | 1680 | /* This is a new fill, shrink the buffer up first */ |
1694 | if (!filling) | 1681 | if (!filling) |
1695 | shrink_buffer(); | 1682 | shrink_buffer(); |
@@ -1769,9 +1756,6 @@ bool buffering_reset(char *buf, size_t buflen) | |||
1769 | buffer_len = buflen; | 1756 | buffer_len = buflen; |
1770 | guard_buffer = buf + buflen; | 1757 | guard_buffer = buf + buflen; |
1771 | 1758 | ||
1772 | buf_widx = 0; | ||
1773 | buf_ridx = 0; | ||
1774 | |||
1775 | first_handle = NULL; | 1759 | first_handle = NULL; |
1776 | cur_handle = NULL; | 1760 | cur_handle = NULL; |
1777 | cached_handle = NULL; | 1761 | cached_handle = NULL; |
@@ -1794,10 +1778,8 @@ bool buffering_reset(char *buf, size_t buflen) | |||
1794 | void buffering_get_debugdata(struct buffering_debug *dbgdata) | 1778 | void buffering_get_debugdata(struct buffering_debug *dbgdata) |
1795 | { | 1779 | { |
1796 | struct data_counters dc; | 1780 | struct data_counters dc; |
1797 | update_data_counters(&dc); | 1781 | dbgdata->num_handles = update_data_counters(&dc); |
1798 | dbgdata->num_handles = num_handles; | ||
1799 | dbgdata->data_rem = dc.remaining; | 1782 | dbgdata->data_rem = dc.remaining; |
1800 | dbgdata->wasted_space = dc.wasted; | ||
1801 | dbgdata->buffered_data = dc.buffered; | 1783 | dbgdata->buffered_data = dc.buffered; |
1802 | dbgdata->useful_data = dc.useful; | 1784 | dbgdata->useful_data = dc.useful; |
1803 | dbgdata->watermark = BUF_WATERMARK; | 1785 | dbgdata->watermark = BUF_WATERMARK; |
diff --git a/apps/buffering.h b/apps/buffering.h index 6d52794233..218f77ed85 100644 --- a/apps/buffering.h +++ b/apps/buffering.h | |||
@@ -45,7 +45,7 @@ enum data_type { | |||
45 | #define ERR_FILE_ERROR -4 | 45 | #define ERR_FILE_ERROR -4 |
46 | #define ERR_HANDLE_NOT_DONE -5 | 46 | #define ERR_HANDLE_NOT_DONE -5 |
47 | #define ERR_UNSUPPORTED_TYPE -6 | 47 | #define ERR_UNSUPPORTED_TYPE -6 |
48 | 48 | #define ERR_WRONG_THREAD -7 | |
49 | 49 | ||
50 | /* Initialise the buffering subsystem */ | 50 | /* Initialise the buffering subsystem */ |
51 | void buffering_init(void) INIT_ATTR; | 51 | void buffering_init(void) INIT_ATTR; |
@@ -116,10 +116,6 @@ void buf_back_off_storage(bool back_off); | |||
116 | #endif | 116 | #endif |
117 | 117 | ||
118 | /* Settings */ | 118 | /* Settings */ |
119 | enum { | ||
120 | BUFFERING_SET_WATERMARK = 1, | ||
121 | BUFFERING_SET_CHUNKSIZE, | ||
122 | }; | ||
123 | void buf_set_watermark(size_t bytes); | 119 | void buf_set_watermark(size_t bytes); |
124 | size_t buf_get_watermark(void); | 120 | size_t buf_get_watermark(void); |
125 | 121 | ||
@@ -127,7 +123,6 @@ size_t buf_get_watermark(void); | |||
127 | struct buffering_debug { | 123 | struct buffering_debug { |
128 | int num_handles; | 124 | int num_handles; |
129 | size_t buffered_data; | 125 | size_t buffered_data; |
130 | size_t wasted_space; | ||
131 | size_t data_rem; | 126 | size_t data_rem; |
132 | size_t useful_data; | 127 | size_t useful_data; |
133 | size_t watermark; | 128 | size_t watermark; |