author     Brandon Low <lostlogic@rockbox.org>   2007-10-27 01:37:33 +0000
committer  Brandon Low <lostlogic@rockbox.org>   2007-10-27 01:37:33 +0000
commit     404c6fbdb288de0e88eeb7484e2ab524ef438871 (patch)
tree       7bc184e1d4f06e4fa37289c724f69fe9ae916992 /apps
parent     d08131a1172b09a701de9fc0b24e045866c9fe6a (diff)
Add some const keywords, improve some comments, add a safety check or two, should have no functional difference
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15326 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps')
-rw-r--r--   apps/buffering.c   63
1 files changed, 34 insertions, 29 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index 44d3e60b1f..b8fd16f870 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -94,31 +94,31 @@
 
 /* Ring buffer helper macros */
 /* Buffer pointer (p) plus value (v), wrapped if necessary */
-#define RINGBUF_ADD(p,v) ((p+v)<buffer_len ? p+v : p+v-buffer_len)
+#define RINGBUF_ADD(p,v) (((p)+(v))<buffer_len ? (p)+(v) : (p)+(v)-buffer_len)
 /* Buffer pointer (p) minus value (v), wrapped if necessary */
-#define RINGBUF_SUB(p,v) ((p>=v) ? p-v : p+buffer_len-v)
+#define RINGBUF_SUB(p,v) ((p>=v) ? (p)-(v) : (p)+buffer_len-(v))
 /* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
 #define RINGBUF_ADD_CROSS(p1,v,p2) \
-((p1<p2) ? (int)(p1+v)-(int)p2 : (int)(p1+v-p2)-(int)buffer_len)
+((p1<p2) ? (int)((p1)+(v))-(int)(p2) : (int)((p1)+(v)-(p2))-(int)buffer_len)
 /* Bytes available in the buffer */
 #define BUF_USED RINGBUF_SUB(buf_widx, buf_ridx)
 
+/* assert(sizeof(struct memory_handle)%4==0) */
 struct memory_handle {
     int id;                    /* A unique ID for the handle */
-    enum data_type type;
-    char path[MAX_PATH];
-    int fd;
+    enum data_type type;       /* Type of data buffered with this handle */
+    char path[MAX_PATH];       /* Path if data originated in a file */
+    int fd;                    /* File descriptor to path (-1 if closed) */
     size_t data;               /* Start index of the handle's data buffer */
-    volatile size_t ridx;      /* Current read pointer, relative to the main buffer */
-    size_t widx;               /* Current write pointer */
+    volatile size_t ridx;      /* Read pointer, relative to the main buffer */
+    size_t widx;               /* Write pointer */
     size_t filesize;           /* File total length */
     size_t filerem;            /* Remaining bytes of file NOT in buffer */
     volatile size_t available; /* Available bytes to read from buffer */
     size_t offset;             /* Offset at which we started reading the file */
     struct memory_handle *next;
 };
-/* at all times, we have: filesize == offset + available + filerem */
-
+/* invariant: filesize == offset + available + filerem */
 
 static char *buffer;
 static char *guard_buffer;
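The added parentheses in the ring buffer macros matter when a caller passes a compound expression as an argument. A standalone sketch (not from the patch; BUFFER_LEN, the suffixed macro names and the values are made up) of how the old RINGBUF_SUB mis-expands:

#include <stdio.h>

#define BUFFER_LEN 100

/* Old form: macro parameters used without parentheses */
#define RINGBUF_SUB_OLD(p,v) ((p>=v) ? p-v : p+BUFFER_LEN-v)
/* New form, mirroring the patch: each parameter use is parenthesized */
#define RINGBUF_SUB_NEW(p,v) ((p>=v) ? (p)-(v) : (p)+BUFFER_LEN-(v))

int main(void)
{
    unsigned p = 50, a = 30, b = 20;
    /* Intended meaning: subtract (a - b) == 10 from p, giving 40. */
    /* Old macro expands "p-v" to "p-a - b" == 50 - 30 - 20 == 0 (wrong). */
    printf("old: %u\n", RINGBUF_SUB_OLD(p, a - b)); /* prints 0 */
    printf("new: %u\n", RINGBUF_SUB_NEW(p, a - b)); /* prints 40 */
    return 0;
}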
@@ -131,8 +131,10 @@ static volatile size_t buf_ridx; /* current reading position */
 
 /* Configuration */
 static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
-static size_t conf_filechunk = 0; /* Largest chunk the codec accepts */
-static size_t conf_preseek = 0; /* Codec pre-seek margin */
+static size_t conf_filechunk = 0; /* Bytes-per-read for buffering (impacts
+                                     responsiveness of buffering thread) */
+static size_t conf_preseek = 0; /* Distance a codec may look backwards after
+                                   seeking, to prevent double rebuffers */
 #if MEM > 8
 static size_t high_watermark = 0; /* High watermark for rebuffer */
 #endif
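The reworded conf_preseek comment describes a margin kept behind the seek target so a codec that peeks a little backwards after seeking does not force a second rebuffer. A minimal sketch of that idea only (the helper name is hypothetical, not code from buffering.c):

#include <stddef.h>

/* Hypothetical helper, for illustration: choose where buffering should start
   so a codec can still read up to `preseek` bytes behind the seek target
   without triggering another rebuffer. */
size_t buffer_start_for_seek(size_t seek_pos, size_t preseek)
{
    return (seek_pos > preseek) ? seek_pos - preseek : 0;
}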
@@ -149,7 +151,7 @@ static int base_handle_id;
 static struct mutex llist_mutex;
 
 /* Handle cache (makes find_handle faster).
-   This needs be to be global so that move_handle can invalidate it. */
+   This is global so that move_handle and rm_handle can invalidate it. */
 static struct memory_handle *cached_handle = NULL;
 
 static buffer_low_callback buffer_low_callback_funcs[MAX_BUF_CALLBACKS];
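cached_handle is a single-entry cache of the most recent lookup, which is why every function that can move or delete a handle must be able to invalidate it. A simplified sketch of the lookup pattern, assuming the globals declared in buffering.c and omitting llist_mutex (the sketch function name is hypothetical):

/* Single-entry lookup cache, sketching the pattern behind cached_handle. */
static struct memory_handle *find_handle_sketch(int handle_id)
{
    /* Fast path: repeated lookups of the same handle hit the cache. */
    if (cached_handle && cached_handle->id == handle_id)
        return cached_handle;

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id)
        m = m->next;

    if (m)
        cached_handle = m;   /* remember the hit for the next lookup */
    return m;
}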
@@ -266,8 +268,11 @@ static struct memory_handle *add_handle(size_t *data_size)
 
 /* Delete a given memory handle from the linked list
    and return true for success. Nothing is actually erased from memory. */
-static bool rm_handle(struct memory_handle *h)
+static bool rm_handle(const struct memory_handle *h)
 {
+    if (h == NULL)
+        return false;
+
     mutex_lock(&llist_mutex);
 
     if (h == first_handle) {
@@ -275,7 +280,7 @@ static bool rm_handle(struct memory_handle *h)
         if (h == cur_handle) {
             /* h was the first and last handle: the buffer is now empty */
             cur_handle = NULL;
-            buf_ridx = buf_widx;
+            buf_ridx = buf_widx = 0;
         } else {
             /* update buf_ridx to point to the new first handle */
             buf_ridx = (void *)first_handle - (void *)buffer;
@@ -285,7 +290,7 @@ static bool rm_handle(struct memory_handle *h)
         while (m && m->next != h) {
             m = m->next;
         }
-        if (h && m && m->next == h) {
+        if (m && m->next == h) {
             m->next = h->next;
             if (h == cur_handle) {
                 cur_handle = m;
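The rm_handle hunks above walk the singly linked list to find the predecessor of h before splicing it out. A stripped-down sketch of that unlink pattern, assuming the declarations in buffering.c and omitting the locking and the buf_ridx/buf_widx bookkeeping (the sketch function name is hypothetical):

/* Unlink-by-predecessor sketch for the handle list; the real rm_handle()
   additionally adjusts buf_ridx/buf_widx and cur_handle under llist_mutex. */
static bool unlink_handle_sketch(struct memory_handle *h)
{
    if (h == NULL)
        return false;

    if (h == first_handle) {
        first_handle = h->next;      /* removing the head needs no scan */
        return true;
    }

    struct memory_handle *m = first_handle;
    while (m && m->next != h)        /* find h's predecessor */
        m = m->next;

    if (m && m->next == h) {
        m->next = h->next;           /* splice h out */
        return true;
    }
    return false;                    /* h was not on the list */
}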
@@ -309,7 +314,7 @@ static bool rm_handle(struct memory_handle *h)
 
 /* Return a pointer to the memory handle of given ID.
    NULL if the handle wasn't found */
-static struct memory_handle *find_handle(int handle_id)
+static struct memory_handle *find_handle(const int handle_id)
 {
     if (handle_id <= 0)
         return NULL;
@@ -336,9 +341,8 @@ static struct memory_handle *find_handle(int handle_id)
         m = m->next;
     }
     /* This condition can only be reached with !m or m->id == handle_id */
-    if (m) {
+    if (m)
         cached_handle = m;
-    }
 
     mutex_unlock(&llist_mutex);
     return m;
@@ -853,7 +857,7 @@ int bufseek(int handle_id, size_t newpos)
    Return 0 for success and < 0 for failure */
 int bufadvance(int handle_id, off_t offset)
 {
-    struct memory_handle *h = find_handle(handle_id);
+    const struct memory_handle *h = find_handle(handle_id);
     if (!h)
         return -1;
 
@@ -865,7 +869,7 @@ int bufadvance(int handle_id, off_t offset)
    Return the number of bytes copied or < 0 for failure. */
 ssize_t bufread(int handle_id, size_t size, void *dest)
 {
-    struct memory_handle *h = find_handle(handle_id);
+    const struct memory_handle *h = find_handle(handle_id);
     if (!h)
         return -1;
 
@@ -908,7 +912,7 @@ ssize_t bufread(int handle_id, size_t size, void *dest)
    The guard buffer may be used to provide the requested size */
 ssize_t bufgetdata(int handle_id, size_t size, void **data)
 {
-    struct memory_handle *h = find_handle(handle_id);
+    const struct memory_handle *h = find_handle(handle_id);
     if (!h)
         return -1;
 
@@ -964,7 +968,7 @@ management functions for all the actual handle management work.
 /* Get a handle offset from a pointer */
 ssize_t buf_get_offset(int handle_id, void *ptr)
 {
-    struct memory_handle *h = find_handle(handle_id);
+    const struct memory_handle *h = find_handle(handle_id);
     if (!h)
         return -1;
 
@@ -973,7 +977,7 @@ ssize_t buf_get_offset(int handle_id, void *ptr)
 
 ssize_t buf_handle_offset(int handle_id)
 {
-    struct memory_handle *h = find_handle(handle_id);
+    const struct memory_handle *h = find_handle(handle_id);
     if (!h)
         return -1;
     return h->offset;
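Const-qualifying the handle returned by find_handle in these read-only paths lets the compiler reject accidental writes. A small illustration of the effect (hypothetical function, not part of the patch):

/* With a const-qualified handle, a stray write becomes a compile-time error
   instead of a silent state change. */
static ssize_t peek_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return -1;

    /* h->available = 0;   <-- would fail to compile: h points to const */
    return (ssize_t)h->offset;
}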
@@ -1142,6 +1146,7 @@ void buffering_thread(void)
             data_counters.buffered < high_watermark)
         {
             fill_buffer();
+            update_data_counters();
         }
 
         if (ata_disk_is_active() && queue_empty(&buffering_queue) &&