summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThomas Martitz <kugel@rockbox.org>2014-01-28 15:33:40 +0100
committerThomas Martitz <kugel@rockbox.org>2014-02-02 16:59:29 +0100
commitd66346789ccdf685a6720a739b88f194f56a60e2 (patch)
treeb9c69c8801c787adbc7ec3f46f985c09599e565e
parentd608d2203aff93d6d68e7afbac7767cf95c03b8b (diff)
downloadrockbox-d66346789ccdf685a6720a739b88f194f56a60e2.tar.gz
rockbox-d66346789ccdf685a6720a739b88f194f56a60e2.zip
buflib: Check the validity of handles passed to buflib_get_data() in DEBUG builds.
Change-Id: Ic274bfb4a8e1a1a10f9a54186b9173dbc0faa4c8
-rw-r--r--firmware/buflib.c9
-rw-r--r--firmware/export/config/librockplay.h0
-rw-r--r--firmware/include/buflib.h8
-rw-r--r--firmware/kernel/pthread/corelock.c18
-rw-r--r--firmware/kernel/pthread/mutex.c21
-rw-r--r--firmware/kernel/pthread/thread.c272
6 files changed, 326 insertions, 2 deletions
diff --git a/firmware/buflib.c b/firmware/buflib.c
index f6a565715d..294d2926d3 100644
--- a/firmware/buflib.c
+++ b/firmware/buflib.c
@@ -898,6 +898,15 @@ const char* buflib_get_name(struct buflib_context *ctx, int handle)
898} 898}
899 899
900#ifdef DEBUG 900#ifdef DEBUG
901
902void *buflib_get_data(struct buflib_context *ctx, int handle)
903{
904 if (handle <= 0)
905 buflib_panic(ctx, "invalid handle access: %d", handle);
906
907 return (void*)(ctx->handle_table[-handle].alloc);
908}
909
901void buflib_check_valid(struct buflib_context *ctx) 910void buflib_check_valid(struct buflib_context *ctx)
902{ 911{
903 union buflib_data *crc_slot; 912 union buflib_data *crc_slot;
diff --git a/firmware/export/config/librockplay.h b/firmware/export/config/librockplay.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/firmware/export/config/librockplay.h
diff --git a/firmware/include/buflib.h b/firmware/include/buflib.h
index 171ab5bcd7..30484431f8 100644
--- a/firmware/include/buflib.h
+++ b/firmware/include/buflib.h
@@ -237,10 +237,14 @@ int buflib_alloc_maximum(struct buflib_context* ctx, const char* name,
237 * 237 *
238 * Returns: The start pointer of the allocation 238 * Returns: The start pointer of the allocation
239 */ 239 */
240static inline void* buflib_get_data(struct buflib_context *context, int handle) 240#ifdef DEBUG
241void* buflib_get_data(struct buflib_context *ctx, int handle);
242#else
243static inline void* buflib_get_data(struct buflib_context *ctx, int handle)
241{ 244{
242 return (void*)(context->handle_table[-handle].alloc); 245 return (void*)(ctx->handle_table[-handle].alloc);
243} 246}
247#endif
244 248
245/** 249/**
246 * Shrink the memory allocation associated with the given handle 250 * Shrink the memory allocation associated with the given handle
diff --git a/firmware/kernel/pthread/corelock.c b/firmware/kernel/pthread/corelock.c
new file mode 100644
index 0000000000..10b4329639
--- /dev/null
+++ b/firmware/kernel/pthread/corelock.c
@@ -0,0 +1,18 @@
1#include <pthread.h>
2#include "kernel.h"
3
4void corelock_init(struct corelock *lk)
5{
6 lk->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
7}
8
/* Acquire the corelock (blocking). On this hosted pthread backend a
 * corelock is simply a plain mutex. */
void corelock_lock(struct corelock *lk)
{
    pthread_mutex_lock(&lk->mutex);
}
13
14
/* Release the corelock previously taken with corelock_lock(). */
void corelock_unlock(struct corelock *lk)
{
    pthread_mutex_unlock(&lk->mutex);
}
diff --git a/firmware/kernel/pthread/mutex.c b/firmware/kernel/pthread/mutex.c
new file mode 100644
index 0000000000..49503b5d82
--- /dev/null
+++ b/firmware/kernel/pthread/mutex.c
@@ -0,0 +1,21 @@
1#include <pthread.h>
2#include "kernel.h"
3
/* Initialize a rockbox mutex on top of a recursive pthread mutex.
 * The recursive type lets the owning thread re-lock without
 * deadlocking; the temporary attribute object is destroyed once the
 * mutex has been created. */
void mutex_init(struct mutex *m)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&m->mutex, &attr);
    pthread_mutexattr_destroy(&attr);
}
12
/* Acquire the mutex (blocking; re-entrant for the owning thread). */
void mutex_lock(struct mutex *m)
{
    pthread_mutex_lock(&m->mutex);
}
17
/* Release one level of ownership of the (recursive) mutex. */
void mutex_unlock(struct mutex *m)
{
    pthread_mutex_unlock(&m->mutex);
}
diff --git a/firmware/kernel/pthread/thread.c b/firmware/kernel/pthread/thread.c
new file mode 100644
index 0000000000..a80ce876e8
--- /dev/null
+++ b/firmware/kernel/pthread/thread.c
@@ -0,0 +1,272 @@
1#include <stdlib.h>
2#include <stdbool.h>
3#include <errno.h>
4#include <pthread.h>
5#include "/usr/include/semaphore.h"
6#include "kernel.h"
7#include "thread.h"
8
#define NSEC_PER_SEC 1000000000L
/* Advance *ts by the given number of nanoseconds, normalizing tv_nsec
 * back into [0, NSEC_PER_SEC). */
static inline void timespec_add_ns(struct timespec *ts, uint64_t ns)
{
    long long total = ts->tv_nsec + ns;

    ts->tv_sec += total / NSEC_PER_SEC;
    ts->tv_nsec = total % NSEC_PER_SEC;
}
16
/* Nonzero once init_threads() has registered the main thread;
 * create_thread() aborts if called before that. */
static int threads_initialized;

/* Handshake data handed from create_thread() to the new thread's
 * trampoline; heap-allocated by the creator, freed by the trampoline. */
struct thread_init_data {
    void (*function)(void);     /* the rockbox thread's entry point */
    bool start_frozen;          /* park until thread_thaw() if set */
    sem_t init_sem;             /* signals the creator that setup is done */
    struct thread_entry *entry; /* kernel bookkeeping for the new thread */
};

/* Thread-local pointer to the calling thread's own thread_entry. */
__thread struct thread_entry *_current;
27
/* Return the calling thread's thread_entry via the thread-local pointer. */
struct thread_entry* thread_self_entry(void)
{
    return _current;
}
32
/* Return an integer id for the calling thread: the pthread handle
 * converted to unsigned (assumes pthread_t is an arithmetic type on
 * this host, as the original cast did). */
unsigned int thread_self(void)
{
    pthread_t self = pthread_self();

    return (unsigned int)self;
}
37
/* Fixed-size map from pthread id to rockbox thread_entry.
 * Zero-initialized, so a slot with thread_id == 0 is free; slot 0 is
 * claimed for the main thread by init_threads(). */
static struct thread_entry_item {
    unsigned thread_id;
    struct thread_entry *entry;
} entry_lookup[32];

/* Derive the slot count from the array itself instead of repeating the
 * magic number 32 in the search loop. */
#define ENTRY_LOOKUP_SIZE (sizeof(entry_lookup) / sizeof(entry_lookup[0]))

/* Linear scan for the slot holding thread_id; returns NULL when absent.
 * Passing 0 yields the first free slot (used by create_thread). */
static struct thread_entry_item *__find_thread_entry(unsigned thread_id)
{
    size_t i;

    for (i = 0; i < ENTRY_LOOKUP_SIZE; i++)
    {
        if (entry_lookup[i].thread_id == thread_id)
            return &entry_lookup[i];
    }
    return NULL;
}
56
57static struct thread_entry *find_thread_entry(unsigned thread_id)
58{
59 return __find_thread_entry(thread_id)->entry;
60}
61
/* pthread entry point for every thread made by create_thread():
 * publishes the thread_entry in TLS, optionally parks until
 * thread_thaw(), signals the creator via init_sem, then runs the real
 * thread function.
 * NOTE(review): in the non-frozen path data is freed right after
 * sem_post(); the creator may still be inside sem_wait() on
 * data->init_sem at that moment -- verify this cannot race. */
static void *trampoline(void *arg)
{
    struct thread_init_data *data = arg;

    void (*thread_fn)(void) = data->function;

    _current = data->entry;

    if (data->start_frozen)
    {
        /* Park on a private wait queue until thread_thaw() wakes us.
         * Publishing the lock in _current->lock is what tells
         * thread_thaw() this thread is still frozen. */
        struct corelock thaw_lock;
        struct thread_entry *queue = NULL;
        corelock_init(&thaw_lock);
        corelock_lock(&thaw_lock);

        _current->lock = &thaw_lock;
        _current->bqp = &queue;
        sem_post(&data->init_sem);
        block_thread_switch(_current, _current->lock);
        _current->lock = NULL;

        corelock_unlock(&thaw_lock);
    }
    else
        sem_post(&data->init_sem);

    free(data);
    thread_fn();

    return NULL;
}
93
94void thread_thaw(unsigned int thread_id)
95{
96 struct thread_entry *e = find_thread_entry(thread_id);
97 if (e->lock)
98 {
99 corelock_lock(e->lock);
100 wakeup_thread(e->bqp);
101 corelock_unlock(e->lock);
102 }
103 /* else: no lock. must be running already */
104}
105
106void init_threads(void)
107{
108 struct thread_entry_item *item0 = &entry_lookup[0];
109 item0->entry = calloc(1, sizeof(struct thread_entry));
110 item0->thread_id = pthread_self();
111
112 _current = item0->entry;
113 pthread_cond_init(&item0->entry->cond, NULL);
114 threads_initialized = 1;
115}
116
117
118unsigned int create_thread(void (*function)(void),
119 void* stack, size_t stack_size,
120 unsigned flags, const char *name
121 //~ IF_PRIO(, int priority)
122 IF_COP(, unsigned int core))
123{
124 pthread_t retval;
125
126 struct thread_init_data *data = calloc(1, sizeof(struct thread_init_data));
127 struct thread_entry *entry = calloc(1, sizeof(struct thread_entry));
128 struct thread_entry_item *item;
129
130 if (!threads_initialized)
131 abort();
132
133 data->function = function;
134 data->start_frozen = flags & CREATE_THREAD_FROZEN;
135 data->entry = entry;
136 pthread_cond_init(&entry->cond, NULL);
137 entry->runnable = true;
138 entry->l = (struct thread_list) { NULL, NULL };
139 sem_init(&data->init_sem, 0, 0);
140
141 if (pthread_create(&retval, NULL, trampoline, data) < 0)
142 return -1;
143
144 sem_wait(&data->init_sem);
145
146 item = __find_thread_entry(0);
147 item->thread_id = retval;
148 item->entry = entry;
149
150 pthread_setname_np(retval, name);
151
152
153 return retval;
154}
155
156static void add_to_list_l(struct thread_entry **list,
157 struct thread_entry *thread)
158{
159 if (*list == NULL)
160 {
161 /* Insert into unoccupied list */
162 thread->l.next = thread;
163 thread->l.prev = thread;
164 *list = thread;
165 }
166 else
167 {
168 /* Insert last */
169 thread->l.next = *list;
170 thread->l.prev = (*list)->l.prev;
171 thread->l.prev->l.next = thread;
172 (*list)->l.prev = thread;
173 }
174}
175
176static void remove_from_list_l(struct thread_entry **list,
177 struct thread_entry *thread)
178{
179 if (thread == thread->l.next)
180 {
181 /* The only item */
182 *list = NULL;
183 return;
184 }
185
186 if (thread == *list)
187 {
188 /* List becomes next item */
189 *list = thread->l.next;
190 }
191
192 /* Fix links to jump over the removed entry. */
193 thread->l.prev->l.next = thread->l.next;
194 thread->l.next->l.prev = thread->l.prev;
195}
196
/* Wake threads on the given wait queue until wakeup_thread() reports
 * THREAD_NONE, OR-ing its result flags together.
 * NOTE(review): wakeup_thread() in this file currently returns
 * THREAD_NONE unconditionally, so this loop stops after one iteration
 * and never wakes more than a single thread -- verify intent. */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned int result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break;

        result |= rc;
    }

    return result;
}
213
/* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
 * to a corelock instance, and this corelock must be held by the caller */

/* Block the current thread on its wait queue *t->bqp until woken.
 * The caller-held corelock doubles as the condvar mutex; the loop
 * re-checks t->runnable to absorb spurious pthread wakeups. */
void block_thread_switch(struct thread_entry *t, struct corelock *cl)
{
    t->runnable = false;
    add_to_list_l(t->bqp, t);
    while(!t->runnable)
        pthread_cond_wait(&t->cond, &cl->mutex);
}
223
/* Like block_thread_switch(), but give up after 'timeout' ticks.
 * On timeout the thread dequeues itself (an explicit wakeup would have
 * done that for it), marks itself runnable and runs wakeup_ext_cb. */
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl)
{
    int err = 0;
    struct timespec ts;

    /* pthread_cond_timedwait() takes an absolute CLOCK_REALTIME
     * deadline, so convert the tick count into an offset from now. */
    clock_gettime(CLOCK_REALTIME, &ts);
    timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));

    t->runnable = false;
    add_to_list_l(t->bqp, t);
    while(!t->runnable && !err)
        err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);

    if (err == ETIMEDOUT)
    { /* the thread timed out and was not explicitly woken up.
       * we need to do this now to mark it runnable again */
        remove_from_list_l(t->bqp, t);
        t->runnable = true;
        if (t->wakeup_ext_cb)
            t->wakeup_ext_cb(t);
    }
}
246
247unsigned int wakeup_thread(struct thread_entry **list)
248{
249 struct thread_entry *t = *list;
250 if (t)
251 {
252 remove_from_list_l(list, t);
253 t->runnable = true;
254 pthread_cond_signal(&t->cond);
255 }
256 return THREAD_NONE;
257}
258
259
/* Cooperative yield: a no-op here, since the host pthread scheduler
 * preempts threads on its own. */
void yield(void) {}
261
262unsigned sleep(unsigned ticks)
263{
264 struct timespec ts;
265
266 ts.tv_sec = ticks/HZ;
267 ts.tv_nsec = (ticks % HZ) * (NSEC_PER_SEC/HZ);
268
269 nanosleep(&ts, NULL);
270
271 return 0;
272}