author    Michael Sevakis <jethead71@rockbox.org>    2014-08-08 06:33:51 -0400
committer Michael Sevakis <jethead71@rockbox.org>    2014-08-16 05:15:37 -0400
commit    6ed00870abd566d7267d2436c2693f5a281cda2f (patch)
tree      6011c73e302254fc73f61a1b8b1f295ded1f5d56 /firmware/kernel/pthread/thread.c
parent    eb63d8b4a2a7cbe4e98216b48a75391718fcebd7 (diff)
Base scheduler queues off linked lists and do cleanup/consolidation
Abstracts threading from itself a bit, changes the way its queues are handled, and does type hiding for that as well. A lot happens here because major brain surgery was already required anyway.

Threads may now be on a run queue and a wait queue simultaneously, so an expired timer only has to wake the thread rather than also remove it from the wait queue, which simplifies the implicit wake handling. List formats change for wait queues: doubly-linked, not circular. The timeout queue is now singly-linked. The run queue is still circular, as before.

Adds a better thread slot allocator that may keep a slot marked as used regardless of the thread state, which assists in dumping the special tasks that switch_thread was asked to perform (blocking tasks).

Deletes a lot of code, yet surprisingly the result comes out larger than expected. Well, I'm not minding that for the time being; omelettes, breaking a few eggs, and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
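To make the list-format change concrete, here is a minimal sketch of a doubly-linked, NULL-terminated (non-circular) wait queue of the kind the message describes. The wq_node/wait_queue names and field layout are hypothetical illustrations, not the actual thread-internal.h definitions; only the list properties (insert at the tail, unlink from any position without walking from the head) follow from the commit message.

#include <stddef.h>

/* Hypothetical node embedded in a thread entry; doubly linked and
 * NULL-terminated, unlike the old circular format. */
struct wq_node {
    struct wq_node *prev, *next;
};

struct wait_queue {
    struct wq_node *head, *tail;
};

/* Append a waiter at the tail. */
static void wq_register(struct wait_queue *wq, struct wq_node *n)
{
    n->next = NULL;
    n->prev = wq->tail;
    if (wq->tail)
        wq->tail->next = n;
    else
        wq->head = n;
    wq->tail = n;
}

/* Unlink a waiter from anywhere in the queue. The node's own links are
 * enough to find its neighbors, which is what lets a thread sit on a run
 * queue and a wait queue at the same time and be removed lazily. */
static void wq_remove(struct wait_queue *wq, struct wq_node *n)
{
    if (n->prev)
        n->prev->next = n->next;
    else
        wq->head = n->next;
    if (n->next)
        n->next->prev = n->prev;
    else
        wq->tail = n->prev;
    n->prev = n->next = NULL;
}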
Diffstat (limited to 'firmware/kernel/pthread/thread.c')
-rw-r--r--  firmware/kernel/pthread/thread.c   81
1 file changed, 15 insertions(+), 66 deletions(-)
diff --git a/firmware/kernel/pthread/thread.c b/firmware/kernel/pthread/thread.c
index 354a946698..71cbd1d136 100644
--- a/firmware/kernel/pthread/thread.c
+++ b/firmware/kernel/pthread/thread.c
@@ -3,8 +3,8 @@
 #include <errno.h>
 #include <pthread.h>
 #include "/usr/include/semaphore.h"
+#include "thread-internal.h"
 #include "kernel.h"
-#include "thread.h"
 
 #define NSEC_PER_SEC 1000000000L
 static inline void timespec_add_ns(struct timespec *a, uint64_t ns)
@@ -25,11 +25,6 @@ struct thread_init_data {
 
 __thread struct thread_entry *_current;
 
-struct thread_entry* thread_self_entry(void)
-{
-    return _current;
-}
-
 unsigned int thread_self(void)
 {
     return (unsigned) pthread_self();
@@ -70,12 +65,10 @@ static void *trampoline(void *arg)
     if (data->start_frozen)
     {
        struct corelock thaw_lock;
-        struct thread_entry *queue = NULL;
         corelock_init(&thaw_lock);
         corelock_lock(&thaw_lock);
 
         _current->lock = &thaw_lock;
-        _current->bqp = &queue;
         sem_post(&data->init_sem);
         block_thread_switch(_current, _current->lock);
         _current->lock = NULL;
@@ -97,7 +90,7 @@ void thread_thaw(unsigned int thread_id)
     if (e->lock)
     {
         corelock_lock(e->lock);
-        wakeup_thread(e->bqp);
+        wakeup_thread(e);
         corelock_unlock(e->lock);
     }
     /* else: no lock. must be running already */
@@ -135,7 +128,7 @@ unsigned int create_thread(void (*function)(void),
     data->entry = entry;
     pthread_cond_init(&entry->cond, NULL);
     entry->runnable = true;
-    entry->l = (struct thread_list) { NULL, NULL };
+
     sem_init(&data->init_sem, 0, 0);
 
     if (pthread_create(&retval, NULL, trampoline, data) < 0)
@@ -153,58 +146,19 @@ unsigned int create_thread(void (*function)(void),
     return retval;
 }
 
-static void add_to_list_l(struct thread_entry **list,
-                          struct thread_entry *thread)
-{
-    if (*list == NULL)
-    {
-        /* Insert into unoccupied list */
-        thread->l.next = thread;
-        thread->l.prev = thread;
-        *list = thread;
-    }
-    else
-    {
-        /* Insert last */
-        thread->l.next = *list;
-        thread->l.prev = (*list)->l.prev;
-        thread->l.prev->l.next = thread;
-        (*list)->l.prev = thread;
-    }
-}
-
-static void remove_from_list_l(struct thread_entry **list,
-                               struct thread_entry *thread)
-{
-    if (thread == thread->l.next)
-    {
-        /* The only item */
-        *list = NULL;
-        return;
-    }
-
-    if (thread == *list)
-    {
-        /* List becomes next item */
-        *list = thread->l.next;
-    }
-
-    /* Fix links to jump over the removed entry. */
-    thread->l.prev->l.next = thread->l.next;
-    thread->l.next->l.prev = thread->l.prev;
-}
-
 /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
  * to a corelock instance, and this corelock must be held by the caller */
 void block_thread_switch(struct thread_entry *t, struct corelock *cl)
 {
     t->runnable = false;
-    add_to_list_l(t->bqp, t);
+    if (wait_queue_ptr(t))
+        wait_queue_register(t);
     while(!t->runnable)
         pthread_cond_wait(&t->cond, &cl->mutex);
 }
 
-void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl)
+void block_thread_switch_w_tmo(struct thread_entry *t, int timeout,
+                               struct corelock *cl)
 {
     int err = 0;
     struct timespec ts;
@@ -213,30 +167,25 @@ void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corel
     timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));
 
     t->runnable = false;
-    add_to_list_l(t->bqp, t);
+    wait_queue_register(t->wqp, t);
     while(!t->runnable && !err)
         err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);
 
     if (err == ETIMEDOUT)
     { /* the thread timed out and was not explicitely woken up.
        * we need to do this now to mark it runnable again */
-        remove_from_list_l(t->bqp, t);
         t->runnable = true;
-        if (t->wakeup_ext_cb)
-            t->wakeup_ext_cb(t);
+        /* NOTE: objects do their own removal upon timer expiration */
     }
 }
 
-unsigned int wakeup_thread(struct thread_entry **list)
+unsigned int wakeup_thread(struct thread_entry *t)
 {
-    struct thread_entry *t = *list;
-    if (t)
-    {
-        remove_from_list_l(list, t);
-        t->runnable = true;
-        pthread_cond_signal(&t->cond);
-    }
-    return THREAD_NONE;
+    if (t->wqp)
+        wait_queue_remove(t);
+    t->runnable = true;
+    pthread_cond_signal(&t->cond);
+    return THREAD_OK;
 }
 
 
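As a usage note on the new API shape: the blocking path above reduces to a standard condition-variable pattern. The following self-contained pthreads sketch (a hypothetical struct waiter standing in for struct thread_entry plus its corelock; not code from this commit) shows the waiter/waker interplay, including the timeout case where the waiter only marks itself runnable and leaves wait-queue cleanup to the owning object, as the NOTE in the diff says.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct waiter {
    pthread_mutex_t mutex;    /* stands in for the corelock's mutex */
    pthread_cond_t  cond;
    bool            runnable;
};

/* Mirrors block_thread_switch_w_tmo(): block until woken or 'deadline'. */
static void timed_block(struct waiter *w, const struct timespec *deadline)
{
    int err = 0;
    pthread_mutex_lock(&w->mutex);
    w->runnable = false;
    while (!w->runnable && !err)
        err = pthread_cond_timedwait(&w->cond, &w->mutex, deadline);
    if (err == ETIMEDOUT)
        w->runnable = true;   /* wake self; queue removal happens elsewhere */
    pthread_mutex_unlock(&w->mutex);
}

/* Mirrors wakeup_thread(): mark runnable and signal. */
static void wake(struct waiter *w)
{
    pthread_mutex_lock(&w->mutex);
    w->runnable = true;       /* the real code also unlinks t from t->wqp */
    pthread_cond_signal(&w->cond);
    pthread_mutex_unlock(&w->mutex);
}

Leaving a timed-out thread on its wait queue until the owning object reaps it is what allows dropping the old wakeup_ext_cb callback: wakeup and timeout can race without either side needing to notify the other.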