path: root/firmware/kernel/queue.c
author    Michael Sevakis <jethead71@rockbox.org>    2014-08-08 06:33:51 -0400
committer Michael Sevakis <jethead71@rockbox.org>    2014-08-16 05:15:37 -0400
commit    6ed00870abd566d7267d2436c2693f5a281cda2f (patch)
tree      6011c73e302254fc73f61a1b8b1f295ded1f5d56 /firmware/kernel/queue.c
parent    eb63d8b4a2a7cbe4e98216b48a75391718fcebd7 (diff)
download  rockbox-6ed00870abd566d7267d2436c2693f5a281cda2f.tar.gz
          rockbox-6ed00870abd566d7267d2436c2693f5a281cda2f.zip
Base scheduler queues off linked lists and do cleanup/consolidation
Abstracts threading from itself a bit, changes the way its queues are handled, and does type hiding for that as well. A lot happens here because major brain surgery was already required.

Threads may now be on a run queue and a wait queue simultaneously, so an expired timer only has to wake the thread rather than also remove it from the wait queue, which simplifies the implicit wake handling. List formats change for wait queues -- doubly-linked, not circular. The timeout queue is now singly-linked. The run queue is still circular, as before.

Adds a better thread slot allocator that may keep a slot marked as used regardless of the thread state, which assists in dumping the special tasks that switch_thread was asked to perform (blocking tasks).

Deletes a lot of code yet, surprisingly, the result is larger than expected. Not minding that for the time being -- omelettes and breaking a few eggs and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
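
The wait-queue format change described above can be pictured with a small standalone sketch. This is plain C for illustration only, assuming a wait queue is a NULL-terminated doubly-linked list with the oldest waiter at the head; the actual structures and helpers the patch uses (wait_queue_init, WQ_THREAD_FIRST, wait_queue_try_remove) live in the threading code, which this page does not show.

    /* Minimal sketch (not the Rockbox implementation) of the wait-queue shape
     * described above: doubly-linked and NULL-terminated rather than circular,
     * so any waiter can be unlinked in O(1) and a timed-out thread can be woken
     * first and removed from the wait queue later. */
    #include <stddef.h>

    struct thread_entry;                  /* opaque for this sketch */

    struct wq_node {
        struct thread_entry *thread;
        struct wq_node *prev, *next;      /* NULL at both ends, not circular */
    };

    struct wait_queue {
        struct wq_node *head;             /* oldest waiter, woken first */
        struct wq_node *tail;
    };

    static void wq_init(struct wait_queue *wq)
    {
        wq->head = wq->tail = NULL;
    }

    static void wq_append(struct wait_queue *wq, struct wq_node *n)
    {
        n->next = NULL;
        n->prev = wq->tail;
        if (wq->tail)
            wq->tail->next = n;
        else
            wq->head = n;
        wq->tail = n;
    }

    /* O(1) removal from any position - the property that lets a timeout handler
     * wake a thread without walking the queue. */
    static void wq_remove(struct wait_queue *wq, struct wq_node *n)
    {
        if (n->prev) n->prev->next = n->next; else wq->head = n->next;
        if (n->next) n->next->prev = n->prev; else wq->tail = n->prev;
        n->prev = n->next = NULL;
    }

    /* Rough analogue of WQ_THREAD_FIRST(): peek the next thread to wake. */
    static struct thread_entry *wq_first_thread(struct wait_queue *wq)
    {
        return wq->head ? wq->head->thread : NULL;
    }
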
Diffstat (limited to 'firmware/kernel/queue.c')
-rw-r--r--    firmware/kernel/queue.c    145
1 file changed, 50 insertions(+), 95 deletions(-)
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
index 0ba7d7e00b..927e55274c 100644
--- a/firmware/kernel/queue.c
+++ b/firmware/kernel/queue.c
@@ -51,7 +51,7 @@ static struct
  * q->events[]:          | XX | E1 | E2 | E3 | E4 | XX |
  * q->send->senders[]:   | NULL | T1 | T2 | NULL | T3 | NULL |
  *                                \/     \/            \/
- * q->send->list:        >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
+ * q->send->list:        0<-|T0|<->|T1|<->|T2|<-------->|T3|->0
  * q->send->curr_sender:       /\
  *
  * Thread has E0 in its own struct queue_event.
@@ -65,20 +65,20 @@ static struct
  * more efficent to reject the majority of cases that don't need this
  * called.
  */
-static void queue_release_sender(struct thread_entry * volatile * sender,
-                                 intptr_t retval)
+static void queue_release_sender_inner(
+    struct thread_entry * volatile * sender, intptr_t retval)
 {
     struct thread_entry *thread = *sender;
-
     *sender = NULL; /* Clear slot. */
-#ifdef HAVE_WAKEUP_EXT_CB
-    thread->wakeup_ext_cb = NULL; /* Clear callback. */
-#endif
     thread->retval = retval; /* Assign thread-local return value. */
-    *thread->bqp = thread; /* Move blocking queue head to thread since
-                              wakeup_thread wakes the first thread in
-                              the list. */
-    wakeup_thread(thread->bqp, WAKEUP_RELEASE);
+    wakeup_thread(thread, WAKEUP_RELEASE);
+}
+
+static inline void queue_release_sender(
+    struct thread_entry * volatile * sender, intptr_t retval)
+{
+    if(UNLIKELY(*sender))
+        queue_release_sender_inner(sender, retval);
 }
 
 /* Releases any waiting threads that are queued with queue_send -
@@ -93,26 +93,11 @@ static void queue_release_all_senders(struct event_queue *q)
         {
             struct thread_entry **spp =
                 &q->send->senders[i & QUEUE_LENGTH_MASK];
-
-            if(*spp)
-            {
-                queue_release_sender(spp, 0);
-            }
+            queue_release_sender(spp, 0);
         }
     }
 }
 
-#ifdef HAVE_WAKEUP_EXT_CB
-/* Callback to do extra forced removal steps from sender list in addition
- * to the normal blocking queue removal and priority dis-inherit */
-static void queue_remove_sender_thread_cb(struct thread_entry *thread)
-{
-    *((struct thread_entry **)thread->retval) = NULL;
-    thread->wakeup_ext_cb = NULL;
-    thread->retval = 0;
-}
-#endif /* HAVE_WAKEUP_EXT_CB */
-
 /* Enables queue_send on the specified queue - caller allocates the extra
  * data structure. Only queues which are taken to be owned by a thread should
  * enable this however an official owner is not compulsory but must be
@@ -132,11 +117,12 @@ void queue_enable_queue_send(struct event_queue *q,
     if(send != NULL && q->send == NULL)
     {
         memset(send, 0, sizeof(*send));
+        wait_queue_init(&send->list);
 #ifdef HAVE_PRIORITY_SCHEDULING
-        send->blocker.priority = PRIORITY_IDLE;
+        blocker_init(&send->blocker);
         if(owner_id != 0)
         {
-            send->blocker.thread = thread_id_entry(owner_id);
+            send->blocker.thread = __thread_id_entry(owner_id);
             q->blocker_p = &send->blocker;
         }
 #endif
@@ -154,24 +140,14 @@ static inline void queue_do_unblock_sender(struct queue_sender_list *send,
                                            unsigned int i)
 {
     if(send)
-    {
-        struct thread_entry **spp = &send->senders[i];
-
-        if(UNLIKELY(*spp))
-        {
-            queue_release_sender(spp, 0);
-        }
-    }
+        queue_release_sender(&send->senders[i], 0);
 }
 
 /* Perform the auto-reply sequence */
 static inline void queue_do_auto_reply(struct queue_sender_list *send)
 {
-    if(send && send->curr_sender)
-    {
-        /* auto-reply */
+    if(send)
         queue_release_sender(&send->curr_sender, 0);
-    }
 }
 
 /* Moves waiting thread's refrence from the senders array to the
@@ -191,7 +167,6 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
         /* Move thread reference from array to the next thread
            that queue_reply will release */
         send->curr_sender = *spp;
-        (*spp)->retval = (intptr_t)spp;
         *spp = NULL;
     }
     /* else message was posted asynchronously with queue_post */
@@ -205,18 +180,28 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
 #define queue_do_fetch_sender(send, rd)
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
+static void queue_wake_waiter_inner(struct thread_entry *thread)
+{
+    wakeup_thread(thread, WAKEUP_DEFAULT);
+}
+
+static inline void queue_wake_waiter(struct event_queue *q)
+{
+    struct thread_entry *thread = WQ_THREAD_FIRST(&q->queue);
+    if(thread != NULL)
+        queue_wake_waiter_inner(thread);
+}
+
 /* Queue must not be available for use during this call */
 void queue_init(struct event_queue *q, bool register_queue)
 {
     int oldlevel = disable_irq_save();
 
     if(register_queue)
-    {
         corelock_lock(&all_queues.cl);
-    }
 
     corelock_init(&q->cl);
-    q->queue = NULL;
+    wait_queue_init(&q->queue);
     /* What garbage is in write is irrelevant because of the masking design-
      * any other functions the empty the queue do this as well so that
      * queue_count and queue_empty return sane values in the case of a
@@ -261,7 +246,7 @@ void queue_delete(struct event_queue *q)
     corelock_unlock(&all_queues.cl);
 
     /* Release thread(s) waiting on queue head */
-    thread_queue_wake(&q->queue);
+    wait_queue_wake(&q->queue);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
@@ -293,7 +278,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 
 #ifdef HAVE_PRIORITY_SCHEDULING
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
-                  QUEUE_GET_THREAD(q) == thread_self_entry(),
+                  QUEUE_GET_THREAD(q) == __running_self_entry(),
                   "queue_wait->wrong thread\n");
 #endif
 
@@ -307,18 +292,12 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 
     while(1)
     {
-        struct thread_entry *current;
-
         rd = q->read;
         if (rd != q->write) /* A waking message could disappear */
            break;
 
-        current = thread_self_entry();
-
-        IF_COP( current->obj_cl = &q->cl; )
-        current->bqp = &q->queue;
-
-        block_thread(current, TIMEOUT_BLOCK);
+        struct thread_entry *current = __running_self_entry();
+        block_thread(current, TIMEOUT_BLOCK, &q->queue, NULL);
 
         corelock_unlock(&q->cl);
         switch_thread();
@@ -349,16 +328,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     int oldlevel;
     unsigned int rd, wr;
 
-    /* this function works only with a positive number (or zero) of ticks */
-    if (ticks == TIMEOUT_BLOCK)
-    {
-        queue_wait(q, ev);
-        return;
-    }
-
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
-                  QUEUE_GET_THREAD(q) == thread_self_entry(),
+                  QUEUE_GET_THREAD(q) == __running_self_entry(),
                   "queue_wait_w_tmo->wrong thread\n");
 #endif
 
@@ -372,14 +344,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
     rd = q->read;
     wr = q->write;
-    if (rd == wr && ticks > 0)
+    if (rd == wr && ticks != 0)
     {
-        struct thread_entry *current = thread_self_entry();
-
-        IF_COP( current->obj_cl = &q->cl; )
-        current->bqp = &q->queue;
-
-        block_thread(current, ticks);
+        struct thread_entry *current = __running_self_entry();
+        block_thread(current, ticks, &q->queue, NULL);
         corelock_unlock(&q->cl);
 
         switch_thread();
@@ -389,6 +357,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
         rd = q->read;
         wr = q->write;
+
+        wait_queue_try_remove(current);
     }
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -436,7 +406,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     queue_do_unblock_sender(q->send, wr);
 
     /* Wakeup a waiting thread if any */
-    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
+    queue_wake_waiter(q);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -465,28 +435,17 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     {
         struct queue_sender_list *send = q->send;
         struct thread_entry **spp = &send->senders[wr];
-        struct thread_entry *current = thread_self_entry();
+        struct thread_entry *current = __running_self_entry();
 
-        if(UNLIKELY(*spp))
-        {
-            /* overflow protect - unblock any thread waiting at this index */
-            queue_release_sender(spp, 0);
-        }
+        /* overflow protect - unblock any thread waiting at this index */
+        queue_release_sender(spp, 0);
 
         /* Wakeup a waiting thread if any */
-        wakeup_thread(&q->queue, WAKEUP_DEFAULT);
+        queue_wake_waiter(q);
 
         /* Save thread in slot, add to list and wait for reply */
         *spp = current;
-        IF_COP( current->obj_cl = &q->cl; )
-        IF_PRIO( current->blocker = q->blocker_p; )
-#ifdef HAVE_WAKEUP_EXT_CB
-        current->wakeup_ext_cb = queue_remove_sender_thread_cb;
-#endif
-        current->retval = (intptr_t)spp;
-        current->bqp = &send->list;
-
-        block_thread(current, TIMEOUT_BLOCK);
+        block_thread(current, TIMEOUT_BLOCK, &send->list, q->blocker_p);
 
         corelock_unlock(&q->cl);
         switch_thread();
@@ -495,7 +454,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     }
 
     /* Function as queue_post if sending is not enabled */
-    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
+    queue_wake_waiter(q);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -530,16 +489,12 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 {
     if(q->send && q->send->curr_sender)
     {
-        struct queue_sender_list *sender;
-
         int oldlevel = disable_irq_save();
        corelock_lock(&q->cl);
 
-        sender = q->send;
-
-        /* Double-check locking */
-        if(LIKELY(sender && sender->curr_sender))
-            queue_release_sender(&sender->curr_sender, retval);
+        struct queue_sender_list *send = q->send;
+        if(send)
+            queue_release_sender(&send->curr_sender, retval);
 
         corelock_unlock(&q->cl);
         restore_irq(oldlevel);
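
For orientation, below is a hedged usage sketch of the synchronous send/reply path this file implements; it is illustrative only and not part of the patch. It relies on the signatures visible in the diff above (queue_init, queue_enable_queue_send, queue_wait, queue_send, queue_reply), and assumes that struct queue_event exposes id and data members and that "kernel.h" is the header declaring this API; MY_QUEUE_EV and the surrounding thread structure are hypothetical.

    #include "kernel.h"                 /* assumed header for the queue API */

    #define MY_QUEUE_EV 1               /* hypothetical event id */

    static struct event_queue my_queue;
    static struct queue_sender_list my_queue_senders;

    /* One-time setup by the owning thread: register the queue and enable
     * synchronous queue_send() on it. */
    static void my_queue_setup(unsigned int owner_thread_id)
    {
        queue_init(&my_queue, true);
        queue_enable_queue_send(&my_queue, &my_queue_senders, owner_thread_id);
    }

    /* Owner thread: wait for events; returning to queue_wait() auto-replies 0
     * to any sender that was not answered explicitly. */
    static void owner_thread_loop(void)
    {
        struct queue_event ev;
        while (1)
        {
            queue_wait(&my_queue, &ev);
            if (ev.id == MY_QUEUE_EV)
                queue_reply(&my_queue, ev.data + 1);  /* explicit reply value */
        }
    }

    /* Any other thread: blocks in queue_send() until the owner replies (or the
     * sender slot is recycled), then returns the reply value. */
    static intptr_t my_queue_call(intptr_t arg)
    {
        return queue_send(&my_queue, MY_QUEUE_EV, arg);
    }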