Diffstat (limited to 'firmware/kernel/queue.c')
 firmware/kernel/queue.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
index 379e3f62c8..22a8da9bd3 100644
--- a/firmware/kernel/queue.c
+++ b/firmware/kernel/queue.c
@@ -84,7 +84,7 @@ static void queue_release_sender(struct thread_entry * volatile * sender,
     *thread->bqp = thread; /* Move blocking queue head to thread since
                               wakeup_thread wakes the first thread in
                               the list. */
-    wakeup_thread(thread->bqp);
+    wakeup_thread(thread->bqp, WAKEUP_RELEASE);
 }
 
 /* Releases any waiting threads that are queued with queue_send -
@@ -108,16 +108,16 @@ static void queue_release_all_senders(struct event_queue *q)
     }
 }
 
+#ifdef HAVE_WAKEUP_EXT_CB
 /* Callback to do extra forced removal steps from sender list in addition
  * to the normal blocking queue removal and priority dis-inherit */
 static void queue_remove_sender_thread_cb(struct thread_entry *thread)
 {
     *((struct thread_entry **)thread->retval) = NULL;
-#ifdef HAVE_WAKEUP_EXT_CB
     thread->wakeup_ext_cb = NULL;
-#endif
     thread->retval = 0;
 }
+#endif /* HAVE_WAKEUP_EXT_CB */
 
 /* Enables queue_send on the specified queue - caller allocates the extra
  * data structure. Only queues which are taken to be owned by a thread should
@@ -139,7 +139,6 @@ void queue_enable_queue_send(struct event_queue *q,
     {
         memset(send, 0, sizeof(*send));
 #ifdef HAVE_PRIORITY_SCHEDULING
-        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
         send->blocker.priority = PRIORITY_IDLE;
         if(owner_id != 0)
         {
@@ -268,7 +267,7 @@ void queue_delete(struct event_queue *q)
     corelock_unlock(&all_queues.cl);
 
     /* Release thread(s) waiting on queue head */
-    thread_queue_wake(&q->queue);
+    thread_queue_wake(&q->queue, NULL);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
@@ -325,7 +324,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
     IF_COP( current->obj_cl = &q->cl; )
     current->bqp = &q->queue;
 
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&q->cl);
     switch_thread();
@@ -386,7 +385,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     IF_COP( current->obj_cl = &q->cl; )
     current->bqp = &q->queue;
 
-    block_thread_w_tmo(current, ticks);
+    block_thread(current, ticks);
     corelock_unlock(&q->cl);
 
     switch_thread();
@@ -443,7 +442,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     queue_do_unblock_sender(q->send, wr);
 
     /* Wakeup a waiting thread if any */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -481,7 +480,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         }
 
         /* Wakeup a waiting thread if any */
-        wakeup_thread(&q->queue);
+        wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
         /* Save thread in slot, add to list and wait for reply */
         *spp = current;
@@ -493,7 +492,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         current->retval = (intptr_t)spp;
         current->bqp = &send->list;
 
-        block_thread(current);
+        block_thread(current, TIMEOUT_BLOCK);
 
         corelock_unlock(&q->cl);
         switch_thread();
@@ -502,7 +501,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     }
 
     /* Function as queue_post if sending is not enabled */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
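
Taken together, the hunks above change the kernel's blocking/wakeup calling convention rather than the queue logic itself: wakeup_thread() and thread_queue_wake() gain an extra argument, block_thread() absorbs block_thread_w_tmo() by taking a timeout, and the explicit blocker wakeup_protocol assignment disappears. A minimal sketch of the new call sites, assuming only what this diff shows (WAKEUP_DEFAULT/WAKEUP_RELEASE select the wakeup behaviour, TIMEOUT_BLOCK means wait indefinitely):

    /* Sketch only - parameter meanings inferred from the call sites above. */

    /* Blocking: one entry point for both indefinite and timed waits. */
    current->bqp = &q->queue;
    block_thread(current, TIMEOUT_BLOCK);   /* was block_thread(current) */
    block_thread(current, ticks);           /* was block_thread_w_tmo(current, ticks) */

    /* Waking: the caller now passes a wakeup mode instead of the blocker
       carrying a wakeup_protocol function pointer. */
    wakeup_thread(&q->queue, WAKEUP_DEFAULT);    /* ordinary wakeup of first waiter */
    wakeup_thread(thread->bqp, WAKEUP_RELEASE);  /* wakeup when releasing a sender */
    thread_queue_wake(&q->queue, NULL);          /* wake all waiters on queue delete */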