Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c | 80
1 file changed, 48 insertions(+), 32 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index aaa675241b..41d1d00689 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -274,7 +274,7 @@ void yield(void)
  * more efficent to reject the majority of cases that don't need this
  * called.
  */
-static void queue_release_sender(struct thread_entry **sender,
+static void queue_release_sender(struct thread_entry * volatile * sender,
                                  intptr_t retval)
 {
     struct thread_entry *thread = *sender;
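
The signature change above qualifies the slot that sender points at as volatile: another core or an interrupt handler may rewrite *sender at any time, so loads and stores through it must not be cached or elided by the compiler. A minimal sketch of the distinction, with hypothetical names, assuming nothing beyond standard C:

    #include <stddef.h>

    struct thread_entry { int id; };

    /* 'sender' points at a pointer slot that another core or an IRQ
     * may rewrite at any moment; making the pointed-to pointer
     * volatile forces a real load and a real store on every access. */
    static void release(struct thread_entry * volatile *sender)
    {
        struct thread_entry *thread = *sender;  /* fresh load each time */
        *sender = NULL;                         /* store cannot be elided */
        (void)thread;  /* the real code would wake this thread */
    }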
@@ -427,8 +427,11 @@ void queue_init(struct event_queue *q, bool register_queue)
 
     corelock_init(&q->cl);
     q->queue = NULL;
-    q->read = 0;
-    q->write = 0;
+    /* What garbage is in write is irrelevant because of the masking design-
+     * any other functions that empty the queue do this as well so that
+     * queue_count and queue_empty return sane values in the case of a
+     * concurrent change without locking inside them. */
+    q->read = q->write;
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
     IF_PRIO( q->blocker_p = NULL; )
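
The new comment relies on the queue's masking design: the length is a power of two, the indices are unsigned and wrap freely, only the low bits (index & QUEUE_LENGTH_MASK) select a slot, and occupancy is the wrapping difference write - read. Under that scheme, assigning read = write empties the queue no matter what values the fields hold, which is why the initial garbage in write does not matter. A self-contained sketch of the idea (hypothetical ring type, not the kernel.c structures):

    #include <stdbool.h>

    #define QUEUE_LENGTH      16                  /* must be a power of two */
    #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)

    struct ring
    {
        unsigned int read, write;   /* wrap freely; low bits pick the slot */
        int events[QUEUE_LENGTH];
    };

    /* Occupancy is the wrapping unsigned difference; it stays sane as
     * long as the indices only ever advance and started out equal. */
    static unsigned int ring_count(const struct ring *r)
    {
        return r->write - r->read;
    }

    static bool ring_empty(const struct ring *r)
    {
        return r->read == r->write;
    }

    static void ring_clear(struct ring *r)
    {
        r->read = r->write;         /* same trick as q->read = q->write */
    }

    static void ring_push(struct ring *r, int ev)
    {
        r->events[r->write++ & QUEUE_LENGTH_MASK] = ev;
    }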
@@ -484,8 +487,7 @@ void queue_delete(struct event_queue *q)
     }
 #endif
 
-    q->read = 0;
-    q->write = 0;
+    q->read = q->write;
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -510,29 +512,31 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 
     /* auto-reply */
     queue_do_auto_reply(q->send);
 
-    if (q->read == q->write)
+    while(1)
     {
-        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
+        struct thread_entry *current;
 
-        do
-        {
-            IF_COP( current->obj_cl = &q->cl; )
-            current->bqp = &q->queue;
+        rd = q->read;
+        if (rd != q->write) /* A waking message could disappear */
+            break;
 
-            block_thread(current);
+        current = thread_id_entry(THREAD_ID_CURRENT);
 
-            corelock_unlock(&q->cl);
-            switch_thread();
+        IF_COP( current->obj_cl = &q->cl; )
+        current->bqp = &q->queue;
 
-            oldlevel = disable_irq_save();
-            corelock_lock(&q->cl);
-        }
-        /* A message that woke us could now be gone */
-        while (q->read == q->write);
+        block_thread(current);
+
+        corelock_unlock(&q->cl);
+        switch_thread();
+
+        oldlevel = disable_irq_save();
+        corelock_lock(&q->cl);
     }
 
-    rd = q->read++ & QUEUE_LENGTH_MASK;
+    q->read = rd + 1;
+    rd &= QUEUE_LENGTH_MASK;
     *ev = q->events[rd];
 
     /* Get data for a waiting thread if one */
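
The rewritten queue_wait loop moves the emptiness test to the top of a while(1): after switch_thread() returns, the message that triggered the wakeup may already have been consumed by another thread, so the waiter must re-test read != write under the lock before dequeuing, and it then dequeues through the very rd snapshot it tested. A condensed, compilable sketch of that block-and-recheck pattern, with lock(), unlock() and block_and_switch() as stand-ins for the corelock/IRQ and scheduler calls:

    #define QUEUE_LENGTH_MASK 15   /* power-of-two length minus one */

    /* Stubs for the corelock/IRQ pairing and the scheduler; the real
     * kernel.c calls block the thread and switch away. */
    static void lock(void)             { }
    static void unlock(void)           { }
    static void block_and_switch(void) { }

    static volatile unsigned int q_read, q_write;
    static int q_events[QUEUE_LENGTH_MASK + 1];

    static int wait_for_event(void)
    {
        unsigned int rd;

        lock();
        while (1)
        {
            rd = q_read;           /* snapshot the read index once */
            if (rd != q_write)     /* a message is present: claim it */
                break;

            unlock();
            block_and_switch();    /* sleep until a sender posts */
            lock();                /* woken, but the message may already
                                    * be gone, so loop and test again */
        }

        q_read = rd + 1;           /* consume via the tested snapshot */
        unlock();
        return q_events[rd & QUEUE_LENGTH_MASK];
    }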
@@ -545,6 +549,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 {
     int oldlevel;
+    unsigned int rd, wr;
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
@@ -558,7 +563,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     /* Auto-reply */
     queue_do_auto_reply(q->send);
 
-    if (q->read == q->write && ticks > 0)
+    rd = q->read;
+    wr = q->write;
+    if (rd == wr && ticks > 0)
     {
         struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
 
@@ -572,13 +579,17 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
         oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
+
+        rd = q->read;
+        wr = q->write;
     }
 
     /* no worry about a removed message here - status is checked inside
        locks - perhaps verify if timeout or false alarm */
-    if (q->read != q->write)
+    if (rd != wr)
     {
-        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
+        q->read = rd + 1;
+        rd &= QUEUE_LENGTH_MASK;
         *ev = q->events[rd];
         /* Get data for a waiting thread if one */
         queue_do_fetch_sender(q->send, rd);
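
In the timed variant, rd and wr are snapshotted before blocking and re-read once the lock is re-acquired, so the final rd != wr test operates on one consistent pair instead of two separate reads of volatile fields; an equal pair then means timeout or false alarm rather than a message. A small sketch of that post-wait decision, assuming the caller already holds the queue lock:

    /* Post-wait test of a timed queue wait; the caller holds the lock.
     * Returns nonzero and fills *ev only if a message really arrived. */
    static int fetch_after_wait(volatile unsigned int *readp,
                                volatile unsigned int *writep,
                                unsigned int mask, const int *events,
                                int *ev)
    {
        unsigned int rd = *readp;   /* re-read both indices under the */
        unsigned int wr = *writep;  /* lock: one consistent snapshot  */

        if (rd == wr)
            return 0;               /* timeout or false alarm         */

        *readp = rd + 1;            /* claim the slot                 */
        *ev = events[rd & mask];
        return 1;
    }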
@@ -700,13 +711,16 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 {
     if(q->send && q->send->curr_sender)
     {
+        struct queue_sender_list *sender;
+
         int oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
+
+        sender = q->send;
+
         /* Double-check locking */
-        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
-        {
-            queue_release_sender(&q->send->curr_sender, retval);
-        }
+        if(LIKELY(sender && sender->curr_sender))
+            queue_release_sender(&sender->curr_sender, retval);
 
         corelock_unlock(&q->cl);
         restore_irq(oldlevel);
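
queue_reply keeps the double-checked locking shape: a cheap unlocked test decides whether to take the lock at all, and the decisive test is repeated under the lock through a local snapshot of q->send, so the re-check and the release act on the same pointer even if q->send changes concurrently. The IF_COP wrapper is dropped because the re-check is now wanted on all builds. A minimal standalone sketch with stubbed primitives:

    #include <stddef.h>

    struct thread_entry { int id; };
    struct queue_sender_list { struct thread_entry * volatile curr_sender; };

    static void lock(void)   { }
    static void unlock(void) { }
    static void release_sender(struct thread_entry * volatile *s) { *s = NULL; }

    static struct queue_sender_list * volatile q_send;

    static void reply(void)
    {
        /* Unlocked fast path: may race, but only decides whether the
         * lock is worth taking at all. */
        if (q_send && q_send->curr_sender)
        {
            struct queue_sender_list *sender;

            lock();
            sender = q_send;                    /* snapshot once       */
            if (sender && sender->curr_sender)  /* re-check under lock */
                release_sender(&sender->curr_sender);
            unlock();
        }
    }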
@@ -716,6 +730,8 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 
 bool queue_peek(struct event_queue *q, struct queue_event *ev)
 {
+    unsigned int rd;
+
     if(q->read == q->write)
         return false;
 
@@ -724,9 +740,10 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
     int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
-    if(q->read != q->write)
+    rd = q->read;
+    if(rd != q->write)
     {
-        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
+        *ev = q->events[rd & QUEUE_LENGTH_MASK];
         have_msg = true;
     }
 
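
queue_peek uses the same single-snapshot idiom: the volatile read index is loaded into rd once, so the emptiness test and the array subscript are guaranteed to use the same value. The idiom in isolation, assuming the caller holds the queue lock:

    #include <stdbool.h>

    #define QUEUE_LENGTH_MASK 15

    static volatile unsigned int q_read, q_write;
    static int q_events[QUEUE_LENGTH_MASK + 1];

    /* Caller holds the queue lock. Loading q_read exactly once means
     * the emptiness test and the subscript agree even though separate
     * loads of the volatile field could observe different values. */
    static bool peek(int *ev)
    {
        unsigned int rd = q_read;

        if (rd == q_write)
            return false;

        *ev = q_events[rd & QUEUE_LENGTH_MASK];
        return true;
    }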
@@ -756,8 +773,7 @@ void queue_clear(struct event_queue* q)
        dequeued sent message will be handled by owning thread */
     queue_release_all_senders(q);
 
-    q->read = 0;
-    q->write = 0;
+    q->read = q->write;
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);