author     Michael Sevakis <jethead71@rockbox.org>    2010-12-27 10:05:09 +0000
committer  Michael Sevakis <jethead71@rockbox.org>    2010-12-27 10:05:09 +0000
commit     7b4eb44395bced7073e37d0b8b0d83fb2f518482 (patch)
tree       af526b3cf241e9eeb2bd3c1508791078f93f4157
parent     479414faccedeeb1ca7d0b6074bd69f2ef6dd441 (diff)
Certain data accesses in the kernel should have volatile semantics to be correct, rather than relying on the whims of the compiler. Change queue clearing to simply catch read up to write instead of resetting both to 0, so that queue_count and queue_empty return sane results under concurrency. Binary size may increase slightly, depending on whether the compiler's output was already as intended everywhere; the spots that were technically wrong were unlikely to cause any real issue.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28909 a1c6a512-1295-4272-9138-f99709370657
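
For context, the scheme the message relies on can be sketched in a few lines of C. This is a hypothetical illustration, not the Rockbox sources: demo_queue and the demo_* functions are invented names, while QUEUE_LENGTH_MASK mirrors the identifier used in the diff below. read and write are free-running volatile indices over a power-of-two ring, masked only when indexing the ring; queue_count and queue_empty derive from their difference, and clearing catches read up to write.

    /* Minimal sketch, assuming a power-of-two ring (invented names). */
    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_LENGTH      16                  /* must be a power of two */
    #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)

    struct demo_queue
    {
        int events[QUEUE_LENGTH];
        unsigned int volatile read;   /* head: next slot to dequeue */
        unsigned int volatile write;  /* tail: next slot to enqueue */
    };

    /* Callable without the queue lock: a single racing update of read or
     * write can only make the result stale, never wild. */
    static unsigned int demo_count(struct demo_queue *q)
    {
        return q->write - q->read;    /* free-running unsigned difference */
    }

    static bool demo_empty(struct demo_queue *q)
    {
        return q->read == q->write;
    }

    /* Clearing by catch-up is a single store, so an unlocked reader sees
     * either the old count or zero. Resetting read AND write to 0 takes
     * two stores, and a reader landing between them could compute a
     * garbage count. */
    static void demo_clear(struct demo_queue *q)
    {
        q->read = q->write;
    }

    int main(void)
    {
        struct demo_queue q = { .read = 0, .write = 0 };
        unsigned int wr = q.write;

        q.events[wr & QUEUE_LENGTH_MASK] = 42;        /* enqueue two events */
        q.events[(wr + 1) & QUEUE_LENGTH_MASK] = 43;
        q.write = wr + 2;                             /* publish in one store */

        printf("count=%u empty=%d\n", demo_count(&q), demo_empty(&q)); /* 2 0 */
        demo_clear(&q);
        printf("count=%u empty=%d\n", demo_count(&q), demo_empty(&q)); /* 0 1 */
        return 0;
    }
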
-rw-r--r--  firmware/export/kernel.h  10
-rw-r--r--  firmware/export/thread.h   2
-rw-r--r--  firmware/kernel.c          80
3 files changed, 54 insertions, 38 deletions
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index c7fcd93284..4656d87fb2 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -106,7 +106,7 @@ struct queue_sender_list
     struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
     struct thread_entry *list;                  /* list of senders in map */
     /* Send info for last message dequeued or NULL if replied or not sent */
-    struct thread_entry *curr_sender;
+    struct thread_entry * volatile curr_sender;
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker blocker;
 #endif
@@ -126,10 +126,10 @@ struct event_queue
 {
     struct thread_entry *queue;               /* waiter list */
     struct queue_event events[QUEUE_LENGTH];  /* list of events */
-    unsigned int read;                        /* head of queue */
-    unsigned int write;                       /* tail of queue */
+    unsigned int volatile read;               /* head of queue */
+    unsigned int volatile write;              /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    struct queue_sender_list *send;           /* list of threads waiting for
+    struct queue_sender_list * volatile send; /* list of threads waiting for
                                                  reply to an event */
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker *blocker_p;                /* priority inheritance info
@@ -171,7 +171,7 @@ struct semaphore
 struct wakeup
 {
     struct thread_entry *queue;   /* waiter list */
-    bool signalled;               /* signalled status */
+    bool volatile signalled;      /* signalled status */
     IF_COP( struct corelock cl; ) /* multiprocessor sync */
 };
 #endif
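
A note on the declarator placement used throughout these hunks: volatile binds to the token on its left inside a declarator, so struct thread_entry * volatile curr_sender makes the pointer itself volatile (its value may change out from under a reader), whereas volatile struct thread_entry * would qualify the pointed-to object instead. A tiny invented illustration, not part of the patch:

    struct thread_entry;

    struct example
    {
        /* Pointer value may change asynchronously; the compiler must
         * reload it on every access - the curr_sender/send case above. */
        struct thread_entry * volatile owner;

        /* This form would mark the pointed-to struct volatile while
         * letting the pointer itself be cached - not what is wanted: */
        /* volatile struct thread_entry *other; */
    };
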
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 87c2d2d709..ba777dc3d1 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -269,7 +269,7 @@ struct thread_entry
     /* Only enabled when using queue_send for now */
 #endif
 #if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
-    intptr_t retval;          /* Return value from a blocked operation/
+    volatile intptr_t retval; /* Return value from a blocked operation/
                                  misc. use */
 #endif
 #ifdef HAVE_PRIORITY_SCHEDULING
diff --git a/firmware/kernel.c b/firmware/kernel.c
index aaa675241b..41d1d00689 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -274,7 +274,7 @@ void yield(void)
  * more efficent to reject the majority of cases that don't need this
  * called.
  */
-static void queue_release_sender(struct thread_entry **sender,
+static void queue_release_sender(struct thread_entry * volatile * sender,
                                  intptr_t retval)
 {
     struct thread_entry *thread = *sender;
@@ -427,8 +427,11 @@ void queue_init(struct event_queue *q, bool register_queue)
 
     corelock_init(&q->cl);
     q->queue = NULL;
-    q->read = 0;
-    q->write = 0;
+    /* What garbage is in write is irrelevant because of the masking design -
+     * any other functions that empty the queue do this as well, so that
+     * queue_count and queue_empty return sane values in the case of a
+     * concurrent change without locking inside them. */
+    q->read = q->write;
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
     IF_PRIO( q->blocker_p = NULL; )
@@ -484,8 +487,7 @@ void queue_delete(struct event_queue *q)
     }
 #endif
 
-    q->read = 0;
-    q->write = 0;
+    q->read = q->write;
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -510,29 +512,31 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 
     /* auto-reply */
     queue_do_auto_reply(q->send);
 
-    if (q->read == q->write)
+    while(1)
     {
-        struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
+        struct thread_entry *current;
 
-        do
-        {
-            IF_COP( current->obj_cl = &q->cl; )
-            current->bqp = &q->queue;
+        rd = q->read;
+        if (rd != q->write) /* A waking message could disappear */
+            break;
 
-            block_thread(current);
+        current = thread_id_entry(THREAD_ID_CURRENT);
 
-            corelock_unlock(&q->cl);
-            switch_thread();
+        IF_COP( current->obj_cl = &q->cl; )
+        current->bqp = &q->queue;
 
-            oldlevel = disable_irq_save();
-            corelock_lock(&q->cl);
-        }
-        /* A message that woke us could now be gone */
-        while (q->read == q->write);
+        block_thread(current);
+
+        corelock_unlock(&q->cl);
+        switch_thread();
+
+        oldlevel = disable_irq_save();
+        corelock_lock(&q->cl);
     }
 
-    rd = q->read++ & QUEUE_LENGTH_MASK;
+    q->read = rd + 1;
+    rd &= QUEUE_LENGTH_MASK;
     *ev = q->events[rd];
 
     /* Get data for a waiting thread if one */
@@ -545,6 +549,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 {
     int oldlevel;
+    unsigned int rd, wr;
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
@@ -558,7 +563,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     /* Auto-reply */
     queue_do_auto_reply(q->send);
 
-    if (q->read == q->write && ticks > 0)
+    rd = q->read;
+    wr = q->write;
+    if (rd == wr && ticks > 0)
     {
         struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
 
@@ -572,13 +579,17 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
         oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
+
+        rd = q->read;
+        wr = q->write;
     }
 
     /* no worry about a removed message here - status is checked inside
        locks - perhaps verify if timeout or false alarm */
-    if (q->read != q->write)
+    if (rd != wr)
     {
-        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
+        q->read = rd + 1;
+        rd &= QUEUE_LENGTH_MASK;
         *ev = q->events[rd];
         /* Get data for a waiting thread if one */
         queue_do_fetch_sender(q->send, rd);
@@ -700,13 +711,16 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 {
     if(q->send && q->send->curr_sender)
     {
+        struct queue_sender_list *sender;
+
         int oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
+
+        sender = q->send;
+
         /* Double-check locking */
-        IF_COP( if(LIKELY(q->send && q->send->curr_sender)) )
-        {
-            queue_release_sender(&q->send->curr_sender, retval);
-        }
+        if(LIKELY(sender && sender->curr_sender))
+            queue_release_sender(&sender->curr_sender, retval);
 
         corelock_unlock(&q->cl);
         restore_irq(oldlevel);
@@ -716,6 +730,8 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 
 bool queue_peek(struct event_queue *q, struct queue_event *ev)
 {
+    unsigned int rd;
+
     if(q->read == q->write)
         return false;
 
@@ -724,9 +740,10 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
     int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
-    if(q->read != q->write)
+    rd = q->read;
+    if(rd != q->write)
     {
-        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
+        *ev = q->events[rd & QUEUE_LENGTH_MASK];
         have_msg = true;
     }
 
@@ -756,8 +773,7 @@ void queue_clear(struct event_queue* q)
        dequeued sent message will be handled by owning thread */
     queue_release_all_senders(q);
 
-    q->read = 0;
-    q->write = 0;
+    q->read = q->write;
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
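
The dequeue rewrites above all follow the same snapshot-then-publish discipline: load the volatile head once into rd, test that snapshot against write, then publish the new head with a single plain store, masking only the local copy. The old form, rd = q->read++ & QUEUE_LENGTH_MASK, bumped the shared index with a volatile read-modify-write; the new form makes the load, the decision, and the single store explicit, which is easier to reason about once unlocked readers of read and write exist. A hedged sketch of the pattern, reusing the hypothetical demo_queue type from earlier and assuming the caller already holds the queue's corelock/IRQ protection, as kernel.c does:

    /* Illustrative only; unlocked readers are limited to count/empty. */
    static bool demo_dequeue(struct demo_queue *q, int *ev)
    {
        unsigned int rd = q->read;   /* one volatile load, reused throughout */

        if (rd == q->write)          /* the value tested here... */
            return false;            /* empty */

        q->read = rd + 1;            /* ...is the value consumed: one store */
        *ev = q->events[rd & QUEUE_LENGTH_MASK]; /* mask the local copy only */
        return true;
    }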