Diffstat (limited to 'firmware/kernel.c')
 firmware/kernel.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 920893818a..553f6721a1 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -352,11 +352,12 @@ static void queue_remove_sender_thread_cb(struct thread_entry *thread)
  * specified for priority inheritance to operate.
  *
  * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
- * messages results in an undefined order of message replies.
+ * messages results in an undefined order of message replies or possible default
+ * replies if two or more waits happen before a reply is done.
  */
 void queue_enable_queue_send(struct event_queue *q,
                              struct queue_sender_list *send,
-                             struct thread_entry *owner)
+                             unsigned int owner_id)
 {
     int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
@@ -367,9 +368,11 @@ void queue_enable_queue_send(struct event_queue *q,
 #ifdef HAVE_PRIORITY_SCHEDULING
         send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
         send->blocker.priority = PRIORITY_IDLE;
-        send->blocker.thread = owner;
-        if(owner != NULL)
+        if(owner_id != 0)
+        {
+            send->blocker.thread = thread_id_entry(owner_id);
             q->blocker_p = &send->blocker;
+        }
 #endif
         q->send = send;
     }
@@ -377,7 +380,7 @@ void queue_enable_queue_send(struct event_queue *q,
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
 
-    (void)owner;
+    (void)owner_id;
 }
 
 /* Unblock a blocked thread at a given event index */
@@ -532,7 +535,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
 
 #ifdef HAVE_PRIORITY_SCHEDULING
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
-                  QUEUE_GET_THREAD(q) == thread_get_current(),
+                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                   "queue_wait->wrong thread\n");
 #endif
 
@@ -579,7 +582,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
-                  QUEUE_GET_THREAD(q) == thread_get_current(),
+                  QUEUE_GET_THREAD(q) == cores[CURRENT_CORE].running,
                   "queue_wait_w_tmo->wrong thread\n");
 #endif
 
@@ -914,10 +917,10 @@ void mutex_lock(struct mutex *m)
 void mutex_unlock(struct mutex *m)
 {
     /* unlocker not being the owner is an unlocking violation */
-    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
+    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == cores[CURRENT_CORE].running,
                   "mutex_unlock->wrong thread (%s != %s)\n",
                   MUTEX_GET_THREAD(m)->name,
-                  thread_get_current()->name);
+                  cores[CURRENT_CORE].running->name);
 
     if(m->count > 0)
     {
@@ -990,7 +993,7 @@ void spinlock_lock(struct spinlock *l)
 void spinlock_unlock(struct spinlock *l)
 {
     /* unlocker not being the owner is an unlocking violation */
-    KERNEL_ASSERT(l->thread == thread_get_current(),
+    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                   "spinlock_unlock->wrong thread\n");
 
     if(l->count > 0)
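
For callers, the visible effect of this change is that queue_enable_queue_send() now names the owning thread by id rather than by struct thread_entry pointer, with an id of 0 meaning "no owner". A minimal caller sketch under that assumption follows; the queue, sender list, and id variable are hypothetical and not part of this commit:

/* Hypothetical sketch, not from this commit: registering a queue for
 * synchronous sends with its receiving thread as owner. */
static struct event_queue my_queue;
static struct queue_sender_list my_queue_send;
static unsigned int my_thread_id; /* assumed saved at thread creation */

static void my_queue_setup(void)
{
    queue_init(&my_queue, true);
    /* The owner is now passed by id; the kernel resolves it with
     * thread_id_entry() so priority inheritance can boost the owner
     * while a sender blocks in queue_send(). Passing 0 instead would
     * enable synchronous sends with no designated owner. */
    queue_enable_queue_send(&my_queue, &my_queue_send, my_thread_id);
}

Note the updated comment in the first hunk: if two or more threads sit in queue_wait(_w_tmo) on such a queue before a reply is made, reply order is undefined and a sender may receive a default reply.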