summaryrefslogtreecommitdiff
path: root/firmware
diff options
context:
space:
mode:
Diffstat (limited to 'firmware')
-rw-r--r--firmware/export/thread.h3
-rw-r--r--firmware/kernel.c7
-rw-r--r--firmware/thread.c67
3 files changed, 70 insertions, 7 deletions
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index c9132af524..279ea44835 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -119,6 +119,8 @@ struct core_entry {
119 struct thread_entry threads[MAXTHREADS]; 119 struct thread_entry threads[MAXTHREADS];
120 struct thread_entry *running; 120 struct thread_entry *running;
121 struct thread_entry *sleeping; 121 struct thread_entry *sleeping;
122 struct thread_entry *waking;
123 struct thread_entry **wakeup_list;
122#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 124#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
123 int switch_to_irq_level; 125 int switch_to_irq_level;
124 #define STAY_IRQ_LEVEL -1 126 #define STAY_IRQ_LEVEL -1
@@ -193,6 +195,7 @@ void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
193#endif 195#endif
194#endif 196#endif
195void wakeup_thread(struct thread_entry **thread); 197void wakeup_thread(struct thread_entry **thread);
198void wakeup_thread_irq_safe(struct thread_entry **thread);
196#ifdef HAVE_PRIORITY_SCHEDULING 199#ifdef HAVE_PRIORITY_SCHEDULING
197int thread_set_priority(struct thread_entry *thread, int priority); 200int thread_set_priority(struct thread_entry *thread, int priority);
198int thread_get_priority(struct thread_entry *thread); 201int thread_get_priority(struct thread_entry *thread);
diff --git a/firmware/kernel.c b/firmware/kernel.c
index c5e47a81ff..e09edeff77 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -126,7 +126,7 @@ static void queue_release_sender(struct thread_entry **sender,
126 intptr_t retval) 126 intptr_t retval)
127{ 127{
128 (*sender)->retval = retval; 128 (*sender)->retval = retval;
129 wakeup_thread(sender); 129 wakeup_thread_irq_safe(sender);
130#if 0 130#if 0
131 /* This should _never_ happen - there must never be multiple 131 /* This should _never_ happen - there must never be multiple
132 threads in this list and it is a corrupt state */ 132 threads in this list and it is a corrupt state */
@@ -289,11 +289,14 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
289 } 289 }
290#endif 290#endif
291 291
292 wakeup_thread(&q->thread); 292 wakeup_thread_irq_safe(&q->thread);
293 set_irq_level(oldlevel); 293 set_irq_level(oldlevel);
294} 294}
295 295
296#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 296#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
297/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
298 to use this function - we only aim to protect the queue integrity by
299 turning them off. */
297intptr_t queue_send(struct event_queue *q, long id, intptr_t data) 300intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
298{ 301{
299 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 302 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
diff --git a/firmware/thread.c b/firmware/thread.c
index 8022d94862..cbe12b4eae 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -289,15 +289,56 @@ void check_sleepers(void)
289 } 289 }
290} 290}
291 291
292/* Safely finish waking all threads potentially woken by interrupts -
293 * statearg already zeroed in wakeup_thread. */
294static void wake_list_awaken(void)
295{
296 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
297
298 /* No need for another check in the IRQ lock since IRQs are allowed
299 only to add threads to the waking list. They won't be adding more
300 until we're done here though. */
301
302 struct thread_entry *waking = cores[CURRENT_CORE].waking;
303 struct thread_entry *running = cores[CURRENT_CORE].running;
304
305 if (running != NULL)
306 {
307 /* Place waking threads at the end of the running list. */
308 struct thread_entry *tmp;
309 waking->prev->next = running;
310 running->prev->next = waking;
311 tmp = running->prev;
312 running->prev = waking->prev;
313 waking->prev = tmp;
314 }
315 else
316 {
317 /* Just transfer the list as-is - just came out of a core
318 * sleep. */
319 cores[CURRENT_CORE].running = waking;
320 }
321
322 /* Done with waking list */
323 cores[CURRENT_CORE].waking = NULL;
324 set_irq_level(oldlevel);
325}
326
292static inline void sleep_core(void) 327static inline void sleep_core(void)
293{ 328{
294 static long last_tick = 0; 329 static long last_tick = 0;
295#if CONFIG_CPU == S3C2440 330#if CONFIG_CPU == S3C2440
296 int i; 331 int i;
297#endif 332#endif
298 333
299 for (;;) 334 for (;;)
300 { 335 {
336 /* We want to do these ASAP as it may change the decision to sleep
337 the core or the core has woken because an interrupt occurred
338 and posted a message to a queue. */
339 if (cores[CURRENT_CORE].waking != NULL)
340 wake_list_awaken();
341
301 if (last_tick != current_tick) 342 if (last_tick != current_tick)
302 { 343 {
303 check_sleepers(); 344 check_sleepers();
@@ -397,7 +438,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
397#ifdef SIMULATOR 438#ifdef SIMULATOR
398 /* Do nothing */ 439 /* Do nothing */
399#else 440#else
400 441
401 /* Begin task switching by saving our current context so that we can 442 /* Begin task switching by saving our current context so that we can
402 * restore the state of the current thread later to the point prior 443 * restore the state of the current thread later to the point prior
403 * to this call. */ 444 * to this call. */
@@ -593,10 +634,12 @@ void wakeup_thread(struct thread_entry **list)
593 { 634 {
594 case STATE_BLOCKED: 635 case STATE_BLOCKED:
595 /* Remove thread from the list of blocked threads and add it 636 /* Remove thread from the list of blocked threads and add it
596 * to the scheduler's list of running processes. */ 637 * to the scheduler's list of running processes. List removal
638 * is safe since each object maintains its own list of
639 * sleepers and queues protect against reentrancy. */
597 remove_from_list(list, thread); 640 remove_from_list(list, thread);
598 add_to_list(&cores[CURRENT_CORE].running, thread); 641 add_to_list(cores[CURRENT_CORE].wakeup_list, thread);
599 642
600 case STATE_BLOCKED_W_TMO: 643 case STATE_BLOCKED_W_TMO:
601 /* Just remove the timeout to cause scheduler to immediately 644 /* Just remove the timeout to cause scheduler to immediately
602 * wake up the thread. */ 645 * wake up the thread. */
@@ -610,6 +653,18 @@ void wakeup_thread(struct thread_entry **list)
610 } 653 }
611} 654}
612 655
656/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
657 before calling. */
658void wakeup_thread_irq_safe(struct thread_entry **list)
659{
660 struct core_entry *core = &cores[CURRENT_CORE];
661 /* Switch wakeup lists and call wakeup_thread */
662 core->wakeup_list = &core->waking;
663 wakeup_thread(list);
664 /* Switch back to normal running list */
665 core->wakeup_list = &core->running;
666}
667
613/*--------------------------------------------------------------------------- 668/*---------------------------------------------------------------------------
614 * Create a thread 669 * Create a thread
615 * If using a dual core architecture, specify which core to start the thread 670 * If using a dual core architecture, specify which core to start the thread
@@ -794,6 +849,8 @@ void init_threads(void)
794 memset(cores, 0, sizeof cores); 849 memset(cores, 0, sizeof cores);
795 cores[core].sleeping = NULL; 850 cores[core].sleeping = NULL;
796 cores[core].running = NULL; 851 cores[core].running = NULL;
852 cores[core].waking = NULL;
853 cores[core].wakeup_list = &cores[core].running;
797#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 854#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
798 cores[core].switch_to_irq_level = STAY_IRQ_LEVEL; 855 cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
799#endif 856#endif