author     Brandon Low <lostlogic@rockbox.org>  2006-11-11 05:33:24 +0000
committer  Brandon Low <lostlogic@rockbox.org>  2006-11-11 05:33:24 +0000
commit     8a82892e52127f50efaafaeda3ae841e8bbefe2d (patch)
tree       1dfa1a18c05018045db4fe8e67d1dc3fbc5a2d72 /firmware
parent     806d8f3505ef7e477f9af4d1b07fe30cd1f28fb3 (diff)
Thread API enhancements.

1) block_thread -> block_thread + block_thread_w_tmo -- this call was always
   used in two distinct ways, so having one call with a conditional was ugly.

2) Enhance Slasheri's scheduler-controlled boost concept: now any thread may
   trigger a boost, which will last until that thread next sleeps.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@11509 a1c6a512-1295-4272-9138-f99709370657
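The first change replaces the timeout-of-zero convention with two explicit
entry points, so every call site states its intent directly. A minimal
standalone sketch of the caller-side difference (hypothetical demo code, not
part of the commit; the scheduler calls are stubbed to print what the real
implementations in firmware/thread.c would do):

    #include <stdio.h>

    struct thread_entry { int dummy; };

    /* Stubs with the new signatures from firmware/export/thread.h;
     * they only log what the real scheduler calls would do. */
    static void block_thread(struct thread_entry **thread)
    {
        (void)thread;
        printf("blocked until an explicit wakeup_thread()\n");
    }

    static void block_thread_w_tmo(struct thread_entry **thread, int timeout)
    {
        (void)thread;
        printf("blocked for at most %d ticks\n", timeout);
    }

    static struct thread_entry *queue_thread;

    int main(void)
    {
        block_thread(&queue_thread);          /* was block_thread(&t, 0) */
        block_thread_w_tmo(&queue_thread, 5); /* was block_thread(&t, 5) */
        return 0;
    }

The kernel.c hunks below show the same substitution at the real call sites.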
Diffstat (limited to 'firmware')
-rw-r--r--  firmware/export/thread.h |  25
-rw-r--r--  firmware/kernel.c        |   6
-rw-r--r--  firmware/thread.c        | 213
3 files changed, 139 insertions, 105 deletions
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 8bb9ae2608..0c1567de97 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -78,14 +78,22 @@ struct regs
 
 #endif /* !SIMULATOR */
 
-#define STATE_RUNNING 0
-#define STATE_BLOCKED 1
-#define STATE_SLEEPING 2
-#define STATE_BLOCKED_W_TMO 3
+#define STATE_RUNNING 0x00000000
+#define STATE_BLOCKED 0x20000000
+#define STATE_SLEEPING 0x40000000
+#define STATE_BLOCKED_W_TMO 0x60000000
 
-#define GET_STATE_ARG(state) (state & 0x3FFFFFFF)
-#define GET_STATE(state) ((state >> 30) & 3)
-#define SET_STATE(state,arg) ((state << 30) | (arg))
+#define THREAD_STATE_MASK 0x60000000
+#define STATE_ARG_MASK 0x1FFFFFFF
+
+#define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
+#define GET_STATE(state) (state & THREAD_STATE_MASK)
+#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
+#define CLEAR_STATE_ARG(var) (var &= ~STATE_ARG_MASK)
+
+#define STATE_BOOSTED 0x80000000
+#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
+#define SET_BOOST_STATE(var) (var |= STATE_BOOSTED)
 
 struct thread_entry {
 #ifndef SIMULATOR
@@ -133,7 +141,8 @@ void trigger_cpu_boost(void);
 void remove_thread(struct thread_entry *thread);
 void switch_thread(bool save_context, struct thread_entry **blocked_list);
 void sleep_thread(int ticks);
-void block_thread(struct thread_entry **thread, int timeout);
+void block_thread(struct thread_entry **thread);
+void block_thread_w_tmo(struct thread_entry **thread, int timeout);
 void wakeup_thread(struct thread_entry **thread);
 #ifdef HAVE_PRIORITY_SCHEDULING
 int thread_set_priority(struct thread_entry *thread, int priority);
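For reference, the new header packs a thread's state, a 29-bit state argument,
and the boost flag into the single statearg word: bits 29-30 hold the state,
bits 0-28 the argument, and bit 31 the boost flag, replacing the old
shift-based packing. A standalone sketch exercising the macros above (demo
only, not part of the commit):

    #include <stdio.h>

    #define STATE_RUNNING       0x00000000
    #define STATE_BLOCKED       0x20000000
    #define STATE_SLEEPING      0x40000000
    #define STATE_BLOCKED_W_TMO 0x60000000

    #define THREAD_STATE_MASK   0x60000000
    #define STATE_ARG_MASK      0x1FFFFFFF

    #define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
    #define GET_STATE(state)     (state & THREAD_STATE_MASK)
    #define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))

    #define STATE_BOOSTED 0x80000000
    #define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
    #define SET_BOOST_STATE(var)  (var |= STATE_BOOSTED)

    int main(void)
    {
        unsigned long statearg = 0;

        /* Sleep until a hypothetical tick 1234: state and argument
         * are packed into the one word. */
        SET_STATE(statearg, STATE_SLEEPING, 1234);
        printf("sleeping=%d wake_tick=%lu\n",
               GET_STATE(statearg) == STATE_SLEEPING,
               GET_STATE_ARG(statearg));

        /* Bit 31 sits outside both masks, so boosting does not disturb
         * the state or its argument. */
        SET_BOOST_STATE(statearg);
        printf("boosted=%d state_intact=%d\n",
               STATE_IS_BOOSTED(statearg) != 0,
               GET_STATE(statearg) == STATE_SLEEPING);
        return 0;
    }

Note that SET_STATE assigns the whole word and therefore clears bit 31, which
is why block_thread in thread.c below saves the boost flag before switching
and restores it afterwards.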
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 959122ea0a..79f26f58f2 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -132,7 +132,7 @@ void queue_wait(struct event_queue *q, struct event *ev)
 {
     if (q->read == q->write)
     {
-        block_thread(&q->thread, 0);
+        block_thread(&q->thread);
     }
 
     *ev = q->events[(q->read++) & QUEUE_LENGTH_MASK];
@@ -142,7 +142,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 {
     if (q->read == q->write && ticks > 0)
     {
-        block_thread(&q->thread, ticks);
+        block_thread_w_tmo(&q->thread, ticks);
     }
 
     if (q->read != q->write)
@@ -469,7 +469,7 @@ void mutex_lock(struct mutex *m)
     if (m->locked)
     {
         /* Wait until the lock is open... */
-        block_thread(&m->thread, 0);
+        block_thread(&m->thread);
     }
 
     /* ...and lock it */
diff --git a/firmware/thread.c b/firmware/thread.c
index 20e2a8c9b9..f0f123f074 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -36,11 +36,11 @@ struct core_entry cores[NUM_CORES] IBSS_ATTR;
 static unsigned short highest_priority IBSS_ATTR;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-static bool cpu_boosted IBSS_ATTR;
+static int boosted_threads IBSS_ATTR;
 #endif
 
 /* Define to enable additional checks for blocking violations etc. */
-// #define THREAD_EXTRA_CHECKS
+#define THREAD_EXTRA_CHECKS
 
 static const char main_thread_name[] = "main";
 
@@ -52,9 +52,8 @@ extern int stackend[];
 extern int cop_stackbegin[];
 extern int cop_stackend[];
 #else
-/* The coprocessor stack is not set up in the bootloader code, but the
-   threading is. No threads are run on the coprocessor, so set up some dummy
-   stack */
+/* The coprocessor stack is not set up in the bootloader code, but the threading
+ * is. No threads are run on the coprocessor, so set up some dummy stack */
 int *cop_stackbegin = stackbegin;
 int *cop_stackend = stackend;
 #endif
@@ -71,7 +70,8 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
     ICODE_ATTR;
 
 static inline void store_context(void* addr) __attribute__ ((always_inline));
-static inline void load_context(const void* addr) __attribute__ ((always_inline));
+static inline void load_context(const void* addr)
+    __attribute__ ((always_inline));
 
 #if defined(CPU_ARM)
 /*---------------------------------------------------------------------------
@@ -188,8 +188,7 @@ static inline void load_context(const void* addr)
 
 #endif
 
-static void add_to_list(struct thread_entry **list,
-                        struct thread_entry *thread)
+static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
 {
     if (*list == NULL)
     {
@@ -255,6 +254,7 @@ void check_sleepers(void)
              * back to life again. */
             remove_from_list(&cores[CURRENT_CORE].sleeping, current);
             add_to_list(&cores[CURRENT_CORE].running, current);
+            current->statearg = 0;
 
             /* If there is no more processes in the list, break the loop. */
             if (cores[CURRENT_CORE].sleeping == NULL)
@@ -290,14 +290,6 @@ static inline void sleep_core(void)
         if (cores[CURRENT_CORE].running != NULL)
             break;
 
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-        if (cpu_boosted)
-        {
-            cpu_boost(false);
-            cpu_boosted = false;
-        }
-#endif
-
         /* Enter sleep mode to reduce power usage, woken up on interrupt */
 #ifdef CPU_COLDFIRE
         asm volatile ("stop #0x2000");
@@ -338,22 +330,33 @@ void profile_thread(void) {
 void change_thread_state(struct thread_entry **blocked_list)
 {
     struct thread_entry *old;
+    unsigned long new_state;
 
     /* Remove the thread from the list of running threads. */
     old = cores[CURRENT_CORE].running;
-    remove_from_list(&cores[CURRENT_CORE].running, old);
+    new_state = GET_STATE(old->statearg);
 
-    /* And put the thread into a new list of inactive threads. */
-    if (GET_STATE(old->statearg) == STATE_BLOCKED)
-        add_to_list(blocked_list, old);
-    else
-        add_to_list(&cores[CURRENT_CORE].sleeping, old);
+    /* Check if a thread state change has been requested. */
+    if (new_state)
+    {
+        /* Change running thread state and switch to next thread. */
+        remove_from_list(&cores[CURRENT_CORE].running, old);
 
+        /* And put the thread into a new list of inactive threads. */
+        if (new_state == STATE_BLOCKED)
+            add_to_list(blocked_list, old);
+        else
+            add_to_list(&cores[CURRENT_CORE].sleeping, old);
+
 #ifdef HAVE_PRIORITY_SCHEDULING
         /* Reset priorities */
         if (old->priority == highest_priority)
             highest_priority = 100;
 #endif
+    }
+    else
+        /* Switch to the next running thread. */
+        cores[CURRENT_CORE].running = old->next;
 }
 
 /*---------------------------------------------------------------------------
@@ -381,19 +384,10 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
         /* Check if the current thread stack is overflown */
         stackptr = cores[CURRENT_CORE].running->stack;
         if(stackptr[0] != DEADBEEF)
             panicf("Stkov %s", cores[CURRENT_CORE].running->name);
 
-        /* Check if a thread state change has been requested. */
-        if (cores[CURRENT_CORE].running->statearg)
-        {
-            /* Change running thread state and switch to next thread. */
-            change_thread_state(blocked_list);
-        }
-        else
-        {
-            /* Switch to the next running thread. */
-            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
-        }
+        /* Rearrange thread lists as needed */
+        change_thread_state(blocked_list);
     }
 
     /* Go through the list of sleeping task to check if we need to wake up
@@ -411,11 +405,11 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
             if (priority < highest_priority)
                 highest_priority = priority;
 
-            if (priority == highest_priority || (current_tick
-                - cores[CURRENT_CORE].running->last_run > priority * 8))
-            {
+            if (priority == highest_priority ||
+                (current_tick - cores[CURRENT_CORE].running->last_run >
+                 priority * 8))
                 break;
-            }
+
             cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
         }
 
@@ -434,63 +428,94 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
 
 void sleep_thread(int ticks)
 {
+    struct thread_entry *current;
+
+    current = cores[CURRENT_CORE].running;
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    if (STATE_IS_BOOSTED(current->statearg)) {
+        boosted_threads--;
+        if (!boosted_threads)
+            cpu_boost(false);
+    }
+#endif
+
     /* Set the thread's new state and timeout and finally force a task switch
      * so that scheduler removes thread from the list of running processes
      * and puts it in list of sleeping tasks. */
-    cores[CURRENT_CORE].running->statearg =
-        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
+    SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
     switch_thread(true, NULL);
+}
+
+void block_thread(struct thread_entry **list)
+{
+    struct thread_entry *current;
+    /* Get the entry for the current running thread. */
+    current = cores[CURRENT_CORE].running;
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* Keep the boosted state over indefinite block calls, because
+     * we are waiting until the earliest time that someone else
+     * completes an action */
+    unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
+#endif
+
+#ifdef THREAD_EXTRA_CHECKS
+    /* We are not allowed to mix blocking types in one queue. */
+    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
+        panicf("Blocking violation B->*T");
+#endif
 
+    /* Set the state to blocked and ask the scheduler to switch tasks,
+     * this takes us off of the run queue until we are explicitly woken */
+    SET_STATE(current->statearg, STATE_BLOCKED, 0);
+    switch_thread(true, list);
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* Reset only the boosted flag to indicate we are up and running again. */
+    current->statearg = boost_flag;
+#else
     /* Clear all flags to indicate we are up and running again. */
-    cores[CURRENT_CORE].running->statearg = 0;
+    current->statearg = 0;
+#endif
 }
 
-void block_thread(struct thread_entry **list, int timeout)
+void block_thread_w_tmo(struct thread_entry **list, int timeout)
 {
     struct thread_entry *current;
-
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
 
-    /* At next task switch scheduler will immediately change the thread
-     * state (and we also force the task switch to happen). */
-    if (timeout)
-    {
-#ifdef THREAD_EXTRA_CHECKS
-        /* We can store only one thread to the "list" if thread is used
-         * in other list (such as core's list for sleeping tasks). */
-        if (*list)
-            panicf("Blocking violation T->*B");
-#endif
-
-        current->statearg =
-            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
-        *list = current;
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    /* A block with a timeout is a sleep situation, whatever we are waiting
+     * for _may or may not_ happen, regardless of boost state, (user input
+     * for instance), so this thread no longer needs to boost */
+    if (STATE_IS_BOOSTED(current->statearg)) {
+        boosted_threads--;
+        if (!boosted_threads)
+            cpu_boost(false);
+    }
+#endif
 
-        /* Now force a task switch and block until we have been woken up
-         * by another thread or timeout is reached. */
-        switch_thread(true, NULL);
-
-        /* If timeout is reached, we must set list back to NULL here. */
-        *list = NULL;
-    }
-    else
-    {
 #ifdef THREAD_EXTRA_CHECKS
-        /* We are not allowed to mix blocking types in one queue. */
-        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
-            panicf("Blocking violation B->*T");
+    /* We can store only one thread to the "list" if thread is used
+     * in other list (such as core's list for sleeping tasks). */
+    if (*list)
+        panicf("Blocking violation T->*B");
 #endif
-
-        current->statearg = SET_STATE(STATE_BLOCKED, 0);
-
-        /* Now force a task switch and block until we have been woken up
-         * by another thread or timeout is reached. */
-        switch_thread(true, list);
-    }
 
-    /* Clear all flags to indicate we are up and running again. */
-    current->statearg = 0;
+    /* Set the state to blocked with the specified timeout */
+    SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
+
+    /* Set the "list" for explicit wakeup */
+    *list = current;
+
+    /* Now force a task switch and block until we have been woken up
+     * by another thread or timeout is reached. */
+    switch_thread(true, NULL);
+
+    /* It is now safe for another thread to block on this "list" */
+    *list = NULL;
 }
 
 void wakeup_thread(struct thread_entry **list)
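The rewritten block_thread_w_tmo above makes the single-slot handshake
explicit: the blocker publishes itself in *list before switching away and
clears the slot once it runs again, so at most one timed blocker can occupy a
given list at a time, which is exactly what the THREAD_EXTRA_CHECKS panic
guards. A standalone model of that protocol (hypothetical names timed_block
and the stubbed switch_thread; sketch only, not Rockbox code):

    #include <stdio.h>
    #include <stdlib.h>

    struct thread_entry { const char *name; };

    static void panicf(const char *msg)
    {
        printf("panic: %s\n", msg);
        exit(1);
    }

    /* Stub: in the real scheduler, other threads run here until this
     * one is woken explicitly or its timeout expires. */
    static void switch_thread(void) { }

    static void timed_block(struct thread_entry **list,
                            struct thread_entry *current)
    {
        if (*list)                  /* the THREAD_EXTRA_CHECKS test  */
            panicf("Blocking violation T->*B");
        *list = current;            /* publish for explicit wakeup   */
        switch_thread();            /* blocked until wake or timeout */
        *list = NULL;               /* slot free for the next waiter */
    }

    int main(void)
    {
        struct thread_entry a = { "A" };
        struct thread_entry *slot = NULL;

        timed_block(&slot, &a);
        printf("slot is %s after wakeup\n", slot ? "occupied" : "free");
        return 0;
    }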
@@ -512,14 +537,11 @@ void wakeup_thread(struct thread_entry **list)
              * to the scheduler's list of running processes. */
             remove_from_list(list, thread);
             add_to_list(&cores[CURRENT_CORE].running, thread);
-            thread->statearg = 0;
-            break;
 
         case STATE_BLOCKED_W_TMO:
             /* Just remove the timeout to cause scheduler to immediately
              * wake up the thread. */
-            thread->statearg &= 0xC0000000;
-            *list = NULL;
+            thread->statearg = 0;
             break;
 
         default:
@@ -600,10 +622,12 @@ struct thread_entry*
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void)
 {
-    if (!cpu_boosted)
+    if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
     {
-        cpu_boost(true);
-        cpu_boosted = true;
+        SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
+        if (!boosted_threads)
+            cpu_boost(true);
+        boosted_threads++;
     }
 }
 #endif
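With the per-thread flag and the boosted_threads counter, the CPU stays
boosted while at least one running thread holds the flag, and cpu_boost() is
toggled only on the 0-to-1 and 1-to-0 transitions. A standalone model of the
counting scheme (hypothetical names trigger_boost and drop_boost; cpu_boost
is stubbed to print instead of touching hardware):

    #include <stdio.h>
    #include <stdbool.h>

    static int boosted_threads;

    static void cpu_boost(bool on) { printf("cpu_boost(%d)\n", on); }

    /* Models trigger_cpu_boost(): mark this thread and count it. */
    static void trigger_boost(bool *thread_flag)
    {
        if (!*thread_flag)
        {
            *thread_flag = true;
            if (!boosted_threads)
                cpu_boost(true);
            boosted_threads++;
        }
    }

    /* Models the sleep_thread/block_thread_w_tmo paths: the boost ends
     * when the holding thread next sleeps. */
    static void drop_boost(bool *thread_flag)
    {
        if (*thread_flag)
        {
            *thread_flag = false;
            boosted_threads--;
            if (!boosted_threads)
                cpu_boost(false);
        }
    }

    int main(void)
    {
        bool a = false, b = false;

        trigger_boost(&a); /* prints cpu_boost(1): first holder      */
        trigger_boost(&b); /* silent: CPU is already boosted         */
        drop_boost(&a);    /* silent: b still holds the boost        */
        drop_boost(&b);    /* prints cpu_boost(0): last holder slept */
        return 0;
    }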
@@ -675,12 +699,12 @@ void init_threads(void)
     highest_priority = 100;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-    cpu_boosted = false;
+    boosted_threads = 0;
 #endif
     add_to_list(&cores[core].running, &cores[core].threads[0]);
 
-    /* In multiple core setups, each core has a different stack. There is probably
-       a much better way to do this. */
+    /* In multiple core setups, each core has a different stack. There is
+     * probably a much better way to do this. */
     if (core == CPU)
     {
         cores[CPU].threads[0].stack = stackbegin;
@@ -688,7 +712,8 @@ void init_threads(void)
     } else {
 #if NUM_CORES > 1 /* This code path will not be run on single core targets */
         cores[COP].threads[0].stack = cop_stackbegin;
-        cores[COP].threads[0].stack_size = (int)cop_stackend - (int)cop_stackbegin;
+        cores[COP].threads[0].stack_size =
+            (int)cop_stackend - (int)cop_stackbegin;
 #endif
     }
     cores[core].threads[0].context.start = 0; /* thread 0 already running */