path: root/firmware/thread.c
author    Miika Pekkarinen <miipekk@ihme.org>  2006-09-16 16:18:11 +0000
committer Miika Pekkarinen <miipekk@ihme.org>  2006-09-16 16:18:11 +0000
commit    a85044bf9eaa0a7206c1978d3cfd57ab2d7fae2f (patch)
tree      a30695ed540bf32365d577f46398f712c7a494c4 /firmware/thread.c
parent    baf5494341cdd6cdb9590e21d429920b9bc4a2c6 (diff)
New scheduler, with priorities for swcodec platforms. Frequent task
switching should be more efficient, and threads are kept in linked lists to
eliminate unnecessary task switches and improve performance. Audio should no
longer skip on swcodec targets because of an overly CPU-hungry UI thread or
background threads. git-svn-id: svn://svn.rockbox.org/rockbox/trunk@10958 a1c6a512-1295-4272-9138-f99709370657
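
As an illustration of the API this change introduces: create_thread() now
returns a struct thread_entry pointer instead of an integer ID and takes a
priority argument on priority-scheduling builds, sleep_thread() takes a tick
count, and block_thread()/wakeup_thread() replace wake_up_thread(). A minimal
hypothetical caller could look like this (demo_thread, demo_stack and
start_demo are invented names; IF_PRIO is assumed to expand to its argument
list only when HAVE_PRIORITY_SCHEDULING is defined, and PRIORITY_BACKGROUND
to be one of the kernel's priority levels):

    static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void demo_thread(void)
    {
        for (;;)
            sleep_thread(HZ / 10);   /* park this thread for roughly 100 ms */
    }

    void start_demo(void)
    {
        struct thread_entry *t =
            create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                          "demo" IF_PRIO(, PRIORITY_BACKGROUND));
        if (t == NULL)
            panicf("no free thread slots");
    }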
Diffstat (limited to 'firmware/thread.c')
 -rw-r--r--  firmware/thread.c  485
1 file changed, 397 insertions, 88 deletions
diff --git a/firmware/thread.c b/firmware/thread.c
index eb39c7ad32..e4dcbbcf9a 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -23,12 +23,18 @@
 #include "system.h"
 #include "kernel.h"
 #include "cpu.h"
-
+#include "string.h"
 
 #define DEADBEEF ((unsigned int)0xdeadbeef)
 /* Cast to the the machine int type, whose size could be < 4. */
 
 struct core_entry cores[NUM_CORES] IBSS_ATTR;
+#ifdef HAVE_PRIORITY_SCHEDULING
+static unsigned short highest_priority IBSS_ATTR;
+#endif
+
+/* Define to enable additional checks for blocking violations etc. */
+// #define THREAD_EXTRA_CHECKS
 
 static const char main_thread_name[] = "main";
 
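The statearg manipulation throughout this patch relies on state macros that
live outside this file (presumably thread.h). Judging from the mask
0xC0000000 applied in wakeup_thread() further down, a plausible layout keeps
a 2-bit state id in the top bits of a 32-bit statearg and a 30-bit argument
(such as a wakeup tick) in the rest; a hypothetical reconstruction:

    /* Assumed encoding, not taken from the patch. */
    #define STATE_RUNNING        0
    #define STATE_BLOCKED        1
    #define STATE_SLEEPING       2
    #define STATE_BLOCKED_W_TMO  3

    #define SET_STATE(state, arg) \
        (((unsigned long)(state) << 30) | ((arg) & 0x3FFFFFFF))
    #define GET_STATE(s)     (((s) >> 30) & 3)
    #define GET_STATE_ARG(s) ((s) & 0x3FFFFFFF)

Under such a layout, statearg == 0 means "running with no pending state
change", and masking with 0xC0000000 clears a timeout argument while
preserving the state bits.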
@@ -48,7 +54,16 @@ int *cop_stackend = stackend;
 #endif
 #endif
 
-void switch_thread(void) ICODE_ATTR;
+/* Conserve IRAM
+static void add_to_list(struct thread_entry **list,
+                        struct thread_entry *thread) ICODE_ATTR;
+static void remove_from_list(struct thread_entry **list,
+                             struct thread_entry *thread) ICODE_ATTR;
+*/
+
+void switch_thread(bool save_context, struct thread_entry **blocked_list)
+        ICODE_ATTR;
+
 static inline void store_context(void* addr) __attribute__ ((always_inline));
 static inline void load_context(const void* addr) __attribute__ ((always_inline));
 
@@ -219,24 +234,109 @@ static inline void load_context(const void* addr)
 
 #endif
 
-/*---------------------------------------------------------------------------
- * Switch thread in round robin fashion.
- *---------------------------------------------------------------------------
- */
-void switch_thread(void)
-{
-#ifdef RB_PROFILE
-    profile_thread_stopped(cores[CURRENT_CORE].current_thread);
-#endif
-    int current;
-    unsigned int *stackptr;
+static void add_to_list(struct thread_entry **list,
+                        struct thread_entry *thread)
+{
+    if (*list == NULL)
+    {
+        thread->next = thread;
+        thread->prev = thread;
+        *list = thread;
+    }
+    else
+    {
+        /* Insert last */
+        thread->next = *list;
+        thread->prev = (*list)->prev;
+        thread->prev->next = thread;
+        (*list)->prev = thread;
+
+        /* Insert next
+        thread->next = (*list)->next;
+        thread->prev = *list;
+        thread->next->prev = thread;
+        (*list)->next = thread;
+        */
+    }
+}
 
-#ifdef SIMULATOR
-    /* Do nothing */
-#else
-    while (cores[CURRENT_CORE].num_sleepers == cores[CURRENT_CORE].num_threads)
+static void remove_from_list(struct thread_entry **list,
+                             struct thread_entry *thread)
+{
+    if (list != NULL)
+    {
+        if (thread == thread->next)
+        {
+            *list = NULL;
+            return;
+        }
+
+        if (thread == *list)
+            *list = thread->next;
+    }
+
+    /* Fix links to jump over the removed entry. */
+    thread->prev->next = thread->next;
+    thread->next->prev = thread->prev;
+}
+
+/* Compiler trick: Don't declare as static to prevent putting
+ * function in IRAM. */
+void check_sleepers(void)
+{
+    struct thread_entry *current, *next;
+
+    /* Check sleeping threads. */
+    current = cores[CURRENT_CORE].sleeping;
+    if (current == NULL)
+        return ;
+
+    for (;;)
+    {
+        next = current->next;
+
+        if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
+        {
+            /* Sleep timeout has been reached so bring the thread
+             * back to life again. */
+            remove_from_list(&cores[CURRENT_CORE].sleeping, current);
+            add_to_list(&cores[CURRENT_CORE].running, current);
+
+            /* If there are no more processes in the list, break the loop. */
+            if (cores[CURRENT_CORE].sleeping == NULL)
+                break;
+
+            current = next;
+            continue;
+        }
+
+        current = next;
+
+        /* Break the loop once we have walked through the list of all
+         * sleeping processes. */
+        if (current == cores[CURRENT_CORE].sleeping)
+            break;
+    }
+}
+
+static inline void sleep_core(void)
+{
+    static long last_tick = 0;
+
+    for (;;)
     {
-        /* Enter sleep mode, woken up on interrupt */
+        if (last_tick != current_tick)
+        {
+            check_sleepers();
+            last_tick = current_tick;
+        }
+
+        /* We must sleep until there is at least one process in the list
+         * of running processes. */
+        if (cores[CURRENT_CORE].running != NULL)
+            break;
+
+        /* Enter sleep mode to reduce power usage, woken up on interrupt */
 #ifdef CPU_COLDFIRE
         asm volatile ("stop #0x2000");
 #elif CONFIG_CPU == SH7034
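add_to_list() and remove_from_list() above maintain an intrusive circular
doubly-linked list: the head pointer doubles as the current thread, and
inserting "last" splices the new entry in just before the head. A standalone
host-side sketch (pared-down node type, invented names) that exercises the
same splicing logic:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next, *prev; };

    static void list_append(struct node **list, struct node *n)
    {
        if (*list == NULL)
        {
            n->next = n->prev = n;   /* a single node points at itself */
            *list = n;
        }
        else
        {
            n->next = *list;         /* splice in just before the head */
            n->prev = (*list)->prev;
            n->prev->next = n;
            (*list)->prev = n;
        }
    }

    int main(void)
    {
        struct node a, b;
        struct node *head = NULL;

        list_append(&head, &a);
        list_append(&head, &b);

        /* Two nodes form a ring: a -> b -> a in both directions. */
        assert(head == &a && a.next == &b && b.next == &a);
        assert(a.prev == &b && b.prev == &a);
        return 0;
    }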
@@ -257,49 +357,232 @@ void switch_thread(void)
             CLKCON |= 2;
 #endif
     }
-#endif
-    current = cores[CURRENT_CORE].current_thread;
-    store_context(&cores[CURRENT_CORE].threads[current].context);
-
-#if CONFIG_CPU != TCC730
-    /* Check if the current thread stack is overflown */
-    stackptr = cores[CURRENT_CORE].threads[current].stack;
-    if(stackptr[0] != DEADBEEF)
-        panicf("Stkov %s", cores[CURRENT_CORE].threads[current].name);
+}
+
+#ifdef RB_PROFILE
+static int get_threadnum(struct thread_entry *thread)
+{
+    int i;
+
+    for (i = 0; i < MAXTHREADS; i++)
+    {
+        if (&cores[CURRENT_CORE].threads[i] == thread)
+            return i;
+    }
+
+    return -1;
+}
 #endif
 
-    if (++current >= cores[CURRENT_CORE].num_threads)
-        current = 0;
+/* Compiler trick: Don't declare as static to prevent putting
+ * function in IRAM. */
+void change_thread_state(struct thread_entry **blocked_list)
+{
+    struct thread_entry *old;
+
+    /* Remove the thread from the list of running threads. */
+    old = cores[CURRENT_CORE].running;
+    remove_from_list(&cores[CURRENT_CORE].running, old);
+
+    /* And put the thread into a new list of inactive threads. */
+    if (GET_STATE(old->statearg) == STATE_BLOCKED)
+        add_to_list(blocked_list, old);
+    else
+        add_to_list(&cores[CURRENT_CORE].sleeping, old);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+    /* Reset priorities */
+    if (old->priority == highest_priority)
+        highest_priority = 100;
+#endif
+}
 
-    cores[CURRENT_CORE].current_thread = current;
-    load_context(&cores[CURRENT_CORE].threads[current].context);
+/*---------------------------------------------------------------------------
+ * Switch thread in round robin fashion.
+ *---------------------------------------------------------------------------
+ */
+void switch_thread(bool save_context, struct thread_entry **blocked_list)
+{
 #ifdef RB_PROFILE
-    profile_thread_started(cores[CURRENT_CORE].current_thread);
+    profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running));
+#endif
+    unsigned int *stackptr;
+
+#ifdef SIMULATOR
+    /* Do nothing */
+#else
+
+    /* Begin task switching by saving our current context so that we can
+     * restore the state of the current thread later to the point prior
+     * to this call. */
+    if (save_context)
+    {
+        store_context(&cores[CURRENT_CORE].running->context);
+
+# if CONFIG_CPU != TCC730
+        /* Check if the current thread stack is overflown */
+        stackptr = cores[CURRENT_CORE].running->stack;
+        if(stackptr[0] != DEADBEEF)
+            panicf("Stkov %s", cores[CURRENT_CORE].running->name);
+# endif
+
+        /* Check if a thread state change has been requested. */
+        if (cores[CURRENT_CORE].running->statearg)
+        {
+            /* Change running thread state and switch to next thread. */
+            change_thread_state(blocked_list);
+        }
+        else
+        {
+            /* Switch to the next running thread. */
+            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
+        }
+    }
+
+    /* Go through the list of sleeping tasks to check if we need to wake
+     * up any of them due to timeout. Also puts the core into sleep state
+     * until there is at least one running process again. */
+    sleep_core();
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+    /* Select the new task based on priorities and the last time a process
+     * got CPU time. */
+    for (;;)
+    {
+        int priority = cores[CURRENT_CORE].running->priority;
+
+        if (priority < highest_priority)
+            highest_priority = priority;
+
+        if (priority == highest_priority || (current_tick
+            - cores[CURRENT_CORE].running->last_run > priority * 8))
+        {
+            break;
+        }
+        cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
+    }
+
+    /* Reset the thread's last running time to the current time. */
+    cores[CURRENT_CORE].running->last_run = current_tick;
+#endif
+
+#endif
+    /* And finally give control to the next thread. */
+    load_context(&cores[CURRENT_CORE].running->context);
+
+#ifdef RB_PROFILE
+    profile_thread_started(get_threadnum(cores[CURRENT_CORE].running));
 #endif
 }
 
-void sleep_thread(void)
+void sleep_thread(int ticks)
 {
-    ++cores[CURRENT_CORE].num_sleepers;
-    switch_thread();
+    /* Set the thread's new state and timeout and finally force a task
+     * switch so that the scheduler removes the thread from the list of
+     * running processes and puts it in the list of sleeping tasks. */
+    cores[CURRENT_CORE].running->statearg =
+        SET_STATE(STATE_SLEEPING, current_tick + ticks + 1);
+    switch_thread(true, NULL);
+
+    /* Clear all flags to indicate we are up and running again. */
+    cores[CURRENT_CORE].running->statearg = 0;
 }
 
-void wake_up_thread(void)
+void block_thread(struct thread_entry **list, int timeout)
 {
-    cores[CURRENT_CORE].num_sleepers = 0;
+    struct thread_entry *current;
+
+    /* Get the entry for the current running thread. */
+    current = cores[CURRENT_CORE].running;
+
+    /* At the next task switch the scheduler will immediately change the
+     * thread state (and we also force the task switch to happen). */
+    if (timeout)
+    {
+#ifdef THREAD_EXTRA_CHECKS
+        /* We can store only one thread in the "list" if the thread is
+         * used in another list (such as the core's list of sleeping tasks). */
+        if (*list)
+            panicf("Blocking violation T->*B");
+#endif
+
+        current->statearg =
+            SET_STATE(STATE_BLOCKED_W_TMO, current_tick + timeout);
+        *list = current;
+
+        /* Now force a task switch and block until we have been woken up
+         * by another thread or the timeout is reached. */
+        switch_thread(true, NULL);
+
+        /* If the timeout is reached, we must set list back to NULL here. */
+        *list = NULL;
+    }
+    else
+    {
+#ifdef THREAD_EXTRA_CHECKS
+        /* We are not allowed to mix blocking types in one queue. */
+        if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
+            panicf("Blocking violation B->*T");
+#endif
+
+        current->statearg = SET_STATE(STATE_BLOCKED, 0);
+
+        /* Now force a task switch and block until we have been woken
+         * up by another thread. */
+        switch_thread(true, list);
+    }
+
+    /* Clear all flags to indicate we are up and running again. */
+    current->statearg = 0;
 }
 
+void wakeup_thread(struct thread_entry **list)
+{
+    struct thread_entry *thread;
+
+    /* Check if there is a blocked thread at all. */
+    if (*list == NULL)
+        return ;
+
+    /* Wake up the last thread first. */
+    thread = *list;
+
+    /* Determine the thread's current state. */
+    switch (GET_STATE(thread->statearg))
+    {
+        case STATE_BLOCKED:
+            /* Remove the thread from the list of blocked threads and add
+             * it to the scheduler's list of running processes. */
+            remove_from_list(list, thread);
+            add_to_list(&cores[CURRENT_CORE].running, thread);
+            thread->statearg = 0;
+            break;
+
+        case STATE_BLOCKED_W_TMO:
+            /* Just remove the timeout to cause the scheduler to
+             * immediately wake up the thread. */
+            thread->statearg &= 0xC0000000;
+            *list = NULL;
+            break;
+
+        default:
+            /* Nothing to do. The thread has already been woken up or its
+             * state is not blocked or blocked with timeout. */
+            return ;
+    }
+}
 
 /*---------------------------------------------------------------------------
  * Create thread on the current core.
  * Return ID if context area could be allocated, else -1.
  *---------------------------------------------------------------------------
  */
-int create_thread(void (*function)(void), void* stack, int stack_size,
-                  const char *name)
+struct thread_entry*
+    create_thread(void (*function)(void), void* stack, int stack_size,
+                  const char *name IF_PRIO(, int priority))
 {
     return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
-                                 name);
+                                 name IF_PRIO(, priority));
 }
 
 /*---------------------------------------------------------------------------
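block_thread() and wakeup_thread() above form a simple wait/notify pair: a
thread parks itself on a caller-supplied list head, and a second party moves
it back to the running list. A minimal sketch of the intended pairing
(wait_list, wait_for_event and signal_event are invented names; in the tree
the callers would be kernel primitives such as the event queues):

    static struct thread_entry *wait_list = NULL;

    void wait_for_event(void)
    {
        /* Park the current thread on wait_list; at the forced task
         * switch, change_thread_state() moves it off the running list. */
        block_thread(&wait_list, 0);
    }

    void signal_event(void)
    {
        /* Move the parked thread, if any, back onto the running list. */
        wakeup_thread(&wait_list);
    }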
@@ -307,18 +590,28 @@ int create_thread(void (*function)(void), void* stack, int stack_size,
  * Return ID if context area could be allocated, else -1.
  *---------------------------------------------------------------------------
  */
-int create_thread_on_core(unsigned int core, void (*function)(void), void* stack, int stack_size,
-                          const char *name)
+struct thread_entry*
+    create_thread_on_core(unsigned int core, void (*function)(void),
+                          void* stack, int stack_size,
+                          const char *name IF_PRIO(, int priority))
 {
     unsigned int i;
     unsigned int stacklen;
     unsigned int *stackptr;
+    int n;
     struct regs *regs;
     struct thread_entry *thread;
 
-    if (cores[core].num_threads >= MAXTHREADS)
-        return -1;
-
+    for (n = 0; n < MAXTHREADS; n++)
+    {
+        if (cores[core].threads[n].name == NULL)
+            break;
+    }
+
+    if (n == MAXTHREADS)
+        return NULL;
+
+
     /* Munge the stack to make it easy to spot stack overflows */
     stacklen = stack_size / sizeof(int);
     stackptr = stack;
@@ -328,10 +621,17 @@ int create_thread_on_core(unsigned int core, void (*function)(void), void* stack
     }
 
     /* Store interesting information */
-    thread = &cores[core].threads[cores[core].num_threads];
+    thread = &cores[core].threads[n];
     thread->name = name;
     thread->stack = stack;
     thread->stack_size = stack_size;
+    thread->statearg = 0;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    thread->priority = priority;
+    highest_priority = 100;
+#endif
+    add_to_list(&cores[core].running, thread);
+
     regs = &thread->context;
 #if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
     /* Align stack to an even 32 bit boundary */
@@ -343,8 +643,7 @@ int create_thread_on_core(unsigned int core, void (*function)(void), void* stack
 #endif
     regs->start = (void*)function;
 
-    wake_up_thread();
-    return cores[core].num_threads++; /* return the current ID, e.g for remove_thread() */
+    return thread;
 }
 
 /*---------------------------------------------------------------------------
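With the num_threads counter gone, a slot in the per-core thread table is
free exactly when its name pointer is NULL: creation scans for the first
NULL name, and remove_thread() (next hunk) frees a slot simply by nulling
the name. The allocation step above, restated as a standalone sketch
(find_free_slot is an invented helper name):

    static struct thread_entry *find_free_slot(unsigned int core)
    {
        int n;

        for (n = 0; n < MAXTHREADS; n++)
        {
            if (cores[core].threads[n].name == NULL)
                return &cores[core].threads[n];
        }

        return NULL;   /* table full; create_thread_on_core() gives up */
    }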
@@ -352,44 +651,58 @@ int create_thread_on_core(unsigned int core, void (*function)(void), void* stack
  * Parameter is the ID as returned from create_thread().
  *---------------------------------------------------------------------------
  */
-void remove_thread(int threadnum)
+void remove_thread(struct thread_entry *thread)
 {
-    remove_thread_on_core(CURRENT_CORE, threadnum);
+    if (thread == NULL)
+        thread = cores[CURRENT_CORE].running;
+
+    /* Free the entry by removing thread name. */
+    thread->name = NULL;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    highest_priority = 100;
+#endif
+
+    if (thread == cores[CURRENT_CORE].running)
+    {
+        remove_from_list(&cores[CURRENT_CORE].running, thread);
+        switch_thread(false, NULL);
+        return ;
+    }
+
+    if (thread == cores[CURRENT_CORE].sleeping)
+        remove_from_list(&cores[CURRENT_CORE].sleeping, thread);
+
+    remove_from_list(NULL, thread);
 }
 
-/*---------------------------------------------------------------------------
- * Remove a thread on the specified core from the scheduler.
- * Parameters are the core and the ID as returned from create_thread().
- *---------------------------------------------------------------------------
- */
-void remove_thread_on_core(unsigned int core, int threadnum)
+#ifdef HAVE_PRIORITY_SCHEDULING
+void thread_set_priority(struct thread_entry *thread, int priority)
 {
-    int i;
-
-    if (threadnum >= cores[core].num_threads)
-        return;
-
-    cores[core].num_threads--;
-    for (i=threadnum; i<cores[core].num_threads-1; i++)
-    { /* move all entries which are behind */
-        cores[core].threads[i] = cores[core].threads[i+1];
-    }
-
-    if (cores[core].current_thread == threadnum) /* deleting the current one? */
-        cores[core].current_thread = cores[core].num_threads; /* set beyond last, avoid store harm */
-    else if (cores[core].current_thread > threadnum) /* within the moved positions? */
-        cores[core].current_thread--; /* adjust it, point to same context again */
+    if (thread == NULL)
+        thread = cores[CURRENT_CORE].running;
+
+    thread->priority = priority;
+    highest_priority = 100;
 }
+#endif
 
 void init_threads(void)
 {
     unsigned int core = CURRENT_CORE;
 
-    cores[core].num_threads = 1; /* We have 1 thread to begin with */
-    cores[core].current_thread = 0; /* The current thread is number 0 */
+    memset(cores, 0, sizeof cores);
+    cores[core].sleeping = NULL;
+    cores[core].running = NULL;
     cores[core].threads[0].name = main_thread_name;
-/* In multiple core setups, each core has a different stack. There is probably
-   a much better way to do this. */
+    cores[core].threads[0].statearg = 0;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
+    highest_priority = 100;
+#endif
+    add_to_list(&cores[core].running, &cores[core].threads[0]);
+
+    /* In multiple core setups, each core has a different stack. There is probably
+       a much better way to do this. */
     if (core == CPU)
     {
         cores[CPU].threads[0].stack = stackbegin;
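The priority machinery resets highest_priority to 100 (seemingly a sentinel
above any real priority level) whenever the thread population or a priority
changes, forcing switch_thread() to re-learn the best value as it walks the
running list. A thread is then scheduled when it holds the numerically
lowest priority seen, or when it has waited more than priority * 8 ticks
since it last ran, which bounds starvation. A standalone illustration of
that test (eligible is an invented name):

    #include <stdbool.h>

    /* Mirrors the selection test in switch_thread(). */
    static bool eligible(int priority, int highest_priority,
                         long current_tick, long last_run)
    {
        return priority == highest_priority
            || current_tick - last_run > priority * 8;
    }

    /* Example: a priority-20 background thread that last ran at tick 1000
     * loses to a priority-10 UI thread until tick 1161, because only then
     * is 1161 - 1000 = 161 > 20 * 8 = 160. */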
@@ -405,28 +718,24 @@ void init_threads(void)
 #else
     cores[core].threads[0].context.start = 0; /* thread 0 already running */
 #endif
-    cores[core].num_sleepers = 0;
-}
-
-int thread_stack_usage(int threadnum)
-{
-    return thread_stack_usage_on_core(CURRENT_CORE, threadnum);
 }
 
-int thread_stack_usage_on_core(unsigned int core, int threadnum)
+int thread_stack_usage(const struct thread_entry *thread)
 {
     unsigned int i;
-    unsigned int *stackptr = cores[core].threads[threadnum].stack;
-
-    if (threadnum >= cores[core].num_threads)
-        return -1;
+    unsigned int *stackptr = thread->stack;
 
-    for (i = 0;i < cores[core].threads[threadnum].stack_size/sizeof(int);i++)
+    for (i = 0;i < thread->stack_size/sizeof(int);i++)
     {
         if (stackptr[i] != DEADBEEF)
             break;
     }
 
-    return ((cores[core].threads[threadnum].stack_size - i * sizeof(int)) * 100) /
-           cores[core].threads[threadnum].stack_size;
+    return ((thread->stack_size - i * sizeof(int)) * 100) /
+           thread->stack_size;
+}
+
+int thread_get_status(const struct thread_entry *thread)
+{
+    return GET_STATE(thread->statearg);
 }
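The two accessors at the end give callers a way to inspect a thread now that
numeric thread IDs are gone. A hypothetical host-side debug helper
(print_thread_info is an invented name; on target, Rockbox would use its own
output facilities rather than printf):

    #include <stdio.h>

    void print_thread_info(const struct thread_entry *t)
    {
        if (t->name == NULL)
            return;   /* unused table slot */

        printf("%s: stack %d%% used, state %d\n",
               t->name, thread_stack_usage(t), thread_get_status(t));
    }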