diff options
Diffstat (limited to 'firmware')
-rw-r--r-- | firmware/thread.c | 94 |
1 file changed, 50 insertions, 44 deletions
diff --git a/firmware/thread.c b/firmware/thread.c index b8bfeb4ef3..eba27b74d5 100644 --- a/firmware/thread.c +++ b/firmware/thread.c | |||
@@ -388,12 +388,11 @@ static void remove_from_list(struct thread_entry **list, | |||
388 | static void check_sleepers(void) __attribute__ ((noinline)); | 388 | static void check_sleepers(void) __attribute__ ((noinline)); |
389 | static void check_sleepers(void) | 389 | static void check_sleepers(void) |
390 | { | 390 | { |
391 | const unsigned int core = CURRENT_CORE; | ||
391 | struct thread_entry *current, *next; | 392 | struct thread_entry *current, *next; |
392 | 393 | ||
393 | /* Check sleeping threads. */ | 394 | /* Check sleeping threads. */ |
394 | current = cores[CURRENT_CORE].sleeping; | 395 | current = cores[core].sleeping; |
395 | if (current == NULL) | ||
396 | return ; | ||
397 | 396 | ||
398 | for (;;) | 397 | for (;;) |
399 | { | 398 | { |
@@ -403,12 +402,12 @@ static void check_sleepers(void) | |||
403 | { | 402 | { |
404 | /* Sleep timeout has been reached so bring the thread | 403 | /* Sleep timeout has been reached so bring the thread |
405 | * back to life again. */ | 404 | * back to life again. */ |
406 | remove_from_list(&cores[CURRENT_CORE].sleeping, current); | 405 | remove_from_list(&cores[core].sleeping, current); |
407 | add_to_list(&cores[CURRENT_CORE].running, current); | 406 | add_to_list(&cores[core].running, current); |
408 | current->statearg = 0; | 407 | current->statearg = 0; |
409 | 408 | ||
410 | /* If there is no more processes in the list, break the loop. */ | 409 | /* If there is no more processes in the list, break the loop. */ |
411 | if (cores[CURRENT_CORE].sleeping == NULL) | 410 | if (cores[core].sleeping == NULL) |
412 | break; | 411 | break; |
413 | 412 | ||
414 | current = next; | 413 | current = next; |
@@ -419,7 +418,7 @@ static void check_sleepers(void) | |||
419 | 418 | ||
420 | /* Break the loop once we have walked through the list of all | 419 | /* Break the loop once we have walked through the list of all |
421 | * sleeping processes. */ | 420 | * sleeping processes. */ |
422 | if (current == cores[CURRENT_CORE].sleeping) | 421 | if (current == cores[core].sleeping) |
423 | break; | 422 | break; |
424 | } | 423 | } |
425 | } | 424 | } |
@@ -429,14 +428,15 @@ static void check_sleepers(void) | |||
429 | static void wake_list_awaken(void) __attribute__ ((noinline)); | 428 | static void wake_list_awaken(void) __attribute__ ((noinline)); |
430 | static void wake_list_awaken(void) | 429 | static void wake_list_awaken(void) |
431 | { | 430 | { |
431 | const unsigned int core = CURRENT_CORE; | ||
432 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 432 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
433 | 433 | ||
434 | /* No need for another check in the IRQ lock since IRQs are allowed | 434 | /* No need for another check in the IRQ lock since IRQs are allowed |
435 | only to add threads to the waking list. They won't be adding more | 435 | only to add threads to the waking list. They won't be adding more |
436 | until we're done here though. */ | 436 | until we're done here though. */ |
437 | 437 | ||
438 | struct thread_entry *waking = cores[CURRENT_CORE].waking; | 438 | struct thread_entry *waking = cores[core].waking; |
439 | struct thread_entry *running = cores[CURRENT_CORE].running; | 439 | struct thread_entry *running = cores[core].running; |
440 | 440 | ||
441 | if (running != NULL) | 441 | if (running != NULL) |
442 | { | 442 | { |
@@ -452,33 +452,36 @@ static void wake_list_awaken(void) | |||
452 | { | 452 | { |
453 | /* Just transfer the list as-is - just came out of a core | 453 | /* Just transfer the list as-is - just came out of a core |
454 | * sleep. */ | 454 | * sleep. */ |
455 | cores[CURRENT_CORE].running = waking; | 455 | cores[core].running = waking; |
456 | } | 456 | } |
457 | 457 | ||
458 | /* Done with waking list */ | 458 | /* Done with waking list */ |
459 | cores[CURRENT_CORE].waking = NULL; | 459 | cores[core].waking = NULL; |
460 | set_irq_level(oldlevel); | 460 | set_irq_level(oldlevel); |
461 | } | 461 | } |
462 | 462 | ||
463 | static inline void sleep_core(void) | 463 | static inline void sleep_core(void) |
464 | { | 464 | { |
465 | const unsigned int core = CURRENT_CORE; | ||
466 | |||
465 | for (;;) | 467 | for (;;) |
466 | { | 468 | { |
467 | /* We want to do these ASAP as it may change the decision to sleep | 469 | /* We want to do these ASAP as it may change the decision to sleep |
468 | the core or the core has woken because an interrupt occurred | 470 | the core or the core has woken because an interrupt occurred |
469 | and posted a message to a queue. */ | 471 | and posted a message to a queue. */ |
470 | if (cores[CURRENT_CORE].waking != NULL) | 472 | if (cores[core].waking != NULL) |
471 | wake_list_awaken(); | 473 | wake_list_awaken(); |
472 | 474 | ||
473 | if (cores[CURRENT_CORE].last_tick != current_tick) | 475 | if (cores[core].last_tick != current_tick) |
474 | { | 476 | { |
475 | check_sleepers(); | 477 | if (cores[core].sleeping != NULL) |
476 | cores[CURRENT_CORE].last_tick = current_tick; | 478 | check_sleepers(); |
479 | cores[core].last_tick = current_tick; | ||
477 | } | 480 | } |
478 | 481 | ||
479 | /* We must sleep until there is at least one process in the list | 482 | /* We must sleep until there is at least one process in the list |
480 | * of running processes. */ | 483 | * of running processes. */ |
481 | if (cores[CURRENT_CORE].running != NULL) | 484 | if (cores[core].running != NULL) |
482 | break; | 485 | break; |
483 | 486 | ||
484 | /* Enter sleep mode to reduce power usage, woken up on interrupt */ | 487 | /* Enter sleep mode to reduce power usage, woken up on interrupt */ |
@@ -508,34 +511,35 @@ void profile_thread(void) { | |||
508 | static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline)); | 511 | static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline)); |
509 | static void change_thread_state(struct thread_entry **blocked_list) | 512 | static void change_thread_state(struct thread_entry **blocked_list) |
510 | { | 513 | { |
514 | const unsigned int core = CURRENT_CORE; | ||
511 | struct thread_entry *old; | 515 | struct thread_entry *old; |
512 | unsigned long new_state; | 516 | unsigned long new_state; |
513 | 517 | ||
514 | /* Remove the thread from the list of running threads. */ | 518 | /* Remove the thread from the list of running threads. */ |
515 | old = cores[CURRENT_CORE].running; | 519 | old = cores[core].running; |
516 | new_state = GET_STATE(old->statearg); | 520 | new_state = GET_STATE(old->statearg); |
517 | 521 | ||
518 | /* Check if a thread state change has been requested. */ | 522 | /* Check if a thread state change has been requested. */ |
519 | if (new_state) | 523 | if (new_state) |
520 | { | 524 | { |
521 | /* Change running thread state and switch to next thread. */ | 525 | /* Change running thread state and switch to next thread. */ |
522 | remove_from_list(&cores[CURRENT_CORE].running, old); | 526 | remove_from_list(&cores[core].running, old); |
523 | 527 | ||
524 | /* And put the thread into a new list of inactive threads. */ | 528 | /* And put the thread into a new list of inactive threads. */ |
525 | if (new_state == STATE_BLOCKED) | 529 | if (new_state == STATE_BLOCKED) |
526 | add_to_list(blocked_list, old); | 530 | add_to_list(blocked_list, old); |
527 | else | 531 | else |
528 | add_to_list(&cores[CURRENT_CORE].sleeping, old); | 532 | add_to_list(&cores[core].sleeping, old); |
529 | 533 | ||
530 | #ifdef HAVE_PRIORITY_SCHEDULING | 534 | #ifdef HAVE_PRIORITY_SCHEDULING |
531 | /* Reset priorities */ | 535 | /* Reset priorities */ |
532 | if (old->priority == cores[CURRENT_CORE].highest_priority) | 536 | if (old->priority == cores[core].highest_priority) |
533 | cores[CURRENT_CORE].highest_priority = 100; | 537 | cores[core].highest_priority = 100; |
534 | #endif | 538 | #endif |
535 | } | 539 | } |
536 | else | 540 | else |
537 | /* Switch to the next running thread. */ | 541 | /* Switch to the next running thread. */ |
538 | cores[CURRENT_CORE].running = old->next; | 542 | cores[core].running = old->next; |
539 | } | 543 | } |
540 | 544 | ||
541 | /*--------------------------------------------------------------------------- | 545 | /*--------------------------------------------------------------------------- |
@@ -544,8 +548,10 @@ static void change_thread_state(struct thread_entry **blocked_list) | |||
544 | */ | 548 | */ |
545 | void switch_thread(bool save_context, struct thread_entry **blocked_list) | 549 | void switch_thread(bool save_context, struct thread_entry **blocked_list) |
546 | { | 550 | { |
551 | const unsigned int core = CURRENT_CORE; | ||
552 | |||
547 | #ifdef RB_PROFILE | 553 | #ifdef RB_PROFILE |
548 | profile_thread_stopped(get_threadnum(cores[CURRENT_CORE].running)); | 554 | profile_thread_stopped(get_threadnum(cores[core].running)); |
549 | #endif | 555 | #endif |
550 | unsigned int *stackptr; | 556 | unsigned int *stackptr; |
551 | 557 | ||
@@ -560,13 +566,13 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list) | |||
560 | * to this call. */ | 566 | * to this call. */ |
561 | if (save_context) | 567 | if (save_context) |
562 | { | 568 | { |
563 | store_context(&cores[CURRENT_CORE].running->context); | 569 | store_context(&cores[core].running->context); |
564 | 570 | ||
565 | /* Check if the current thread stack is overflown */ | 571 | /* Check if the current thread stack is overflown */ |
566 | stackptr = cores[CURRENT_CORE].running->stack; | 572 | stackptr = cores[core].running->stack; |
567 | if(stackptr[0] != DEADBEEF) | 573 | if(stackptr[0] != DEADBEEF) |
568 | #ifdef THREAD_EXTRA_CHECKS | 574 | #ifdef THREAD_EXTRA_CHECKS |
569 | thread_panicf("Stkov", cores[CURRENT_CORE].running, NULL); | 575 | thread_panicf("Stkov", cores[core].running, NULL); |
570 | #else | 576 | #else |
571 | thread_stkov(); | 577 | thread_stkov(); |
572 | #endif | 578 | #endif |
@@ -577,10 +583,10 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list) | |||
577 | /* This has to be done after the scheduler is finished with the | 583 | /* This has to be done after the scheduler is finished with the |
578 | blocked_list pointer so that an IRQ can't kill us by attempting | 584 | blocked_list pointer so that an IRQ can't kill us by attempting |
579 | a wake but before attempting any core sleep. */ | 585 | a wake but before attempting any core sleep. */ |
580 | if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL) | 586 | if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL) |
581 | { | 587 | { |
582 | int level = cores[CURRENT_CORE].switch_to_irq_level; | 588 | int level = cores[core].switch_to_irq_level; |
583 | cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL; | 589 | cores[core].switch_to_irq_level = STAY_IRQ_LEVEL; |
584 | set_irq_level(level); | 590 | set_irq_level(level); |
585 | } | 591 | } |
586 | } | 592 | } |
@@ -595,34 +601,34 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list) | |||
595 | * got CPU time. */ | 601 | * got CPU time. */ |
596 | for (;;) | 602 | for (;;) |
597 | { | 603 | { |
598 | int priority = cores[CURRENT_CORE].running->priority; | 604 | int priority = cores[core].running->priority; |
599 | 605 | ||
600 | if (priority < cores[CURRENT_CORE].highest_priority) | 606 | if (priority < cores[core].highest_priority) |
601 | cores[CURRENT_CORE].highest_priority = priority; | 607 | cores[core].highest_priority = priority; |
602 | 608 | ||
603 | if (priority == cores[CURRENT_CORE].highest_priority || | 609 | if (priority == cores[core].highest_priority || |
604 | (current_tick - cores[CURRENT_CORE].running->last_run > | 610 | (current_tick - cores[core].running->last_run > |
605 | priority * 8) || | 611 | priority * 8) || |
606 | cores[CURRENT_CORE].running->priority_x != 0) | 612 | cores[core].running->priority_x != 0) |
607 | { | 613 | { |
608 | break; | 614 | break; |
609 | } | 615 | } |
610 | 616 | ||
611 | cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next; | 617 | cores[core].running = cores[core].running->next; |
612 | } | 618 | } |
613 | 619 | ||
614 | /* Reset the value of thread's last running time to the current time. */ | 620 | /* Reset the value of thread's last running time to the current time. */ |
615 | cores[CURRENT_CORE].running->last_run = current_tick; | 621 | cores[core].running->last_run = current_tick; |
616 | #endif | 622 | #endif |
617 | 623 | ||
618 | #endif | 624 | #endif |
619 | unlock_cores(); | 625 | unlock_cores(); |
620 | 626 | ||
621 | /* And finally give control to the next thread. */ | 627 | /* And finally give control to the next thread. */ |
622 | load_context(&cores[CURRENT_CORE].running->context); | 628 | load_context(&cores[core].running->context); |
623 | 629 | ||
624 | #ifdef RB_PROFILE | 630 | #ifdef RB_PROFILE |
625 | profile_thread_started(get_threadnum(cores[CURRENT_CORE].running)); | 631 | profile_thread_started(get_threadnum(cores[core].running)); |
626 | #endif | 632 | #endif |
627 | } | 633 | } |
628 | 634 | ||
@@ -819,7 +825,7 @@ void wakeup_thread_irq_safe(struct thread_entry **list) | |||
819 | struct thread_entry* | 825 | struct thread_entry* |
820 | create_thread(void (*function)(void), void* stack, int stack_size, | 826 | create_thread(void (*function)(void), void* stack, int stack_size, |
821 | const char *name IF_PRIO(, int priority) | 827 | const char *name IF_PRIO(, int priority) |
822 | IF_COP(, unsigned int core, bool fallback)) | 828 | IF_COP(, unsigned int core, bool fallback)) |
823 | { | 829 | { |
824 | unsigned int i; | 830 | unsigned int i; |
825 | unsigned int stacklen; | 831 | unsigned int stacklen; |
@@ -845,8 +851,8 @@ struct thread_entry* | |||
845 | { | 851 | { |
846 | if (fallback) | 852 | if (fallback) |
847 | return create_thread(function, stack, stack_size, name | 853 | return create_thread(function, stack, stack_size, name |
848 | IF_PRIO(, priority) IF_COP(, CPU, false)); | 854 | IF_PRIO(, priority) IF_COP(, CPU, false)); |
849 | else | 855 | else |
850 | return NULL; | 856 | return NULL; |
851 | } | 857 | } |
852 | #endif | 858 | #endif |
@@ -929,7 +935,7 @@ void trigger_cpu_boost(void) | |||
929 | void remove_thread(struct thread_entry *thread) | 935 | void remove_thread(struct thread_entry *thread) |
930 | { | 936 | { |
931 | lock_cores(); | 937 | lock_cores(); |
932 | 938 | ||
933 | if (thread == NULL) | 939 | if (thread == NULL) |
934 | thread = cores[CURRENT_CORE].running; | 940 | thread = cores[CURRENT_CORE].running; |
935 | 941 | ||
@@ -995,7 +1001,7 @@ struct thread_entry * thread_get_current(void) | |||
995 | 1001 | ||
996 | void init_threads(void) | 1002 | void init_threads(void) |
997 | { | 1003 | { |
998 | unsigned int core = CURRENT_CORE; | 1004 | const unsigned int core = CURRENT_CORE; |
999 | int slot; | 1005 | int slot; |
1000 | 1006 | ||
1001 | /* Let main CPU initialize first. */ | 1007 | /* Let main CPU initialize first. */ |