path: root/firmware/thread.c
Diffstat (limited to 'firmware/thread.c')
-rw-r--r--  firmware/thread.c  78
1 file changed, 56 insertions(+), 22 deletions(-)
diff --git a/firmware/thread.c b/firmware/thread.c
index 2281f43e53..281ab0fa54 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -64,6 +64,10 @@ int *cop_stackend = stackend;
 #endif
 #endif
 
+#if (NUM_CORES > 1)
+bool IDATA_ATTR kernel_running_on_cop = false;
+#endif
+
 /* Conserve IRAM
 static void add_to_list(struct thread_entry **list,
                         struct thread_entry *thread) ICODE_ATTR;
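The flag added above records whether the kernel has initialised on the coprocessor, so CPU-side code can check it before handing the COP any work. A minimal sketch of that guard, assuming only the names from this hunk; start_cop_work() and run_on_cpu_instead() are hypothetical stand-ins:

#if NUM_CORES > 1
    if (kernel_running_on_cop)
        start_cop_work();      /* hypothetical: the COP kernel is up */
    else
        run_on_cpu_instead();  /* hypothetical: an old bootloader never
                                  started the kernel on the COP */
#endif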
@@ -316,10 +320,13 @@ static inline void sleep_core(void)
 #elif CONFIG_CPU == SH7034
     and_b(0x7F, &SBYCR);
     asm volatile ("sleep");
-#elif CONFIG_CPU == PP5020
+#elif defined (CPU_PP)
     /* This should sleep the CPU. It appears to wake by itself on
        interrupts */
-    CPU_CTL = 0x80000000;
+    if (CURRENT_CORE == CPU)
+        CPU_CTL = PROC_SLEEP;
+    else
+        COP_CTL = PROC_SLEEP;
 #elif CONFIG_CPU == S3C2440
     CLKCON |= (1 << 2); /* set IDLE bit */
     for(i=0; i<10; i++); /* wait for IDLE */
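The sleep now targets whichever core executes it, so the branch above leans on a few PortalPlayer definitions. A sketch of plausible ones, following the PP5020 register map as Rockbox uses it: the register addresses and the 0x55 processor-ID value are assumptions here, while PROC_SLEEP == 0x80000000 follows from the literal this diff replaces.

#define PROCESSOR_ID (*(volatile unsigned long *)(0x60000000))
#define CURRENT_CORE (((PROCESSOR_ID) & 0xff) == 0x55 ? CPU : COP)

#define CPU_CTL      (*(volatile unsigned long *)(0x60007000))
#define COP_CTL      (*(volatile unsigned long *)(0x60007004))
#define PROC_SLEEP   0x80000000 /* top bit puts the core to sleep; matches
                                   the removed literal above */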
@@ -608,27 +615,16 @@ void wakeup_thread(struct thread_entry **list)
 }
 
 /*---------------------------------------------------------------------------
- * Create thread on the current core.
- * Return ID if context area could be allocated, else -1.
+ * Create a thread
+ * If using a dual core architecture, specify which core to start the thread
+ * on, and whether to fall back to the other core if it can't be created
+ * Return ID if context area could be allocated, else NULL.
  *---------------------------------------------------------------------------
  */
 struct thread_entry*
     create_thread(void (*function)(void), void* stack, int stack_size,
-                  const char *name IF_PRIO(, int priority))
-{
-    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
-                                 name IF_PRIO(, priority));
-}
-
-/*---------------------------------------------------------------------------
- * Create thread on a specific core.
- * Return ID if context area could be allocated, else -1.
- *---------------------------------------------------------------------------
- */
-struct thread_entry*
-    create_thread_on_core(unsigned int core, void (*function)(void),
-                          void* stack, int stack_size,
-                          const char *name IF_PRIO(, int priority))
+                  const char *name IF_PRIO(, int priority)
+                  IF_COP(, unsigned int core, bool fallback))
 {
     unsigned int i;
     unsigned int stacklen;
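With the merged signature, call sites pass the target core and a fallback flag through IF_COP(), which compiles away on single-core builds just as IF_PRIO() does when priorities are unavailable. A hedged usage sketch; demo_thread, demo_stack and the priority value are illustrative, not from this commit:

static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void demo_thread(void)
{
    for (;;)
        sleep(HZ); /* placeholder work */
}

void start_demo(void)
{
    /* prefer the COP, but fall back to the CPU if it can't host the thread */
    create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                  "demo" IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, COP, true));
}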
@@ -637,6 +633,29 @@ struct thread_entry*
     struct regs *regs;
     struct thread_entry *thread;
 
+/*****
+ * Ugly code alert!
+ * To prevent ifdef hell while keeping the binary size down, we define
+ * core here if it hasn't been passed as a parameter
+ *****/
+#if NUM_CORES == 1
+#define core CPU
+#endif
+
+#if NUM_CORES > 1
+/* If the kernel hasn't initialised on the COP (most likely due to an old
+ * bootloader) then refuse to start threads on the COP
+ */
+    if((core == COP) && !kernel_running_on_cop)
+    {
+        if (fallback)
+            return create_thread(function, stack, stack_size, name
+                                 IF_PRIO(, priority) IF_COP(, CPU, false));
+        else
+            return NULL;
+    }
+#endif
+
     for (n = 0; n < MAXTHREADS; n++)
     {
         if (cores[core].threads[n].name == NULL)
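The "#define core CPU" trick compiles because single-core builds have no core parameter at all: IF_COP() swallows its arguments there, and the macro lets every cores[core] reference below resolve anyway. A sketch of how such a macro is commonly defined (the exact Rockbox definition may differ):

#if NUM_CORES > 1
#define IF_COP(...) __VA_ARGS__ /* multi-core: the extra parameters exist */
#else
#define IF_COP(...)             /* single core: the arguments compile away */
#endif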
@@ -644,8 +663,15 @@ struct thread_entry*
     }
 
     if (n == MAXTHREADS)
-        return NULL;
-
+    {
+#if NUM_CORES > 1
+        if (fallback)
+            return create_thread(function, stack, stack_size, name
+                                 IF_PRIO(, priority) IF_COP(, 1 - core, fallback));
+        else
+#endif
+            return NULL;
+    }
 
     /* Munge the stack to make it easy to spot stack overflows */
     stacklen = stack_size / sizeof(int);
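On the retry path, 1 - core names the opposite core, assuming the usual numbering CPU == 0 and COP == 1. The "munge" in the trailing context refers to pre-filling the stack with a marker word so thread_stack_usage() can later find the high-water mark; a sketch of the idea, with 0xdeadbeef as the assumed marker value:

    /* fill the stack with a pattern; words still untouched at measurement
       time show how deep the thread ever got */
    stacklen = stack_size / sizeof(int);
    for (i = 0; i < stacklen; i++)
        ((unsigned int *)stack)[i] = 0xdeadbeef;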
@@ -677,6 +703,9 @@ struct thread_entry*
     THREAD_CPU_INIT(core, thread);
 
     return thread;
+#if NUM_CORES == 1
+#undef core
+#endif
 }
 
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -751,7 +780,8 @@ void init_threads(void)
 {
     unsigned int core = CURRENT_CORE;
 
-    memset(cores, 0, sizeof cores);
+    if (core == CPU)
+        memset(cores, 0, sizeof cores);
     cores[core].sleeping = NULL;
     cores[core].running = NULL;
     cores[core].threads[0].name = main_thread_name;
@@ -779,6 +809,10 @@ void init_threads(void)
 #endif
     }
     cores[core].threads[0].context.start = 0; /* thread 0 already running */
+#if NUM_CORES > 1
+    if(core == COP)
+        kernel_running_on_cop = true; /* can we use context.start for this? */
+#endif
 }
 
 int thread_stack_usage(const struct thread_entry *thread)
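Gating the memset() on core == CPU matters because init_threads() now runs once per core: the CPU zeroes the whole cores[] array first, and when the COP runs the same function later it must not wipe the CPU's already-registered threads; its final act is to raise kernel_running_on_cop. A sketch of that startup order under those assumptions; main_cpu() and main_cop() are illustrative entry-point names:

void main_cpu(void) /* runs first */
{
    init_threads(); /* core == CPU: zeroes cores[] for both cores */
    /* ... remaining kernel init, then start the COP ... */
}

void main_cop(void) /* runs once the CPU has initialised */
{
    init_threads(); /* core == COP: skips the memset, registers its own
                       thread 0, then sets kernel_running_on_cop */
    /* the COP can now accept threads via create_thread() */
}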