Diffstat (limited to 'firmware/thread.c')
-rw-r--r--  firmware/thread.c | 504
1 file changed, 294 insertions(+), 210 deletions(-)
diff --git a/firmware/thread.c b/firmware/thread.c
index 86b90f4f0d..d0a0229430 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -45,59 +45,13 @@ static int boosted_threads IBSS_ATTR;
 #endif
 
 /* Define to enable additional checks for blocking violations etc. */
-#define THREAD_EXTRA_CHECKS
+#define THREAD_EXTRA_CHECKS 0
 
 static const char main_thread_name[] = "main";
 
 extern int stackbegin[];
 extern int stackend[];
 
-#ifdef CPU_PP
-#ifndef BOOTLOADER
-extern int cop_stackbegin[];
-extern int cop_stackend[];
-#else
-/* The coprocessor stack is not set up in the bootloader code, but the threading
- * is. No threads are run on the coprocessor, so set up some dummy stack */
-int *cop_stackbegin = stackbegin;
-int *cop_stackend = stackend;
-#endif
-#endif
-
-#if NUM_CORES > 1
-#if 0
-static long cores_locked IBSS_ATTR;
-
-#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
-#define UNLOCK(...) cores_locked = 0
-#endif
-
-/* #warning "Core locking mechanism should be fixed on H10/4G!" */
-
-inline void lock_cores(void)
-{
-#if 0
-    if (!cores[CURRENT_CORE].lock_issued)
-    {
-        LOCK();
-        cores[CURRENT_CORE].lock_issued = true;
-    }
-#endif
-}
-
-inline void unlock_cores(void)
-{
-#if 0
-    if (cores[CURRENT_CORE].lock_issued)
-    {
-        cores[CURRENT_CORE].lock_issued = false;
-        UNLOCK();
-    }
-#endif
-}
-
-#endif
-
 /* Conserve IRAM
 static void add_to_list(struct thread_entry **list,
                         struct thread_entry *thread) ICODE_ATTR;
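
The hunk above also changes how the debug checks are toggled: THREAD_EXTRA_CHECKS is now always defined and carries a value, so later code in this diff tests it with #if instead of #ifdef. A minimal illustration of the difference (generic C preprocessor behaviour, not taken from the file):

    /* Presence-based switch: merely defining the macro enables the code,
     * so even "#define THREAD_EXTRA_CHECKS 0" would switch it on. */
    #ifdef THREAD_EXTRA_CHECKS
        /* extra checks compiled in */
    #endif

    /* Value-based switch: the macro is always defined and acts as a flag,
     * so enabling or disabling the checks is a 0 <-> 1 edit. */
    #define THREAD_EXTRA_CHECKS 0
    #if THREAD_EXTRA_CHECKS
        /* extra checks compiled in */
    #endif
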
@@ -115,79 +69,115 @@ static inline void core_sleep(void) __attribute__((always_inline));
 
 #if defined(CPU_ARM)
 /*---------------------------------------------------------------------------
- * Store non-volatile context.
+ * Start the thread running and terminate it if it returns
  *---------------------------------------------------------------------------
  */
-static inline void store_context(void* addr)
+static void start_thread(void) __attribute__((naked,used));
+static void start_thread(void)
 {
-    asm volatile(
-        "stmia %0, { r4-r11, sp, lr }\n"
-        : : "r" (addr)
-    );
+    /* r0 = context */
+    asm volatile (
+        "ldr     sp, [r0, #32]          \n" /* Load initial sp */
+        "ldr     r4, [r0, #40]          \n" /* start in r4 since it's non-volatile */
+        "mov     r1, #0                 \n" /* Mark thread as running */
+        "str     r1, [r0, #40]          \n"
+#if NUM_CORES > 1
+        "ldr     r0, =invalidate_icache \n" /* Invalidate this core's cache. */
+        "mov     lr, pc                 \n" /* This could be the first entry into */
+        "bx      r0                     \n" /* plugin or codec code for this core. */
+#endif
+        "mov     lr, pc                 \n" /* Call thread function */
+        "bx      r4                     \n"
+        "mov     r0, #0                 \n" /* remove_thread(NULL) */
+        "ldr     pc, =remove_thread     \n"
+        ".ltorg                         \n" /* Dump constant pool */
+    ); /* No clobber list - new thread doesn't care */
 }
 
 /*---------------------------------------------------------------------------
- * Load non-volatile context.
+ * Store non-volatile context.
  *---------------------------------------------------------------------------
  */
-static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked,used));
-static void start_thread(void (*thread_func)(void), const void* addr)
+static inline void store_context(void* addr)
 {
-    /* r0 = thread_func, r1 = addr */
-#if NUM_CORES > 1 && CONFIG_CPU != PP5002
-    asm volatile (
-        "mov    r2, #0          \n"
-        "str    r2, [r1, #40]   \n"
-        "ldr    r1, =0xf000f044 \n" /* invalidate this core's cache */
-        "ldr    r2, [r1]        \n"
-        "orr    r2, r2, #6      \n"
-        "str    r2, [r1]        \n"
-        "ldr    r1, =0x6000c000 \n"
-    "1:                         \n"
-        "ldr    r2, [r1]        \n"
-        "tst    r2, #0x8000     \n"
-        "bne    1b              \n"
-        "mov    pc, r0          \n"
-        : : : "r1", "r2"
-    );
-#else
-    asm volatile (
-        "mov    r2, #0          \n"
-        "str    r2, [r1, #40]   \n"
-        "mov    pc, r0          \n"
-        : : : "r1", "r2"
+    asm volatile(
+        "stmia   %0, { r4-r11, sp, lr } \n"
+        : : "r" (addr)
     );
-#endif
-    (void)thread_func;
-    (void)addr;
 }
 
+/* For startup, place context pointer in r4 slot, start_thread pointer in r5
+ * slot, and thread function pointer in context.start. See load_context for
+ * what happens when thread is initially going to run. */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
+       (thread)->context.r[1] = (unsigned int)start_thread,       \
+       (thread)->context.start = (void *)function; })
+
 static inline void load_context(const void* addr)
 {
     asm volatile(
-        "ldmia  %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
-        "ldr    r0, [%0, #40]    \n" /* load start pointer */
-        "cmp    r0, #0           \n" /* check for NULL */
-        "movne  r1, %0           \n" /* if not already running, jump to start */
-        "ldrne  pc, =start_thread \n"
-        : : "r" (addr) : "r0", "r1"
+        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
+        "cmp     r0, #0                 \n" /* Check for NULL */
+        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
+        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
+        : : "r" (addr) : "r0" /* only! */
     );
 }
 
 #if defined (CPU_PP)
-static inline void core_sleep(void)
-{
-    unlock_cores();
+
+#if NUM_CORES > 1
+extern int cpu_idlestackbegin[];
+extern int cpu_idlestackend[];
+extern int cop_idlestackbegin[];
+extern int cop_idlestackend[];
+static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
+{
+    [CPU] = cpu_idlestackbegin,
+    [COP] = cop_idlestackbegin
+};
+#else /* NUM_CORES == 1 */
+#ifndef BOOTLOADER
+extern int cop_stackbegin[];
+extern int cop_stackend[];
+#else
+/* The coprocessor stack is not set up in the bootloader code, but the threading
+ * is. No threads are run on the coprocessor, so set up some dummy stack */
+int *cop_stackbegin = stackbegin;
+int *cop_stackend = stackend;
+#endif /* BOOTLOADER */
+#endif /* NUM_CORES */
 
+static inline void core_sleep(void)
+{
     /* This should sleep the CPU. It appears to wake by itself on
        interrupts */
     if (CURRENT_CORE == CPU)
         CPU_CTL = PROC_SLEEP;
     else
         COP_CTL = PROC_SLEEP;
+}
 
-    lock_cores();
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU since the
+ * stack used when idling is the stack of the last thread to run. This stack
+ * may not reside in the core in which case the core will continue to use a
+ * stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
+{
+    asm volatile (
+        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
+        "mov  sp, %0   \n" /* switch stacks */
+        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
 }
+#endif /* NUM_CORES */
+
 #elif CONFIG_CPU == S3C2440
 static inline void core_sleep(void)
 {
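
The ARM block above establishes the startup protocol used by every port in this file: THREAD_STARTUP_INIT() seeds the saved register slots with the context address and the address of start_thread(), and stores the thread function in context.start. A non-NULL start field means the thread has never run; load_context() then jumps to start_thread(), which loads the initial stack pointer, clears the start field, calls the function, and falls into remove_thread(NULL) if it ever returns. A rough, hypothetical C-level model of that flow (simplified and single-core; only the idea follows the diff, the real transfer happens in the inline assembly above):

    /* Simplified model of the startup handshake, not the real structures. */
    struct context_model {
        void *sp;      /* initial stack pointer set by create_thread()   */
        void *start;   /* thread function; cleared once the thread ran   */
    };

    /* What THREAD_STARTUP_INIT() arranges for a freshly created thread. */
    void startup_init_model(struct context_model *ctx, void *stacktop,
                            void (*function)(void))
    {
        ctx->sp    = stacktop;
        ctx->start = (void *)function;   /* non-NULL: "not started yet"  */
    }

    /* What load_context()/start_thread() amount to at the C level. */
    void load_context_model(struct context_model *ctx)
    {
        if (ctx->start != NULL)          /* first activation              */
        {
            void (*func)(void) = (void (*)(void))ctx->start;
            ctx->start = NULL;           /* mark the thread as running    */
            /* the real code also switches to ctx->sp here */
            func();                      /* run the thread body           */
            /* remove_thread(NULL) follows if func() returns */
        }
        /* otherwise restore r4-r11, sp, lr and resume after the matching
         * store_context() call */
    }
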
@@ -205,14 +195,50 @@ static inline void core_sleep(void)
 
 #elif defined(CPU_COLDFIRE)
 /*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+void start_thread(void); /* Provide C access to ASM label */
+static void __start_thread(void) __attribute__((used));
+static void __start_thread(void)
+{
+    /* a0=macsr, a1=context */
+    asm volatile (
+    "start_thread:             \n" /* Start here - no naked attribute */
+        "move.l  %a0, %macsr   \n" /* Set initial mac status reg */
+        "lea.l   48(%a1), %a1  \n"
+        "move.l  (%a1)+, %sp   \n" /* Set initial stack */
+        "move.l  (%a1), %a2    \n" /* Fetch thread function pointer */
+        "clr.l   (%a1)         \n" /* Mark thread running */
+        "jsr     (%a2)         \n" /* Call thread function */
+        "clr.l   -(%sp)        \n" /* remove_thread(NULL) */
+        "jsr     remove_thread \n"
+    );
+}
+
+/* Set EMAC unit to fractional mode with saturation for each new thread,
+ * since that's what'll be the most useful for most things which the dsp
+ * will do. Codecs should still initialize their preferred modes
+ * explicitly. Context pointer is placed in d2 slot and start_thread
+ * pointer in d3 slot. thread function pointer is placed in context.start.
+ * See load_context for what happens when thread is initially going to
+ * run.
+ */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
+       (thread)->context.d[0] = (unsigned int)&(thread)->context, \
+       (thread)->context.d[1] = (unsigned int)start_thread, \
+       (thread)->context.start = (void *)(function); })
+
+/*---------------------------------------------------------------------------
  * Store non-volatile context.
  *---------------------------------------------------------------------------
  */
 static inline void store_context(void* addr)
 {
     asm volatile (
         "move.l  %%macsr,%%d0                  \n"
         "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
         : : "a" (addr) : "d0" /* only! */
     );
 }
@@ -224,14 +250,13 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
 {
     asm volatile (
-        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n"  /* Load context */
-        "move.l  %%d0,%%macsr   \n"
-        "move.l  (52,%0),%%d0   \n"  /* Get start address */
-        "beq.b   1f             \n"  /* NULL -> already running */
-        "clr.l   (52,%0)        \n"  /* Clear start address.. */
-        "move.l  %%d0,%0        \n"
-        "jmp     (%0)           \n"  /* ..and start the thread */
-    "1:                         \n"
+        "move.l  52(%0), %%d0                   \n"  /* Get start address */
+        "beq.b   1f                             \n"  /* NULL -> already running */
+        "movem.l (%0), %%a0-%%a2                \n"  /* a0=macsr, a1=context, a2=start_thread */
+        "jmp     (%%a2)                         \n"  /* Start the thread */
+    "1:                                         \n"
+        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n"  /* Load context */
+        "move.l  %%d0, %%macsr                  \n"
         : : "a" (addr) : "d0" /* only! */
     );
 }
@@ -250,14 +275,45 @@ static inline void core_sleep(void)
 
 #elif CONFIG_CPU == SH7034
 /*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+void start_thread(void); /* Provide C access to ASM label */
+static void __start_thread(void) __attribute__((used));
+static void __start_thread(void)
+{
+    /* r8 = context */
+    asm volatile (
+    "_start_thread:            \n" /* Start here - no naked attribute */
+        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
+        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
+        "mov    #0, r1         \n" /* Start the thread */
+        "jsr    @r0            \n"
+        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
+        "mov.l  1f, r0         \n" /* remove_thread(NULL) */
+        "jmp    @r0            \n"
+        "mov    #0, r4         \n"
+    "1:                        \n"
+        ".long  _remove_thread \n"
+    );
+}
+
+/* Place context pointer in r8 slot, function pointer in r9 slot, and
+ * start_thread pointer in context_start */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
+       (thread)->context.r[1] = (unsigned int)(function), \
+       (thread)->context.start = (void*)start_thread; })
+
+/*---------------------------------------------------------------------------
  * Store non-volatile context.
  *---------------------------------------------------------------------------
  */
 static inline void store_context(void* addr)
 {
     asm volatile (
-        "add     #36,%0   \n"
-        "sts.l   pr, @-%0 \n"
+        "add     #36, %0   \n" /* Start at last reg. By the time routine */
+        "sts.l   pr, @-%0  \n" /* is done, %0 will have the original value */
         "mov.l   r15,@-%0 \n"
         "mov.l   r14,@-%0 \n"
         "mov.l   r13,@-%0 \n"
@@ -277,23 +333,20 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
 {
     asm volatile (
-        "mov.l  @%0+,r8   \n"
-        "mov.l  @%0+,r9   \n"
-        "mov.l  @%0+,r10  \n"
-        "mov.l  @%0+,r11  \n"
-        "mov.l  @%0+,r12  \n"
-        "mov.l  @%0+,r13  \n"
-        "mov.l  @%0+,r14  \n"
-        "mov.l  @%0+,r15  \n"
-        "lds.l  @%0+,pr   \n"
-        "mov.l  @%0,r0    \n" /* Get start address */
-        "tst    r0,r0     \n"
-        "bt     .running  \n" /* NULL -> already running */
-        "lds    r0,pr     \n"
-        "mov    #0,r0     \n"
-        "rts              \n" /* Start the thread */
-        "mov.l  r0,@%0    \n" /* Clear start address */
-    ".running:            \n"
+        "mov.l  @(36, %0), r0 \n" /* Get start address */
+        "tst    r0, r0        \n"
+        "bt     .running      \n" /* NULL -> already running */
+        "jmp    @r0           \n" /* r8 = context */
+    ".running:                \n"
+        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
+        "mov.l  @%0+, r9      \n"
+        "mov.l  @%0+, r10     \n"
+        "mov.l  @%0+, r11     \n"
+        "mov.l  @%0+, r12     \n"
+        "mov.l  @%0+, r13     \n"
+        "mov.l  @%0+, r14     \n"
+        "mov.l  @%0+, r15     \n"
+        "lds.l  @%0+, pr      \n"
         : : "r" (addr) : "r0" /* only! */
     );
 }
@@ -311,38 +364,36 @@ static inline void core_sleep(void)
 #define THREAD_CPU_INIT(core, thread)
 #endif
 
-#ifdef THREAD_EXTRA_CHECKS
-static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
+#if THREAD_EXTRA_CHECKS
+static void thread_panicf(const char *msg, struct thread_entry *thread)
 {
-    *buffer = '\0';
-    if (thread)
-    {
-        /* Display thread name if one or ID if none */
-        const char *fmt = thread->name ? " %s" : " %08lX";
-        intptr_t name = thread->name ?
-            (intptr_t)thread->name : (intptr_t)thread;
-        snprintf(buffer, 16, fmt, name);
-    }
+#if NUM_CORES > 1
+    const unsigned int core = thread->core;
+#endif
+    static char name[32];
+    thread_get_name(name, 32, thread);
+    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
 }
-
-static void thread_panicf(const char *msg,
-        struct thread_entry *thread1, struct thread_entry *thread2)
+static void thread_stkov(struct thread_entry *thread)
 {
-    static char thread1_name[16], thread2_name[16];
-    thread_panicf_format_name(thread1_name, thread1);
-    thread_panicf_format_name(thread2_name, thread2);
-    panicf ("%s%s%s", msg, thread1_name, thread2_name);
+    thread_panicf("Stkov", thread);
 }
+#define THREAD_PANICF(msg, thread) \
+    thread_panicf(msg, thread)
+#define THREAD_ASSERT(exp, msg, thread) \
+    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
 #else
-static void thread_stkov(void)
+static void thread_stkov(struct thread_entry *thread)
 {
-    /* Display thread name if one or ID if none */
-    struct thread_entry *current = cores[CURRENT_CORE].running;
-    const char *fmt = current->name ? "%s %s" : "%s %08lX";
-    intptr_t name = current->name ?
-        (intptr_t)current->name : (intptr_t)current;
-    panicf(fmt, "Stkov", name);
+#if NUM_CORES > 1
+    const unsigned int core = thread->core;
+#endif
+    static char name[32];
+    thread_get_name(name, 32, thread);
+    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
 }
+#define THREAD_PANICF(msg, thread)
+#define THREAD_ASSERT(exp, msg, thread)
 #endif /* THREAD_EXTRA_CHECKS */
 
 static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
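
With THREAD_EXTRA_CHECKS enabled, the new THREAD_ASSERT() macro evaluates its expression and panics with the offending thread's formatted name (plus core number on multi-core builds) when the expression is false, while THREAD_PANICF() panics unconditionally; with the checks disabled both expand to nothing. A short usage sketch, modelled on the call sites later in this diff:

    /* Illustrative only; thread_entry, THREAD_ASSERT and THREAD_PANICF are
     * the ones defined above. */
    void wait_on_list_example(struct thread_entry **list,
                              struct thread_entry *current)
    {
        /* Panic with the thread's name if the invariant does not hold. */
        THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);

        /* ... proceed to block the current thread on the list ... */
    }
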
@@ -564,8 +615,6 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
     /* Do nothing */
 #else
 
-    lock_cores();
-
     /* Begin task switching by saving our current context so that we can
      * restore the state of the current thread later to the point prior
      * to this call. */
@@ -576,11 +625,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
     /* Check if the current thread stack is overflown */
     stackptr = cores[core].running->stack;
     if(stackptr[0] != DEADBEEF)
-#ifdef THREAD_EXTRA_CHECKS
-        thread_panicf("Stkov", cores[core].running, NULL);
-#else
-        thread_stkov();
-#endif
+        thread_stkov(cores[core].running);
 
     /* Rearrange thread lists as needed */
     change_thread_state(blocked_list);
@@ -627,7 +672,6 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
 #endif
 
 #endif
-    unlock_cores();
 
     /* And finally give control to the next thread. */
     load_context(&cores[core].running->context);
@@ -641,8 +685,6 @@ void sleep_thread(int ticks)
 {
     struct thread_entry *current;
 
-    lock_cores();
-
     current = cores[CURRENT_CORE].running;
 
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -668,8 +710,6 @@ void block_thread(struct thread_entry **list)
 {
     struct thread_entry *current;
 
-    lock_cores();
-
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
 
@@ -680,11 +720,9 @@ void block_thread(struct thread_entry **list)
     unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
 #endif
 
-#ifdef THREAD_EXTRA_CHECKS
     /* We are not allowed to mix blocking types in one queue. */
-    if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
-        thread_panicf("Blocking violation B->*T", current, *list);
-#endif
+    THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO,
+                  "Blocking violation B->*T", current);
 
     /* Set the state to blocked and ask the scheduler to switch tasks,
      * this takes us off of the run queue until we are explicitly woken */
@@ -707,7 +745,6 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
     /* Get the entry for the current running thread. */
     current = cores[CURRENT_CORE].running;
 
-    lock_cores();
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
     /* A block with a timeout is a sleep situation, whatever we are waiting
      * for _may or may not_ happen, regardless of boost state, (user input
@@ -722,12 +759,9 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
     }
 #endif
 
-#ifdef THREAD_EXTRA_CHECKS
     /* We can store only one thread to the "list" if thread is used
      * in other list (such as core's list for sleeping tasks). */
-    if (*list)
-        thread_panicf("Blocking violation T->*B", current, NULL);
-#endif
+    THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);
 
     /* Set the state to blocked with the specified timeout */
     SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
@@ -836,7 +870,6 @@ struct thread_entry*
     unsigned int stacklen;
     unsigned int *stackptr;
     int slot;
-    struct regs *regs;
     struct thread_entry *thread;
 
 /*****
@@ -862,12 +895,9 @@ struct thread_entry*
     }
 #endif
 
-    lock_cores();
-
     slot = find_empty_thread_slot();
     if (slot < 0)
     {
-        unlock_cores();
         return NULL;
     }
 
@@ -899,17 +929,13 @@ struct thread_entry*
     flush_icache();
 #endif
 
-    regs = &thread->context;
     /* Align stack to an even 32 bit boundary */
-    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
-    regs->start = (void*)function;
+    thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
+
+    /* Load the thread's context structure with needed startup information */
+    THREAD_STARTUP_INIT(core, thread, function);
 
-    /* Do any CPU specific inits after initializing common items
-       to have access to valid data */
-    THREAD_CPU_INIT(core, thread);
-
     add_to_list(&cores[core].running, thread);
-    unlock_cores();
 
     return thread;
 #if NUM_CORES == 1
@@ -920,8 +946,6 @@ struct thread_entry*
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void)
 {
-    lock_cores();
-
     if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
     {
         SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
@@ -931,8 +955,6 @@ void trigger_cpu_boost(void)
         }
         boosted_threads++;
     }
-
-    unlock_cores();
 }
 #endif
 
@@ -943,10 +965,10 @@ void trigger_cpu_boost(void)
  */
 void remove_thread(struct thread_entry *thread)
 {
-    lock_cores();
+    const unsigned int core = CURRENT_CORE;
 
     if (thread == NULL)
-        thread = cores[CURRENT_CORE].running;
+        thread = cores[core].running;
 
     /* Free the entry by removing thread name. */
     thread->name = NULL;
@@ -957,16 +979,26 @@ void remove_thread(struct thread_entry *thread)
     if (thread == cores[IF_COP2(thread->core)].running)
     {
         remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
+#if NUM_CORES > 1
+        /* Switch to the idle stack if not on the main core (where "main"
+         * runs) */
+        if (core != CPU)
+        {
+            switch_to_idle_stack(core);
+        }
+
+        flush_icache();
+#endif
         switch_thread(false, NULL);
-        return ;
+        /* This should never and must never be reached - if it is, the
+         * state is corrupted */
+        THREAD_PANICF("remove_thread->K:*R", thread);
     }
 
     if (thread == cores[IF_COP2(thread->core)].sleeping)
         remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
     else
         remove_from_list(NULL, thread);
-
-    unlock_cores();
 }
 
 #ifdef HAVE_PRIORITY_SCHEDULING
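
The remove_thread() hunk above is where the idle stacks introduced earlier come into play: when a thread removes itself on a core other than the main CPU, that core first hops onto its own idle stack (which always resides in the Rockbox core rather than in a possibly unloaded codec or plugin buffer), flushes the cache, and only then switches away for good. Condensed into straight-line C, the self-removal path on a multi-core build reads roughly as follows (names are the ones used in the diff; IF_COP2() indexing omitted):

    void remove_current_thread_sketch(unsigned int core,
                                      struct thread_entry *thread)
    {
        remove_from_list(&cores[core].running, thread);

        if (core != CPU)
            switch_to_idle_stack(core);  /* leave the dying thread's stack */
        flush_icache();

        switch_thread(false, NULL);      /* pick the next thread; must not
                                          * return to this point */
        THREAD_PANICF("remove_thread->K:*R", thread);  /* unreachable */
    }
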
@@ -974,14 +1006,12 @@ int thread_set_priority(struct thread_entry *thread, int priority)
 {
     int old_priority;
 
-    lock_cores();
     if (thread == NULL)
         thread = cores[CURRENT_CORE].running;
 
     old_priority = thread->priority;
     thread->priority = priority;
     cores[IF_COP2(thread->core)].highest_priority = 100;
-    unlock_cores();
 
     return old_priority;
 }
@@ -1013,15 +1043,7 @@ void init_threads(void)
     const unsigned int core = CURRENT_CORE;
     int slot;
 
-    /* Let main CPU initialize first. */
-#if NUM_CORES > 1
-    if (core != CPU)
-    {
-        while (!cores[CPU].kernel_running) ;
-    }
-#endif
-
-    lock_cores();
+    /* CPU will initialize first and then sleep */
     slot = find_empty_thread_slot();
 
     cores[core].sleeping = NULL;
@@ -1042,30 +1064,40 @@ void init_threads(void)
     threads[slot].priority_x = 0;
     cores[core].highest_priority = 100;
 #endif
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-    boosted_threads = 0;
-#endif
     add_to_list(&cores[core].running, &threads[slot]);
 
     /* In multiple core setups, each core has a different stack. There is
      * probably a much better way to do this. */
     if (core == CPU)
     {
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+        boosted_threads = 0;
+#endif
         threads[slot].stack = stackbegin;
         threads[slot].stack_size = (int)stackend - (int)stackbegin;
-    }
 #if NUM_CORES > 1  /* This code path will not be run on single core targets */
+        /* Mark CPU initialized */
+        cores[CPU].kernel_running = true;
+        /* TODO: HAL interface for this */
+        /* Wake up coprocessor and let it initialize kernel and threads */
+        COP_CTL = PROC_WAKE;
+        /* Sleep until finished */
+        CPU_CTL = PROC_SLEEP;
+    }
     else
     {
-        threads[slot].stack = cop_stackbegin;
-        threads[slot].stack_size =
-            (int)cop_stackend - (int)cop_stackbegin;
-    }
-
-    cores[core].kernel_running = true;
+        /* Initial stack is the COP idle stack */
+        threads[slot].stack = cop_idlestackbegin;
+        threads[slot].stack_size = IDLE_STACK_SIZE;
+        /* Mark COP initialized */
+        cores[COP].kernel_running = true;
+        /* Get COP safely primed inside switch_thread where it will remain
+         * until a thread actually exists on it */
+        CPU_CTL = PROC_WAKE;
+        set_irq_level(0);
+        remove_thread(NULL);
 #endif
-
-    unlock_cores();
+    }
 }
 
 int thread_stack_usage(const struct thread_entry *thread)
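
The init_threads() changes above replace the old busy-wait on cores[CPU].kernel_running with a sleep/wake handshake between the two PortalPlayer cores: the CPU sets itself up, wakes the COP and goes to sleep; the COP sets itself up on its idle stack, wakes the CPU back up and parks itself in switch_thread() via remove_thread(NULL) until a real thread is created for it. The ordering, pulled out of the conditionals (a rough sketch, using the same register and helper names as the diff):

    void init_threads_order_sketch(unsigned int core)
    {
        if (core == CPU)
        {
            cores[CPU].kernel_running = true;
            COP_CTL = PROC_WAKE;   /* let the COP run its init_threads()  */
            CPU_CTL = PROC_SLEEP;  /* sleep until the COP wakes us again  */
        }
        else
        {
            cores[COP].kernel_running = true;
            CPU_CTL = PROC_WAKE;   /* resume the CPU                      */
            set_irq_level(0);
            remove_thread(NULL);   /* park the COP inside switch_thread() */
        }
    }
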
@@ -1083,7 +1115,59 @@ int thread_stack_usage(const struct thread_entry *thread)
            thread->stack_size;
 }
 
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Returns the maximum percentage of the core's idle stack ever used during
+ * runtime.
+ *---------------------------------------------------------------------------
+ */
+int idle_stack_usage(unsigned int core)
+{
+    unsigned int *stackptr = idle_stacks[core];
+    int i, usage = 0;
+
+    for (i = 0; i < IDLE_STACK_WORDS; i++)
+    {
+        if (stackptr[i] != DEADBEEF)
+        {
+            usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
+            break;
+        }
+    }
+
+    return usage;
+}
+#endif
+
 int thread_get_status(const struct thread_entry *thread)
 {
     return GET_STATE(thread->statearg);
 }
+
+/*---------------------------------------------------------------------------
+ * Fills in the buffer with the specified thread's name. If the name is NULL,
+ * empty, or the thread is in destruct state a formatted ID is written
+ * instead.
+ *---------------------------------------------------------------------------
+ */
+void thread_get_name(char *buffer, int size,
+                     struct thread_entry *thread)
+{
+    if (size <= 0)
+        return;
+
+    *buffer = '\0';
+
+    if (thread)
+    {
+        /* Display thread name if one or ID if none */
+        const char *name = thread->name;
+        const char *fmt = "%s";
+        if (name == NULL || *name == '\0')
+        {
+            name = (const char *)thread;
+            fmt = "%08lX";
+        }
+        snprintf(buffer, size, fmt, name);
+    }
+}
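
idle_stack_usage() in the final hunk relies on the same watermarking convention as thread_stack_usage(): stacks are pre-filled with the DEADBEEF pattern, and the first word that no longer holds it marks the deepest point the downward-growing stack has ever reached. A self-contained version of the scan (the constant values below are placeholders, not the project's):

    #define DEADBEEF         0xdeadbeefUL /* fill pattern (placeholder value) */
    #define IDLE_STACK_WORDS 0x80         /* stack size in words (placeholder) */

    int watermark_usage_example(const unsigned long *stackptr)
    {
        int i, usage = 0;

        for (i = 0; i < IDLE_STACK_WORDS; i++)
        {
            if (stackptr[i] != DEADBEEF)
            {
                /* Everything from this word up to the top of the buffer
                 * has been written at some point. */
                usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
                break;
            }
        }

        return usage;
    }
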