Diffstat (limited to 'firmware/thread.c')
-rw-r--r--  firmware/thread.c  2323
1 file changed, 1872 insertions, 451 deletions
diff --git a/firmware/thread.c b/firmware/thread.c
index 619a1e135a..c9ce049ea1 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -29,43 +29,150 @@
29#include <profile.h> 29#include <profile.h>
30#endif 30#endif
31 31
32#if NUM_CORES > 1 32/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33# define IF_COP2(x) x 33#ifdef DEBUG
34#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
34#else 35#else
35# define IF_COP2(x) CURRENT_CORE 36#define THREAD_EXTRA_CHECKS 0
36#endif 37#endif
37 38
39/**
40 * General locking order to guarantee progress. The order must be observed,
41 * but not all stages are necessarily required; going from 1) to 3) is
42 * perfectly legal.
43 *
44 * 1) IRQ
45 * This is first because of the likelihood of having an interrupt occur that
46 * also accesses one of the objects farther down the list. Any non-blocking
47 * synchronization done may already have a lock on something during normal
48 * execution and if an interrupt handler running on the same processor as
49 * the one that has the resource locked were to attempt to access the
50 * resource, the interrupt handler would spin forever waiting for an unlock
51 * that will never happen. There is no danger if the interrupt occurs on
52 * a different processor because the one that has the lock will eventually
53 * unlock and the other processor's handler may proceed at that time. Not
54 * necessary when the resource in question is definitely not available to
55 * interrupt handlers.
56 *
57 * 2) Kernel Object
58 * Stage 1) may be needed beforehand if the kernel object allows dual-use such as
59 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse order of this hierarchy.
65 *
66 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be
68 * altered by another processor when a state change is in progress such as
69 * when it is in the process of going on a blocked list. An attempt to wake
70 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state.
72 *
73 * 4) Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * its lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
81 *
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an
85 * operation may only be performed by the thread's own core in a normal
86 * execution context. The wakeup list is the prime example where a thread
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
90 */
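/* Illustrative sketch of the locking order above (example only, not part of
 * this change, and kept out of the build since it uses macros defined later
 * in this file). "example_object" and its members are invented names; the
 * primitives are the ones introduced below. */
#if 0
struct example_object
{
    struct corelock cl;         /* stage 2: kernel object lock */
    struct thread_queue queue;  /* stage 4: list of blocked threads */
};

static void example_lock_order(struct example_object *obj,
                               struct thread_entry *thread)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); /* 1) IRQ */
    corelock_lock(&obj->cl);                         /* 2) kernel object */
    unsigned state = GET_THREAD_STATE(thread);       /* 3) thread slot */
    struct thread_entry *q = LOCK_LIST(&obj->queue); /* 4) list */

    /* ... inspect or modify the blocked queue here ... */

    UNLOCK_LIST(&obj->queue, q);                     /* release in reverse */
    UNLOCK_THREAD(thread, state);
    corelock_unlock(&obj->cl);
    set_irq_level(oldlevel);
}
#endif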
38#define DEADBEEF ((unsigned int)0xdeadbeef) 91#define DEADBEEF ((unsigned int)0xdeadbeef)
39/* Cast to the machine int type, whose size could be < 4. */ 92/* Cast to the machine int type, whose size could be < 4. */
40
41struct core_entry cores[NUM_CORES] IBSS_ATTR; 93struct core_entry cores[NUM_CORES] IBSS_ATTR;
42struct thread_entry threads[MAXTHREADS] IBSS_ATTR; 94struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
43#ifdef HAVE_SCHEDULER_BOOSTCTRL 95#ifdef HAVE_SCHEDULER_BOOSTCTRL
44static int boosted_threads IBSS_ATTR; 96static int boosted_threads IBSS_ATTR;
45#endif 97#endif
46 98
47/* Define to enable additional checks for blocking violations etc. */
48#define THREAD_EXTRA_CHECKS 0
49
50static const char main_thread_name[] = "main"; 99static const char main_thread_name[] = "main";
51
52extern int stackbegin[]; 100extern int stackbegin[];
53extern int stackend[]; 101extern int stackend[];
54 102
55/* Conserve IRAM 103/* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
56static void add_to_list(struct thread_entry **list, 104 * never results in a wait until the next tick (up to 10000µs!). Likely
57 struct thread_entry *thread) ICODE_ATTR; 105 * requires assembly and careful instruction ordering. Multicore requires
58static void remove_from_list(struct thread_entry **list, 106 * carefully timed sections in order to have synchronization without locking of
59 struct thread_entry *thread) ICODE_ATTR; 107 * any sort.
60*/ 108 *
109 * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
110 * 2) Check *waking == NULL.
111 * 3) *waking not NULL? Goto step 7.
112 * 4) On multicore, stay awake if directed to do so by another core. If so, goto step 7.
113 * 5) If processor requires, atomically reenable interrupts and perform step 6.
114 * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
115 * goto step 8.
116 * 7) Reenable interrupts.
117 * 8) Exit procedure.
118 */
119static inline void core_sleep(
120 IF_COP(unsigned int core,) struct thread_entry **waking)
121 __attribute__((always_inline));
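/* A plain-C sketch of the numbered steps above (illustrative only, not part
 * of this change, hence kept out of the build). It assumes the PP-style
 * set_interrupt_status()/PROC_CTL()/PROC_SLEEP primitives used later in this
 * file and omits the multicore stay-awake handshake of step 4; the real
 * implementations below are per-CPU assembly. */
#if 0
static inline void core_sleep_sketch(struct thread_entry **waking)
{
    set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS); /* 1) disable */
    if (*waking == NULL)                                     /* 2)-3) check */
    {
        PROC_CTL(CURRENT_CORE) = PROC_SLEEP;                 /* 5)-6) sleep */
    }
    set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);   /* 7) reenable */
}                                                            /* 8) exit */
#endif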
122
123static void check_tmo_threads(void)
124 __attribute__((noinline));
125
126static inline void block_thread_on_l(
127 struct thread_queue *list, struct thread_entry *thread, unsigned state)
128 __attribute__((always_inline));
129
130static inline void block_thread_on_l_no_listlock(
131 struct thread_entry **list, struct thread_entry *thread, unsigned state)
132 __attribute__((always_inline));
133
134static inline void _block_thread_on_l(
135 struct thread_queue *list, struct thread_entry *thread,
136 unsigned state IF_SWCL(, const bool single))
137 __attribute__((always_inline));
138
139IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
140 struct thread_queue *list IF_SWCL(, const bool nolock))
141 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
142
143IF_SWCL(static inline) void _block_thread(
144 struct thread_queue *list IF_SWCL(, const bool nolock))
145 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
146
147static void add_to_list_tmo(struct thread_entry *thread)
148 __attribute__((noinline));
149
150static void core_schedule_wakeup(struct thread_entry *thread)
151 __attribute__((noinline));
152
153static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
154 __attribute__((always_inline));
155
156static inline void run_blocking_ops(
157 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
158 __attribute__((always_inline));
159
160static void thread_stkov(struct thread_entry *thread)
161 __attribute__((noinline));
61 162
62void switch_thread(bool save_context, struct thread_entry **blocked_list) 163static inline void store_context(void* addr)
63 ICODE_ATTR; 164 __attribute__((always_inline));
64 165
65static inline void store_context(void* addr) __attribute__ ((always_inline));
66static inline void load_context(const void* addr) 166static inline void load_context(const void* addr)
67 __attribute__ ((always_inline)); 167 __attribute__((always_inline));
68static inline void core_sleep(void) __attribute__((always_inline)); 168
169void switch_thread(struct thread_entry *old)
170 __attribute__((noinline));
171
172
173/****************************************************************************
174 * Processor-specific section
175 */
69 176
70#if defined(CPU_ARM) 177#if defined(CPU_ARM)
71/*--------------------------------------------------------------------------- 178/*---------------------------------------------------------------------------
@@ -94,6 +201,14 @@ static void start_thread(void)
94 ); /* No clobber list - new thread doesn't care */ 201 ); /* No clobber list - new thread doesn't care */
95} 202}
96 203
204/* For startup, place context pointer in r4 slot, start_thread pointer in r5
205 * slot, and thread function pointer in context.start. See load_context for
206 * what happens when thread is initially going to run. */
207#define THREAD_STARTUP_INIT(core, thread, function) \
208 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
209 (thread)->context.r[1] = (unsigned int)start_thread, \
210 (thread)->context.start = (void *)function; })
211
97/*--------------------------------------------------------------------------- 212/*---------------------------------------------------------------------------
98 * Store non-volatile context. 213 * Store non-volatile context.
99 *--------------------------------------------------------------------------- 214 *---------------------------------------------------------------------------
@@ -106,14 +221,10 @@ static inline void store_context(void* addr)
106 ); 221 );
107} 222}
108 223
109/* For startup, place context pointer in r4 slot, start_thread pointer in r5 224/*---------------------------------------------------------------------------
110 * slot, and thread function pointer in context.start. See load_context for 225 * Load non-volatile context.
111 * what happens when thread is initially going to run. */ 226 *---------------------------------------------------------------------------
112#define THREAD_STARTUP_INIT(core, thread, function) \ 227 */
113 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
114 (thread)->context.r[1] = (unsigned int)start_thread, \
115 (thread)->context.start = (void *)function; })
116
117static inline void load_context(const void* addr) 228static inline void load_context(const void* addr)
118{ 229{
119 asm volatile( 230 asm volatile(
@@ -139,14 +250,226 @@ static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
139}; 250};
140#endif /* NUM_CORES */ 251#endif /* NUM_CORES */
141 252
142static inline void core_sleep(void) 253#if CONFIG_CORELOCK == SW_CORELOCK
254/* Software core locks using Peterson's mutual exclusion algorithm */
255
256/*---------------------------------------------------------------------------
257 * Initialize the corelock structure.
258 *---------------------------------------------------------------------------
259 */
260void corelock_init(struct corelock *cl)
143{ 261{
144 /* This should sleep the CPU. It appears to wake by itself on 262 memset(cl, 0, sizeof (*cl));
145 interrupts */ 263}
146 if (CURRENT_CORE == CPU) 264
147 CPU_CTL = PROC_SLEEP; 265#if 1 /* Assembly locks to minimize overhead */
148 else 266/*---------------------------------------------------------------------------
149 COP_CTL = PROC_SLEEP; 267 * Wait for the corelock to become free and acquire it when it does.
268 *---------------------------------------------------------------------------
269 */
270void corelock_lock(struct corelock *cl) __attribute__((naked));
271void corelock_lock(struct corelock *cl)
272{
273 asm volatile (
274 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
275 "ldrb r1, [r1] \n"
276 "mov r3, #1 \n" /* cl->myl[core] = 1 */
277 "strb r3, [r0, r1, lsr #7] \n"
278 "and r2, r1, #1 \n" /* r2 = othercore */
279 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
280 "1: \n"
281 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
282 "cmp r3, #1 \n"
283 "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore ? */
284 "cmpeq r3, r2 \n"
285 "bxne lr \n" /* no? lock acquired */
286 "b 1b \n" /* keep trying */
287 : : "i"(&PROCESSOR_ID)
288 );
289 (void)cl;
290}
291
292/*---------------------------------------------------------------------------
293 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
294 *---------------------------------------------------------------------------
295 */
296int corelock_try_lock(struct corelock *cl) __attribute__((naked));
297int corelock_try_lock(struct corelock *cl)
298{
299 asm volatile (
300 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
301 "ldrb r1, [r1] \n"
302 "mov r3, #1 \n" /* cl->myl[core] = 1 */
303 "strb r3, [r0, r1, lsr #7] \n"
304 "and r2, r1, #1 \n" /* r2 = othercore */
305 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
306 "1: \n"
307 "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
308 "cmp r3, #1 \n"
309 "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore? */
310 "cmpeq r3, r2 \n"
311 "movne r0, #1 \n" /* no? lock acquired */
312 "bxne lr \n"
313 "mov r2, #0 \n" /* cl->myl[core] = 0 */
314 "strb r2, [r0, r1, lsr #7] \n"
315 "mov r0, r2 \n"
316 "bx lr \n" /* acquisition failed */
317 : : "i"(&PROCESSOR_ID)
318 );
319
320 return 0;
321 (void)cl;
322}
323
324/*---------------------------------------------------------------------------
325 * Release ownership of the corelock
326 *---------------------------------------------------------------------------
327 */
328void corelock_unlock(struct corelock *cl) __attribute__((naked));
329void corelock_unlock(struct corelock *cl)
330{
331 asm volatile (
332 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
333 "ldrb r1, [r1] \n"
334 "mov r2, #0 \n" /* cl->myl[core] = 0 */
335 "strb r2, [r0, r1, lsr #7] \n"
336 "bx lr \n"
337 : : "i"(&PROCESSOR_ID)
338 );
339 (void)cl;
340}
341#else /* C versions for reference */
342/*---------------------------------------------------------------------------
343 * Wait for the corelock to become free and acquire it when it does.
344 *---------------------------------------------------------------------------
345 */
346void corelock_lock(struct corelock *cl)
347{
348 const unsigned int core = CURRENT_CORE;
349 const unsigned int othercore = 1 - core;
350
351 cl->myl[core] = 1;
352 cl->turn = othercore;
353
354 while (cl->myl[othercore] == 1 && cl->turn == othercore);
355}
356
357/*---------------------------------------------------------------------------
358 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
359 *---------------------------------------------------------------------------
360 */
361int corelock_try_lock(struct corelock *cl)
362{
363 const unsigned int core = CURRENT_CORE;
364 const unsigned int othercore = 1 - core;
365
366 cl->myl[core] = 1;
367 cl->turn = othercore;
368
369 if (cl->myl[othercore] == 1 && cl->turn == othercore)
370 {
371 cl->myl[core] = 0;
372 return 0;
373 }
374
375 return 1;
376}
377
378/*---------------------------------------------------------------------------
379 * Release ownership of the corelock
380 *---------------------------------------------------------------------------
381 */
382void corelock_unlock(struct corelock *cl)
383{
384 cl->myl[CURRENT_CORE] = 0;
385}
386#endif /* ASM / C selection */
387
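/* Illustrative use of the corelock API above (example only, not referenced by
 * this change): serializing updates to a shared counter between the two cores.
 * The counter and wrapper names are invented; on PP targets, shared data would
 * additionally need to live in uncached memory or be flushed explicitly, which
 * is omitted here. */
#if 0
static struct corelock example_cl;    /* corelock_init(&example_cl) at startup */
static volatile int example_shared;

static void example_shared_inc(void)
{
    corelock_lock(&example_cl);       /* spins via Peterson's algorithm */
    example_shared++;
    corelock_unlock(&example_cl);
}
#endif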
388#endif /* CONFIG_CORELOCK == SW_CORELOCK */
389
390/*---------------------------------------------------------------------------
391 * Put core in a power-saving state if waking list wasn't repopulated and if
392 * no other core requested a wakeup for it to perform a task.
393 *---------------------------------------------------------------------------
394 */
395static inline void core_sleep(IF_COP(unsigned int core,) struct thread_entry **waking)
396{
397#if NUM_CORES > 1
398#ifdef CPU_PP502x
399 /* Disabling IRQ and FIQ is important to make the fixed-time sequence
400 * non-interruptible */
401 asm volatile (
402 "mrs r2, cpsr \n" /* Disable IRQ, FIQ */
403 "orr r2, r2, #0xc0 \n"
404 "msr cpsr_c, r2 \n"
405 "ldr r0, [%[w]] \n" /* Check *waking */
406 "cmp r0, #0 \n" /* != NULL -> exit */
407 "bne 1f \n"
408 /* ------ fixed-time sequence ----- */
409 "ldr r0, [%[ms], %[oc], lsl #2] \n" /* Stay-awake requested? */
410 "mov r1, #0x80000000 \n"
411 "tst r0, #1 \n"
412 "streq r1, [%[ct], %[c], lsl #2] \n" /* Sleep if not */
413 "nop \n"
414 "mov r0, #0 \n"
415 "str r0, [%[ct], %[c], lsl #2] \n" /* Clear control reg */
416 /* -------------------------------- */
417 "1: \n"
418 "mov r0, #1 \n"
419 "add r1, %[ms], #8 \n"
420 "str r0, [r1, %[oc], lsl #2] \n" /* Clear mailbox */
421 "bic r2, r2, #0xc0 \n" /* Enable interrupts */
422 "msr cpsr_c, r2 \n"
423 :
424 : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
425 [c]"r" (core), [oc]"r"(1-core), [w]"r"(waking)
426 : "r0", "r1", "r2");
427#else
428 /* TODO: PP5002 */
429#endif /* CONFIG_CPU == */
430#else
431 set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
432 if (*waking == NULL)
433 {
434 PROC_CTL(IF_COP_CORE(core)) = PROC_SLEEP;
435 }
436 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
437#endif /* NUM_CORES */
438}
439
440/*---------------------------------------------------------------------------
441 * Wake another processor core that is sleeping or prevent it from doing so
442 * if it was about to. FIQ and IRQ should be disabled before calling.
443 *---------------------------------------------------------------------------
444 */
445void core_wake(IF_COP_VOID(unsigned int othercore))
446{
447#if NUM_CORES == 1
448 /* No wakey - core already wakey */
449#elif defined (CPU_PP502x)
450 /* avoid r0 since that contains othercore */
451 asm volatile (
452 "mrs r2, cpsr \n"
453 "orr r1, r2, #0xc0 \n"
454 "msr cpsr_c, r1 \n"
455 "mov r1, #1 \n"
456 /* ------ fixed-time sequence ----- */
457 "str r1, [%[ms], %[oc], lsl #2] \n" /* Send stay-awake message */
458 "nop \n"
459 "nop \n"
460 "ldr r1, [%[ct], %[oc], lsl #2] \n" /* Wake other core if asleep */
461 "tst r1, #0x80000000 \n"
462 "bic r1, r1, #0x80000000 \n"
463 "strne r1, [%[ct], %[oc], lsl #2] \n"
464 /* -------------------------------- */
465 "msr cpsr_c, r2 \n"
466 :
467 : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
468 [oc]"r" (othercore)
469 : "r1", "r2");
470#else
471 PROC_CTL(othercore) = PROC_WAKE;
472#endif
150} 473}
151 474
152#if NUM_CORES > 1 475#if NUM_CORES > 1
@@ -167,22 +490,120 @@ static inline void switch_to_idle_stack(const unsigned int core)
167 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1])); 490 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
168 (void)core; 491 (void)core;
169} 492}
493
494/*---------------------------------------------------------------------------
495 * Perform core switch steps that need to take place inside switch_thread.
496 *
497 * These steps must take place after entering switch_thread but before
498 * changing the processor, since switch_thread may not do a normal return
499 * because the stack being used for anything the compiler saved will not belong
500 * to the thread's destination core and it may have been recycled for other
501 * purposes by the time a normal context load has taken place. switch_thread
502 * will also clobber anything stashed in the thread's context or stored in the
503 * nonvolatile registers if it is saved there before the call since the
504 * compiler's order of operations cannot be known for certain.
505 */
506static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
507{
508 /* Flush our data to ram */
509 flush_icache();
510 /* Stash thread in r4 slot */
511 thread->context.r[0] = (unsigned int)thread;
512 /* Stash restart address in r5 slot */
513 thread->context.r[1] = (unsigned int)thread->context.start;
514 /* Save sp in context.sp while still running on old core */
515 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
516}
517
518/*---------------------------------------------------------------------------
519 * Machine-specific helper function for switching the processor a thread is
520 * running on. Basically, the thread suicides on the departing core and is
521 * reborn on the destination. Were it not for gcc's ill-behavior regarding
522 * naked functions written in C where it actually clobbers non-volatile
523 * registers before the intended prologue code, this would all be much
524 * simpler. Generic setup is done in switch_core itself.
525 */
526
527/*---------------------------------------------------------------------------
528 * This actually performs the core switch.
529 */
530static void switch_thread_core(unsigned int core, struct thread_entry *thread)
531 __attribute__((naked));
532static void switch_thread_core(unsigned int core, struct thread_entry *thread)
533{
534 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
535 * Stack access also isn't permitted until restoring the original stack and
536 * context. */
537 asm volatile (
538 "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
539 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
540 "ldr r2, [r2, r0, lsl #2] \n"
541 "add r2, r2, %0*4 \n"
542 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
543 "mov sp, r2 \n" /* switch stacks */
544 "adr r2, 1f \n" /* r2 = new core restart address */
545 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
546 "mov r0, r1 \n" /* switch_thread(thread) */
547 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
548 "1: \n"
549 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
550 "mov r1, #0 \n" /* Clear start address */
551 "str r1, [r0, #40] \n"
552 "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
553 "mov lr, pc \n"
554 "bx r0 \n"
555 "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
556 ".ltorg \n" /* Dump constant pool */
557 : : "i"(IDLE_STACK_WORDS)
558 );
559 (void)core; (void)thread;
560}
170#endif /* NUM_CORES */ 561#endif /* NUM_CORES */
171 562
172#elif CONFIG_CPU == S3C2440 563#elif CONFIG_CPU == S3C2440
173static inline void core_sleep(void) 564
565/*---------------------------------------------------------------------------
566 * Put core in a power-saving state if waking list wasn't repopulated.
567 *---------------------------------------------------------------------------
568 */
569static inline void core_sleep(struct thread_entry **waking)
174{ 570{
175 int i; 571 /* FIQ also changes the CLKCON register so FIQ must be disabled
176 CLKCON |= (1 << 2); /* set IDLE bit */ 572 when changing it here */
177 for(i=0; i<10; i++); /* wait for IDLE */ 573 asm volatile (
178 CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */ 574 "mrs r0, cpsr \n" /* Disable IRQ, FIQ */
575 "orr r0, r0, #0xc0 \n"
576 "msr cpsr_c, r0 \n"
577 "ldr r1, [%0] \n" /* Check *waking */
578 "cmp r1, #0 \n"
579 "bne 2f \n" /* != NULL -> exit */
580 "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */
581 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
582 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
583 "orr r2, r2, #4 \n"
584 "str r2, [r1, #0xc] \n"
585 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
586 "mov r3, #0 \n" /* wait for IDLE */
587 "1: \n"
588 "add r3, r3, #1 \n"
589 "cmp r3, #10 \n"
590 "bne 1b \n"
591 "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */
592 "msr cpsr_c, r0 \n"
593 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
594 "bic r2, r2, #4 \n"
595 "str r2, [r1, #0xc] \n"
596 "2: \n"
597 "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */
598 "msr cpsr_c, r0 \n"
599 : : "r"(waking) : "r0", "r1", "r2", "r3");
179} 600}
180#else 601#else
181static inline void core_sleep(void) 602static inline void core_sleep(void)
182{ 603{
183 604
184} 605}
185#endif 606#endif /* CONFIG_CPU == */
186 607
187#elif defined(CPU_COLDFIRE) 608#elif defined(CPU_COLDFIRE)
188/*--------------------------------------------------------------------------- 609/*---------------------------------------------------------------------------
@@ -252,17 +673,28 @@ static inline void load_context(const void* addr)
252 ); 673 );
253} 674}
254 675
255static inline void core_sleep(void) 676/*---------------------------------------------------------------------------
677 * Put core in a power-saving state if waking list wasn't repopulated.
678 *---------------------------------------------------------------------------
679 */
680static inline void core_sleep(struct thread_entry **waking)
256{ 681{
257 asm volatile ("stop #0x2000"); 682 asm volatile (
258} 683 "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */
259 684 "lsl.l #8, %%d0 \n"
260/* Set EMAC unit to fractional mode with saturation for each new thread, 685 "move.w %%d0, %%sr \n"
261 since that's what'll be the most useful for most things which the dsp 686 "tst.l (%0) \n" /* Check *waking */
262 will do. Codecs should still initialize their preferred modes 687 "beq.b 1f \n" /* != NULL -> exit */
263 explicitly. */ 688 "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
264#define THREAD_CPU_INIT(core, thread) \ 689 "lsl.l #8, %%d0 \n"
265 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; }) 690 "move.w %%d0, %%sr \n"
691 ".word 0x51fb \n" /* tpf.l - eat stop instruction */
692 "1: \n"
693 "stop #0x2000 \n" /* Supervisor mode, interrupts enabled
694 upon wakeup */
695 : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
696 );
697};
266 698
267#elif CONFIG_CPU == SH7034 699#elif CONFIG_CPU == SH7034
268/*--------------------------------------------------------------------------- 700/*---------------------------------------------------------------------------
@@ -342,18 +774,37 @@ static inline void load_context(const void* addr)
342 ); 774 );
343} 775}
344 776
345static inline void core_sleep(void) 777/*---------------------------------------------------------------------------
778 * Put core in a power-saving state if waking list wasn't repopulated.
779 *---------------------------------------------------------------------------
780 */
781static inline void core_sleep(struct thread_entry **waking)
346{ 782{
347 and_b(0x7F, &SBYCR); 783 asm volatile (
348 asm volatile ("sleep"); 784 "mov %2, r1 \n" /* Disable interrupts */
785 "ldc r1, sr \n"
786 "mov.l @%1, r1 \n" /* Check *waking */
787 "tst r1, r1 \n"
788 "bf 1f \n" /* *waking != NULL ? exit */
789 "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
790 "mov #0, r1 \n" /* Enable interrupts */
791 "ldc r1, sr \n" /* Following instruction cannot be interrupted */
792 "bra 2f \n" /* bra and sleep are executed at once */
793 "sleep \n" /* Execute standby */
794 "1: \n"
795 "mov #0, r1 \n" /* Enable interrupts */
796 "ldc r1, sr \n"
797 "2: \n"
798 :
799 : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
800 : "r1");
349} 801}
350 802
351#endif 803#endif /* CONFIG_CPU == */
352 804
353#ifndef THREAD_CPU_INIT 805/*
354/* No cpu specific init - make empty */ 806 * End Processor-specific section
355#define THREAD_CPU_INIT(core, thread) 807 ***************************************************************************/
356#endif
357 808
358#if THREAD_EXTRA_CHECKS 809#if THREAD_EXTRA_CHECKS
359static void thread_panicf(const char *msg, struct thread_entry *thread) 810static void thread_panicf(const char *msg, struct thread_entry *thread)
@@ -387,462 +838,1030 @@ static void thread_stkov(struct thread_entry *thread)
387#define THREAD_ASSERT(exp, msg, thread) 838#define THREAD_ASSERT(exp, msg, thread)
388#endif /* THREAD_EXTRA_CHECKS */ 839#endif /* THREAD_EXTRA_CHECKS */
389 840
390static void add_to_list(struct thread_entry **list, struct thread_entry *thread) 841/*---------------------------------------------------------------------------
842 * Locks a list pointer and returns its value
843 *---------------------------------------------------------------------------
844 */
845#if CONFIG_CORELOCK == SW_CORELOCK
846/* Separate locking function versions */
847
848/* Thread locking */
849#define GET_THREAD_STATE(thread) \
850 ({ corelock_lock(&(thread)->cl); (thread)->state; })
851#define TRY_GET_THREAD_STATE(thread) \
852 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
853#define UNLOCK_THREAD(thread, state) \
854 ({ corelock_unlock(&(thread)->cl); })
855#define UNLOCK_THREAD_SET_STATE(thread, _state) \
856 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
857
858/* List locking */
859#define LOCK_LIST(tqp) \
860 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
861#define UNLOCK_LIST(tqp, mod) \
862 ({ corelock_unlock(&(tqp)->cl); })
863#define UNLOCK_LIST_SET_PTR(tqp, mod) \
864 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
865
866/* Select the queue pointer directly */
867#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
868 ({ add_to_list_l(&(tqp)->queue, (thread)); })
869#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
870 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
871
872#elif CONFIG_CORELOCK == CORELOCK_SWAP
873/* Native swap/exchange versions */
874
875/* Thread locking */
876#define GET_THREAD_STATE(thread) \
877 ({ unsigned _s; \
878 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
879 _s; })
880#define TRY_GET_THREAD_STATE(thread) \
881 ({ xchg8(&(thread)->state, STATE_BUSY); })
882#define UNLOCK_THREAD(thread, _state) \
883 ({ (thread)->state = (_state); })
884#define UNLOCK_THREAD_SET_STATE(thread, _state) \
885 ({ (thread)->state = (_state); })
886
887/* List locking */
888#define LOCK_LIST(tqp) \
889 ({ struct thread_entry *_l; \
890 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
891 _l; })
892#define UNLOCK_LIST(tqp, mod) \
893 ({ (tqp)->queue = (mod); })
894#define UNLOCK_LIST_SET_PTR(tqp, mod) \
895 ({ (tqp)->queue = (mod); })
896
897/* Select the local queue pointer copy returned from LOCK_LIST */
898#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
899 ({ add_to_list_l(&(tc), (thread)); })
900#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
901 ({ remove_from_list_l(&(tc), (thread)); })
902
903#else
904/* Single-core/non-locked versions */
905
906/* Threads */
907#define GET_THREAD_STATE(thread) \
908 ({ (thread)->state; })
909#define UNLOCK_THREAD(thread, _state)
910#define UNLOCK_THREAD_SET_STATE(thread, _state) \
911 ({ (thread)->state = (_state); })
912
913/* Lists */
914#define LOCK_LIST(tqp) \
915 ({ (tqp)->queue; })
916#define UNLOCK_LIST(tqp, mod)
917#define UNLOCK_LIST_SET_PTR(tqp, mod) \
918 ({ (tqp)->queue = (mod); })
919
920/* Select the queue pointer directly */
921#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
922 ({ add_to_list_l(&(tqp)->queue, (thread)); })
923#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
924 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
925
926#endif /* locking selection */
927
928#if THREAD_EXTRA_CHECKS
929/*---------------------------------------------------------------------------
930 * Lock the thread slot to obtain the state and then unlock it. Waits for
931 * it not to be busy. Used for debugging.
932 *---------------------------------------------------------------------------
933 */
934static unsigned peek_thread_state(struct thread_entry *thread)
935{
936 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
937 unsigned state = GET_THREAD_STATE(thread);
938 UNLOCK_THREAD(thread, state);
939 set_irq_level(oldlevel);
940 return state;
941}
942#endif /* THREAD_EXTRA_CHECKS */
943
944/*---------------------------------------------------------------------------
945 * Adds a thread to a list of threads using "insert last". Uses the "l"
946 * links.
947 *---------------------------------------------------------------------------
948 */
949static void add_to_list_l(struct thread_entry **list,
950 struct thread_entry *thread)
391{ 951{
392 if (*list == NULL) 952 struct thread_entry *l = *list;
953
954 if (l == NULL)
393 { 955 {
394 thread->next = thread; 956 /* Insert into unoccupied list */
395 thread->prev = thread; 957 thread->l.next = thread;
958 thread->l.prev = thread;
396 *list = thread; 959 *list = thread;
960 return;
397 } 961 }
398 else 962
399 { 963 /* Insert last */
400 /* Insert last */ 964 thread->l.next = l;
401 thread->next = *list; 965 thread->l.prev = l->l.prev;
402 thread->prev = (*list)->prev; 966 thread->l.prev->l.next = thread;
403 thread->prev->next = thread; 967 l->l.prev = thread;
404 (*list)->prev = thread; 968
405 969 /* Insert next
406 /* Insert next 970 thread->l.next = l->l.next;
407 thread->next = (*list)->next; 971 thread->l.prev = l;
408 thread->prev = *list; 972 thread->l.next->l.prev = thread;
409 thread->next->prev = thread; 973 l->l.next = thread;
410 (*list)->next = thread; 974 */
411 */ 975}
412 } 976
977/*---------------------------------------------------------------------------
978 * Locks a list, adds the thread entry and unlocks the list on multicore.
979 * Defined as add_to_list_l on single-core.
980 *---------------------------------------------------------------------------
981 */
982#if NUM_CORES > 1
983static void add_to_list_l_locked(struct thread_queue *tq,
984 struct thread_entry *thread)
985{
986 struct thread_entry *t = LOCK_LIST(tq);
987 ADD_TO_LIST_L_SELECT(t, tq, thread);
988 UNLOCK_LIST(tq, t);
989 (void)t;
413} 990}
991#else
992#define add_to_list_l_locked(tq, thread) \
993 add_to_list_l(&(tq)->queue, (thread))
994#endif
414 995
415static void remove_from_list(struct thread_entry **list, 996/*---------------------------------------------------------------------------
416 struct thread_entry *thread) 997 * Removes a thread from a list of threads. Uses the "l" links.
998 *---------------------------------------------------------------------------
999 */
1000static void remove_from_list_l(struct thread_entry **list,
1001 struct thread_entry *thread)
417{ 1002{
418 if (list != NULL) 1003 struct thread_entry *prev, *next;
1004
1005 next = thread->l.next;
1006
1007 if (thread == next)
419 { 1008 {
420 if (thread == thread->next) 1009 /* The only item */
421 { 1010 *list = NULL;
422 *list = NULL; 1011 return;
423 return; 1012 }
424 } 1013
425 1014 if (thread == *list)
426 if (thread == *list) 1015 {
427 *list = thread->next; 1016 /* List becomes next item */
1017 *list = next;
428 } 1018 }
1019
1020 prev = thread->l.prev;
429 1021
430 /* Fix links to jump over the removed entry. */ 1022 /* Fix links to jump over the removed entry. */
431 thread->prev->next = thread->next; 1023 prev->l.next = next;
432 thread->next->prev = thread->prev; 1024 next->l.prev = prev;
433} 1025}
434 1026
435static void check_sleepers(void) __attribute__ ((noinline)); 1027/*---------------------------------------------------------------------------
436static void check_sleepers(void) 1028 * Locks a list, removes the thread entry and unlocks the list on multicore.
1029 * Defined as remove_from_list_l on single-core.
1030 *---------------------------------------------------------------------------
1031 */
1032#if NUM_CORES > 1
1033static void remove_from_list_l_locked(struct thread_queue *tq,
1034 struct thread_entry *thread)
437{ 1035{
438 const unsigned int core = CURRENT_CORE; 1036 struct thread_entry *t = LOCK_LIST(tq);
439 struct thread_entry *current, *next; 1037 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
440 1038 UNLOCK_LIST(tq, t);
441 /* Check sleeping threads. */ 1039 (void)t;
442 current = cores[core].sleeping; 1040}
443 1041#else
444 for (;;) 1042#define remove_from_list_l_locked(tq, thread) \
1043 remove_from_list_l(&(tq)->queue, (thread))
1044#endif
1045
1046/*---------------------------------------------------------------------------
1047 * Add a thread to the core's timeout list by linking the pointers in its
1048 * tmo structure.
1049 *---------------------------------------------------------------------------
1050 */
1051static void add_to_list_tmo(struct thread_entry *thread)
1052{
1053 /* Insert first */
1054 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
1055
1056 thread->tmo.prev = thread;
1057 thread->tmo.next = t;
1058
1059 if (t != NULL)
1060 {
1061 /* Fix second item's prev pointer to point to this thread */
1062 t->tmo.prev = thread;
1063 }
1064
1065 cores[IF_COP_CORE(thread->core)].timeout = thread;
1066}
1067
1068/*---------------------------------------------------------------------------
1069 * Remove a thread from the core's timeout list by unlinking the pointers in
1070 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
1071 * is cancelled.
1072 *---------------------------------------------------------------------------
1073 */
1074static void remove_from_list_tmo(struct thread_entry *thread)
1075{
1076 struct thread_entry *next = thread->tmo.next;
1077 struct thread_entry *prev;
1078
1079 if (thread == cores[IF_COP_CORE(thread->core)].timeout)
445 { 1080 {
446 next = current->next; 1081 /* Next item becomes list head */
447 1082 cores[IF_COP_CORE(thread->core)].timeout = next;
448 if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg)) 1083
1084 if (next != NULL)
449 { 1085 {
450 /* Sleep timeout has been reached so bring the thread 1086 /* Fix new list head's prev to point to itself. */
451 * back to life again. */ 1087 next->tmo.prev = next;
452 remove_from_list(&cores[core].sleeping, current);
453 add_to_list(&cores[core].running, current);
454 current->statearg = 0;
455
456 /* If there is no more processes in the list, break the loop. */
457 if (cores[core].sleeping == NULL)
458 break;
459
460 current = next;
461 continue;
462 } 1088 }
463 1089
464 current = next; 1090 thread->tmo.prev = NULL;
465 1091 return;
466 /* Break the loop once we have walked through the list of all 1092 }
467 * sleeping processes. */ 1093
468 if (current == cores[core].sleeping) 1094 prev = thread->tmo.prev;
469 break; 1095
1096 if (next != NULL)
1097 {
1098 next->tmo.prev = prev;
470 } 1099 }
1100
1101 prev->tmo.next = next;
1102 thread->tmo.prev = NULL;
471} 1103}
472 1104
473/* Safely finish waking all threads potentialy woken by interrupts - 1105/*---------------------------------------------------------------------------
474 * statearg already zeroed in wakeup_thread. */ 1106 * Schedules a thread wakeup on the specified core. Threads will be made
475static void wake_list_awaken(void) __attribute__ ((noinline)); 1107 * ready to run when the next task switch occurs. Note that this does not
476static void wake_list_awaken(void) 1108 * introduce an on-core delay since the next thread cannot run any sooner
1109 * than the next task switch anyway. Other cores and on-core interrupts may only ever
1110 * add to the list.
1111 *---------------------------------------------------------------------------
1112 */
1113static void core_schedule_wakeup(struct thread_entry *thread)
477{ 1114{
478 const unsigned int core = CURRENT_CORE;
479 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 1115 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1116 const unsigned int core = IF_COP_CORE(thread->core);
1117 add_to_list_l_locked(&cores[core].waking, thread);
1118#if NUM_CORES > 1
1119 if (core != CURRENT_CORE)
1120 {
1121 core_wake(core);
1122 }
1123#endif
1124 set_irq_level(oldlevel);
1125}
480 1126
481 /* No need for another check in the IRQ lock since IRQs are allowed 1127/*---------------------------------------------------------------------------
482 only to add threads to the waking list. They won't be adding more 1128 * If the waking list was populated, move all threads on it onto the running
483 until we're done here though. */ 1129 * list so they may be run ASAP.
484 1130 *---------------------------------------------------------------------------
485 struct thread_entry *waking = cores[core].waking; 1131 */
486 struct thread_entry *running = cores[core].running; 1132static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1133{
1134 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1135 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
1136 struct thread_entry *r = cores[IF_COP_CORE(core)].running;
487 1137
488 if (running != NULL) 1138 /* Transfer all threads on waking list to running list in one
1139 swoop */
1140 if (r != NULL)
489 { 1141 {
490 /* Place waking threads at the end of the running list. */ 1142 /* Place waking threads at the end of the running list. */
491 struct thread_entry *tmp; 1143 struct thread_entry *tmp;
492 waking->prev->next = running; 1144 w->l.prev->l.next = r;
493 running->prev->next = waking; 1145 r->l.prev->l.next = w;
494 tmp = running->prev; 1146 tmp = r->l.prev;
495 running->prev = waking->prev; 1147 r->l.prev = w->l.prev;
496 waking->prev = tmp; 1148 w->l.prev = tmp;
497 } 1149 }
498 else 1150 else
499 { 1151 {
500 /* Just transfer the list as-is - just came out of a core 1152 /* Just transfer the list as-is */
501 * sleep. */ 1153 cores[IF_COP_CORE(core)].running = w;
502 cores[core].running = waking;
503 } 1154 }
1155 /* Just leave any timeout threads on the timeout list. If a timeout check
1156 * is due, they will be removed there. If they do a timeout again before
1157 * being removed, they will just stay on the list with a new expiration
1158 * tick. */
504 1159
505 /* Done with waking list */ 1160 /* Waking list is clear - NULL and unlock it */
506 cores[core].waking = NULL; 1161 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
507 set_irq_level(oldlevel); 1162 set_irq_level(oldlevel);
508} 1163}
509 1164
510static inline void sleep_core(void) 1165/*---------------------------------------------------------------------------
1166 * Check the core's timeout list when at least one thread is due to wake.
1167 * Filtering for the condition is done before making the call. Resets the
1168 * tick at which the next check will occur.
1169 *---------------------------------------------------------------------------
1170 */
1171static void check_tmo_threads(void)
511{ 1172{
512 const unsigned int core = CURRENT_CORE; 1173 const unsigned int core = CURRENT_CORE;
1174 const long tick = current_tick; /* snapshot the current tick */
1175 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1176 struct thread_entry *next = cores[core].timeout;
513 1177
514 for (;;) 1178 /* If there are no processes waiting for a timeout, just keep the check
1179 tick from falling into the past. */
1180 if (next != NULL)
515 { 1181 {
516 /* We want to do these ASAP as it may change the decision to sleep 1182 /* Check sleeping threads. */
517 the core or the core has woken because an interrupt occurred 1183 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
518 and posted a message to a queue. */
519 if (cores[core].waking != NULL)
520 wake_list_awaken();
521 1184
522 if (cores[core].last_tick != current_tick) 1185 do
523 { 1186 {
524 if (cores[core].sleeping != NULL) 1187 /* Must make sure noone else is examining the state, wait until
525 check_sleepers(); 1188 slot is no longer busy */
526 cores[core].last_tick = current_tick; 1189 struct thread_entry *curr = next;
1190 next = curr->tmo.next;
1191
1192 unsigned state = GET_THREAD_STATE(curr);
1193
1194 if (state < TIMEOUT_STATE_FIRST)
1195 {
1196 /* Cleanup threads no longer on a timeout but still on the
1197 * list. */
1198 remove_from_list_tmo(curr);
1199 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1200 }
1201 else if (TIME_BEFORE(tick, curr->tmo_tick))
1202 {
1203 /* Timeout still pending - this will be the usual case */
1204 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1205 {
1206 /* Earliest timeout found so far - move the next check up
1207 to its time */
1208 next_tmo_check = curr->tmo_tick;
1209 }
1210 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1211 }
1212 else
1213 {
1214 /* Sleep timeout has been reached so bring the thread back to
1215 * life again. */
1216 if (state == STATE_BLOCKED_W_TMO)
1217 {
1218 remove_from_list_l_locked(curr->bqp, curr);
1219 }
1220
1221 remove_from_list_tmo(curr);
1222 add_to_list_l(&cores[core].running, curr);
1223 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
1224 }
1225
1226 /* Break the loop once we have walked through the list of all
1227 * sleeping processes or have removed them all. */
527 } 1228 }
528 1229 while (next != NULL);
529 /* We must sleep until there is at least one process in the list
530 * of running processes. */
531 if (cores[core].running != NULL)
532 break;
533 1230
534 /* Enter sleep mode to reduce power usage, woken up on interrupt */ 1231 set_irq_level(oldlevel);
535 core_sleep();
536 } 1232 }
1233
1234 cores[core].next_tmo_check = next_tmo_check;
537} 1235}
538 1236
539#ifdef RB_PROFILE 1237/*---------------------------------------------------------------------------
540static int get_threadnum(struct thread_entry *thread) 1238 * Performs operations that must be done before blocking a thread but after
1239 * the state is saved - follows reverse of locking order. blk_ops.flags is
1240 * assumed to be nonzero.
1241 *---------------------------------------------------------------------------
1242 */
1243static inline void run_blocking_ops(
1244 IF_COP_VOID(unsigned int core, struct thread_entry *thread))
541{ 1245{
542 int i; 1246#if NUM_CORES > 1
543 1247 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
544 for (i = 0; i < MAXTHREADS; i++) 1248 const unsigned flags = ops->flags;
1249
1250 if (flags == 0)
1251 return;
1252
1253 if (flags & TBOP_SWITCH_CORE)
545 { 1254 {
546 if (&threads[i] == thread) 1255 core_switch_blk_op(core, thread);
547 return i;
548 } 1256 }
549 1257
550 return -1; 1258#if CONFIG_CORELOCK == SW_CORELOCK
1259 if (flags & TBOP_UNLOCK_LIST)
1260 {
1261 UNLOCK_LIST(ops->list_p, NULL);
1262 }
1263
1264 if (flags & TBOP_UNLOCK_CORELOCK)
1265 {
1266 corelock_unlock(ops->cl_p);
1267 }
1268
1269 if (flags & TBOP_UNLOCK_THREAD)
1270 {
1271 UNLOCK_THREAD(ops->thread, 0);
1272 }
1273#elif CONFIG_CORELOCK == CORELOCK_SWAP
1274 /* Write updated variable value into memory location */
1275 switch (flags & TBOP_VAR_TYPE_MASK)
1276 {
1277 case TBOP_UNLOCK_LIST:
1278 UNLOCK_LIST(ops->list_p, ops->list_v);
1279 break;
1280 case TBOP_SET_VARi:
1281 *ops->var_ip = ops->var_iv;
1282 break;
1283 case TBOP_SET_VARu8:
1284 *ops->var_u8p = ops->var_u8v;
1285 break;
1286 }
1287#endif /* CONFIG_CORELOCK == */
1288
1289 /* Unlock thread's slot */
1290 if (flags & TBOP_UNLOCK_CURRENT)
1291 {
1292 UNLOCK_THREAD(thread, ops->state);
1293 }
1294
1295 /* Reset the IRQ level */
1296 if (flags & TBOP_IRQ_LEVEL)
1297 {
1298 set_irq_level(ops->irq_level);
1299 }
1300
1301 ops->flags = 0;
1302#else
1303 int level = cores[CURRENT_CORE].irq_level;
1304 if (level == STAY_IRQ_LEVEL)
1305 return;
1306
1307 cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
1308 set_irq_level(level);
1309#endif /* NUM_CORES */
551} 1310}
552 1311
553void profile_thread(void) { 1312
554 profstart(get_threadnum(cores[CURRENT_CORE].running)); 1313/*---------------------------------------------------------------------------
1314 * Runs any operations that may cause threads to be ready to run and then
1315 * sleeps the processor core until the next interrupt if none are.
1316 *---------------------------------------------------------------------------
1317 */
1318static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1319{
1320 for (;;)
1321 {
1322 /* We want to do these ASAP as it may change the decision to sleep
1323 * the core or a core has woken because an interrupt occurred
1324 * and posted a message to a queue. */
1325 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1326 {
1327 core_perform_wakeup(IF_COP(core));
1328 }
1329
1330 /* If there are threads on a timeout and the earliest wakeup is due,
1331 * check the list and wake any threads that need to start running
1332 * again. */
1333 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1334 {
1335 check_tmo_threads();
1336 }
1337
1338 /* If there is a ready to run task, return its ID and keep core
1339 * awake. */
1340 if (cores[IF_COP_CORE(core)].running != NULL)
1341 {
1342 return cores[IF_COP_CORE(core)].running;
1343 }
1344
1345 /* Enter sleep mode to reduce power usage - woken up on interrupt or
1346 * wakeup request from another core. May abort if the waking list
1347 * became populated (again). See beginning of this file for the
1348 * algorithm to atomically determine this. */
1349 core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
1350 }
1351}
1352
1353#ifdef RB_PROFILE
1354void profile_thread(void)
1355{
1356 profstart(cores[CURRENT_CORE].running - threads);
555} 1357}
556#endif 1358#endif
557 1359
558static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline)); 1360/*---------------------------------------------------------------------------
559static void change_thread_state(struct thread_entry **blocked_list) 1361 * Prepares a thread to block on an object's list and/or for a specified
1362 * duration - expects object and slot to be appropriately locked if needed.
1363 *---------------------------------------------------------------------------
1364 */
1365static inline void _block_thread_on_l(struct thread_queue *list,
1366 struct thread_entry *thread,
1367 unsigned state
1368 IF_SWCL(, const bool nolock))
560{ 1369{
561 const unsigned int core = CURRENT_CORE; 1370 /* If inlined, unreachable branches will be pruned with no size penalty
562 struct thread_entry *old; 1371 because constant params are used for state and nolock. */
563 unsigned long new_state; 1372 const unsigned int core = IF_COP_CORE(thread->core);
564 1373
565 /* Remove the thread from the list of running threads. */ 1374 /* Remove the thread from the list of running threads. */
566 old = cores[core].running; 1375 remove_from_list_l(&cores[core].running, thread);
567 new_state = GET_STATE(old->statearg);
568 1376
569 /* Check if a thread state change has been requested. */ 1377 /* Add a timeout to the block if not infinite */
570 if (new_state) 1378 switch (state)
571 { 1379 {
572 /* Change running thread state and switch to next thread. */ 1380 case STATE_BLOCKED:
573 remove_from_list(&cores[core].running, old); 1381 /* Put the thread into a new list of inactive threads. */
574 1382#if CONFIG_CORELOCK == SW_CORELOCK
575 /* And put the thread into a new list of inactive threads. */ 1383 if (nolock)
576 if (new_state == STATE_BLOCKED) 1384 {
577 add_to_list(blocked_list, old); 1385 thread->bqp = NULL; /* Indicate nolock list */
1386 thread->bqnlp = (struct thread_entry **)list;
1387 add_to_list_l((struct thread_entry **)list, thread);
1388 }
1389 else
1390#endif
1391 {
1392 thread->bqp = list;
1393 add_to_list_l_locked(list, thread);
1394 }
1395 break;
1396 case STATE_BLOCKED_W_TMO:
1397 /* Put the thread into a new list of inactive threads. */
1398#if CONFIG_CORELOCK == SW_CORELOCK
1399 if (nolock)
1400 {
1401 thread->bqp = NULL; /* Indicate nolock list */
1402 thread->bqnlp = (struct thread_entry **)list;
1403 add_to_list_l((struct thread_entry **)list, thread);
1404 }
578 else 1405 else
579 add_to_list(&cores[core].sleeping, old);
580
581#ifdef HAVE_PRIORITY_SCHEDULING
582 /* Reset priorities */
583 if (old->priority == cores[core].highest_priority)
584 cores[core].highest_priority = 100;
585#endif 1406#endif
1407 {
1408 thread->bqp = list;
1409 add_to_list_l_locked(list, thread);
1410 }
1411 /* Fall-through */
1412 case STATE_SLEEPING:
1413 /* If this thread times out sooner than any other thread, update
1414 next_tmo_check to its timeout */
1415 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1416 {
1417 cores[core].next_tmo_check = thread->tmo_tick;
1418 }
1419
1420 if (thread->tmo.prev == NULL)
1421 {
1422 add_to_list_tmo(thread);
1423 }
1424 /* else thread was never removed from list - just keep it there */
1425 break;
586 } 1426 }
587 else 1427
588 /* Switch to the next running thread. */ 1428#ifdef HAVE_PRIORITY_SCHEDULING
589 cores[core].running = old->next; 1429 /* Reset priorities */
1430 if (thread->priority == cores[core].highest_priority)
1431 cores[core].highest_priority = LOWEST_PRIORITY;
1432#endif
1433
1434#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
1435 /* Safe to set state now */
1436 thread->state = state;
1437#elif CONFIG_CORELOCK == CORELOCK_SWAP
1438 cores[core].blk_ops.state = state;
1439#endif
1440
1441#if NUM_CORES > 1
1442 /* Delay slot unlock until task switch */
1443 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1444#endif
1445}
1446
1447static inline void block_thread_on_l(
1448 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1449{
1450 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1451}
1452
1453static inline void block_thread_on_l_no_listlock(
1454 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1455{
1456 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
590} 1457}
591 1458
592/*--------------------------------------------------------------------------- 1459/*---------------------------------------------------------------------------
593 * Switch thread in round robin fashion. 1460 * Switch thread in round robin fashion for any given priority. Any thread
1461 * that removed itself from the running list first must specify itself in
1462 * the paramter.
1463 *
1464 * INTERNAL: Intended for use by kernel and not for programs.
594 *--------------------------------------------------------------------------- 1465 *---------------------------------------------------------------------------
595 */ 1466 */
596void switch_thread(bool save_context, struct thread_entry **blocked_list) 1467void switch_thread(struct thread_entry *old)
597{ 1468{
598 const unsigned int core = CURRENT_CORE; 1469 const unsigned int core = CURRENT_CORE;
1470 struct thread_entry *thread = cores[core].running;
1471
1472 if (old == NULL)
1473 {
1474 /* Move to next thread */
1475 old = thread;
1476 cores[core].running = old->l.next;
1477 }
1478 /* else running list is already at next thread */
599 1479
600#ifdef RB_PROFILE 1480#ifdef RB_PROFILE
601 profile_thread_stopped(get_threadnum(cores[core].running)); 1481 profile_thread_stopped(old - threads);
602#endif 1482#endif
603 unsigned int *stackptr;
604
605#ifdef SIMULATOR
606 /* Do nothing */
607#else
608 1483
609 /* Begin task switching by saving our current context so that we can 1484 /* Begin task switching by saving our current context so that we can
610 * restore the state of the current thread later to the point prior 1485 * restore the state of the current thread later to the point prior
611 * to this call. */ 1486 * to this call. */
612 if (save_context) 1487 store_context(&old->context);
613 {
614 store_context(&cores[core].running->context);
615 1488
616 /* Check if the current thread stack is overflown */ 1489 /* Check if the current thread stack is overflown */
617 stackptr = cores[core].running->stack; 1490 if(((unsigned int *)old->stack)[0] != DEADBEEF)
618 if(stackptr[0] != DEADBEEF) 1491 thread_stkov(old);
619 thread_stkov(cores[core].running); 1492
620 1493 /* Run any blocking operations requested before switching/sleeping */
621 /* Rearrange thread lists as needed */ 1494 run_blocking_ops(IF_COP(core, old));
622 change_thread_state(blocked_list);
623 1495
624 /* This has to be done after the scheduler is finished with the
625 blocked_list pointer so that an IRQ can't kill us by attempting
626 a wake but before attempting any core sleep. */
627 if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
628 {
629 int level = cores[core].switch_to_irq_level;
630 cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
631 set_irq_level(level);
632 }
633 }
634
635 /* Go through the list of sleeping tasks to check if we need to wake up 1496 /* Go through the list of sleeping tasks to check if we need to wake up
636 * any of them due to timeout. Also puts core into sleep state until 1497 * any of them due to timeout. Also puts core into sleep state until
637 * there is at least one running process again. */ 1498 * there is at least one running process again. */
638 sleep_core(); 1499 thread = sleep_core(IF_COP(core));
639 1500
640#ifdef HAVE_PRIORITY_SCHEDULING 1501#ifdef HAVE_PRIORITY_SCHEDULING
641 /* Select the new task based on priorities and the last time a process 1502 /* Select the new task based on priorities and the last time a process
642 * got CPU time. */ 1503 * got CPU time. */
643 for (;;) 1504 for (;;)
644 { 1505 {
645 int priority = cores[core].running->priority; 1506 int priority = MIN(thread->priority, thread->priority_x);
646 1507
647 if (priority < cores[core].highest_priority) 1508 if (priority < cores[core].highest_priority)
648 cores[core].highest_priority = priority; 1509 cores[core].highest_priority = priority;
649 1510
650 if (priority == cores[core].highest_priority || 1511 if (priority == cores[core].highest_priority ||
651 (current_tick - cores[core].running->last_run > 1512 (current_tick - thread->last_run > priority * 8))
652 priority * 8) ||
653 cores[core].running->priority_x != 0)
654 { 1513 {
1514 cores[core].running = thread;
655 break; 1515 break;
656 } 1516 }
657 1517
658 cores[core].running = cores[core].running->next; 1518 thread = thread->l.next;
659 } 1519 }
660 1520
661 /* Reset the value of thread's last running time to the current time. */ 1521 /* Reset the value of thread's last running time to the current time. */
662 cores[core].running->last_run = current_tick; 1522 thread->last_run = current_tick;
663#endif 1523#endif /* HAVE_PRIORITY_SCHEDULING */
664 1524
665#endif
666
667 /* And finally give control to the next thread. */ 1525 /* And finally give control to the next thread. */
668 load_context(&cores[core].running->context); 1526 load_context(&thread->context);
669 1527
670#ifdef RB_PROFILE 1528#ifdef RB_PROFILE
671 profile_thread_started(get_threadnum(cores[core].running)); 1529 profile_thread_started(thread - threads);
672#endif 1530#endif
673} 1531}
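The selection loop above runs on an effective priority of MIN(priority, priority_x) plus an aging rule so low-priority threads cannot starve. A standalone illustration of that rule, using hypothetical toy types and no locking:

struct toy_thread
{
    int  priority;     /* static priority, lower value = more important  */
    int  priority_x;   /* transient override used by priority_yield()    */
    long last_run;     /* tick when this thread last had the CPU         */
};

/* Mirrors the decision above: pick the thread if it matches the best
 * effective priority seen so far, or if it has waited longer than eight
 * ticks per priority level since it last ran. */
static int toy_should_run(const struct toy_thread *t, int *highest, long now)
{
    int prio = (t->priority < t->priority_x) ? t->priority : t->priority_x;

    if (prio < *highest)
        *highest = prio;

    return prio == *highest || (now - t->last_run > prio * 8);
}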
674 1532
675void sleep_thread(int ticks) 1533/*---------------------------------------------------------------------------
 1534 * Removes the boost flag from a thread and unboosts the CPU if the number
 1535 * of boosted threads reaches zero. Requires the thread slot to be locked first.
1536 *---------------------------------------------------------------------------
1537 */
1538static inline void unboost_thread(struct thread_entry *thread)
676{ 1539{
677 struct thread_entry *current;
678
679 current = cores[CURRENT_CORE].running;
680
681#ifdef HAVE_SCHEDULER_BOOSTCTRL 1540#ifdef HAVE_SCHEDULER_BOOSTCTRL
682 if (STATE_IS_BOOSTED(current->statearg)) 1541 if (thread->boosted != 0)
683 { 1542 {
684 boosted_threads--; 1543 thread->boosted = 0;
685 if (!boosted_threads) 1544 if (--boosted_threads == 0)
686 { 1545 {
687 cpu_boost(false); 1546 cpu_boost(false);
688 } 1547 }
689 } 1548 }
690#endif 1549#endif
691 1550 (void)thread;
692 /* Set the thread's new state and timeout and finally force a task switch
693 * so that scheduler removes thread from the list of running processes
694 * and puts it in list of sleeping tasks. */
695 SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
696
697 switch_thread(true, NULL);
698} 1551}
699 1552
700void block_thread(struct thread_entry **list) 1553/*---------------------------------------------------------------------------
 1554 * Sleeps a thread for a specified number of ticks and unboosts the thread
 1555 * if it is boosted. If ticks is zero, it does not delay but instead switches
1556 * tasks.
1557 *
1558 * INTERNAL: Intended for use by kernel and not for programs.
1559 *---------------------------------------------------------------------------
1560 */
1561void sleep_thread(int ticks)
701{ 1562{
702 struct thread_entry *current;
703
704 /* Get the entry for the current running thread. */ 1563 /* Get the entry for the current running thread. */
705 current = cores[CURRENT_CORE].running; 1564 struct thread_entry *current = cores[CURRENT_CORE].running;
706 1565
707#ifdef HAVE_SCHEDULER_BOOSTCTRL 1566#if NUM_CORES > 1
708 /* Keep the boosted state over indefinite block calls, because 1567 /* Lock thread slot */
709 * we are waiting until the earliest time that someone else 1568 GET_THREAD_STATE(current);
710 * completes an action */
711 unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
712#endif 1569#endif
713 1570
714 /* We are not allowed to mix blocking types in one queue. */ 1571 /* Remove our boosted status if any */
715 THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO, 1572 unboost_thread(current);
716 "Blocking violation B->*T", current); 1573
717 1574 /* Set our timeout, change lists, and finally switch threads.
 1575 * Unlock during switch on multicore. */
1576 current->tmo_tick = current_tick + ticks + 1;
1577 block_thread_on_l(NULL, current, STATE_SLEEPING);
1578 switch_thread(current);
1579
1580 /* Our status should be STATE_RUNNING */
1581 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1582 "S:R->!*R", current);
1583}
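As a usage sketch only (sleep_thread is kernel-internal, and HZ is assumed here to be the usual ticks-per-second constant), a kernel-side polling loop built on it could look like this:

static void toy_check_hardware(void)
{
    /* hypothetical periodic work */
}

static void toy_poll_loop(void)
{
    for (;;)
    {
        toy_check_hardware();
        sleep_thread(HZ / 10);   /* off the run list for roughly 100 ms */
    }
}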
1584
1585/*---------------------------------------------------------------------------
1586 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1587 * Caller with interrupt-accessible lists should disable interrupts first
 1588 * and request a TBOP_IRQ_LEVEL blocking operation to reset it.
1589 *
1590 * INTERNAL: Intended for use by kernel objects and not for programs.
1591 *---------------------------------------------------------------------------
1592 */
1593IF_SWCL(static inline) void _block_thread(struct thread_queue *list
1594 IF_SWCL(, const bool nolock))
1595{
1596 /* Get the entry for the current running thread. */
1597 struct thread_entry *current = cores[CURRENT_CORE].running;
1598
718 /* Set the state to blocked and ask the scheduler to switch tasks, 1599 /* Set the state to blocked and ask the scheduler to switch tasks,
719 * this takes us off of the run queue until we are explicitly woken */ 1600 * this takes us off of the run queue until we are explicitly woken */
720 SET_STATE(current->statearg, STATE_BLOCKED, 0);
721 1601
722 switch_thread(true, list); 1602#if NUM_CORES > 1
1603 /* Lock thread slot */
1604 GET_THREAD_STATE(current);
1605#endif
723 1606
724#ifdef HAVE_SCHEDULER_BOOSTCTRL 1607#if CONFIG_CORELOCK == SW_CORELOCK
725 /* Reset only the boosted flag to indicate we are up and running again. */ 1608 /* One branch optimized away during inlining */
726 current->statearg = boost_flag; 1609 if (nolock)
727#else 1610 {
728 /* Clear all flags to indicate we are up and running again. */ 1611 block_thread_on_l_no_listlock((struct thread_entry **)list,
729 current->statearg = 0; 1612 current, STATE_BLOCKED);
1613 }
1614 else
730#endif 1615#endif
1616 {
1617 block_thread_on_l(list, current, STATE_BLOCKED);
1618 }
1619
1620 switch_thread(current);
1621
1622 /* Our status should be STATE_RUNNING */
1623 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1624 "B:R->!*R", current);
1625}
1626
1627#if CONFIG_CORELOCK == SW_CORELOCK
1628/* Inline lock/nolock version of _block_thread into these functions */
1629void block_thread(struct thread_queue *tq)
1630{
1631 _block_thread(tq, false);
731} 1632}
732 1633
733void block_thread_w_tmo(struct thread_entry **list, int timeout) 1634void block_thread_no_listlock(struct thread_entry **list)
1635{
1636 _block_thread((struct thread_queue *)list, true);
1637}
1638#endif /* CONFIG_CORELOCK */
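A hedged sketch of how a kernel object might pair these calls with wakeup_thread below: the object owns the thread_queue and serializes access to it. The race between testing the flag and blocking is ignored here; real kernel objects close it by disabling interrupts or holding the object's corelock as described in the locking-order notes.

static struct thread_queue toy_queue;     /* owned by a hypothetical object  */
static volatile bool toy_ready = false;

static void toy_wait(void)                /* waiter side */
{
    while (!toy_ready)
        block_thread(&toy_queue);         /* sleeps until explicitly woken   */
}

static void toy_signal(void)              /* waker side */
{
    toy_ready = true;
    wakeup_thread(&toy_queue);            /* moves one waiter toward running */
}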
1639
1640/*---------------------------------------------------------------------------
1641 * Block a thread on a blocking queue for a specified time interval or until
1642 * explicitly woken - whichever happens first.
1643 * Caller with interrupt-accessible lists should disable interrupts first
1644 * and request that interrupt level be restored after switching out the
1645 * current thread.
1646 *
1647 * INTERNAL: Intended for use by kernel objects and not for programs.
1648 *---------------------------------------------------------------------------
1649 */
1650void block_thread_w_tmo(struct thread_queue *list, int timeout)
734{ 1651{
735 struct thread_entry *current;
736 /* Get the entry for the current running thread. */ 1652 /* Get the entry for the current running thread. */
737 current = cores[CURRENT_CORE].running; 1653 struct thread_entry *current = cores[CURRENT_CORE].running;
738 1654
739#ifdef HAVE_SCHEDULER_BOOSTCTRL 1655#if NUM_CORES > 1
1656 /* Lock thread slot */
1657 GET_THREAD_STATE(current);
1658#endif
1659
740 /* A block with a timeout is a sleep situation, whatever we are waiting 1660 /* A block with a timeout is a sleep situation, whatever we are waiting
741 * for _may or may not_ happen, regardless of boost state, (user input 1661 * for _may or may not_ happen, regardless of boost state, (user input
742 * for instance), so this thread no longer needs to boost */ 1662 * for instance), so this thread no longer needs to boost */
743 if (STATE_IS_BOOSTED(current->statearg)) 1663 unboost_thread(current);
744 {
745 boosted_threads--;
746 if (!boosted_threads)
747 {
748 cpu_boost(false);
749 }
750 }
751#endif
752
753 /* We can store only one thread to the "list" if thread is used
754 * in other list (such as core's list for sleeping tasks). */
755 THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);
756 1664
757 /* Set the state to blocked with the specified timeout */ 1665 /* Set the state to blocked with the specified timeout */
758 SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout); 1666 current->tmo_tick = current_tick + timeout;
759 1667 /* Set the list for explicit wakeup */
760 /* Set the "list" for explicit wakeup */ 1668 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
761 *list = current;
762 1669
763 /* Now force a task switch and block until we have been woken up 1670 /* Now force a task switch and block until we have been woken up
764 * by another thread or timeout is reached. */ 1671 * by another thread or timeout is reached - whichever happens first */
765 switch_thread(true, NULL); 1672 switch_thread(current);
766 1673
767 /* It is now safe for another thread to block on this "list" */ 1674 /* Our status should be STATE_RUNNING */
768 *list = NULL; 1675 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1676 "T:R->!*R", current);
769} 1677}
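A matching sketch for the timed variant, again hypothetical and ignoring the interrupt/corelock discipline: the caller must recheck its condition after the call, since the wake may have been the timeout rather than an explicit wakeup.

static struct thread_queue toy_tmo_queue;
static volatile bool toy_event = false;

static bool toy_wait_up_to_1s(void)
{
    if (!toy_event)
        block_thread_w_tmo(&toy_tmo_queue, HZ);  /* HZ ticks, about 1 second */

    return toy_event;   /* false here means the timeout fired first */
}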
770 1678
771#if !defined(SIMULATOR) 1679/*---------------------------------------------------------------------------
772void set_irq_level_and_block_thread(struct thread_entry **list, int level) 1680 * Explicitly wake up a thread on a blocking queue. Has no effect on threads
1681 * that called sleep().
1682 * Caller with interrupt-accessible lists should disable interrupts first.
1683 * This code should be considered a critical section by the caller.
1684 *
1685 * INTERNAL: Intended for use by kernel objects and not for programs.
1686 *---------------------------------------------------------------------------
1687 */
1688IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
1689 struct thread_queue *list IF_SWCL(, const bool nolock))
773{ 1690{
774 cores[CURRENT_CORE].switch_to_irq_level = level; 1691 struct thread_entry *t;
775 block_thread(list); 1692 struct thread_entry *thread;
776} 1693 unsigned state;
777 1694
778void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list, 1695 /* Wake up the last thread first. */
779 int timeout, int level) 1696#if CONFIG_CORELOCK == SW_CORELOCK
780{ 1697 /* One branch optimized away during inlining */
781 cores[CURRENT_CORE].switch_to_irq_level = level; 1698 if (nolock)
782 block_thread_w_tmo(list, timeout); 1699 {
783} 1700 t = list->queue;
1701 }
1702 else
784#endif 1703#endif
1704 {
1705 t = LOCK_LIST(list);
1706 }
785 1707
786void wakeup_thread(struct thread_entry **list)
787{
788 struct thread_entry *thread;
789
790 /* Check if there is a blocked thread at all. */ 1708 /* Check if there is a blocked thread at all. */
791 if (*list == NULL) 1709 if (t == NULL)
792 { 1710 {
793 return ; 1711#if CONFIG_CORELOCK == SW_CORELOCK
1712 if (!nolock)
1713#endif
1714 {
1715 UNLOCK_LIST(list, NULL);
1716 }
1717 return NULL;
794 } 1718 }
795 1719
796 /* Wake up the last thread first. */ 1720 thread = t;
797 thread = *list; 1721
798 1722#if NUM_CORES > 1
1723#if CONFIG_CORELOCK == SW_CORELOCK
1724 if (nolock)
1725 {
1726 /* Lock thread only, not list */
1727 state = GET_THREAD_STATE(thread);
1728 }
1729 else
1730#endif
1731 {
1732 /* This locks in reverse order from other routines so a retry in the
1733 correct order may be needed */
1734 state = TRY_GET_THREAD_STATE(thread);
1735 if (state == STATE_BUSY)
1736 {
1737 /* Unlock list and retry slot, then list */
1738 UNLOCK_LIST(list, t);
1739 state = GET_THREAD_STATE(thread);
1740 t = LOCK_LIST(list);
1741 /* Be sure thread still exists here - it couldn't have re-added
1742 itself if it was woken elsewhere because this function is
1743 serialized within the object that owns the list. */
1744 if (thread != t)
1745 {
1746 /* Thread disappeared :( */
1747 UNLOCK_LIST(list, t);
1748 UNLOCK_THREAD(thread, state);
1749 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1750 }
1751 }
1752 }
1753#else /* NUM_CORES == 1 */
1754 state = GET_THREAD_STATE(thread);
1755#endif /* NUM_CORES */
1756
799 /* Determine thread's current state. */ 1757 /* Determine thread's current state. */
800 switch (GET_STATE(thread->statearg)) 1758 switch (state)
801 { 1759 {
802 case STATE_BLOCKED: 1760 case STATE_BLOCKED:
803 /* Remove thread from the list of blocked threads and add it 1761 case STATE_BLOCKED_W_TMO:
804 * to the scheduler's list of running processes. List removal 1762 /* Remove thread from object's blocked list - select t or list depending
805 * is safe since each object maintains it's own list of 1763 on locking type at compile time */
806 * sleepers and queues protect against reentrancy. */ 1764 REMOVE_FROM_LIST_L_SELECT(t, list, thread);
807 remove_from_list(list, thread); 1765#if CONFIG_CORELOCK == SW_CORELOCK
808 add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread); 1766 /* Statement optimized away during inlining if nolock != false */
809 1767 if (!nolock)
810 case STATE_BLOCKED_W_TMO: 1768#endif
811 /* Just remove the timeout to cause scheduler to immediately 1769 {
812 * wake up the thread. */ 1770 UNLOCK_LIST(list, t); /* Unlock list - removal complete */
813 thread->statearg = 0; 1771 }
814 break; 1772
815 1773#ifdef HAVE_PRIORITY_SCHEDULING
816 default: 1774 /* Give the task a kick to avoid a stall after wakeup.
817 /* Nothing to do. Thread has already been woken up 1775 Not really proper treatment - TODO later. */
818 * or it's state is not blocked or blocked with timeout. */ 1776 thread->last_run = current_tick - 8*LOWEST_PRIORITY;
819 return ; 1777#endif
1778 core_schedule_wakeup(thread);
1779 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
1780 return thread;
1781 default:
1782 /* Nothing to do. State is not blocked. */
1783#if THREAD_EXTRA_CHECKS
1784 THREAD_PANICF("wakeup_thread->block invalid", thread);
1785 case STATE_RUNNING:
1786 case STATE_KILLED:
1787#endif
1788#if CONFIG_CORELOCK == SW_CORELOCK
1789 /* Statement optimized away during inlining if nolock != false */
1790 if (!nolock)
1791#endif
1792 {
1793 UNLOCK_LIST(list, t); /* Unlock the object's list */
1794 }
1795 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1796 return NULL;
820 } 1797 }
821} 1798}
822 1799
823inline static int find_empty_thread_slot(void) 1800#if CONFIG_CORELOCK == SW_CORELOCK
1801/* Inline lock/nolock version of _wakeup_thread into these functions */
1802struct thread_entry * wakeup_thread(struct thread_queue *tq)
1803{
1804 return _wakeup_thread(tq, false);
1805}
1806
1807struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
1808{
1809 return _wakeup_thread((struct thread_queue *)list, true);
1810}
1811#endif /* CONFIG_CORELOCK */
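A small hypothetical waker showing the documented return values: NULL when nothing was blocked there (or the thread was not in a blocked state), the woken thread on success, and THREAD_WAKEUP_MISSING on multicore builds when the thread vanished during the reverse-order retry above.

static void toy_wake_one(struct thread_queue *q)
{
    struct thread_entry *t = wakeup_thread(q);

    if (t == NULL)
        return;                    /* queue empty or thread not blocked */

#if NUM_CORES > 1
    if (t == THREAD_WAKEUP_MISSING)
        return;                    /* lost a race with another core     */
#endif

    /* 't' is now on its way to the running list of its own core */
}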
1812
1813/*---------------------------------------------------------------------------
1814 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1815 * will be locked on multicore.
1816 *---------------------------------------------------------------------------
1817 */
1818static int find_empty_thread_slot(void)
824{ 1819{
1820#if NUM_CORES > 1
1821 /* Any slot could be on an IRQ-accessible list */
1822 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1823#endif
1824 /* Thread slots are not locked on single core */
1825
825 int n; 1826 int n;
826 1827
827 for (n = 0; n < MAXTHREADS; n++) 1828 for (n = 0; n < MAXTHREADS; n++)
828 { 1829 {
829 if (threads[n].name == NULL) 1830 /* Obtain current slot state - lock it on multicore */
830 return n; 1831 unsigned state = GET_THREAD_STATE(&threads[n]);
1832
1833 if (state == STATE_KILLED
1834#if NUM_CORES > 1
1835 && threads[n].name != THREAD_DESTRUCT
1836#endif
1837 )
1838 {
1839 /* Slot is empty - leave it locked and caller will unlock */
1840 break;
1841 }
1842
1843 /* Finished examining slot - no longer busy - unlock on multicore */
1844 UNLOCK_THREAD(&threads[n], state);
831 } 1845 }
832 1846
833 return -1; 1847#if NUM_CORES > 1
 1848 set_irq_level(oldlevel); /* Re-enable interrupts - this slot is
 1849 not accessible to them yet */
1850#endif
1851
1852 return n;
834} 1853}
835 1854
836/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled 1855
837 before calling. */ 1856/*---------------------------------------------------------------------------
838void wakeup_thread_irq_safe(struct thread_entry **list) 1857 * Place the current core in idle mode - woken up on interrupt or wake
1858 * request from another core.
1859 *---------------------------------------------------------------------------
1860 */
1861void core_idle(void)
839{ 1862{
840 struct core_entry *core = &cores[CURRENT_CORE]; 1863 const unsigned int core = CURRENT_CORE;
841 /* Switch wakeup lists and call wakeup_thread */ 1864 core_sleep(IF_COP(core,) &cores[core].waking.queue);
842 core->wakeup_list = &core->waking;
843 wakeup_thread(list);
844 /* Switch back to normal running list */
845 core->wakeup_list = &core->running;
846} 1865}
847 1866
848/*--------------------------------------------------------------------------- 1867/*---------------------------------------------------------------------------
@@ -854,44 +1873,23 @@ void wakeup_thread_irq_safe(struct thread_entry **list)
854 */ 1873 */
855struct thread_entry* 1874struct thread_entry*
856 create_thread(void (*function)(void), void* stack, int stack_size, 1875 create_thread(void (*function)(void), void* stack, int stack_size,
857 const char *name IF_PRIO(, int priority) 1876 unsigned flags, const char *name
858 IF_COP(, unsigned int core, bool fallback)) 1877 IF_PRIO(, int priority)
1878 IF_COP(, unsigned int core))
859{ 1879{
860 unsigned int i; 1880 unsigned int i;
861 unsigned int stacklen; 1881 unsigned int stacklen;
862 unsigned int *stackptr; 1882 unsigned int *stackptr;
863 int slot; 1883 int slot;
864 struct thread_entry *thread; 1884 struct thread_entry *thread;
865 1885 unsigned state;
866/*****
867 * Ugly code alert!
868 * To prevent ifdef hell while keeping the binary size down, we define
869 * core here if it hasn't been passed as a parameter
870 *****/
871#if NUM_CORES == 1
872#define core CPU
873#endif
874
875#if NUM_CORES > 1
876/* If the kernel hasn't initialised on the COP (most likely due to an old
877 * bootloader) then refuse to start threads on the COP
878 */
879 if ((core == COP) && !cores[core].kernel_running)
880 {
881 if (fallback)
882 return create_thread(function, stack, stack_size, name
883 IF_PRIO(, priority) IF_COP(, CPU, false));
884 else
885 return NULL;
886 }
887#endif
888 1886
889 slot = find_empty_thread_slot(); 1887 slot = find_empty_thread_slot();
890 if (slot < 0) 1888 if (slot >= MAXTHREADS)
891 { 1889 {
892 return NULL; 1890 return NULL;
893 } 1891 }
894 1892
895 /* Munge the stack to make it easy to spot stack overflows */ 1893 /* Munge the stack to make it easy to spot stack overflows */
896 stacklen = stack_size / sizeof(int); 1894 stacklen = stack_size / sizeof(int);
897 stackptr = stack; 1895 stackptr = stack;
@@ -905,11 +1903,19 @@ struct thread_entry*
905 thread->name = name; 1903 thread->name = name;
906 thread->stack = stack; 1904 thread->stack = stack;
907 thread->stack_size = stack_size; 1905 thread->stack_size = stack_size;
908 thread->statearg = 0; 1906 thread->bqp = NULL;
1907#if CONFIG_CORELOCK == SW_CORELOCK
1908 thread->bqnlp = NULL;
1909#endif
1910 thread->queue = NULL;
1911#ifdef HAVE_SCHEDULER_BOOSTCTRL
1912 thread->boosted = 0;
1913#endif
909#ifdef HAVE_PRIORITY_SCHEDULING 1914#ifdef HAVE_PRIORITY_SCHEDULING
910 thread->priority_x = 0; 1915 thread->priority_x = LOWEST_PRIORITY;
911 thread->priority = priority; 1916 thread->priority = priority;
912 cores[core].highest_priority = 100; 1917 thread->last_run = current_tick - priority * 8;
1918 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
913#endif 1919#endif
914 1920
915#if NUM_CORES > 1 1921#if NUM_CORES > 1
@@ -921,6 +1927,12 @@ struct thread_entry*
921 flush_icache(); 1927 flush_icache();
922 } 1928 }
923#endif 1929#endif
1930
1931 /* Thread is not on any timeout list but be a bit paranoid */
1932 thread->tmo.prev = NULL;
1933
1934 state = (flags & CREATE_THREAD_FROZEN) ?
1935 STATE_FROZEN : STATE_RUNNING;
924 1936
925 /* Align stack to an even 32 bit boundary */ 1937 /* Align stack to an even 32 bit boundary */
926 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3); 1938 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
@@ -928,50 +1940,149 @@ struct thread_entry*
928 /* Load the thread's context structure with needed startup information */ 1940 /* Load the thread's context structure with needed startup information */
929 THREAD_STARTUP_INIT(core, thread, function); 1941 THREAD_STARTUP_INIT(core, thread, function);
930 1942
931 add_to_list(&cores[core].running, thread); 1943 if (state == STATE_RUNNING)
1944 {
1945#if NUM_CORES > 1
1946 if (core != CURRENT_CORE)
1947 {
1948 /* Next task switch on other core moves thread to running list */
1949 core_schedule_wakeup(thread);
1950 }
1951 else
1952#endif
1953 {
1954 /* Place on running list immediately */
1955 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
1956 }
1957 }
932 1958
1959 /* remove lock and set state */
1960 UNLOCK_THREAD_SET_STATE(thread, state);
1961
933 return thread; 1962 return thread;
934#if NUM_CORES == 1
935#undef core
936#endif
937} 1963}
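A hedged usage sketch of the new signature: create the thread frozen, then release it with thread_thaw() once everything it depends on is ready. DEFAULT_STACK_SIZE and the priority value are assumed placeholders; IF_PRIO/IF_COP are the conditional-argument macros used throughout this file.

static long toy_stack[DEFAULT_STACK_SIZE/sizeof(long)];   /* assumed macro */

static void toy_worker(void)
{
    for (;;)
        sleep_thread(HZ);      /* placeholder main loop */
}

static void toy_start_worker(void)
{
    struct thread_entry *t =
        create_thread(toy_worker, toy_stack, sizeof(toy_stack),
                      CREATE_THREAD_FROZEN, "toy worker"
                      IF_PRIO(, PRIORITY_BACKGROUND)       /* placeholder  */
                      IF_COP(, CPU));

    if (t != NULL)
        thread_thaw(t);        /* STATE_FROZEN -> STATE_RUNNING */
}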
938 1964
939#ifdef HAVE_SCHEDULER_BOOSTCTRL 1965#ifdef HAVE_SCHEDULER_BOOSTCTRL
940void trigger_cpu_boost(void) 1966void trigger_cpu_boost(void)
941{ 1967{
942 if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg)) 1968 /* No IRQ disable necessary since the current thread cannot be blocked
1969 on an IRQ-accessible list */
1970 struct thread_entry *current = cores[CURRENT_CORE].running;
1971 unsigned state;
1972
1973 state = GET_THREAD_STATE(current);
1974
1975 if (current->boosted == 0)
943 { 1976 {
944 SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg); 1977 current->boosted = 1;
945 if (!boosted_threads) 1978 if (++boosted_threads == 1)
946 { 1979 {
947 cpu_boost(true); 1980 cpu_boost(true);
948 } 1981 }
949 boosted_threads++;
950 } 1982 }
1983
1984 UNLOCK_THREAD(current, state);
1985 (void)state;
951} 1986}
952#endif 1987#endif /* HAVE_SCHEDULER_BOOSTCTRL */
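The boost logic above and in unboost_thread() is a reference count gated by a per-thread flag, so the clock is raised on the 0 -> 1 transition and dropped again on 1 -> 0. A standalone sketch of the idea, with no slot locking shown:

static int toy_boost_count;          /* threads currently requesting boost */

static void toy_boost(unsigned char *flag)
{
    if (*flag == 0)
    {
        *flag = 1;
        if (++toy_boost_count == 1)
            cpu_boost(true);         /* first booster raises the clock */
    }
}

static void toy_unboost(unsigned char *flag)
{
    if (*flag != 0)
    {
        *flag = 0;
        if (--toy_boost_count == 0)
            cpu_boost(false);        /* last booster lets it drop again */
    }
}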
953 1988
954/*--------------------------------------------------------------------------- 1989/*---------------------------------------------------------------------------
955 * Remove a thread on the current core from the scheduler. 1990 * Remove a thread from the scheduler.
956 * Parameter is the ID as returned from create_thread(). 1991 * Parameter is the ID as returned from create_thread().
1992 *
1993 * Use with care on threads that are not under careful control as this may
1994 * leave various objects in an undefined state. When trying to kill a thread
 1995 * on another processor, be sure you know what it's doing and that it
 1996 * won't be switching cores itself.
957 *--------------------------------------------------------------------------- 1997 *---------------------------------------------------------------------------
958 */ 1998 */
959void remove_thread(struct thread_entry *thread) 1999void remove_thread(struct thread_entry *thread)
960{ 2000{
2001#if NUM_CORES > 1
2002 /* core is not constant here because of core switching */
2003 unsigned int core = CURRENT_CORE;
2004 unsigned int old_core = NUM_CORES;
2005#else
961 const unsigned int core = CURRENT_CORE; 2006 const unsigned int core = CURRENT_CORE;
2007#endif
2008 unsigned state;
2009 int oldlevel;
962 2010
963 if (thread == NULL) 2011 if (thread == NULL)
964 thread = cores[core].running; 2012 thread = cores[core].running;
965 2013
966 /* Free the entry by removing thread name. */ 2014 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
967 thread->name = NULL; 2015 state = GET_THREAD_STATE(thread);
2016
2017 if (state == STATE_KILLED)
2018 {
2019 goto thread_killed;
2020 }
2021
2022#if NUM_CORES > 1
2023 if (thread->core != core)
2024 {
2025 /* Switch cores and safely extract the thread there */
2026 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
2027 condition if the thread runs away to another processor. */
2028 unsigned int new_core = thread->core;
2029 const char *old_name = thread->name;
2030
2031 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2032 UNLOCK_THREAD(thread, state);
2033 set_irq_level(oldlevel);
2034
2035 old_core = switch_core(new_core);
2036
2037 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2038 state = GET_THREAD_STATE(thread);
2039
2040 core = new_core;
2041
2042 if (state == STATE_KILLED)
2043 {
2044 /* Thread suicided before we could kill it */
2045 goto thread_killed;
2046 }
2047
2048 /* Reopen slot - it's locked again anyway */
2049 thread->name = old_name;
2050
2051 if (thread->core != core)
2052 {
2053 /* We won't play thread tag - just forget it */
2054 UNLOCK_THREAD(thread, state);
2055 set_irq_level(oldlevel);
2056 goto thread_kill_abort;
2057 }
2058
2059 /* Perform the extraction and switch ourselves back to the original
2060 processor */
2061 }
2062#endif /* NUM_CORES > 1 */
2063
968#ifdef HAVE_PRIORITY_SCHEDULING 2064#ifdef HAVE_PRIORITY_SCHEDULING
969 cores[IF_COP2(thread->core)].highest_priority = 100; 2065 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
970#endif 2066#endif
971 2067 if (thread->tmo.prev != NULL)
972 if (thread == cores[IF_COP2(thread->core)].running)
973 { 2068 {
974 remove_from_list(&cores[IF_COP2(thread->core)].running, thread); 2069 /* Clean thread off the timeout list if a timeout check hasn't
2070 * run yet */
2071 remove_from_list_tmo(thread);
2072 }
2073
2074 if (thread == cores[core].running)
2075 {
2076 /* Suicide - thread has unconditional rights to do this */
2077 /* Maintain locks until switch-out */
2078#if NUM_CORES > 1
2079 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2080 cores[core].blk_ops.irq_level = oldlevel;
2081#else
2082 cores[core].irq_level = oldlevel;
2083#endif
2084 block_thread_on_l(NULL, thread, STATE_KILLED);
2085
975#if NUM_CORES > 1 2086#if NUM_CORES > 1
976 /* Switch to the idle stack if not on the main core (where "main" 2087 /* Switch to the idle stack if not on the main core (where "main"
977 * runs) */ 2088 * runs) */
@@ -982,55 +2093,347 @@ void remove_thread(struct thread_entry *thread)
982 2093
983 flush_icache(); 2094 flush_icache();
984#endif 2095#endif
985 switch_thread(false, NULL); 2096 /* Wake any threads waiting for this one to terminate */
2097 thread_queue_wake_no_listlock(&thread->queue);
2098 /* Switch tasks and never return */
2099 switch_thread(thread);
986 /* This should never and must never be reached - if it is, the 2100 /* This should never and must never be reached - if it is, the
987 * state is corrupted */ 2101 * state is corrupted */
988 THREAD_PANICF("remove_thread->K:*R", thread); 2102 THREAD_PANICF("remove_thread->K:*R", thread);
989 } 2103 }
990 2104
991 if (thread == cores[IF_COP2(thread->core)].sleeping) 2105#if NUM_CORES > 1
992 remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread); 2106 if (thread->name == THREAD_DESTRUCT)
2107 {
2108 /* Another core is doing this operation already */
2109 UNLOCK_THREAD(thread, state);
2110 set_irq_level(oldlevel);
2111 return;
2112 }
2113#endif
2114 if (cores[core].waking.queue != NULL)
2115 {
2116 /* Get any threads off the waking list and onto the running
2117 * list first - waking and running cannot be distinguished by
2118 * state */
2119 core_perform_wakeup(IF_COP(core));
2120 }
2121
2122 switch (state)
2123 {
2124 case STATE_RUNNING:
2125 /* Remove thread from ready to run tasks */
2126 remove_from_list_l(&cores[core].running, thread);
2127 break;
2128 case STATE_BLOCKED:
2129 case STATE_BLOCKED_W_TMO:
2130 /* Remove thread from the queue it's blocked on - including its
2131 * own if waiting there */
2132#if CONFIG_CORELOCK == SW_CORELOCK
2133 /* One or the other will be valid */
2134 if (thread->bqp == NULL)
2135 {
2136 remove_from_list_l(thread->bqnlp, thread);
2137 }
2138 else
2139#endif /* CONFIG_CORELOCK */
2140 {
2141 remove_from_list_l_locked(thread->bqp, thread);
2142 }
2143 break;
2144 /* Otherwise thread is killed or is frozen and hasn't run yet */
2145 }
2146
2147 /* If thread was waiting on itself, it will have been removed above.
2148 * The wrong order would result in waking the thread first and deadlocking
2149 * since the slot is already locked. */
2150 thread_queue_wake_no_listlock(&thread->queue);
2151
2152thread_killed: /* Thread was already killed */
2153 /* Removal complete - safe to unlock state and reenable interrupts */
2154 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
2155 set_irq_level(oldlevel);
2156
2157#if NUM_CORES > 1
2158thread_kill_abort: /* Something stopped us from killing the thread */
2159 if (old_core < NUM_CORES)
2160 {
2161 /* Did a removal on another processor's thread - switch back to
2162 native core */
2163 switch_core(old_core);
2164 }
2165#endif
2166}
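A minimal hypothetical example of the suicide path: a worker that removes itself when its job is done. remove_thread(NULL) operates on the current thread and does not return in that case.

static void toy_do_the_job(void)
{
    /* hypothetical payload */
}

static void toy_finite_worker(void)
{
    toy_do_the_job();
    remove_thread(NULL);    /* frees this slot and never returns */
}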
2167
2168/*---------------------------------------------------------------------------
2169 * Block the current thread until another thread terminates. A thread may
 2170 * wait on itself to terminate, which prevents it from running again and it
2171 * will need to be killed externally.
2172 * Parameter is the ID as returned from create_thread().
2173 *---------------------------------------------------------------------------
2174 */
2175void thread_wait(struct thread_entry *thread)
2176{
2177 const unsigned int core = CURRENT_CORE;
2178 struct thread_entry *current = cores[core].running;
2179 unsigned thread_state;
2180#if NUM_CORES > 1
2181 int oldlevel;
2182 unsigned current_state;
2183#endif
2184
2185 if (thread == NULL)
2186 thread = current;
2187
2188#if NUM_CORES > 1
2189 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2190#endif
2191
2192 thread_state = GET_THREAD_STATE(thread);
2193
2194#if NUM_CORES > 1
2195 /* We can't lock the same slot twice. The waitee will also lock itself
2196 first then the thread slots that will be locked and woken in turn.
2197 The same order must be observed here as well. */
2198 if (thread == current)
2199 {
2200 current_state = thread_state;
2201 }
993 else 2202 else
994 remove_from_list(NULL, thread); 2203 {
2204 current_state = GET_THREAD_STATE(current);
2205 }
2206#endif
2207
2208 if (thread_state != STATE_KILLED)
2209 {
2210#if NUM_CORES > 1
2211 cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
2212 cores[core].blk_ops.irq_level = oldlevel;
2213#endif
2214 /* Unlock the waitee state at task switch - not done for self-wait
 2215 because that would double-unlock the state and potentially
2216 corrupt another's busy assert on the slot */
2217 if (thread != current)
2218 {
2219#if CONFIG_CORELOCK == SW_CORELOCK
2220 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2221 cores[core].blk_ops.thread = thread;
2222#elif CONFIG_CORELOCK == CORELOCK_SWAP
2223 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2224 cores[core].blk_ops.var_u8p = &thread->state;
2225 cores[core].blk_ops.var_u8v = thread_state;
2226#endif
2227 }
2228 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
2229 switch_thread(current);
2230 return;
2231 }
2232
2233 /* Unlock both slots - obviously the current thread can't have
2234 STATE_KILLED so the above if clause will always catch a thread
2235 waiting on itself */
2236#if NUM_CORES > 1
2237 UNLOCK_THREAD(current, current_state);
2238 UNLOCK_THREAD(thread, thread_state);
2239 set_irq_level(oldlevel);
2240#endif
995} 2241}
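A hypothetical shutdown sequence built on thread_wait(): ask the worker to stop by whatever flag the caller uses, then block until its slot reaches STATE_KILLED.

static volatile bool toy_stop_requested = false;

static void toy_stop_worker(struct thread_entry *worker)
{
    toy_stop_requested = true;   /* the worker polls this and removes itself */
    thread_wait(worker);         /* returns once the worker has terminated   */
}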
996 2242
997#ifdef HAVE_PRIORITY_SCHEDULING 2243#ifdef HAVE_PRIORITY_SCHEDULING
2244/*---------------------------------------------------------------------------
2245 * Sets the thread's relative priority for the core it runs on.
2246 *---------------------------------------------------------------------------
2247 */
998int thread_set_priority(struct thread_entry *thread, int priority) 2248int thread_set_priority(struct thread_entry *thread, int priority)
999{ 2249{
1000 int old_priority; 2250 unsigned old_priority = (unsigned)-1;
1001 2251
1002 if (thread == NULL) 2252 if (thread == NULL)
1003 thread = cores[CURRENT_CORE].running; 2253 thread = cores[CURRENT_CORE].running;
1004 2254
1005 old_priority = thread->priority; 2255#if NUM_CORES > 1
1006 thread->priority = priority; 2256 /* Thread could be on any list and therefore on an interrupt accessible
1007 cores[IF_COP2(thread->core)].highest_priority = 100; 2257 one - disable interrupts */
1008 2258 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2259#endif
2260 unsigned state = GET_THREAD_STATE(thread);
2261
2262 /* Make sure it's not killed */
2263 if (state != STATE_KILLED)
2264 {
2265 old_priority = thread->priority;
2266 thread->priority = priority;
2267 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
2268 }
2269
2270#if NUM_CORES > 1
2271 UNLOCK_THREAD(thread, state);
2272 set_irq_level(oldlevel);
2273#endif
1009 return old_priority; 2274 return old_priority;
1010} 2275}
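A hedged example of a temporary priority change around a latency-sensitive section; the new priority value is a placeholder and the returned old value is restored afterwards.

static void toy_do_urgent_work(void)
{
    /* hypothetical latency-sensitive work */
}

static void toy_urgent_section(void)
{
    /* 1 is a placeholder near HIGHEST_PRIORITY; NULL means the current thread */
    int old = thread_set_priority(NULL, 1);

    toy_do_urgent_work();

    thread_set_priority(NULL, old);   /* put the previous priority back */
}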
1011 2276
2277/*---------------------------------------------------------------------------
2278 * Returns the current priority for a thread.
2279 *---------------------------------------------------------------------------
2280 */
1012int thread_get_priority(struct thread_entry *thread) 2281int thread_get_priority(struct thread_entry *thread)
1013{ 2282{
2283 /* Simple, quick probe. */
1014 if (thread == NULL) 2284 if (thread == NULL)
1015 thread = cores[CURRENT_CORE].running; 2285 thread = cores[CURRENT_CORE].running;
1016 2286
1017 return thread->priority; 2287 return (unsigned)thread->priority;
1018} 2288}
1019 2289
2290/*---------------------------------------------------------------------------
2291 * Yield that guarantees thread execution once per round regardless of
2292 * thread's scheduler priority - basically a transient realtime boost
2293 * without altering the scheduler's thread precedence.
2294 *
2295 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2296 *---------------------------------------------------------------------------
2297 */
1020void priority_yield(void) 2298void priority_yield(void)
1021{ 2299{
1022 struct thread_entry *thread = cores[CURRENT_CORE].running; 2300 const unsigned int core = CURRENT_CORE;
1023 thread->priority_x = 1; 2301 struct thread_entry *thread = cores[core].running;
1024 switch_thread(true, NULL); 2302 thread->priority_x = HIGHEST_PRIORITY;
1025 thread->priority_x = 0; 2303 switch_thread(NULL);
2304 thread->priority_x = LOWEST_PRIORITY;
2305 cores[core].highest_priority = LOWEST_PRIORITY;
1026} 2306}
1027#endif /* HAVE_PRIORITY_SCHEDULING */ 2307#endif /* HAVE_PRIORITY_SCHEDULING */
1028 2308
2309/* Resumes a frozen thread - similar logic to wakeup_thread except that
2310 the thread is on no scheduler list at all. It exists simply by virtue of
2311 the slot having a state of STATE_FROZEN. */
2312void thread_thaw(struct thread_entry *thread)
2313{
2314#if NUM_CORES > 1
2315 /* Thread could be on any list and therefore on an interrupt accessible
2316 one - disable interrupts */
2317 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2318#endif
2319 unsigned state = GET_THREAD_STATE(thread);
2320
2321 if (state == STATE_FROZEN)
2322 {
2323 const unsigned int core = CURRENT_CORE;
2324#if NUM_CORES > 1
2325 if (thread->core != core)
2326 {
2327 core_schedule_wakeup(thread);
2328 }
2329 else
2330#endif
2331 {
2332 add_to_list_l(&cores[core].running, thread);
2333 }
2334
2335 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2336 return;
2337 }
2338
2339#if NUM_CORES > 1
2340 UNLOCK_THREAD(thread, state);
2341 set_irq_level(oldlevel);
2342#endif
2343}
2344
2345/*---------------------------------------------------------------------------
2346 * Return the ID of the currently executing thread.
2347 *---------------------------------------------------------------------------
2348 */
1029struct thread_entry * thread_get_current(void) 2349struct thread_entry * thread_get_current(void)
1030{ 2350{
1031 return cores[CURRENT_CORE].running; 2351 return cores[CURRENT_CORE].running;
1032} 2352}
1033 2353
2354#if NUM_CORES > 1
2355/*---------------------------------------------------------------------------
2356 * Switch the processor that the currently executing thread runs on.
2357 *---------------------------------------------------------------------------
2358 */
2359unsigned int switch_core(unsigned int new_core)
2360{
2361 const unsigned int core = CURRENT_CORE;
2362 struct thread_entry *current = cores[core].running;
2363 struct thread_entry *w;
2364 int oldlevel;
2365
2366 /* Interrupts can access the lists that will be used - disable them */
2367 unsigned state = GET_THREAD_STATE(current);
2368
2369 if (core == new_core)
2370 {
2371 /* No change - just unlock everything and return same core */
2372 UNLOCK_THREAD(current, state);
2373 return core;
2374 }
2375
2376 /* Get us off the running list for the current core */
2377 remove_from_list_l(&cores[core].running, current);
2378
2379 /* Stash return value (old core) in a safe place */
2380 current->retval = core;
2381
2382 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2383 * the other core will likely attempt a removal from the wrong list! */
2384 if (current->tmo.prev != NULL)
2385 {
2386 remove_from_list_tmo(current);
2387 }
2388
2389 /* Change the core number for this thread slot */
2390 current->core = new_core;
2391
2392 /* Do not use core_schedule_wakeup here since this will result in
2393 * the thread starting to run on the other core before being finished on
2394 * this one. Delay the wakeup list unlock to keep the other core stuck
2395 * until this thread is ready. */
2396 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2397 w = LOCK_LIST(&cores[new_core].waking);
2398 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
2399
2400 /* Make a callback into device-specific code, unlock the wakeup list so
2401 * that execution may resume on the new core, unlock our slot and finally
2402 * restore the interrupt level */
2403 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
2404 TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
2405 cores[core].blk_ops.irq_level = oldlevel;
2406 cores[core].blk_ops.list_p = &cores[new_core].waking;
2407#if CONFIG_CORELOCK == CORELOCK_SWAP
2408 cores[core].blk_ops.state = STATE_RUNNING;
2409 cores[core].blk_ops.list_v = w;
2410#endif
2411
2412#ifdef HAVE_PRIORITY_SCHEDULING
2413 current->priority_x = HIGHEST_PRIORITY;
2414 cores[core].highest_priority = LOWEST_PRIORITY;
2415#endif
 2416 /* Do the stack switching, cache maintenance and switch_thread call -
2417 requires native code */
2418 switch_thread_core(core, current);
2419
2420#ifdef HAVE_PRIORITY_SCHEDULING
2421 current->priority_x = LOWEST_PRIORITY;
2422 cores[current->core].highest_priority = LOWEST_PRIORITY;
2423#endif
2424
2425 /* Finally return the old core to caller */
2426 return current->retval;
2427 (void)state;
2428}
2429#endif /* NUM_CORES > 1 */
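A hypothetical use of switch_core(): migrate to the coprocessor for a burst of work, then migrate back to whichever core the caller started on.

#if NUM_CORES > 1
static void toy_run_on_cop(void (*job)(void))
{
    unsigned int prev = switch_core(COP);   /* returns the core we came from */

    job();                                  /* now executing on COP */

    switch_core(prev);                      /* hop back */
}
#endif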
2430
2431/*---------------------------------------------------------------------------
2432 * Initialize threading API. This assumes interrupts are not yet enabled. On
2433 * multicore setups, no core is allowed to proceed until create_thread calls
2434 * are safe to perform.
2435 *---------------------------------------------------------------------------
2436 */
1034void init_threads(void) 2437void init_threads(void)
1035{ 2438{
1036 const unsigned int core = CURRENT_CORE; 2439 const unsigned int core = CURRENT_CORE;
@@ -1038,36 +2441,43 @@ void init_threads(void)
1038 2441
1039 /* CPU will initialize first and then sleep */ 2442 /* CPU will initialize first and then sleep */
1040 slot = find_empty_thread_slot(); 2443 slot = find_empty_thread_slot();
1041#if THREAD_EXTRA_CHECKS 2444
1042 /* This can fail if, for example, .bss isn't zero'ed out by the loader 2445 if (slot >= MAXTHREADS)
1043 or threads is in the wrong section. */ 2446 {
1044 if (slot < 0) { 2447 /* WTF? There really must be a slot available at this stage.
1045 panicf("uninitialized threads[]"); 2448 * This can fail if, for example, .bss isn't zeroed out by the loader
2449 * or threads is in the wrong section. */
2450 THREAD_PANICF("init_threads->no slot", NULL);
1046 } 2451 }
1047#endif
1048 2452
1049 cores[core].sleeping = NULL;
1050 cores[core].running = NULL; 2453 cores[core].running = NULL;
1051 cores[core].waking = NULL; 2454 cores[core].timeout = NULL;
1052 cores[core].wakeup_list = &cores[core].running; 2455 thread_queue_init(&cores[core].waking);
1053#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 2456 cores[core].next_tmo_check = current_tick; /* Something not in the past */
1054 cores[core].switch_to_irq_level = STAY_IRQ_LEVEL; 2457#if NUM_CORES > 1
2458 cores[core].blk_ops.flags = 0;
2459#else
2460 cores[core].irq_level = STAY_IRQ_LEVEL;
1055#endif 2461#endif
1056 threads[slot].name = main_thread_name; 2462 threads[slot].name = main_thread_name;
1057 threads[slot].statearg = 0; 2463 UNLOCK_THREAD_SET_STATE(&threads[slot], STATE_RUNNING); /* No sync worries yet */
1058 threads[slot].context.start = 0; /* core's main thread already running */ 2464 threads[slot].context.start = NULL; /* core's main thread already running */
2465 threads[slot].tmo.prev = NULL;
2466 threads[slot].queue = NULL;
2467#ifdef HAVE_SCHEDULER_BOOSTCTRL
2468 threads[slot].boosted = 0;
2469#endif
1059#if NUM_CORES > 1 2470#if NUM_CORES > 1
1060 threads[slot].core = core; 2471 threads[slot].core = core;
1061#endif 2472#endif
1062#ifdef HAVE_PRIORITY_SCHEDULING 2473#ifdef HAVE_PRIORITY_SCHEDULING
1063 threads[slot].priority = PRIORITY_USER_INTERFACE; 2474 threads[slot].priority = PRIORITY_USER_INTERFACE;
1064 threads[slot].priority_x = 0; 2475 threads[slot].priority_x = LOWEST_PRIORITY;
1065 cores[core].highest_priority = 100; 2476 cores[core].highest_priority = LOWEST_PRIORITY;
1066#endif 2477#endif
1067 add_to_list(&cores[core].running, &threads[slot]); 2478
1068 2479 add_to_list_l(&cores[core].running, &threads[slot]);
1069 /* In multiple core setups, each core has a different stack. There is 2480
1070 * probably a much better way to do this. */
1071 if (core == CPU) 2481 if (core == CPU)
1072 { 2482 {
1073#ifdef HAVE_SCHEDULER_BOOSTCTRL 2483#ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -1076,22 +2486,19 @@ void init_threads(void)
1076 threads[slot].stack = stackbegin; 2486 threads[slot].stack = stackbegin;
1077 threads[slot].stack_size = (int)stackend - (int)stackbegin; 2487 threads[slot].stack_size = (int)stackend - (int)stackbegin;
1078#if NUM_CORES > 1 /* This code path will not be run on single core targets */ 2488#if NUM_CORES > 1 /* This code path will not be run on single core targets */
1079 /* Mark CPU initialized */
1080 cores[CPU].kernel_running = true;
1081 /* Do _not_ wait for the COP to init in the bootloader because it doesn't */
1082 /* TODO: HAL interface for this */ 2489 /* TODO: HAL interface for this */
1083 /* Wake up coprocessor and let it initialize kernel and threads */ 2490 /* Wake up coprocessor and let it initialize kernel and threads */
1084 COP_CTL = PROC_WAKE; 2491 COP_CTL = PROC_WAKE;
1085 /* Sleep until finished */ 2492 /* Sleep until finished */
1086 CPU_CTL = PROC_SLEEP; 2493 CPU_CTL = PROC_SLEEP;
1087 } 2494 }
1088 else 2495 else
1089 { 2496 {
1090 /* Initial stack is the COP idle stack */ 2497 /* Initial stack is the COP idle stack */
1091 threads[slot].stack = cop_idlestackbegin; 2498 threads[slot].stack = cop_idlestackbegin;
1092 threads[slot].stack_size = IDLE_STACK_SIZE; 2499 threads[slot].stack_size = IDLE_STACK_SIZE;
1093 /* Mark COP initialized */ 2500 /* Mark COP initialized */
1094 cores[COP].kernel_running = true; 2501 cores[COP].blk_ops.flags = 0;
1095 /* Get COP safely primed inside switch_thread where it will remain 2502 /* Get COP safely primed inside switch_thread where it will remain
1096 * until a thread actually exists on it */ 2503 * until a thread actually exists on it */
1097 CPU_CTL = PROC_WAKE; 2504 CPU_CTL = PROC_WAKE;
@@ -1100,19 +2507,28 @@ void init_threads(void)
1100 } 2507 }
1101} 2508}
1102 2509
2510/*---------------------------------------------------------------------------
2511 * Returns the maximum percentage of stack a thread ever used while running.
 2512 * NOTE: Some large buffer allocations that don't use enough of the buffer to
2513 * overwrite stackptr[0] will not be seen.
2514 *---------------------------------------------------------------------------
2515 */
1103int thread_stack_usage(const struct thread_entry *thread) 2516int thread_stack_usage(const struct thread_entry *thread)
1104{ 2517{
1105 unsigned int i;
1106 unsigned int *stackptr = thread->stack; 2518 unsigned int *stackptr = thread->stack;
2519 int stack_words = thread->stack_size / sizeof (int);
2520 int i, usage = 0;
1107 2521
1108 for (i = 0;i < thread->stack_size/sizeof(int);i++) 2522 for (i = 0; i < stack_words; i++)
1109 { 2523 {
1110 if (stackptr[i] != DEADBEEF) 2524 if (stackptr[i] != DEADBEEF)
2525 {
2526 usage = ((stack_words - i) * 100) / stack_words;
1111 break; 2527 break;
2528 }
1112 } 2529 }
1113 2530
1114 return ((thread->stack_size - i * sizeof(int)) * 100) / 2531 return usage;
1115 thread->stack_size;
1116} 2532}
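The same watermark idea in isolation: the stack is pre-filled with DEADBEEF at creation, so the first word from the bottom that no longer holds the sentinel marks the deepest the stack has ever grown.

#define TOY_SENTINEL 0xDEADBEEFu

/* At creation time: fill the whole stack with the sentinel. */
static void toy_watermark_init(unsigned int *stack, int words)
{
    int i;
    for (i = 0; i < words; i++)
        stack[i] = TOY_SENTINEL;
}

/* Later: report how much of the stack has ever been written. */
static int toy_watermark_percent(const unsigned int *stack, int words)
{
    int i;
    for (i = 0; i < words; i++)
        if (stack[i] != TOY_SENTINEL)
            return ((words - i) * 100) / words;
    return 0;    /* stack apparently untouched */
}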
1117 2533
1118#if NUM_CORES > 1 2534#if NUM_CORES > 1
@@ -1139,9 +2555,14 @@ int idle_stack_usage(unsigned int core)
1139} 2555}
1140#endif 2556#endif
1141 2557
1142int thread_get_status(const struct thread_entry *thread) 2558/*---------------------------------------------------------------------------
2559 * Returns the current thread status. This is a snapshot for debugging and
2560 * does not do any slot synchronization so it could return STATE_BUSY.
2561 *---------------------------------------------------------------------------
2562 */
2563unsigned thread_get_status(const struct thread_entry *thread)
1143{ 2564{
1144 return GET_STATE(thread->statearg); 2565 return thread->state;
1145} 2566}
1146 2567
1147/*--------------------------------------------------------------------------- 2568/*---------------------------------------------------------------------------
@@ -1163,7 +2584,7 @@ void thread_get_name(char *buffer, int size,
1163 /* Display thread name if one or ID if none */ 2584 /* Display thread name if one or ID if none */
1164 const char *name = thread->name; 2585 const char *name = thread->name;
1165 const char *fmt = "%s"; 2586 const char *fmt = "%s";
1166 if (name == NULL || *name == '\0') 2587 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
1167 { 2588 {
1168 name = (const char *)thread; 2589 name = (const char *)thread;
1169 fmt = "%08lX"; 2590 fmt = "%08lX";