path: root/firmware/kernel/thread.c
Diffstat (limited to 'firmware/kernel/thread.c')
-rw-r--r--  firmware/kernel/thread.c  2442
1 files changed, 2442 insertions, 0 deletions
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
new file mode 100644
index 0000000000..43ff584a68
--- /dev/null
+++ b/firmware/kernel/thread.c
@@ -0,0 +1,2442 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

#ifdef HAVE_SIGALTSTACK_THREADS
/*
 * The sp check in glibc __longjmp_chk() will cause
 * a fatal error when switching threads via longjmp().
 */
#undef _FORTIFY_SOURCE
#endif

#include <stdbool.h>
#include <stdio.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#include "core_alloc.h"
#include "gcc_extensions.h"
#include "corelock.h"

/****************************************************************************
 *                            ATTENTION!!                                   *
 *    See notes below on implementing processor-specific portions!          *
 ***************************************************************************/

/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif

/**
 * General locking order to guarantee progress. Order must be observed but
 * not all stages are necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
 *
 * 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
 * also accesses one of the objects farther down the list. Any non-blocking
 * synchronization done may already have a lock on something during normal
 * execution and if an interrupt handler running on the same processor as
 * the one that has the resource locked were to attempt to access the
 * resource, the interrupt handler would wait forever for an unlock that
 * will never happen. There is no danger if the interrupt occurs on
 * a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
 *
 * 2) Kernel Object
 * 1) May be needed beforehand if the kernel object allows dual-use such as
 * event queues. The kernel object must have a scheme to protect itself from
 * access by another processor and is responsible for serializing the calls
 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
 * other. Objects' queues are also protected here.
 *
 * 3) Thread Slot
 * This locks access to the thread's slot such that its state cannot be
 * altered by another processor when a state change is in progress such as
 * when it is in the process of going on a blocked list. An attempt to wake
 * a thread while it is still blocking will likely desync its state with
 * the other resources used for that state.
 *
 * 4) Core Lists
 * These lists are specific to a particular processor core and are accessible
 * by all processor cores and interrupt handlers. The running (rtr) list is
 * the prime example where a thread may be added by any means.
 */

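/* Illustrative only - a hypothetical wakeup path that observes the full
 * 1) -> 4) order above; the object "q" and its corelock field are made-up
 * names, the locking calls are the real ones used throughout this file. */
#if 0
    int oldlevel = disable_irq_save();  /* 1) IRQ */
    corelock_lock(&q->cl);              /* 2) kernel object */
    LOCK_THREAD(thread);                /* 3) thread slot */
    core_schedule_wakeup(thread);       /* 4) core lists (takes RTR lock) */
    UNLOCK_THREAD(thread);
    corelock_unlock(&q->cl);
    restore_irq(oldlevel);              /* unwind in reverse order */
#endif
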
/*---------------------------------------------------------------------------
 * Processor specific: core_sleep/core_wake/misc. notes
 *
 * ARM notes:
 * FIQ is not dealt with by the scheduler code and is simply restored if it
 * must be masked for some reason - because threading modifies a register
 * that FIQ may also modify and there's no way to accomplish it atomically.
 * s3c2440 is such a case.
 *
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL is
 * not in general safe. Special cases may be constructed on a per-source
 * basis and blocking operations are not available.
 *
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000us!). May require assembly and careful instruction ordering.
 *
 * 1) On multicore, stay awake if directed to do so by another. If so, goto
 *    step 4.
 * 2) If processor requires, atomically reenable interrupts and perform step
 *    3.
 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
 *    on Coldfire) goto step 5.
 * 4) Enable interrupts.
 * 5) Exit procedure.
 *
 * core_wake and multiprocessor notes for sleep/wake coordination:
 * If possible, to wake up another processor, the forcing of an interrupt on
 * the woken core by the waker core is the easiest way to ensure a non-
 * delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
 * PP targets at the moment).
 *---------------------------------------------------------------------------
 */

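/* A minimal sketch of the 1) - 5) recipe above for a single-core target,
 * assuming a hypothetical sleep_core_now() that wakes on any interrupt
 * while IRQs stay masked; each port supplies the real version in
 * asm/thread.c. */
#if 0
static inline void core_sleep(void)
{
    sleep_core_now(); /* 3) sleep the core */
    enable_irq();     /* 4) service whatever woke us */
}                     /* 5) exit procedure */
#endif
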
/* Cast to the machine pointer size, whose size could be < 4 or > 32
 * (someday :). */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

static const char main_thread_name[] = "main";
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
#else
extern uintptr_t *stackbegin;
extern uintptr_t *stackend;
#endif

static inline void core_sleep(IF_COP_VOID(unsigned int core))
    __attribute__((always_inline));

void check_tmo_threads(void)
    __attribute__((noinline));

static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
    __attribute__((always_inline));

static void add_to_list_tmo(struct thread_entry *thread)
    __attribute__((noinline));

static void core_schedule_wakeup(struct thread_entry *thread)
    __attribute__((noinline));

#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
    __attribute__((always_inline));
#endif

static void thread_stkov(struct thread_entry *thread)
    __attribute__((noinline));

static inline void store_context(void* addr)
    __attribute__((always_inline));

static inline void load_context(const void* addr)
    __attribute__((always_inline));

#if NUM_CORES > 1
static void thread_final_exit_do(struct thread_entry *current)
    __attribute__((noinline)) NORETURN_ATTR USED_ATTR;
#else
static inline void thread_final_exit(struct thread_entry *current)
    __attribute__((always_inline)) NORETURN_ATTR;
#endif

void switch_thread(void)
    __attribute__((noinline));

/****************************************************************************
 * Processor/OS-specific section - include necessary core support
 */


#include "asm/thread.c"

#if defined (CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */

#ifndef IF_NO_SKIP_YIELD
#define IF_NO_SKIP_YIELD(...)
#endif

/*
 * End Processor-specific section
 ***************************************************************************/

#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
static void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}
#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    static char name[32];
    thread_get_name(name, 32, thread);
    panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */

/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ unsigned int _core = (thread)->core; \
       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
    ({ })
#define TRY_LOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD(thread) \
    ({ })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
    ({ })
#endif

/* RTR list */
#define RTR_LOCK(core) \
    ({ corelock_lock(&cores[core].rtr_cl); })
#define RTR_UNLOCK(core) \
    ({ corelock_unlock(&cores[core].rtr_cl); })

#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(core, priority) \
    prio_add_entry(&cores[core].rtr, (priority))

#define rtr_subtract_entry(core, priority) \
    prio_subtract_entry(&cores[core].rtr, (priority))

#define rtr_move_entry(core, from, to) \
    prio_move_entry(&cores[core].rtr, (from), (to))
#else
#define rtr_add_entry(core, priority)
#define rtr_add_entry_inl(core, priority)
#define rtr_subtract_entry(core, priority)
#define rtr_subtract_entry_inl(core, priority)
#define rtr_move_entry(core, from, to)
#define rtr_move_entry_inl(core, from, to)
#endif

/*---------------------------------------------------------------------------
 * Thread list structure - circular:
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *    +->+---+->+---+->+---+->+---+--+
 *    |                              |
 *    +------------------------------+
 *---------------------------------------------------------------------------
 */

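/* Illustrative only: for a single-element list both links point back at
 * the thread itself, which is exactly what add_to_list_l() below sets up
 * for the empty-list case. */
#if 0
    struct thread_entry *list = NULL;
    add_to_list_l(&list, t); /* t is some thread_entry */
    /* now: list == t, t->l.next == t, t->l.prev == t */
#endif
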
/*---------------------------------------------------------------------------
 * Adds a thread to a list of threads using "insert last". Uses the "l"
 * links.
 *---------------------------------------------------------------------------
 */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *l = *list;

    if (l == NULL)
    {
        /* Insert into unoccupied list */
        thread->l.prev = thread;
        thread->l.next = thread;
        *list = thread;
        return;
    }

    /* Insert last */
    thread->l.prev = l->l.prev;
    thread->l.next = l;
    l->l.prev->l.next = thread;
    l->l.prev = thread;
}

/*---------------------------------------------------------------------------
 * Removes a thread from a list of threads. Uses the "l" links.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *prev, *next;

    next = thread->l.next;

    if (thread == next)
    {
        /* The only item */
        *list = NULL;
        return;
    }

    if (thread == *list)
    {
        /* List becomes next item */
        *list = next;
    }

    prev = thread->l.prev;

    /* Fix links to jump over the removed entry. */
    next->l.prev = prev;
    prev->l.next = next;
}

/*---------------------------------------------------------------------------
 * Timeout list structure - circular reverse (to make "remove item" O(1)),
 * NULL-terminated forward (to ease the far more common forward traversal):
 *    +------------------------------+
 *    |                              |
 *    +--+---+<-+---+<-+---+<-+---+<-+
 * Head->| T |  | T |  | T |  | T |
 *       +---+->+---+->+---+->+---+-X
 *---------------------------------------------------------------------------
 */

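/* Illustrative only: with threads A, B, C queued in that order the links are
 * A.next == B, B.next == C, C.next == NULL (forward, NULL-terminated) and
 * B.prev == A, C.prev == B, A.prev == C (reverse, circular), so the tail is
 * reachable from the head in O(1). tmo.prev == NULL marks "not listed". */
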
/*---------------------------------------------------------------------------
 * Add a thread to the core's timeout list by linking the pointers in its
 * tmo structure.
 *---------------------------------------------------------------------------
 */
static void add_to_list_tmo(struct thread_entry *thread)
{
    struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
    THREAD_ASSERT(thread->tmo.prev == NULL,
                  "add_to_list_tmo->already listed", thread);

    thread->tmo.next = NULL;

    if (tmo == NULL)
    {
        /* Insert into unoccupied list */
        thread->tmo.prev = thread;
        cores[IF_COP_CORE(thread->core)].timeout = thread;
        return;
    }

    /* Insert Last */
    thread->tmo.prev = tmo->tmo.prev;
    tmo->tmo.prev->tmo.next = thread;
    tmo->tmo.prev = thread;
}

/*---------------------------------------------------------------------------
 * Remove a thread from the core's timeout list by unlinking the pointers in
 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
 * is cancelled.
 *---------------------------------------------------------------------------
 */
static void remove_from_list_tmo(struct thread_entry *thread)
{
    struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
    struct thread_entry *prev = thread->tmo.prev;
    struct thread_entry *next = thread->tmo.next;

    THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);

    if (next != NULL)
        next->tmo.prev = prev;

    if (thread == *list)
    {
        /* List becomes next item and empty if next == NULL */
        *list = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
    else
    {
        if (next == NULL)
            (*list)->tmo.prev = prev;
        prev->tmo.next = next;
        /* Mark as unlisted */
        thread->tmo.prev = NULL;
    }
}


#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
 * Priority distribution structure (one category for each possible priority):
 *
 *       +----+----+----+ ... +-----+
 * hist: | F0 | F1 | F2 |     | F31 |
 *       +----+----+----+ ... +-----+
 * mask: | b0 | b1 | b2 |     | b31 |
 *       +----+----+----+ ... +-----+
 *
 * F = count of threads at priority category n (frequency)
 * b = bitmask of non-zero priority categories (occupancy)
 *
 *        / if F[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
 *
 *---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
 *
 * Mn = mutex n, Tn = thread n
 *
 * A lower priority thread inherits the priority of the highest priority
 * thread blocked waiting for it to complete an action (such as release a
 * mutex or respond to a message via queue_send):
 *
 * 1) T2->M1->T1
 *
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1 then T1 inherits the priority of T2.
 *
 * 2) T3
 *    \/
 *    T2->M1->T1
 *
 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
 * T1 inherits the higher priority of T2 and T3.
 *
 * 3) T3->M2->T2->M1->T1
 *
 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
 * then T1 inherits the priority of T3 through T2.
 *
 * Blocking chains can grow arbitrarily complex (though it's best that they
 * not form at all very often :) and build up from these units.
 *---------------------------------------------------------------------------
 */

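/* Worked example (illustrative): after prio_add_entry() of priorities 16,
 * 16 and 20 on an empty distribution:
 *   hist[16] == 2, hist[20] == 1, every other count 0
 *   mask     == (1 << 16) | (1 << 20)
 * find_first_set_bit(mask) then yields 16, the highest (numerically lowest)
 * priority present. */
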
/*---------------------------------------------------------------------------
 * Increment frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;
    /* Enough size/instruction count difference for ARM makes it worth it to
     * use different code (192 bytes for ARM). Only thing better is ASM. */
#ifdef CPU_ARM
    count = pd->hist[priority];
    if (++count == 1)
        pd->mask |= 1 << priority;
    pd->hist[priority] = count;
#else /* This one's better for Coldfire */
    if ((count = ++pd->hist[priority]) == 1)
        pd->mask |= 1 << priority;
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Decrement frequency at category "priority"
 *---------------------------------------------------------------------------
 */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int count;

#ifdef CPU_ARM
    count = pd->hist[priority];
    if (--count == 0)
        pd->mask &= ~(1 << priority);
    pd->hist[priority] = count;
#else
    if ((count = --pd->hist[priority]) == 0)
        pd->mask &= ~(1 << priority);
#endif

    return count;
}

/*---------------------------------------------------------------------------
 * Remove from one category and add to another
 *---------------------------------------------------------------------------
 */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    uint32_t mask = pd->mask;

#ifdef CPU_ARM
    unsigned int count;

    count = pd->hist[from];
    if (--count == 0)
        mask &= ~(1 << from);
    pd->hist[from] = count;

    count = pd->hist[to];
    if (++count == 1)
        mask |= 1 << to;
    pd->hist[to] = count;
#else
    if (--pd->hist[from] == 0)
        mask &= ~(1 << from);

    if (++pd->hist[to] == 1)
        mask |= 1 << to;
#endif

    pd->mask = mask;
}

/*---------------------------------------------------------------------------
 * Change the priority and rtr entry for a running thread
 *---------------------------------------------------------------------------
 */
static inline void set_running_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    RTR_LOCK(core);
    rtr_move_entry(core, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(core);
}

/*---------------------------------------------------------------------------
 * Finds the highest priority thread in a list of threads. If the list is
 * empty, PRIORITY_IDLE is returned.
 *
 * It is possible to use the struct priority_distribution within an object
 * instead of scanning the remaining threads in the list; as a compromise,
 * the per-object memory overhead is saved at a slight speed penalty under
 * high contention.
 *---------------------------------------------------------------------------
 */
static int find_highest_priority_in_list_l(
    struct thread_entry * const thread)
{
    if (LIKELY(thread != NULL))
    {
        /* Go through the list until ending up at the initial thread */
        int highest_priority = thread->priority;
        struct thread_entry *curr = thread;

        do
        {
            int priority = curr->priority;

            if (priority < highest_priority)
                highest_priority = priority;

            curr = curr->l.next;
        }
        while (curr != thread);

        return highest_priority;
    }

    return PRIORITY_IDLE;
}

/*---------------------------------------------------------------------------
 * Register priority with the blocking system and bubble it down the chain,
 * if any, until we reach the end or something is already equal or higher.
 *
 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
 * targets but that same action also guarantees a circular block anyway and
 * those are prevented, right? :-)
 *---------------------------------------------------------------------------
 */
static struct thread_entry *
    blocker_inherit_priority(struct thread_entry *current)
{
    const int priority = current->priority;
    struct blocker *bl = current->blocker;
    struct thread_entry * const tstart = current;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since the object protection is held */
    LOCK_THREAD(bl_t);

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority >= bl_pr)
            break; /* Object priority already high enough */

        bl->priority = priority;

        /* Add this one */
        prio_add_entry(&bl_t->pdist, priority);

        if (bl_pr < PRIORITY_IDLE)
        {
            /* Not first waiter - subtract old one */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
        }

        if (priority >= bl_t->priority)
            break; /* Thread priority high enough */

        if (bl_t->state == STATE_RUNNING)
        {
            /* Blocking thread is a running thread therefore there are no
             * further blockers. Change the "run queue" on which it
             * resides. */
            set_running_thread_priority(bl_t, priority);
            break;
        }

        bl_t->priority = priority;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(current);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        current = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

    return current;
}

/*---------------------------------------------------------------------------
 * Readjust priorities when waking a thread blocked waiting for another,
 * in essence "releasing" the thread's effect on the object owner. Can be
 * performed from any context.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread)
{
    const int priority = thread->priority;
    struct blocker *bl = thread->blocker;
    struct thread_entry * const tstart = thread;
    struct thread_entry *bl_t = bl->thread;

    /* Blocker cannot change since object will be locked */
    LOCK_THREAD(bl_t);

    thread->blocker = NULL; /* Thread not blocked */

    for (;;)
    {
        struct thread_entry *next;
        int bl_pr = bl->priority;

        if (priority > bl_pr)
            break; /* Object priority higher */

        next = *thread->bqp;

        if (next == NULL)
        {
            /* No more threads in queue */
            prio_subtract_entry(&bl_t->pdist, bl_pr);
            bl->priority = PRIORITY_IDLE;
        }
        else
        {
            /* Check list for highest remaining priority */
            int queue_pr = find_highest_priority_in_list_l(next);

            if (queue_pr == bl_pr)
                break; /* Object priority not changing */

            /* Change queue priority */
            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
            bl->priority = queue_pr;
        }

        if (bl_pr > bl_t->priority)
            break; /* thread priority is higher */

        bl_pr = find_first_set_bit(bl_t->pdist.mask);

        if (bl_pr == bl_t->priority)
            break; /* Thread priority not changing */

        if (bl_t->state == STATE_RUNNING)
        {
            /* No further blockers */
            set_running_thread_priority(bl_t, bl_pr);
            break;
        }

        bl_t->priority = bl_pr;

        /* If blocking thread has a blocker, apply transitive inheritance */
        bl = bl_t->blocker;

        if (bl == NULL)
            break; /* End of chain or object doesn't support inheritance */

        next = bl->thread;

        if (UNLIKELY(next == tstart))
            break; /* Full-circle - deadlock! */

        UNLOCK_THREAD(thread);

#if NUM_CORES > 1
        for (;;)
        {
            LOCK_THREAD(next);

            /* Blocker could change - retest condition */
            if (LIKELY(bl->thread == next))
                break;

            UNLOCK_THREAD(next);
            next = bl->thread;
        }
#endif
        thread = bl_t;
        bl_t = next;
    }

    UNLOCK_THREAD(bl_t);

#if NUM_CORES > 1
    if (UNLIKELY(thread != tstart))
    {
        /* Relock original if it changed */
        LOCK_THREAD(tstart);
    }
#endif

    return cores[CURRENT_CORE].running;
}

/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
 * blocking chains may only unblock from the very end.
 *
 * Only the owning thread itself may call this and so the assumption that
 * it is the running thread is made.
 *---------------------------------------------------------------------------
 */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner */
    struct blocker *bl = thread->blocker;
    struct thread_entry *bl_t = bl->thread;
    struct thread_entry *next;
    int bl_pr;

    THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
                  "UPPT->wrong thread", cores[CURRENT_CORE].running);

    LOCK_THREAD(bl_t);

    bl_pr = bl->priority;

    /* Remove the object's boost from the owning thread */
    if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
        bl_pr <= bl_t->priority)
    {
        /* No more threads at this priority are waiting and the old level is
         * at least the thread level */
        int priority = find_first_set_bit(bl_t->pdist.mask);

        if (priority != bl_t->priority)
        {
            /* Adjust this thread's priority */
            set_running_thread_priority(bl_t, priority);
        }
    }

    next = *thread->bqp;

    if (LIKELY(next == NULL))
    {
        /* Expected shortcut - no more waiters */
        bl_pr = PRIORITY_IDLE;
    }
    else
    {
        if (thread->priority <= bl_pr)
        {
            /* Need to scan threads remaining in queue */
            bl_pr = find_highest_priority_in_list_l(next);
        }

        if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
            bl_pr < thread->priority)
        {
            /* Thread priority must be raised */
            thread->priority = bl_pr;
        }
    }

    bl->thread = thread;    /* This thread pwns */
    bl->priority = bl_pr;   /* Save highest blocked priority */
    thread->blocker = NULL; /* Thread not blocked */

    UNLOCK_THREAD(bl_t);

    return bl_t;
}

/*---------------------------------------------------------------------------
 * No threads must be blocked waiting on this thread except to wait for it
 * to exit. The alternative is more elaborate cleanup and object
 * registration code. Check this for risk of silent data corruption when
 * objects with inheritable blocking are abandoned by the owner - not
 * precise but may catch something.
 *---------------------------------------------------------------------------
 */
static void __attribute__((noinline)) check_for_obj_waiters(
    const char *function, struct thread_entry *thread)
{
    /* Only one bit in the mask should be set with a frequency of 1 which
     * represents the thread's own base priority */
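    /* (mask & (mask - 1)) clears the lowest set bit, so a nonzero result
     * means more than one priority category is occupied - i.e. some object
     * is still boosting this thread. */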
    uint32_t mask = thread->pdist.mask;
    if ((mask & (mask - 1)) != 0 ||
        thread->pdist.hist[find_first_set_bit(mask)] > 1)
    {
        unsigned char name[32];
        thread_get_name(name, 32, thread);
        panicf("%s->%s with obj. waiters", function, name);
    }
}
#endif /* HAVE_PRIORITY_SCHEDULING */

/*---------------------------------------------------------------------------
 * Move a thread back to a running state on its core.
 *---------------------------------------------------------------------------
 */
static void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);

    RTR_LOCK(core);

    thread->state = STATE_RUNNING;

    add_to_list_l(&cores[core].running, thread);
    rtr_add_entry(core, thread->priority);

    RTR_UNLOCK(core);

#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}

/*---------------------------------------------------------------------------
 * Check the core's timeout list when at least one thread is due to wake.
 * Filtering for the condition is done before making the call. Resets the
 * tick when the next check will occur.
 *---------------------------------------------------------------------------
 */
void check_tmo_threads(void)
{
    const unsigned int core = CURRENT_CORE;
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *next = cores[core].timeout;

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (next != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *curr = next;

        next = curr->tmo.next;

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(curr);

        unsigned state = curr->state;

        if (state < TIMEOUT_STATE_FIRST)
        {
            /* Cleanup threads no longer on a timeout but still on the
             * list. */
            remove_from_list_tmo(curr);
        }
        else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
            {
                /* Earliest timeout found so far - move the next check up
                   to its time */
                next_tmo_check = curr->tmo_tick;
            }
        }
        else
        {
            /* Sleep timeout has been reached so bring the thread back to
             * life again. */
            if (state == STATE_BLOCKED_W_TMO)
            {
#ifdef HAVE_CORELOCK_OBJECT
                /* Lock the waiting thread's kernel object */
                struct corelock *ocl = curr->obj_cl;

                if (UNLIKELY(corelock_try_lock(ocl) == 0))
                {
                    /* Need to retry in the correct order though the need is
                     * unlikely */
                    UNLOCK_THREAD(curr);
                    corelock_lock(ocl);
                    LOCK_THREAD(curr);

                    if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
                    {
                        /* Thread was woken or removed explicitly while slot
                         * was unlocked */
                        corelock_unlock(ocl);
                        remove_from_list_tmo(curr);
                        UNLOCK_THREAD(curr);
                        continue;
                    }
                }
#endif /* HAVE_CORELOCK_OBJECT */

                remove_from_list_l(curr->bqp, curr);

#ifdef HAVE_WAKEUP_EXT_CB
                if (curr->wakeup_ext_cb != NULL)
                    curr->wakeup_ext_cb(curr);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
                if (curr->blocker != NULL)
                    wakeup_priority_protocol_release(curr);
#endif
                corelock_unlock(ocl);
            }
            /* else state == STATE_SLEEPING */

            remove_from_list_tmo(curr);

            RTR_LOCK(core);

            curr->state = STATE_RUNNING;

            add_to_list_l(&cores[core].running, curr);
            rtr_add_entry(core, curr->priority);

            RTR_UNLOCK(core);
        }

        UNLOCK_THREAD(curr);
    }

    cores[core].next_tmo_check = next_tmo_check;
}

/*---------------------------------------------------------------------------
 * Performs operations that must be done before blocking a thread but after
 * the state is saved.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES > 1
static inline void run_blocking_ops(
    unsigned int core, struct thread_entry *thread)
{
    struct thread_blk_ops *ops = &cores[core].blk_ops;
    const unsigned flags = ops->flags;

    if (LIKELY(flags == TBOP_CLEAR))
        return;

    switch (flags)
    {
    case TBOP_SWITCH_CORE:
        core_switch_blk_op(core, thread);
        /* Fall-through */
    case TBOP_UNLOCK_CORELOCK:
        corelock_unlock(ops->cl_p);
        break;
    }

    ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */

#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(cores[CURRENT_CORE].running - threads);
}
#endif

/*---------------------------------------------------------------------------
 * Prepares a thread to block on an object's list and/or for a specified
 * duration - expects object and slot to be appropriately locked if needed
 * and interrupts to be masked.
 *---------------------------------------------------------------------------
 */
static inline void block_thread_on_l(struct thread_entry *thread,
                                     unsigned state)
{
    /* If inlined, unreachable branches will be pruned with no size penalty
       because state is passed as a constant parameter. */
    const unsigned int core = IF_COP_CORE(thread->core);

    /* Remove the thread from the list of running threads. */
    RTR_LOCK(core);
    remove_from_list_l(&cores[core].running, thread);
    rtr_subtract_entry(core, thread->priority);
    RTR_UNLOCK(core);

    /* Add a timeout to the block if not infinite */
    switch (state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Put the thread into a new list of inactive threads. */
        add_to_list_l(thread->bqp, thread);

        if (state == STATE_BLOCKED)
            break;

        /* Fall-through */
    case STATE_SLEEPING:
        /* If this thread times out sooner than any other thread, update
           next_tmo_check to its timeout */
        if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
        {
            cores[core].next_tmo_check = thread->tmo_tick;
        }

        if (thread->tmo.prev == NULL)
        {
            add_to_list_tmo(thread);
        }
        /* else thread was never removed from list - just keep it there */
        break;
    }

    /* Remember the next thread about to block. */
    cores[core].block_task = thread;

    /* Report new state. */
    thread->state = state;
}

/*---------------------------------------------------------------------------
 * Switch thread in round robin fashion for any given priority. Any thread
 * that removed itself from the running list first must specify itself in
 * the parameter.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{

    const unsigned int core = CURRENT_CORE;
    struct thread_entry *block = cores[core].block_task;
    struct thread_entry *thread = cores[core].running;

    /* Get context to save - next thread to run is unknown until all wakeups
     * are evaluated */
    if (block != NULL)
    {
        cores[core].block_task = NULL;

#if NUM_CORES > 1
        if (UNLIKELY(thread == block))
        {
            /* This was the last thread running and another core woke us before
             * reaching here. Force next thread selection to give tmo threads or
             * other threads woken before this block a first chance. */
            block = NULL;
        }
        else
#endif
        {
            /* Blocking task is the old one */
            thread = block;
        }
    }

#ifdef RB_PROFILE
#ifdef CPU_COLDFIRE
    _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#else
    profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
#endif
#endif

    /* Begin task switching by saving our current context so that we can
     * restore the state of the current thread later to the point prior
     * to this call. */
    store_context(&thread->context);

#ifdef DEBUG
    /* Check core_ctx buflib integrity */
    core_check_valid();
#endif

    /* Check if the current thread stack is overflown */
    if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
        thread_stkov(thread);

#if NUM_CORES > 1
    /* Run any blocking operations requested before switching/sleeping */
    run_blocking_ops(core, thread);
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
        /* Reset the value of thread's skip count */
        thread->skip_count = 0;
#endif

    for (;;)
    {
        /* If there are threads on a timeout and the earliest wakeup is due,
         * check the list and wake any threads that need to start running
         * again. */
        if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
        {
            check_tmo_threads();
        }

        disable_irq();
        RTR_LOCK(core);

        thread = cores[core].running;

        if (UNLIKELY(thread == NULL))
        {
            /* Enter sleep mode to reduce power usage - woken up on interrupt
             * or wakeup request from another core - expected to enable
             * interrupts. */
            RTR_UNLOCK(core);
            core_sleep(IF_COP(core));
        }
        else
        {
#ifdef HAVE_PRIORITY_SCHEDULING
            /* Select the new task based on priorities and the last time a
             * process got CPU time relative to the highest priority runnable
             * task. */
            struct priority_distribution *pd = &cores[core].rtr;
            int max = find_first_set_bit(pd->mask);

            if (block == NULL)
            {
                /* Not switching on a block, tentatively select next thread */
                thread = thread->l.next;
            }

            for (;;)
            {
                int priority = thread->priority;
                int diff;

                /* This ridiculously simple method of aging seems to work
                 * suspiciously well. It does tend to reward CPU hogs (under-
                 * yielding) but that's generally not desirable at all. On
                 * the plus side, it, relative to other threads, penalizes
                 * excess yielding which is good if some high priority thread
                 * is performing no useful work such as polling for a device
                 * to be ready. Of course, aging is only employed when higher
                 * and lower priority threads are runnable. The highest
                 * priority runnable thread(s) are never skipped unless a
                 * lower-priority process has aged sufficiently. Priorities
                 * of REALTIME class are run strictly according to priority
                 * thus are not subject to switchout due to lower-priority
                 * processes aging; they must give up the processor by going
                 * off the run list. */
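                /* Worked example (illustrative): a priority-25 thread
                 * competing with a runnable priority-20 thread has
                 * diff = 5, so it is only selected once its skip_count
                 * exceeds 5*5 = 25 passes over it. */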
                if (LIKELY(priority <= max) ||
                    IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
                    (priority > PRIORITY_REALTIME &&
                     (diff = priority - max,
                      ++thread->skip_count > diff*diff)))
                {
                    cores[core].running = thread;
                    break;
                }

                thread = thread->l.next;
            }
#else
            /* Without priority use a simple FCFS algorithm */
            if (block == NULL)
            {
                /* Not switching on a block, select next thread */
                thread = thread->l.next;
                cores[core].running = thread;
            }
#endif /* HAVE_PRIORITY_SCHEDULING */

            RTR_UNLOCK(core);
            enable_irq();
            break;
        }
    }

    /* And finally give control to the next thread. */
    load_context(&thread->context);

#ifdef RB_PROFILE
    profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
#endif

}

/*---------------------------------------------------------------------------
 * Sleeps a thread for at least a specified number of ticks with zero being
 * a wait until the next tick.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
void sleep_thread(int ticks)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    LOCK_THREAD(current);

    /* Set our timeout, remove from run list and join timeout list. */
    current->tmo_tick = current_tick + ticks + 1;
    block_thread_on_l(current, STATE_SLEEPING);

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Indefinitely block a thread on a blocking queue for explicit wakeup.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread(struct thread_entry *current)
{
    /* Set the state to blocked and take us off of the run queue until we
     * are explicitly woken */
    LOCK_THREAD(current);

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Block a thread on a blocking queue for a specified time interval or until
 * explicitly woken - whichever happens first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
    /* Get the entry for the current running thread. */
    LOCK_THREAD(current);

    /* Set the state to blocked with the specified timeout */
    current->tmo_tick = current_tick + timeout;

    /* Set the list for explicit wakeup */
    block_thread_on_l(current, STATE_BLOCKED_W_TMO);

#ifdef HAVE_PRIORITY_SCHEDULING
    if (current->blocker != NULL)
    {
        /* Object supports PIP */
        current = blocker_inherit_priority(current);
    }
#endif

    UNLOCK_THREAD(current);
}

/*---------------------------------------------------------------------------
 * Explicitly wakeup a thread on a blocking queue. Only affects threads of
 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
 *
 * This code should be considered a critical section by the caller meaning
 * that the object's corelock should be held.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int wakeup_thread(struct thread_entry **list)
{
    struct thread_entry *thread = *list;
    unsigned int result = THREAD_NONE;

    /* Check if there is a blocked thread at all. */
    if (thread == NULL)
        return result;

    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        remove_from_list_l(list, thread);

        result = THREAD_OK;

#ifdef HAVE_PRIORITY_SCHEDULING
        struct thread_entry *current;
        struct blocker *bl = thread->blocker;

        if (bl == NULL)
        {
            /* No inheritance - just boost the thread by aging */
            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
                thread->skip_count = thread->priority;
            current = cores[CURRENT_CORE].running;
        }
        else
        {
            /* Call the specified unblocking PIP */
            current = bl->wakeup_protocol(thread);
        }

        if (current != NULL &&
            find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
            < current->priority)
        {
            /* There is a thread ready to run of higher or same priority on
             * the same core as the current one; recommend a task switch.
             * Knowing if this is an interrupt call would be helpful here. */
            result |= THREAD_SWITCH;
        }
#endif /* HAVE_PRIORITY_SCHEDULING */

        core_schedule_wakeup(thread);
        break;

    /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
    default:
        THREAD_PANICF("wakeup_thread->block invalid", thread);
    case STATE_RUNNING:
    case STATE_KILLED:
        break;
#endif
    }

    UNLOCK_THREAD(thread);
    return result;
}

/*---------------------------------------------------------------------------
 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened. Object owning
 * the queue must be locked first.
 *
 * INTERNAL: Intended for use by kernel objects and not for programs.
 *---------------------------------------------------------------------------
 */
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break; /* No more threads */

        result |= rc;
    }

    return result;
}

/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 1-255.
 *---------------------------------------------------------------------------
 */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    unsigned int version =
        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
        & THREAD_ID_VERSION_MASK;

    /* If wrapped to 0, make it 1 */
    if (version == 0)
        version = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}

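/* Illustrative only: an ID packs "version:slot" into one word - e.g. with
 * THREAD_ID_VERSION_SHIFT == 8, version 3 in slot 5 reads as (3 << 8) | 5.
 * Reusing a slot bumps the version, so stale IDs stop matching: */
#if 0
    unsigned int id = thread_self();
    struct thread_entry *t = thread_id_entry(id); /* slot lookup only */
    bool alive = (t->id == id);                   /* version still valid? */
#endif
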
/*---------------------------------------------------------------------------
 * Find an empty thread slot or NULL if none found. The slot returned
 * will be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct thread_entry * find_empty_thread_slot(void)
{
    /* Any slot could be on an interrupt-accessible list */
    IF_COP( int oldlevel = disable_irq_save(); )
    struct thread_entry *thread = NULL;
    int n;

    for (n = 0; n < MAXTHREADS; n++)
    {
        /* Obtain current slot state - lock it on multicore */
        struct thread_entry *t = &threads[n];
        LOCK_THREAD(t);

        if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
        {
            /* Slot is empty - leave it locked and caller will unlock */
            thread = t;
            break;
        }

        /* Finished examining slot - no longer busy - unlock on multicore */
        UNLOCK_THREAD(t);
    }

    IF_COP( restore_irq(oldlevel); ) /* Re-enable interrupts - this slot is
                                        not accessible to them yet */
    return thread;
}

/*---------------------------------------------------------------------------
 * Return the thread_entry pointer for a thread_id; only the slot bits of
 * the ID are used for the lookup.
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    return &threads[thread_id & THREAD_ID_SLOT_MASK];
}

/*---------------------------------------------------------------------------
 * Return the thread id of the calling thread
 * --------------------------------------------------------------------------
 */
unsigned int thread_self(void)
{
    return cores[CURRENT_CORE].running->id;
}

/*---------------------------------------------------------------------------
 * Return the thread entry of the calling thread.
 *
 * INTERNAL: Intended for use by kernel and not for programs.
 *---------------------------------------------------------------------------
 */
struct thread_entry* thread_self_entry(void)
{
    return cores[CURRENT_CORE].running;
}

/*---------------------------------------------------------------------------
 * Place the current core in idle mode - woken up on interrupt or wake
 * request from another core.
 *---------------------------------------------------------------------------
 */
void core_idle(void)
{
    IF_COP( const unsigned int core = CURRENT_CORE; )
    disable_irq();
    core_sleep(IF_COP(core));
}

/*---------------------------------------------------------------------------
 * Create a thread. If using a dual core architecture, specify which core to
 * start the thread on.
 *
 * Return ID if context area could be allocated, else 0.
 *---------------------------------------------------------------------------
 */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core))
{
    unsigned int i;
    unsigned int stack_words;
    uintptr_t stackptr, stackend;
    struct thread_entry *thread;
    unsigned state;
    int oldlevel;

    thread = find_empty_thread_slot();
    if (thread == NULL)
    {
        return 0;
    }

    oldlevel = disable_irq_save();

    /* Munge the stack to make it easy to spot stack overflows */
    stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
    stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
    stack_size = stackend - stackptr;
    stack_words = stack_size / sizeof (uintptr_t);

    for (i = 0; i < stack_words; i++)
    {
        ((uintptr_t *)stackptr)[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread->name = name;
    thread->stack = (uintptr_t *)stackptr;
    thread->stack_size = stack_size;
    thread->queue = NULL;
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    thread->skip_count = priority;
    prio_add_entry(&thread->pdist, priority);
#endif

#ifdef HAVE_IO_PRIORITY
    /* Default to high (foreground) priority */
    thread->io_priority = IO_PRIORITY_IMMEDIATE;
#endif

#if NUM_CORES > 1
    thread->core = core;

    /* Writeback stack munging or anything else before starting */
    if (core != CURRENT_CORE)
    {
        commit_dcache();
    }
#endif

    /* Thread is not on any timeout list but be a bit paranoid */
    thread->tmo.prev = NULL;

    state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;

    thread->context.sp = (typeof (thread->context.sp))stackend;

    /* Load the thread's context structure with needed startup information */
    THREAD_STARTUP_INIT(core, thread, function);

    thread->state = state;
    i = thread->id; /* Snapshot while locked */

    if (state == STATE_RUNNING)
        core_schedule_wakeup(thread);

    UNLOCK_THREAD(thread);
    restore_irq(oldlevel);

    return i;
}

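/* Illustrative only: a typical call site, assuming a statically allocated
 * stack; demo_thread_func is a made-up name and the IF_PRIO/IF_COP argument
 * groups vanish on builds without priorities or multiple cores. */
#if 0
    static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
    unsigned int id = create_thread(demo_thread_func, demo_stack,
                                    sizeof(demo_stack), 0, "demo"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    if (id != 0)
        thread_wait(id); /* optionally block until it exits */
#endif
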
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/*---------------------------------------------------------------------------
 * Change the boost state of a thread, boosting or unboosting the CPU
 * as required.
 *---------------------------------------------------------------------------
 */
static inline void boost_thread(struct thread_entry *thread, bool boost)
{
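    /* Comparing the flag's truth value with the requested state means
     * cpu_boost() only fires on actual false<->true transitions, keeping
     * the global boost count balanced. */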
    if ((thread->cpu_boost != 0) != boost)
    {
        thread->cpu_boost = boost;
        cpu_boost(boost);
    }
}

void trigger_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, true);
}

void cancel_cpu_boost(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    boost_thread(current, false);
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */

/*---------------------------------------------------------------------------
 * Block the current thread until another thread terminates. A thread may
 * wait on itself to terminate, which prevents it from running again and it
 * will need to be killed externally.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    /* Lock thread-as-waitable-object lock */
    corelock_lock(&thread->waiter_cl);

    /* Be sure it hasn't been killed yet */
    if (thread->id == thread_id && thread->state != STATE_KILLED)
    {
        IF_COP( current->obj_cl = &thread->waiter_cl; )
        current->bqp = &thread->queue;

        disable_irq();
        block_thread(current);

        corelock_unlock(&thread->waiter_cl);

        switch_thread();
        return;
    }

    corelock_unlock(&thread->waiter_cl);
}

/*---------------------------------------------------------------------------
 * Exit the current thread. The Right Way to Do Things (TM).
 *---------------------------------------------------------------------------
 */
/* This is done to foil optimizations that may require the current stack,
 * such as optimizing subexpressions that put variables on the stack that
 * get used after switching stacks. */
#if NUM_CORES > 1
/* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
#else
/* No special procedure is required before calling */
static inline void thread_final_exit(struct thread_entry *current)
#endif
{
    /* At this point, this thread isn't using resources allocated for
     * execution except the slot itself. */

    /* Signal this thread */
    thread_queue_wake(&current->queue);
    corelock_unlock(&current->waiter_cl);
    switch_thread();
    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R", current);
    while (1);
}

void thread_exit(void)
{
    register struct thread_entry * current = cores[CURRENT_CORE].running;

    /* Cancel CPU boost if any */
    cancel_cpu_boost();

    disable_irq();

    corelock_lock(&current->waiter_cl);
    LOCK_THREAD(current);

#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
    if (current->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        unsigned int id = current->id;
        UNLOCK_THREAD(current);
        corelock_unlock(&current->waiter_cl);
        thread_wait(id);
        THREAD_PANICF("thread_exit->WK:*R", current);
    }
#endif

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("thread_exit", current);
#endif

    if (current->tmo.prev != NULL)
    {
        /* Cancel pending timeout list removal */
        remove_from_list_tmo(current);
    }

    /* Switch tasks and never return */
    block_thread_on_l(current, STATE_KILLED);

    /* Slot must be unusable until thread is really gone */
    UNLOCK_THREAD_AT_TASK_SWITCH(current);

    /* Update ID for this slot */
    new_thread_id(current->id, current);
    current->name = NULL;

    /* Do final cleanup and remove the thread */
    thread_final_exit(current);
}

#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
 * Remove a thread from the scheduler. Not The Right Way to Do Things in
 * normal programs.
 *
 * Parameter is the ID as returned from create_thread().
 *
 * Use with care on threads that are not under careful control as this may
 * leave various objects in an undefined state.
 *---------------------------------------------------------------------------
 */
void remove_thread(unsigned int thread_id)
{
#ifdef HAVE_CORELOCK_OBJECT
    /* core is not constant here because of core switching */
    unsigned int core = CURRENT_CORE;
    unsigned int old_core = NUM_CORES;
    struct corelock *ocl = NULL;
#else
    const unsigned int core = CURRENT_CORE;
#endif
    struct thread_entry *current = cores[core].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    unsigned state;
    int oldlevel;

    if (thread == current)
        thread_exit(); /* Current thread - do normal exit */

    oldlevel = disable_irq_save();

    corelock_lock(&thread->waiter_cl);
    LOCK_THREAD(thread);

    state = thread->state;

    if (thread->id != thread_id || state == STATE_KILLED)
        goto thread_killed;

#if NUM_CORES > 1
    if (thread->name == THREAD_DESTRUCT)
    {
        /* Thread being killed - become a waiter */
        UNLOCK_THREAD(thread);
        corelock_unlock(&thread->waiter_cl);
        restore_irq(oldlevel);
        thread_wait(thread_id);
        return;
    }

    thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */

#ifdef HAVE_PRIORITY_SCHEDULING
    check_for_obj_waiters("remove_thread", thread);
#endif

    if (thread->core != core)
    {
        /* Switch cores and safely extract the thread there */
        /* Slot HAS to be unlocked or a deadlock could occur which means other
         * threads have to be guided into becoming thread waiters if they
         * attempt to remove it. */
        unsigned int new_core = thread->core;

        corelock_unlock(&thread->waiter_cl);

        UNLOCK_THREAD(thread);
        restore_irq(oldlevel);

        old_core = switch_core(new_core);

        oldlevel = disable_irq_save();

        corelock_lock(&thread->waiter_cl);
        LOCK_THREAD(thread);

        state = thread->state;
        core = new_core;
        /* Perform the extraction and switch ourselves back to the original
           processor */
    }
#endif /* NUM_CORES > 1 */

    if (thread->tmo.prev != NULL)
    {
        /* Clean thread off the timeout list if a timeout check hasn't
         * run yet */
        remove_from_list_tmo(thread);
    }

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    /* Cancel CPU boost if any */
    boost_thread(thread, false);
#endif

IF_COP( retry_state: )

    switch (state)
    {
    case STATE_RUNNING:
        RTR_LOCK(core);
        /* Remove thread from ready to run tasks */
        remove_from_list_l(&cores[core].running, thread);
        rtr_subtract_entry(core, thread->priority);
        RTR_UNLOCK(core);
        break;
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        /* Remove thread from the queue it's blocked on - including its
         * own if waiting there */
#if NUM_CORES > 1
        if (&thread->waiter_cl != thread->obj_cl)
        {
            ocl = thread->obj_cl;

            if (UNLIKELY(corelock_try_lock(ocl) == 0))
            {
                UNLOCK_THREAD(thread);
                corelock_lock(ocl);
                LOCK_THREAD(thread);

                if (UNLIKELY(thread->state != state))
                {
                    /* Something woke the thread */
                    state = thread->state;
                    corelock_unlock(ocl);
                    goto retry_state;
                }
            }
        }
#endif
        remove_from_list_l(thread->bqp, thread);
1916
1917#ifdef HAVE_WAKEUP_EXT_CB
1918 if (thread->wakeup_ext_cb != NULL)
1919 thread->wakeup_ext_cb(thread);
1920#endif
1921
1922#ifdef HAVE_PRIORITY_SCHEDULING
1923 if (thread->blocker != NULL)
1924 {
1925 /* Remove thread's priority influence from its chain */
1926 wakeup_priority_protocol_release(thread);
1927 }
1928#endif
1929
1930#if NUM_CORES > 1
1931 if (ocl != NULL)
1932 corelock_unlock(ocl);
1933#endif
1934 break;
1935 /* Otherwise thread is frozen and hasn't run yet */
1936 }
1937
1938 new_thread_id(thread_id, thread);
1939 thread->state = STATE_KILLED;
1940
1941 /* If thread was waiting on itself, it will have been removed above.
1942 * The wrong order would result in waking the thread first and deadlocking
1943 * since the slot is already locked. */
1944 thread_queue_wake(&thread->queue);
1945
1946 thread->name = NULL;
1947
1948thread_killed: /* Thread was already killed */
1949 /* Removal complete - safe to unlock and reenable interrupts */
1950 corelock_unlock(&thread->waiter_cl);
1951 UNLOCK_THREAD(thread);
1952 restore_irq(oldlevel);
1953
1954#if NUM_CORES > 1
1955 if (old_core < NUM_CORES)
1956 {
1957 /* Did a removal on another processor's thread - switch back to
1958 native core */
1959 switch_core(old_core);
1960 }
1961#endif
1962}
1963#endif /* ALLOW_REMOVE_THREAD */
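
/* Editorial sketch: a cooperative shutdown is usually preferable to removing
 * a thread externally. The "quit" flag and do_some_work() are hypothetical.
 *
 *     static volatile bool quit = false;
 *
 *     static void worker(void)
 *     {
 *         while (!quit)
 *             do_some_work();
 *         thread_exit();
 *     }
 *
 *     quit = true;             (ask the worker to stop itself...)
 *     thread_wait(worker_id);  (...then wait until it has exited)
 */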
1964
1965#ifdef HAVE_PRIORITY_SCHEDULING
1966/*---------------------------------------------------------------------------
1967 * Sets the thread's relative base priority for the core it runs on. Any
1968 * priority-inheritance changes that become necessary are applied as well.
1969 *---------------------------------------------------------------------------
1970 */
1971int thread_set_priority(unsigned int thread_id, int priority)
1972{
1973 int old_base_priority = -1;
1974 struct thread_entry *thread = thread_id_entry(thread_id);
1975
1976 /* A little safety measure */
1977 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
1978 return -1;
1979
1980 /* Thread could be on any list and therefore on an interrupt accessible
1981 one - disable interrupts */
1982 int oldlevel = disable_irq_save();
1983
1984 LOCK_THREAD(thread);
1985
1986 /* Make sure it's not killed */
1987 if (thread->id == thread_id && thread->state != STATE_KILLED)
1988 {
1989 int old_priority = thread->priority;
1990
1991 old_base_priority = thread->base_priority;
1992 thread->base_priority = priority;
1993
1994 prio_move_entry(&thread->pdist, old_base_priority, priority);
1995 priority = find_first_set_bit(thread->pdist.mask);
1996
1997 if (old_priority == priority)
1998 {
1999 /* No priority change - do nothing */
2000 }
2001 else if (thread->state == STATE_RUNNING)
2002 {
2003 /* This thread is running - change location on the run
2004 * queue. No transitive inheritance needed. */
2005 set_running_thread_priority(thread, priority);
2006 }
2007 else
2008 {
2009 thread->priority = priority;
2010
2011 if (thread->blocker != NULL)
2012 {
2013 /* Bubble new priority down the chain */
2014 struct blocker *bl = thread->blocker; /* Blocker struct */
2015 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2016 struct thread_entry * const tstart = thread; /* Initial thread */
2017                const int highest = MIN(priority, old_priority); /* Higher (lower-numbered) of new or old */
2018
2019 for (;;)
2020 {
2021 struct thread_entry *next; /* Next thread to check */
2022 int bl_pr; /* Highest blocked thread */
2023 int queue_pr; /* New highest blocked thread */
2024#if NUM_CORES > 1
2025 /* Owner can change but thread cannot be dislodged - thread
2026 * may not be the first in the queue which allows other
2027 * threads ahead in the list to be given ownership during the
2028 * operation. If thread is next then the waker will have to
2029 * wait for us and the owner of the object will remain fixed.
2030 * If we successfully grab the owner -- which at some point
2031 * is guaranteed -- then the queue remains fixed until we
2032 * pass by. */
2033 for (;;)
2034 {
2035 LOCK_THREAD(bl_t);
2036
2037 /* Double-check the owner - retry if it changed */
2038 if (LIKELY(bl->thread == bl_t))
2039 break;
2040
2041 UNLOCK_THREAD(bl_t);
2042 bl_t = bl->thread;
2043 }
2044#endif
2045 bl_pr = bl->priority;
2046
2047 if (highest > bl_pr)
2048 break; /* Object priority won't change */
2049
2050 /* This will include the thread being set */
2051 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2052
2053 if (queue_pr == bl_pr)
2054 break; /* Object priority not changing */
2055
2056 /* Update thread boost for this object */
2057 bl->priority = queue_pr;
2058 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2059 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2060
2061 if (bl_t->priority == bl_pr)
2062 break; /* Blocking thread priority not changing */
2063
2064 if (bl_t->state == STATE_RUNNING)
2065 {
2066 /* Thread not blocked - we're done */
2067 set_running_thread_priority(bl_t, bl_pr);
2068 break;
2069 }
2070
2071 bl_t->priority = bl_pr;
2072 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2073
2074 if (bl == NULL)
2075 break; /* End of chain */
2076
2077 next = bl->thread;
2078
2079 if (UNLIKELY(next == tstart))
2080 break; /* Full-circle */
2081
2082 UNLOCK_THREAD(thread);
2083
2084 thread = bl_t;
2085 bl_t = next;
2086 } /* for (;;) */
2087
2088 UNLOCK_THREAD(bl_t);
2089 }
2090 }
2091 }
2092
2093 UNLOCK_THREAD(thread);
2094
2095 restore_irq(oldlevel);
2096
2097 return old_base_priority;
2098}
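
/* Example (editorial sketch): temporarily boost a thread around a
 * latency-sensitive phase using the old base priority returned above;
 * do_time_critical_work() is hypothetical.
 *
 *     int old = thread_set_priority(id, PRIORITY_REALTIME);
 *     if (old != -1)
 *     {
 *         do_time_critical_work();
 *         thread_set_priority(id, old);  (restore the saved base priority)
 *     }
 */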
2099
2100/*---------------------------------------------------------------------------
2101 * Returns the current base priority for a thread.
2102 *---------------------------------------------------------------------------
2103 */
2104int thread_get_priority(unsigned int thread_id)
2105{
2106 struct thread_entry *thread = thread_id_entry(thread_id);
2107 int base_priority = thread->base_priority;
2108
2109    /* Simply check without locking the slot. The result may be stale by
2110     * the time the function returns anyway; if all tests pass, it was the
2111     * correct value while the slot was still valid. */
2112 if (thread->id != thread_id || thread->state == STATE_KILLED)
2113 base_priority = -1;
2114
2115 return base_priority;
2116}
2117#endif /* HAVE_PRIORITY_SCHEDULING */
2118
2119#ifdef HAVE_IO_PRIORITY
2120int thread_get_io_priority(unsigned int thread_id)
2121{
2122 struct thread_entry *thread = thread_id_entry(thread_id);
2123 return thread->io_priority;
2124}
2125
2126void thread_set_io_priority(unsigned int thread_id,int io_priority)
2127{
2128 struct thread_entry *thread = thread_id_entry(thread_id);
2129 thread->io_priority = io_priority;
2130}
2131#endif
2132
2133/*---------------------------------------------------------------------------
2134 * Starts a frozen thread - similar semantics to wakeup_thread except that
2135 * the thread is on no scheduler or wakeup queue at all. It exists simply by
2136 * virtue of the slot having a state of STATE_FROZEN.
2137 *---------------------------------------------------------------------------
2138 */
2139void thread_thaw(unsigned int thread_id)
2140{
2141 struct thread_entry *thread = thread_id_entry(thread_id);
2142 int oldlevel = disable_irq_save();
2143
2144 LOCK_THREAD(thread);
2145
2146    /* If the thread is the current one, it cannot be frozen, so there is
2147     * no need to check for that. */
2148 if (thread->id == thread_id && thread->state == STATE_FROZEN)
2149 core_schedule_wakeup(thread);
2150
2151 UNLOCK_THREAD(thread);
2152 restore_irq(oldlevel);
2153}
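
/* Example (editorial sketch): a thread created with CREATE_THREAD_FROZEN
 * stays in STATE_FROZEN and never runs until thread_thaw() is called, so
 * setup can finish before the thread first executes. setup_shared_state()
 * is hypothetical.
 *
 *     unsigned int id = create_thread(worker, worker_stack,
 *                                     sizeof(worker_stack),
 *                                     CREATE_THREAD_FROZEN, "worker"
 *                                     IF_PRIO(, PRIORITY_BACKGROUND)
 *                                     IF_COP(, CPU));
 *     setup_shared_state();
 *     thread_thaw(id);   (now the worker becomes ready to run)
 */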
2154
2155#if NUM_CORES > 1
2156/*---------------------------------------------------------------------------
2157 * Switch the processor that the currently executing thread runs on.
2158 *---------------------------------------------------------------------------
2159 */
2160unsigned int switch_core(unsigned int new_core)
2161{
2162 const unsigned int core = CURRENT_CORE;
2163 struct thread_entry *current = cores[core].running;
2164
2165 if (core == new_core)
2166 {
2167 /* No change - just return same core */
2168 return core;
2169 }
2170
2171 int oldlevel = disable_irq_save();
2172 LOCK_THREAD(current);
2173
2174 if (current->name == THREAD_DESTRUCT)
2175 {
2176 /* Thread being killed - deactivate and let process complete */
2177 unsigned int id = current->id;
2178 UNLOCK_THREAD(current);
2179 restore_irq(oldlevel);
2180 thread_wait(id);
2181 /* Should never be reached */
2182 THREAD_PANICF("switch_core->D:*R", current);
2183 }
2184
2185 /* Get us off the running list for the current core */
2186 RTR_LOCK(core);
2187 remove_from_list_l(&cores[core].running, current);
2188 rtr_subtract_entry(core, current->priority);
2189 RTR_UNLOCK(core);
2190
2191 /* Stash return value (old core) in a safe place */
2192 current->retval = core;
2193
2194    /* If a timeout hasn't yet been cleaned up, it must be removed now or
2195     * the other core will likely attempt a removal from the wrong list! */
2196 if (current->tmo.prev != NULL)
2197 {
2198 remove_from_list_tmo(current);
2199 }
2200
2201 /* Change the core number for this thread slot */
2202 current->core = new_core;
2203
2204    /* Do not use core_schedule_wakeup here since that would result in
2205     * the thread starting to run on the other core before it is finished
2206     * on this one. Delay the list unlock to keep the other core stuck
2207     * until this thread is ready. */
2208 RTR_LOCK(new_core);
2209
2210 rtr_add_entry(new_core, current->priority);
2211 add_to_list_l(&cores[new_core].running, current);
2212
2213 /* Make a callback into device-specific code, unlock the wakeup list so
2214 * that execution may resume on the new core, unlock our slot and finally
2215 * restore the interrupt level */
2216 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2217 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2218 cores[core].block_task = current;
2219
2220 UNLOCK_THREAD(current);
2221
2222 /* Alert other core to activity */
2223 core_wake(new_core);
2224
2225    /* Do the stack switching, cache maintenance and switch_thread call -
2226 requires native code */
2227 switch_thread_core(core, current);
2228
2229 /* Finally return the old core to caller */
2230 return current->retval;
2231}
2232#endif /* NUM_CORES > 1 */
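
/* Example (editorial sketch, dual-core targets only): hop to the coprocessor
 * for some work, then migrate back; do_cop_work() is hypothetical.
 *
 *     unsigned int old_core = switch_core(COP);
 *     do_cop_work();          (now running on COP)
 *     switch_core(old_core);  (return to the original core)
 */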
2233
2234/*---------------------------------------------------------------------------
2235 * Initialize threading API. This assumes interrupts are not yet enabled. On
2236 * multicore setups, no core is allowed to proceed until create_thread calls
2237 * are safe to perform.
2238 *---------------------------------------------------------------------------
2239 */
2240void init_threads(void)
2241{
2242 const unsigned int core = CURRENT_CORE;
2243 struct thread_entry *thread;
2244
2245 if (core == CPU)
2246 {
2247 /* Initialize core locks and IDs in all slots */
2248 int n;
2249 for (n = 0; n < MAXTHREADS; n++)
2250 {
2251 thread = &threads[n];
2252 corelock_init(&thread->waiter_cl);
2253 corelock_init(&thread->slot_cl);
2254 thread->id = THREAD_ID_INIT(n);
2255 }
2256 }
2257
2258 /* CPU will initialize first and then sleep */
2259 thread = find_empty_thread_slot();
2260
2261 if (thread == NULL)
2262 {
2263 /* WTF? There really must be a slot available at this stage.
2264         * This can fail if, for example, .bss isn't zeroed out by the loader
2265         * or the threads array is placed in the wrong section. */
2266 THREAD_PANICF("init_threads->no slot", NULL);
2267 }
2268
2269 /* Initialize initially non-zero members of core */
2270 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2271
2272 /* Initialize initially non-zero members of slot */
2273 UNLOCK_THREAD(thread); /* No sync worries yet */
2274 thread->name = main_thread_name;
2275 thread->state = STATE_RUNNING;
2276 IF_COP( thread->core = core; )
2277#ifdef HAVE_PRIORITY_SCHEDULING
2278 corelock_init(&cores[core].rtr_cl);
2279 thread->base_priority = PRIORITY_USER_INTERFACE;
2280 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2281 thread->priority = PRIORITY_USER_INTERFACE;
2282 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2283#endif
2284
2285 add_to_list_l(&cores[core].running, thread);
2286
2287 if (core == CPU)
2288 {
2289 thread->stack = stackbegin;
2290 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2291#if NUM_CORES > 1 /* This code path will not be run on single core targets */
2292 /* Wait for other processors to finish their inits since create_thread
2293 * isn't safe to call until the kernel inits are done. The first
2294 * threads created in the system must of course be created by CPU.
2295 * Another possible approach is to initialize all cores and slots
2296 * for each core by CPU, let the remainder proceed in parallel and
2297 * signal CPU when all are finished. */
2298 core_thread_init(CPU);
2299 }
2300 else
2301 {
2302 /* Initial stack is the idle stack */
2303 thread->stack = idle_stacks[core];
2304 thread->stack_size = IDLE_STACK_SIZE;
2305        /* After the last processor completes, it should signal all others
2306         * to proceed, or each may signal the next in turn and call
2307         * thread_exit(). The last one to finish will signal CPU. */
2308 core_thread_init(core);
2309 /* Other cores do not have a main thread - go idle inside switch_thread
2310 * until a thread can run on the core. */
2311 thread_exit();
2312#endif /* NUM_CORES */
2313 }
2314#ifdef INIT_MAIN_THREAD
2315 init_main_thread(&thread->context);
2316#endif
2317}
2318
2319/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2320#if NUM_CORES == 1
2321static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2322#else
2323static int stack_usage(uintptr_t *stackptr, size_t stack_size)
2324#endif
2325{
2326 unsigned int stack_words = stack_size / sizeof (uintptr_t);
2327 unsigned int i;
2328 int usage = 0;
2329
2330 for (i = 0; i < stack_words; i++)
2331 {
2332 if (stackptr[i] != DEADBEEF)
2333 {
2334 usage = ((stack_words - i) * 100) / stack_words;
2335 break;
2336 }
2337 }
2338
2339 return usage;
2340}
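
/* Worked example: stacks are pre-filled with the DEADBEEF pattern and the
 * scan walks up from the lowest address until it finds a clobbered word.
 * For a 1024-word stack whose first clobbered word is at i = 768:
 * usage = (1024 - 768) * 100 / 1024 = 25%. */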
2341
2342/*---------------------------------------------------------------------------
2343 * Returns the maximum percentage of stack a thread ever used while running.
2344 * NOTE: Some large buffer allocations that don't use enough of the buffer
2345 * to overwrite stackptr[0] will not be seen.
2346 *---------------------------------------------------------------------------
2347 */
2348int thread_stack_usage(const struct thread_entry *thread)
2349{
2350 if (LIKELY(thread->stack_size > 0))
2351 return stack_usage(thread->stack, thread->stack_size);
2352 return 0;
2353}
2354
2355#if NUM_CORES > 1
2356/*---------------------------------------------------------------------------
2357 * Returns the maximum percentage of the core's idle stack ever used during
2358 * runtime.
2359 *---------------------------------------------------------------------------
2360 */
2361int idle_stack_usage(unsigned int core)
2362{
2363 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
2364}
2365#endif
2366
2367/*---------------------------------------------------------------------------
2368 * Fills in the buffer with the specified thread's name. If the name is NULL,
2369 * empty, or the thread is in destruct state, a formatted ID is written
2370 * instead.
2371 *---------------------------------------------------------------------------
2372 */
2373void thread_get_name(char *buffer, int size,
2374 struct thread_entry *thread)
2375{
2376 if (size <= 0)
2377 return;
2378
2379 *buffer = '\0';
2380
2381 if (thread)
2382 {
2383 /* Display thread name if one or ID if none */
2384 const char *name = thread->name;
2385 const char *fmt = "%s";
2386 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2387 {
2388 name = (const char *)(uintptr_t)thread->id;
2389 fmt = "%04lX";
2390 }
2391 snprintf(buffer, size, fmt, name);
2392 }
2393}
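
/* Example (editorial sketch): the buffer is always NUL-terminated for any
 * size > 0, so a small fixed-size buffer suffices. DEBUGF is from debug.h.
 *
 *     char name[32];
 *     thread_get_name(name, sizeof(name), thread);
 *     DEBUGF("thread: %s\n", name);
 */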
2394
2395/* Unless otherwise defined, do nothing */
2396#ifndef YIELD_KERNEL_HOOK
2397#define YIELD_KERNEL_HOOK() false
2398#endif
2399#ifndef SLEEP_KERNEL_HOOK
2400#define SLEEP_KERNEL_HOOK(ticks) false
2401#endif
2402
2403/*---------------------------------------------------------------------------
2404 * Suspends a thread's execution for at least the specified number of ticks.
2405 *
2406 * May result in the CPU core entering wait-for-interrupt mode if no other
2407 * thread can be scheduled.
2408 *
2409 * NOTE: sleep(0) sleeps until the end of the current tick;
2410 *       for sleep(n) that doesn't result in rescheduling:
2411 *       n <= ticks suspended < n + 1
2412 *       This range is only a lower bound; other factors may extend the
2413 *       actual time a thread is suspended before it runs again.
2414 *---------------------------------------------------------------------------
2415 */
2416unsigned sleep(unsigned ticks)
2417{
2418    /* In certain situations, particularly in some bootloaders, a normal
2419     * threading call is inappropriate. */
2420 if (SLEEP_KERNEL_HOOK(ticks))
2421 return 0; /* Handled */
2422
2423 disable_irq();
2424 sleep_thread(ticks);
2425 switch_thread();
2426 return 0;
2427}
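
/* Example: the tick rate is HZ ticks per second, so a sleep of roughly
 * 100 ms is written as:
 *
 *     sleep(HZ / 10);
 *
 * Per the note above, this is only a lower bound on the suspension time. */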
2428
2429/*---------------------------------------------------------------------------
2430 * Elects another thread to run or, if no other thread may be made ready to
2431 * run, immediately returns control back to the calling thread.
2432 *---------------------------------------------------------------------------
2433 */
2434void yield(void)
2435{
2436    /* In certain situations, particularly in some bootloaders, a normal
2437     * threading call is inappropriate. */
2438 if (YIELD_KERNEL_HOOK())
2439        return; /* Handled */
2440
2441 switch_thread();
2442}
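
/* Example (editorial sketch): the classic polling idiom - give other threads
 * CPU time while waiting for a condition set elsewhere. "data_ready" is a
 * hypothetical flag set by another thread or interrupt handler.
 *
 *     while (!data_ready)
 *         yield();
 */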