path: root/firmware/thread.c
author     Michael Sevakis <jethead71@rockbox.org>    2008-03-25 02:34:12 +0000
committer  Michael Sevakis <jethead71@rockbox.org>    2008-03-25 02:34:12 +0000
commit     27cf67733936abd75fcb1f8da765977cd75906ee (patch)
tree       f894211a8a0c77b402dd3250b2bee2d17dcfe13f /firmware/thread.c
parent     bc2f8fd8f38a3e010cd67bbac358f6e9991153c6 (diff)
download   rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.tar.gz
           rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.zip
Add a complete priority inheritance implementation to the scheduler (all mutex
ownership and queue_send calls are inheritable). Priorities are differential so
that dispatch depends on the runnable range of priorities. Codec priority can
therefore be raised in small steps (pcmbuf updated to enable). Simplify the
kernel functions to ease implementation and use the same kernel.c for both sim
and target (I'm tired of maintaining two ;_).

1) Not sure if a minor audio break at first buffering issue will exist on
   large-sector disks (the main mutex speed issue was genuinely resolved
   earlier). At this point it's best dealt with at the buffering level. It
   seems a larger filechunk could be used again.

2) Perhaps 64-bit sims will have some minor issues (finicky) but a backroll
   of the code of concern there is a 5-minute job.

All kernel objects become incompatible so a full rebuild and update is needed.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16791 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/thread.c')
-rw-r--r--  firmware/thread.c | 2535
1 file changed, 1436 insertions(+), 1099 deletions(-)
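The "differential priorities" mentioned in the commit message come down to keeping, per core, a count of runnable threads at each priority level plus a bitmask of the non-empty levels, so dispatch depends only on which levels are currently runnable. A minimal sketch of that idea, not taken from the patch: next_dispatch_level() is a made-up name, and GCC's __builtin_ctz stands in for the find_first_set_bit() used in thread.c below (lower number = higher priority).

#include <stdint.h>

/* Pick the priority level to dispatch next from a bitmap of runnable levels.
 * Assumes the mask is non-zero, i.e. something is always runnable. */
static inline int next_dispatch_level(uint32_t runnable_mask)
{
    return __builtin_ctz(runnable_mask); /* lowest set bit = highest priority */
}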
diff --git a/firmware/thread.c b/firmware/thread.c
index 8bebfedbf5..259a66a652 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -28,6 +28,10 @@
28#ifdef RB_PROFILE 28#ifdef RB_PROFILE
29#include <profile.h> 29#include <profile.h>
30#endif 30#endif
31/****************************************************************************
32 * ATTENTION!! *
33 * See notes below on implementing processor-specific portions! *
34 ***************************************************************************/
31 35
32/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ 36/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33#ifdef DEBUG 37#ifdef DEBUG
@@ -59,9 +63,7 @@
59 * event queues. The kernel object must have a scheme to protect itself from 63 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls 64 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each 65 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members 66 * other. Objects' queues are also protected here.
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse from this heirarchy.
65 * 67 *
66 * 3) Thread Slot 68 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be 69 * This locks access to the thread's slot such that its state cannot be
@@ -70,70 +72,66 @@
70 * a thread while it is still blocking will likely desync its state with 72 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state. 73 * the other resources used for that state.
72 * 74 *
73 * 4) Lists 75 * 4) Core Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * it lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
81 *
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible 76 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an 77 * by all processor cores and interrupt handlers. The running (rtr) list is
85 * operation may only be performed by the thread's own core in a normal 78 * the prime example where a thread may be added by any means.
86 * execution context. The wakeup list is the prime example where a thread 79 */
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
90 */
91#define DEADBEEF ((unsigned int)0xdeadbeef)
92/* Cast to the the machine int type, whose size could be < 4. */
93struct core_entry cores[NUM_CORES] IBSS_ATTR;
94struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
95
96static const char main_thread_name[] = "main";
97extern int stackbegin[];
98extern int stackend[];
99 80
100/* core_sleep procedure to implement for any CPU to ensure an asychronous wakup 81/*---------------------------------------------------------------------------
101 * never results in requiring a wait until the next tick (up to 10000uS!). May 82 * Processor specific: core_sleep/core_wake/misc. notes
102 * require assembly and careful instruction ordering. 83 *
84 * ARM notes:
85 * FIQ is not dealt with by the scheduler code and is simply restored if it
 86 * must be masked for some reason - because threading modifies a register
87 * that FIQ may also modify and there's no way to accomplish it atomically.
88 * s3c2440 is such a case.
89 *
 90 * Audio interrupts are generally treated at a higher priority than others;
 91 * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 92 * is not in general safe. Special cases may be constructed on a per-
93 * source basis and blocking operations are not available.
94 *
 95 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 96 * wakeup never results in requiring a wait until the next tick (up to
97 * 10000uS!). May require assembly and careful instruction ordering.
103 * 98 *
104 * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4. 99 * 1) On multicore, stay awake if directed to do so by another. If so, goto
105 * 2) If processor requires, atomically reenable interrupts and perform step 3. 100 * step 4.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire) 101 * 2) If processor requires, atomically reenable interrupts and perform step
107 * goto step 5. 102 * 3.
103 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
104 * on Coldfire) goto step 5.
108 * 4) Enable interrupts. 105 * 4) Enable interrupts.
109 * 5) Exit procedure. 106 * 5) Exit procedure.
107 *
 108 * core_wake and multiprocessor notes for sleep/wake coordination:
109 * If possible, to wake up another processor, the forcing of an interrupt on
110 * the woken core by the waker core is the easiest way to ensure a non-
111 * delayed wake and immediate execution of any woken threads. If that isn't
 112 * available then some careful non-blocking synchronization is needed (as on
113 * PP targets at the moment).
114 *---------------------------------------------------------------------------
110 */ 115 */
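A schematic reading of steps 1) through 5) above, for orientation only; it is not one of the target implementations that follow. stay_awake_requested(), cpu_enter_sleep() and irq_enable() are hypothetical stand-ins for the processor-specific pieces.

static inline void core_sleep_sketch(unsigned int core)
{
    if (!stay_awake_requested(core))   /* 1) another core asked us to stay up? */
    {
        /* 2) + 3) enabling interrupts and entering sleep must be atomic with
         *         respect to the wakeup, or the wakeup can be missed */
        cpu_enter_sleep();
    }
    irq_enable();                      /* 4) */
}                                      /* 5) exit */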
111static inline void core_sleep(IF_COP_VOID(unsigned int core))
112 __attribute__((always_inline));
113
114static void check_tmo_threads(void)
115 __attribute__((noinline));
116 116
117static inline void block_thread_on_l( 117/* Cast to the machine pointer size, whose size could be < 4 or > 32
118 struct thread_queue *list, struct thread_entry *thread, unsigned state) 118 * (someday :). */
119 __attribute__((always_inline)); 119#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
120struct core_entry cores[NUM_CORES] IBSS_ATTR;
121struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
120 122
121static inline void block_thread_on_l_no_listlock( 123static const char main_thread_name[] = "main";
122 struct thread_entry **list, struct thread_entry *thread, unsigned state) 124extern uintptr_t stackbegin[];
123 __attribute__((always_inline)); 125extern uintptr_t stackend[];
124 126
125static inline void _block_thread_on_l( 127static inline void core_sleep(IF_COP_VOID(unsigned int core))
126 struct thread_queue *list, struct thread_entry *thread,
127 unsigned state IF_SWCL(, const bool single))
128 __attribute__((always_inline)); 128 __attribute__((always_inline));
129 129
130IF_SWCL(static inline) struct thread_entry * _wakeup_thread( 130void check_tmo_threads(void)
131 struct thread_queue *list IF_SWCL(, const bool nolock)) 131 __attribute__((noinline));
132 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
133 132
134IF_SWCL(static inline) void _block_thread( 133static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
135 struct thread_queue *list IF_SWCL(, const bool nolock)) 134 __attribute__((always_inline));
136 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
137 135
138static void add_to_list_tmo(struct thread_entry *thread) 136static void add_to_list_tmo(struct thread_entry *thread)
139 __attribute__((noinline)); 137 __attribute__((noinline));
@@ -141,9 +139,6 @@ static void add_to_list_tmo(struct thread_entry *thread)
141static void core_schedule_wakeup(struct thread_entry *thread) 139static void core_schedule_wakeup(struct thread_entry *thread)
142 __attribute__((noinline)); 140 __attribute__((noinline));
143 141
144static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
145 __attribute__((always_inline));
146
147#if NUM_CORES > 1 142#if NUM_CORES > 1
148static inline void run_blocking_ops( 143static inline void run_blocking_ops(
149 unsigned int core, struct thread_entry *thread) 144 unsigned int core, struct thread_entry *thread)
@@ -159,10 +154,9 @@ static inline void store_context(void* addr)
159static inline void load_context(const void* addr) 154static inline void load_context(const void* addr)
160 __attribute__((always_inline)); 155 __attribute__((always_inline));
161 156
162void switch_thread(struct thread_entry *old) 157void switch_thread(void)
163 __attribute__((noinline)); 158 __attribute__((noinline));
164 159
165
166/**************************************************************************** 160/****************************************************************************
167 * Processor-specific section 161 * Processor-specific section
168 */ 162 */
@@ -172,8 +166,7 @@ void switch_thread(struct thread_entry *old)
172 * Start the thread running and terminate it if it returns 166 * Start the thread running and terminate it if it returns
173 *--------------------------------------------------------------------------- 167 *---------------------------------------------------------------------------
174 */ 168 */
175static void start_thread(void) __attribute__((naked,used)); 169static void __attribute__((naked,used)) start_thread(void)
176static void start_thread(void)
177{ 170{
178 /* r0 = context */ 171 /* r0 = context */
179 asm volatile ( 172 asm volatile (
@@ -188,19 +181,18 @@ static void start_thread(void)
188#endif 181#endif
189 "mov lr, pc \n" /* Call thread function */ 182 "mov lr, pc \n" /* Call thread function */
190 "bx r4 \n" 183 "bx r4 \n"
191 "mov r0, #0 \n" /* remove_thread(NULL) */
192 "ldr pc, =remove_thread \n"
193 ".ltorg \n" /* Dump constant pool */
194 ); /* No clobber list - new thread doesn't care */ 184 ); /* No clobber list - new thread doesn't care */
185 thread_exit();
186 //asm volatile (".ltorg"); /* Dump constant pool */
195} 187}
196 188
197/* For startup, place context pointer in r4 slot, start_thread pointer in r5 189/* For startup, place context pointer in r4 slot, start_thread pointer in r5
198 * slot, and thread function pointer in context.start. See load_context for 190 * slot, and thread function pointer in context.start. See load_context for
199 * what happens when thread is initially going to run. */ 191 * what happens when thread is initially going to run. */
200#define THREAD_STARTUP_INIT(core, thread, function) \ 192#define THREAD_STARTUP_INIT(core, thread, function) \
201 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ 193 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
202 (thread)->context.r[1] = (unsigned int)start_thread, \ 194 (thread)->context.r[1] = (uint32_t)start_thread, \
203 (thread)->context.start = (void *)function; }) 195 (thread)->context.start = (uint32_t)function; })
204 196
205/*--------------------------------------------------------------------------- 197/*---------------------------------------------------------------------------
206 * Store non-volatile context. 198 * Store non-volatile context.
@@ -232,11 +224,11 @@ static inline void load_context(const void* addr)
232#if defined (CPU_PP) 224#if defined (CPU_PP)
233 225
234#if NUM_CORES > 1 226#if NUM_CORES > 1
235extern int cpu_idlestackbegin[]; 227extern uintptr_t cpu_idlestackbegin[];
236extern int cpu_idlestackend[]; 228extern uintptr_t cpu_idlestackend[];
237extern int cop_idlestackbegin[]; 229extern uintptr_t cop_idlestackbegin[];
238extern int cop_idlestackend[]; 230extern uintptr_t cop_idlestackend[];
239static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR = 231static uintptr_t * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
240{ 232{
241 [CPU] = cpu_idlestackbegin, 233 [CPU] = cpu_idlestackbegin,
242 [COP] = cop_idlestackbegin 234 [COP] = cop_idlestackbegin
@@ -253,7 +245,7 @@ struct core_semaphores
253}; 245};
254 246
255static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR; 247static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
256#endif 248#endif /* CONFIG_CPU == PP5002 */
257 249
258#endif /* NUM_CORES */ 250#endif /* NUM_CORES */
259 251
@@ -401,15 +393,15 @@ void corelock_unlock(struct corelock *cl)
401 * no other core requested a wakeup for it to perform a task. 393 * no other core requested a wakeup for it to perform a task.
402 *--------------------------------------------------------------------------- 394 *---------------------------------------------------------------------------
403 */ 395 */
396#ifdef CPU_PP502x
404#if NUM_CORES == 1 397#if NUM_CORES == 1
405/* Shared single-core build debugging version */
406static inline void core_sleep(void) 398static inline void core_sleep(void)
407{ 399{
408 PROC_CTL(CURRENT_CORE) = PROC_SLEEP; 400 PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
409 nop; nop; nop; 401 nop; nop; nop;
410 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 402 set_irq_level(IRQ_ENABLED);
411} 403}
412#elif defined (CPU_PP502x) 404#else
413static inline void core_sleep(unsigned int core) 405static inline void core_sleep(unsigned int core)
414{ 406{
415#if 1 407#if 1
@@ -429,8 +421,8 @@ static inline void core_sleep(unsigned int core)
429 "ldr r1, [%[mbx], #0] \n" 421 "ldr r1, [%[mbx], #0] \n"
430 "tst r1, r0, lsr #2 \n" 422 "tst r1, r0, lsr #2 \n"
431 "bne 1b \n" 423 "bne 1b \n"
432 "mrs r1, cpsr \n" /* Enable interrupts */ 424 "mrs r1, cpsr \n" /* Enable IRQ */
433 "bic r1, r1, #0xc0 \n" 425 "bic r1, r1, #0x80 \n"
434 "msr cpsr_c, r1 \n" 426 "msr cpsr_c, r1 \n"
435 : 427 :
436 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core) 428 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
@@ -452,11 +444,36 @@ static inline void core_sleep(unsigned int core)
452 /* Wait for other processor to finish wake procedure */ 444 /* Wait for other processor to finish wake procedure */
453 while (MBX_MSG_STAT & (0x1 << core)); 445 while (MBX_MSG_STAT & (0x1 << core));
454 446
455 /* Enable IRQ, FIQ */ 447 /* Enable IRQ */
456 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 448 set_irq_level(IRQ_ENABLED);
457#endif /* ASM/C selection */ 449#endif /* ASM/C selection */
458} 450}
451#endif /* NUM_CORES */
459#elif CONFIG_CPU == PP5002 452#elif CONFIG_CPU == PP5002
453#if NUM_CORES == 1
454static inline void core_sleep(void)
455{
456 asm volatile (
457 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
458 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
459 * that the correct alternative is executed. Don't change the order
460 * of the next 4 instructions! */
461 "tst pc, #0x0c \n"
462 "mov r0, #0xca \n"
463 "strne r0, [%[ctl]] \n"
464 "streq r0, [%[ctl]] \n"
465 "nop \n" /* nop's needed because of pipeline */
466 "nop \n"
467 "nop \n"
468 "mrs r0, cpsr \n" /* Enable IRQ */
469 "bic r0, r0, #0x80 \n"
470 "msr cpsr_c, r0 \n"
471 :
472 : [ctl]"r"(&PROC_CTL(CURRENT_CORE))
473 : "r0"
474 );
475}
476#else
460/* PP5002 has no mailboxes - emulate using bytes */ 477/* PP5002 has no mailboxes - emulate using bytes */
461static inline void core_sleep(unsigned int core) 478static inline void core_sleep(unsigned int core)
462{ 479{
@@ -486,8 +503,8 @@ static inline void core_sleep(unsigned int core)
486 "ldrb r0, [%[sem], #0] \n" 503 "ldrb r0, [%[sem], #0] \n"
487 "cmp r0, #0 \n" 504 "cmp r0, #0 \n"
488 "bne 1b \n" 505 "bne 1b \n"
489 "mrs r0, cpsr \n" /* Enable interrupts */ 506 "mrs r0, cpsr \n" /* Enable IRQ */
490 "bic r0, r0, #0xc0 \n" 507 "bic r0, r0, #0x80 \n"
491 "msr cpsr_c, r0 \n" 508 "msr cpsr_c, r0 \n"
492 : 509 :
493 : [sem]"r"(&core_semaphores[core]), [c]"r"(core), 510 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
@@ -512,11 +529,12 @@ static inline void core_sleep(unsigned int core)
512 /* Wait for other processor to finish wake procedure */ 529 /* Wait for other processor to finish wake procedure */
513 while (core_semaphores[core].intend_wake != 0); 530 while (core_semaphores[core].intend_wake != 0);
514 531
515 /* Enable IRQ, FIQ */ 532 /* Enable IRQ */
516 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 533 set_irq_level(IRQ_ENABLED);
517#endif /* ASM/C selection */ 534#endif /* ASM/C selection */
518} 535}
519#endif /* CPU type */ 536#endif /* NUM_CORES */
537#endif /* PP CPU type */
520 538
521/*--------------------------------------------------------------------------- 539/*---------------------------------------------------------------------------
522 * Wake another processor core that is sleeping or prevent it from doing so 540 * Wake another processor core that is sleeping or prevent it from doing so
@@ -553,7 +571,7 @@ void core_wake(unsigned int othercore)
553 "strne r1, [%[ctl], %[oc], lsl #2] \n" 571 "strne r1, [%[ctl], %[oc], lsl #2] \n"
554 "mov r1, r2, lsr #4 \n" 572 "mov r1, r2, lsr #4 \n"
555 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */ 573 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
556 "msr cpsr_c, r3 \n" /* Restore int status */ 574 "msr cpsr_c, r3 \n" /* Restore IRQ */
557 : 575 :
558 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), 576 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
559 [oc]"r"(othercore) 577 [oc]"r"(othercore)
@@ -604,7 +622,7 @@ void core_wake(unsigned int othercore)
604 "strne r1, [r2, %[oc], lsl #2] \n" 622 "strne r1, [r2, %[oc], lsl #2] \n"
605 "mov r1, #0 \n" /* Done with wake procedure */ 623 "mov r1, #0 \n" /* Done with wake procedure */
606 "strb r1, [%[sem], #0] \n" 624 "strb r1, [%[sem], #0] \n"
607 "msr cpsr_c, r3 \n" /* Restore int status */ 625 "msr cpsr_c, r3 \n" /* Restore IRQ */
608 : 626 :
609 : [sem]"r"(&core_semaphores[othercore]), 627 : [sem]"r"(&core_semaphores[othercore]),
610 [st]"r"(&PROC_STAT), 628 [st]"r"(&PROC_STAT),
@@ -640,8 +658,8 @@ void core_wake(unsigned int othercore)
640 * 658 *
641 * Needed when a thread suicides on a core other than the main CPU since the 659 * Needed when a thread suicides on a core other than the main CPU since the
642 * stack used when idling is the stack of the last thread to run. This stack 660 * stack used when idling is the stack of the last thread to run. This stack
643 * may not reside in the core in which case the core will continue to use a 661 * may not reside in the core firmware in which case the core will continue
644 * stack from an unloaded module until another thread runs on it. 662 * to use a stack from an unloaded module until another thread runs on it.
645 *--------------------------------------------------------------------------- 663 *---------------------------------------------------------------------------
646 */ 664 */
647static inline void switch_to_idle_stack(const unsigned int core) 665static inline void switch_to_idle_stack(const unsigned int core)
@@ -670,11 +688,11 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
670 /* Flush our data to ram */ 688 /* Flush our data to ram */
671 flush_icache(); 689 flush_icache();
672 /* Stash thread in r4 slot */ 690 /* Stash thread in r4 slot */
673 thread->context.r[0] = (unsigned int)thread; 691 thread->context.r[0] = (uint32_t)thread;
674 /* Stash restart address in r5 slot */ 692 /* Stash restart address in r5 slot */
675 thread->context.r[1] = (unsigned int)thread->context.start; 693 thread->context.r[1] = thread->context.start;
676 /* Save sp in context.sp while still running on old core */ 694 /* Save sp in context.sp while still running on old core */
677 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1]; 695 thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
678} 696}
679 697
680/*--------------------------------------------------------------------------- 698/*---------------------------------------------------------------------------
@@ -689,9 +707,8 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
689/*--------------------------------------------------------------------------- 707/*---------------------------------------------------------------------------
690 * This actually performs the core switch. 708 * This actually performs the core switch.
691 */ 709 */
692static void switch_thread_core(unsigned int core, struct thread_entry *thread) 710static void __attribute__((naked))
693 __attribute__((naked)); 711 switch_thread_core(unsigned int core, struct thread_entry *thread)
694static void switch_thread_core(unsigned int core, struct thread_entry *thread)
695{ 712{
696 /* Pure asm for this because compiler behavior isn't sufficiently predictable. 713 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
697 * Stack access also isn't permitted until restoring the original stack and 714 * Stack access also isn't permitted until restoring the original stack and
@@ -705,7 +722,6 @@ static void switch_thread_core(unsigned int core, struct thread_entry *thread)
705 "mov sp, r2 \n" /* switch stacks */ 722 "mov sp, r2 \n" /* switch stacks */
706 "adr r2, 1f \n" /* r2 = new core restart address */ 723 "adr r2, 1f \n" /* r2 = new core restart address */
707 "str r2, [r1, #40] \n" /* thread->context.start = r2 */ 724 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
708 "mov r0, r1 \n" /* switch_thread(thread) */
709 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */ 725 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
710 "1: \n" 726 "1: \n"
711 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ 727 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
@@ -733,13 +749,15 @@ static inline void core_sleep(void)
733 /* FIQ also changes the CLKCON register so FIQ must be disabled 749 /* FIQ also changes the CLKCON register so FIQ must be disabled
734 when changing it here */ 750 when changing it here */
735 asm volatile ( 751 asm volatile (
736 "mrs r0, cpsr \n" /* Prepare IRQ, FIQ enable */ 752 "mrs r0, cpsr \n"
737 "bic r0, r0, #0xc0 \n" 753 "orr r2, r0, #0x40 \n" /* Disable FIQ */
754 "bic r0, r0, #0x80 \n" /* Prepare IRQ enable */
755 "msr cpsr_c, r2 \n"
738 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */ 756 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
739 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */ 757 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
740 "orr r2, r2, #4 \n" 758 "orr r2, r2, #4 \n"
741 "str r2, [r1, #0xc] \n" 759 "str r2, [r1, #0xc] \n"
742 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */ 760 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
743 "mov r2, #0 \n" /* wait for IDLE */ 761 "mov r2, #0 \n" /* wait for IDLE */
744 "1: \n" 762 "1: \n"
745 "add r2, r2, #1 \n" 763 "add r2, r2, #1 \n"
@@ -750,13 +768,14 @@ static inline void core_sleep(void)
750 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */ 768 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
751 "bic r2, r2, #4 \n" 769 "bic r2, r2, #4 \n"
752 "str r2, [r1, #0xc] \n" 770 "str r2, [r1, #0xc] \n"
753 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */ 771 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
754 : : : "r0", "r1", "r2"); 772 : : : "r0", "r1", "r2");
755} 773}
756#elif defined(CPU_TCC77X) 774#elif defined(CPU_TCC77X)
757static inline void core_sleep(void) 775static inline void core_sleep(void)
758{ 776{
759 #warning TODO: Implement core_sleep 777 #warning TODO: Implement core_sleep
778 set_irq_level(IRQ_ENABLED);
760} 779}
761#elif defined(CPU_TCC780X) 780#elif defined(CPU_TCC780X)
762static inline void core_sleep(void) 781static inline void core_sleep(void)
@@ -765,8 +784,8 @@ static inline void core_sleep(void)
765 asm volatile ( 784 asm volatile (
766 "mov r0, #0 \n" 785 "mov r0, #0 \n"
767 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */ 786 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
768 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */ 787 "mrs r0, cpsr \n" /* Unmask IRQ at core level */
769 "bic r0, r0, #0xc0 \n" 788 "bic r0, r0, #0x80 \n"
770 "msr cpsr_c, r0 \n" 789 "msr cpsr_c, r0 \n"
771 : : : "r0" 790 : : : "r0"
772 ); 791 );
@@ -777,8 +796,8 @@ static inline void core_sleep(void)
777 asm volatile ( 796 asm volatile (
778 "mov r0, #0 \n" 797 "mov r0, #0 \n"
779 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */ 798 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
780 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */ 799 "mrs r0, cpsr \n" /* Unmask IRQ at core level */
781 "bic r0, r0, #0xc0 \n" 800 "bic r0, r0, #0x80 \n"
782 "msr cpsr_c, r0 \n" 801 "msr cpsr_c, r0 \n"
783 : : : "r0" 802 : : : "r0"
784 ); 803 );
@@ -787,6 +806,7 @@ static inline void core_sleep(void)
787static inline void core_sleep(void) 806static inline void core_sleep(void)
788{ 807{
789 #warning core_sleep not implemented, battery life will be decreased 808 #warning core_sleep not implemented, battery life will be decreased
809 set_irq_level(0);
790} 810}
791#endif /* CONFIG_CPU == */ 811#endif /* CONFIG_CPU == */
792 812
@@ -796,8 +816,7 @@ static inline void core_sleep(void)
796 *--------------------------------------------------------------------------- 816 *---------------------------------------------------------------------------
797 */ 817 */
798void start_thread(void); /* Provide C access to ASM label */ 818void start_thread(void); /* Provide C access to ASM label */
799static void __start_thread(void) __attribute__((used)); 819static void __attribute__((used)) __start_thread(void)
800static void __start_thread(void)
801{ 820{
802 /* a0=macsr, a1=context */ 821 /* a0=macsr, a1=context */
803 asm volatile ( 822 asm volatile (
@@ -808,9 +827,8 @@ static void __start_thread(void)
808 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */ 827 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
809 "clr.l (%a1) \n" /* Mark thread running */ 828 "clr.l (%a1) \n" /* Mark thread running */
810 "jsr (%a2) \n" /* Call thread function */ 829 "jsr (%a2) \n" /* Call thread function */
811 "clr.l -(%sp) \n" /* remove_thread(NULL) */
812 "jsr remove_thread \n"
813 ); 830 );
831 thread_exit();
814} 832}
815 833
816/* Set EMAC unit to fractional mode with saturation for each new thread, 834/* Set EMAC unit to fractional mode with saturation for each new thread,
@@ -823,9 +841,9 @@ static void __start_thread(void)
823 */ 841 */
824#define THREAD_STARTUP_INIT(core, thread, function) \ 842#define THREAD_STARTUP_INIT(core, thread, function) \
825 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \ 843 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
826 (thread)->context.d[0] = (unsigned int)&(thread)->context, \ 844 (thread)->context.d[0] = (uint32_t)&(thread)->context, \
827 (thread)->context.d[1] = (unsigned int)start_thread, \ 845 (thread)->context.d[1] = (uint32_t)start_thread, \
828 (thread)->context.start = (void *)(function); }) 846 (thread)->context.start = (uint32_t)(function); })
829 847
830/*--------------------------------------------------------------------------- 848/*---------------------------------------------------------------------------
831 * Store non-volatile context. 849 * Store non-volatile context.
@@ -874,8 +892,7 @@ static inline void core_sleep(void)
874 *--------------------------------------------------------------------------- 892 *---------------------------------------------------------------------------
875 */ 893 */
876void start_thread(void); /* Provide C access to ASM label */ 894void start_thread(void); /* Provide C access to ASM label */
877static void __start_thread(void) __attribute__((used)); 895static void __attribute__((used)) __start_thread(void)
878static void __start_thread(void)
879{ 896{
880 /* r8 = context */ 897 /* r8 = context */
881 asm volatile ( 898 asm volatile (
@@ -885,20 +902,16 @@ static void __start_thread(void)
885 "mov #0, r1 \n" /* Start the thread */ 902 "mov #0, r1 \n" /* Start the thread */
886 "jsr @r0 \n" 903 "jsr @r0 \n"
887 "mov.l r1, @(36, r8) \n" /* Clear start address */ 904 "mov.l r1, @(36, r8) \n" /* Clear start address */
888 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
889 "jmp @r0 \n"
890 "mov #0, r4 \n"
891 "1: \n"
892 ".long _remove_thread \n"
893 ); 905 );
906 thread_exit();
894} 907}
895 908
896/* Place context pointer in r8 slot, function pointer in r9 slot, and 909/* Place context pointer in r8 slot, function pointer in r9 slot, and
897 * start_thread pointer in context_start */ 910 * start_thread pointer in context_start */
898#define THREAD_STARTUP_INIT(core, thread, function) \ 911#define THREAD_STARTUP_INIT(core, thread, function) \
899 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ 912 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
900 (thread)->context.r[1] = (unsigned int)(function), \ 913 (thread)->context.r[1] = (uint32_t)(function), \
901 (thread)->context.start = (void*)start_thread; }) 914 (thread)->context.start = (uint32_t)start_thread; })
902 915
903/*--------------------------------------------------------------------------- 916/*---------------------------------------------------------------------------
904 * Store non-volatile context. 917 * Store non-volatile context.
@@ -947,7 +960,7 @@ static inline void load_context(const void* addr)
947} 960}
948 961
949/*--------------------------------------------------------------------------- 962/*---------------------------------------------------------------------------
950 * Put core in a power-saving state if waking list wasn't repopulated. 963 * Put core in a power-saving state.
951 *--------------------------------------------------------------------------- 964 *---------------------------------------------------------------------------
952 */ 965 */
953static inline void core_sleep(void) 966static inline void core_sleep(void)
@@ -969,9 +982,7 @@ static inline void core_sleep(void)
969#if THREAD_EXTRA_CHECKS 982#if THREAD_EXTRA_CHECKS
970static void thread_panicf(const char *msg, struct thread_entry *thread) 983static void thread_panicf(const char *msg, struct thread_entry *thread)
971{ 984{
972#if NUM_CORES > 1 985 IF_COP( const unsigned int core = thread->core; )
973 const unsigned int core = thread->core;
974#endif
975 static char name[32]; 986 static char name[32];
976 thread_get_name(name, 32, thread); 987 thread_get_name(name, 32, thread);
977 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); 988 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
@@ -987,9 +998,7 @@ static void thread_stkov(struct thread_entry *thread)
987#else 998#else
988static void thread_stkov(struct thread_entry *thread) 999static void thread_stkov(struct thread_entry *thread)
989{ 1000{
990#if NUM_CORES > 1 1001 IF_COP( const unsigned int core = thread->core; )
991 const unsigned int core = thread->core;
992#endif
993 static char name[32]; 1002 static char name[32];
994 thread_get_name(name, 32, thread); 1003 thread_get_name(name, 32, thread);
995 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core)); 1004 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
@@ -998,111 +1007,67 @@ static void thread_stkov(struct thread_entry *thread)
998#define THREAD_ASSERT(exp, msg, thread) 1007#define THREAD_ASSERT(exp, msg, thread)
999#endif /* THREAD_EXTRA_CHECKS */ 1008#endif /* THREAD_EXTRA_CHECKS */
1000 1009
1001/*---------------------------------------------------------------------------
1002 * Lock a list pointer and returns its value
1003 *---------------------------------------------------------------------------
1004 */
1005#if CONFIG_CORELOCK == SW_CORELOCK
1006/* Separate locking function versions */
1007
1008/* Thread locking */ 1010/* Thread locking */
1009#define GET_THREAD_STATE(thread) \ 1011#if NUM_CORES > 1
1010 ({ corelock_lock(&(thread)->cl); (thread)->state; }) 1012#define LOCK_THREAD(thread) \
1011#define TRY_GET_THREAD_STATE(thread) \ 1013 ({ corelock_lock(&(thread)->slot_cl); })
1012 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; }) 1014#define TRY_LOCK_THREAD(thread) \
1013#define UNLOCK_THREAD(thread, state) \ 1015 ({ corelock_try_lock(&thread->slot_cl); })
1014 ({ corelock_unlock(&(thread)->cl); }) 1016#define UNLOCK_THREAD(thread) \
1015#define UNLOCK_THREAD_SET_STATE(thread, _state) \ 1017 ({ corelock_unlock(&(thread)->slot_cl); })
1016 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); }) 1018#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
1017 1019 ({ unsigned int _core = (thread)->core; \
1018/* List locking */ 1020 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
1019#define LOCK_LIST(tqp) \ 1021 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
1020 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; }) 1022#else
1021#define UNLOCK_LIST(tqp, mod) \ 1023#define LOCK_THREAD(thread) \
1022 ({ corelock_unlock(&(tqp)->cl); }) 1024 ({ })
1023#define UNLOCK_LIST_SET_PTR(tqp, mod) \ 1025#define TRY_LOCK_THREAD(thread) \
1024 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); }) 1026 ({ })
1025 1027#define UNLOCK_THREAD(thread) \
1026/* Select the queue pointer directly */ 1028 ({ })
1027#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \ 1029#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
1028 ({ add_to_list_l(&(tqp)->queue, (thread)); }) 1030 ({ })
1029#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \ 1031#endif
1030 ({ remove_from_list_l(&(tqp)->queue, (thread)); }) 1032
1031 1033/* RTR list */
1032#elif CONFIG_CORELOCK == CORELOCK_SWAP 1034#define RTR_LOCK(core) \
1033/* Native swap/exchange versions */ 1035 ({ corelock_lock(&cores[core].rtr_cl); })
1036#define RTR_UNLOCK(core) \
1037 ({ corelock_unlock(&cores[core].rtr_cl); })
1034 1038
1035/* Thread locking */ 1039#ifdef HAVE_PRIORITY_SCHEDULING
1036#define GET_THREAD_STATE(thread) \ 1040#define rtr_add_entry(core, priority) \
1037 ({ unsigned _s; \ 1041 prio_add_entry(&cores[core].rtr, (priority))
1038 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
1039 _s; })
1040#define TRY_GET_THREAD_STATE(thread) \
1041 ({ xchg8(&(thread)->state, STATE_BUSY); })
1042#define UNLOCK_THREAD(thread, _state) \
1043 ({ (thread)->state = (_state); })
1044#define UNLOCK_THREAD_SET_STATE(thread, _state) \
1045 ({ (thread)->state = (_state); })
1046
1047/* List locking */
1048#define LOCK_LIST(tqp) \
1049 ({ struct thread_entry *_l; \
1050 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
1051 _l; })
1052#define UNLOCK_LIST(tqp, mod) \
1053 ({ (tqp)->queue = (mod); })
1054#define UNLOCK_LIST_SET_PTR(tqp, mod) \
1055 ({ (tqp)->queue = (mod); })
1056
1057/* Select the local queue pointer copy returned from LOCK_LIST */
1058#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1059 ({ add_to_list_l(&(tc), (thread)); })
1060#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1061 ({ remove_from_list_l(&(tc), (thread)); })
1062 1042
1043#define rtr_subtract_entry(core, priority) \
1044 prio_subtract_entry(&cores[core].rtr, (priority))
1045
1046#define rtr_move_entry(core, from, to) \
1047 prio_move_entry(&cores[core].rtr, (from), (to))
1063#else 1048#else
1064/* Single-core/non-locked versions */ 1049#define rtr_add_entry(core, priority)
1065 1050#define rtr_add_entry_inl(core, priority)
1066/* Threads */ 1051#define rtr_subtract_entry(core, priority)
1067#define GET_THREAD_STATE(thread) \ 1052#define rtr_subtract_entry_inl(core, priority)
1068 ({ (thread)->state; }) 1053#define rtr_move_entry(core, from, to)
1069#define UNLOCK_THREAD(thread, _state) 1054#define rtr_move_entry_inl(core, from, to)
1070#define UNLOCK_THREAD_SET_STATE(thread, _state) \ 1055#endif
1071 ({ (thread)->state = (_state); })
1072
1073/* Lists */
1074#define LOCK_LIST(tqp) \
1075 ({ (tqp)->queue; })
1076#define UNLOCK_LIST(tqp, mod)
1077#define UNLOCK_LIST_SET_PTR(tqp, mod) \
1078 ({ (tqp)->queue = (mod); })
1079
1080/* Select the queue pointer directly */
1081#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1082 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1083#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1084 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1085
1086#endif /* locking selection */
1087 1056
1088#if THREAD_EXTRA_CHECKS
1089/*--------------------------------------------------------------------------- 1057/*---------------------------------------------------------------------------
1090 * Lock the thread slot to obtain the state and then unlock it. Waits for 1058 * Thread list structure - circular:
1091 * it not to be busy. Used for debugging. 1059 * +------------------------------+
1060 * | |
1061 * +--+---+<-+---+<-+---+<-+---+<-+
1062 * Head->| T | | T | | T | | T |
1063 * +->+---+->+---+->+---+->+---+--+
1064 * | |
1065 * +------------------------------+
1092 *--------------------------------------------------------------------------- 1066 *---------------------------------------------------------------------------
1093 */ 1067 */
1094static unsigned peek_thread_state(struct thread_entry *thread)
1095{
1096 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1097 unsigned state = GET_THREAD_STATE(thread);
1098 UNLOCK_THREAD(thread, state);
1099 set_irq_level(oldlevel);
1100 return state;
1101}
1102#endif /* THREAD_EXTRA_CHECKS */
1103 1068
1104/*--------------------------------------------------------------------------- 1069/*---------------------------------------------------------------------------
1105 * Adds a thread to a list of threads using "intert last". Uses the "l" 1070 * Adds a thread to a list of threads using "insert last". Uses the "l"
1106 * links. 1071 * links.
1107 *--------------------------------------------------------------------------- 1072 *---------------------------------------------------------------------------
1108 */ 1073 */
@@ -1114,44 +1079,18 @@ static void add_to_list_l(struct thread_entry **list,
1114 if (l == NULL) 1079 if (l == NULL)
1115 { 1080 {
1116 /* Insert into unoccupied list */ 1081 /* Insert into unoccupied list */
1117 thread->l.next = thread;
1118 thread->l.prev = thread; 1082 thread->l.prev = thread;
1083 thread->l.next = thread;
1119 *list = thread; 1084 *list = thread;
1120 return; 1085 return;
1121 } 1086 }
1122 1087
1123 /* Insert last */ 1088 /* Insert last */
1124 thread->l.next = l;
1125 thread->l.prev = l->l.prev; 1089 thread->l.prev = l->l.prev;
1126 thread->l.prev->l.next = thread; 1090 thread->l.next = l;
1091 l->l.prev->l.next = thread;
1127 l->l.prev = thread; 1092 l->l.prev = thread;
1128
1129 /* Insert next
1130 thread->l.next = l->l.next;
1131 thread->l.prev = l;
1132 thread->l.next->l.prev = thread;
1133 l->l.next = thread;
1134 */
1135}
1136
1137/*---------------------------------------------------------------------------
1138 * Locks a list, adds the thread entry and unlocks the list on multicore.
1139 * Defined as add_to_list_l on single-core.
1140 *---------------------------------------------------------------------------
1141 */
1142#if NUM_CORES > 1
1143static void add_to_list_l_locked(struct thread_queue *tq,
1144 struct thread_entry *thread)
1145{
1146 struct thread_entry *t = LOCK_LIST(tq);
1147 ADD_TO_LIST_L_SELECT(t, tq, thread);
1148 UNLOCK_LIST(tq, t);
1149 (void)t;
1150} 1093}
1151#else
1152#define add_to_list_l_locked(tq, thread) \
1153 add_to_list_l(&(tq)->queue, (thread))
1154#endif
1155 1094
1156/*--------------------------------------------------------------------------- 1095/*---------------------------------------------------------------------------
1157 * Removes a thread from a list of threads. Uses the "l" links. 1096 * Removes a thread from a list of threads. Uses the "l" links.
@@ -1180,28 +1119,20 @@ static void remove_from_list_l(struct thread_entry **list,
1180 prev = thread->l.prev; 1119 prev = thread->l.prev;
1181 1120
1182 /* Fix links to jump over the removed entry. */ 1121 /* Fix links to jump over the removed entry. */
1183 prev->l.next = next;
1184 next->l.prev = prev; 1122 next->l.prev = prev;
1123 prev->l.next = next;
1185} 1124}
1186 1125
1187/*--------------------------------------------------------------------------- 1126/*---------------------------------------------------------------------------
1188 * Locks a list, removes the thread entry and unlocks the list on multicore. 1127 * Timeout list structure - circular reverse (to make "remove item" O(1)),
1189 * Defined as remove_from_list_l on single-core. 1128 * NULL-terminated forward (to ease the far more common forward traversal):
1129 * +------------------------------+
1130 * | |
1131 * +--+---+<-+---+<-+---+<-+---+<-+
1132 * Head->| T | | T | | T | | T |
1133 * +---+->+---+->+---+->+---+-X
1190 *--------------------------------------------------------------------------- 1134 *---------------------------------------------------------------------------
1191 */ 1135 */
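For orientation only (the real operations are add_to_list_tmo()/remove_from_list_tmo() below): a hypothetical insert-last on a list shaped as drawn above, where the prev links are circular and the next links are NULL-terminated, so removal is O(1) while forward walks simply stop at NULL. tmo_node and tmo_insert_last are made-up names, not from the patch.

struct tmo_node { struct tmo_node *prev, *next; };

static void tmo_insert_last(struct tmo_node **head, struct tmo_node *n)
{
    n->next = NULL;                 /* forward direction is NULL-terminated */
    if (*head == NULL)
    {
        n->prev = n;                /* reverse direction is circular */
        *head = n;
        return;
    }
    n->prev = (*head)->prev;        /* old tail */
    (*head)->prev->next = n;
    (*head)->prev = n;              /* head->prev always points at the tail */
}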
1192#if NUM_CORES > 1
1193static void remove_from_list_l_locked(struct thread_queue *tq,
1194 struct thread_entry *thread)
1195{
1196 struct thread_entry *t = LOCK_LIST(tq);
1197 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1198 UNLOCK_LIST(tq, t);
1199 (void)t;
1200}
1201#else
1202#define remove_from_list_l_locked(tq, thread) \
1203 remove_from_list_l(&(tq)->queue, (thread))
1204#endif
1205 1136
1206/*--------------------------------------------------------------------------- 1137/*---------------------------------------------------------------------------
1207 * Add a thread to the core's timeout list by linking the pointers in its 1138
@@ -1210,19 +1141,24 @@ static void remove_from_list_l_locked(struct thread_queue *tq,
1210 */ 1141 */
1211static void add_to_list_tmo(struct thread_entry *thread) 1142static void add_to_list_tmo(struct thread_entry *thread)
1212{ 1143{
1213 /* Insert first */ 1144 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
1214 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout; 1145 THREAD_ASSERT(thread->tmo.prev == NULL,
1146 "add_to_list_tmo->already listed", thread);
1215 1147
1216 thread->tmo.prev = thread; 1148 thread->tmo.next = NULL;
1217 thread->tmo.next = t;
1218 1149
1219 if (t != NULL) 1150 if (tmo == NULL)
1220 { 1151 {
1221 /* Fix second item's prev pointer to point to this thread */ 1152 /* Insert into unoccupied list */
1222 t->tmo.prev = thread; 1153 thread->tmo.prev = thread;
1154 cores[IF_COP_CORE(thread->core)].timeout = thread;
1155 return;
1223 } 1156 }
1224 1157
1225 cores[IF_COP_CORE(thread->core)].timeout = thread; 1158 /* Insert Last */
1159 thread->tmo.prev = tmo->tmo.prev;
1160 tmo->tmo.prev->tmo.next = thread;
1161 tmo->tmo.prev = thread;
1226} 1162}
1227 1163
1228/*--------------------------------------------------------------------------- 1164/*---------------------------------------------------------------------------
@@ -1233,91 +1169,520 @@ static void add_to_list_tmo(struct thread_entry *thread)
1233 */ 1169 */
1234static void remove_from_list_tmo(struct thread_entry *thread) 1170static void remove_from_list_tmo(struct thread_entry *thread)
1235{ 1171{
1172 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
1173 struct thread_entry *prev = thread->tmo.prev;
1236 struct thread_entry *next = thread->tmo.next; 1174 struct thread_entry *next = thread->tmo.next;
1237 struct thread_entry *prev;
1238 1175
1239 if (thread == cores[IF_COP_CORE(thread->core)].timeout) 1176 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
1177
1178 if (next != NULL)
1179 next->tmo.prev = prev;
1180
1181 if (thread == *list)
1182 {
1183 /* List becomes next item and empty if next == NULL */
1184 *list = next;
1185 /* Mark as unlisted */
1186 thread->tmo.prev = NULL;
1187 }
1188 else
1189 {
1190 if (next == NULL)
1191 (*list)->tmo.prev = prev;
1192 prev->tmo.next = next;
1193 /* Mark as unlisted */
1194 thread->tmo.prev = NULL;
1195 }
1196}
1197
1198
1199#ifdef HAVE_PRIORITY_SCHEDULING
1200/*---------------------------------------------------------------------------
1201 * Priority distribution structure (one category for each possible priority):
1202 *
1203 * +----+----+----+ ... +-----+
1204 * hist: | F0 | F1 | F2 | | F31 |
1205 * +----+----+----+ ... +-----+
1206 * mask: | b0 | b1 | b2 | | b31 |
1207 * +----+----+----+ ... +-----+
1208 *
1209 * F = count of threads at priority category n (frequency)
1210 * b = bitmask of non-zero priority categories (occupancy)
1211 *
1212 * / if hist[n] != 0 : 1
1213 * b[n] = |
1214 * \ else : 0
1215 *
1216 *---------------------------------------------------------------------------
1217 * Basic priority inheritance protocol (PIP):
1218 *
1219 * Mn = mutex n, Tn = thread n
1220 *
1221 * A lower priority thread inherits the priority of the highest priority
1222 * thread blocked waiting for it to complete an action (such as release a
1223 * mutex or respond to a message via queue_send):
1224 *
1225 * 1) T2->M1->T1
1226 *
1227 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
1228 * priority than T1 then T1 inherits the priority of T2.
1229 *
1230 * 2) T3
1231 * \/
1232 * T2->M1->T1
1233 *
1234 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
1235 * T1 inherits the higher of T2 and T3.
1236 *
1237 * 3) T3->M2->T2->M1->T1
1238 *
1239 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
1240 * then T1 inherits the priority of T3 through T2.
1241 *
1242 * Blocking chains can grow arbitrarily complex (though it's best that they
1243 * not form at all very often :) and build up from these units.
1244 *---------------------------------------------------------------------------
1245 */
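A small worked example of the bookkeeping described above, independent of the patch: pd_add()/pd_subtract() mirror prio_add_entry()/prio_subtract_entry() below, __builtin_ctz stands in for find_first_set_bit(), and lower numbers mean higher priority.

#include <stdint.h>
#include <stdio.h>

struct pd_example { unsigned int hist[32]; uint32_t mask; };

static void pd_add(struct pd_example *pd, int prio)
{
    if (++pd->hist[prio] == 1)      /* first entry at this level */
        pd->mask |= 1u << prio;
}

static void pd_subtract(struct pd_example *pd, int prio)
{
    if (--pd->hist[prio] == 0)      /* last entry at this level left */
        pd->mask &= ~(1u << prio);
}

int main(void)
{
    struct pd_example t1 = { {0}, 0 };         /* T1's distribution          */
    pd_add(&t1, 20);                           /* T1's own base priority     */
    pd_add(&t1, 10);                           /* T2 (prio 10) blocks on M1  */
    printf("%d\n", __builtin_ctz(t1.mask));    /* effective priority: 10     */
    pd_subtract(&t1, 10);                      /* M1 handed over to T2       */
    printf("%d\n", __builtin_ctz(t1.mask));    /* back to 20                 */
    return 0;
}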
1246
1247/*---------------------------------------------------------------------------
1248 * Increment frequency at category "priority"
1249 *---------------------------------------------------------------------------
1250 */
1251static inline unsigned int prio_add_entry(
1252 struct priority_distribution *pd, int priority)
1253{
1254 unsigned int count;
1255 /* Enough size/instruction count difference for ARM makes it worth it to
1256 * use different code (192 bytes for ARM). Only thing better is ASM. */
1257#ifdef CPU_ARM
1258 count = pd->hist[priority];
1259 if (++count == 1)
1260 pd->mask |= 1 << priority;
1261 pd->hist[priority] = count;
1262#else /* This one's better for Coldfire */
1263 if ((count = ++pd->hist[priority]) == 1)
1264 pd->mask |= 1 << priority;
1265#endif
1266
1267 return count;
1268}
1269
1270/*---------------------------------------------------------------------------
1271 * Decrement frequency at category "priority"
1272 *---------------------------------------------------------------------------
1273 */
1274static inline unsigned int prio_subtract_entry(
1275 struct priority_distribution *pd, int priority)
1276{
1277 unsigned int count;
1278
1279#ifdef CPU_ARM
1280 count = pd->hist[priority];
1281 if (--count == 0)
1282 pd->mask &= ~(1 << priority);
1283 pd->hist[priority] = count;
1284#else
1285 if ((count = --pd->hist[priority]) == 0)
1286 pd->mask &= ~(1 << priority);
1287#endif
1288
1289 return count;
1290}
1291
1292/*---------------------------------------------------------------------------
1293 * Remove from one category and add to another
1294 *---------------------------------------------------------------------------
1295 */
1296static inline void prio_move_entry(
1297 struct priority_distribution *pd, int from, int to)
1298{
1299 uint32_t mask = pd->mask;
1300
1301#ifdef CPU_ARM
1302 unsigned int count;
1303
1304 count = pd->hist[from];
1305 if (--count == 0)
1306 mask &= ~(1 << from);
1307 pd->hist[from] = count;
1308
1309 count = pd->hist[to];
1310 if (++count == 1)
1311 mask |= 1 << to;
1312 pd->hist[to] = count;
1313#else
1314 if (--pd->hist[from] == 0)
1315 mask &= ~(1 << from);
1316
1317 if (++pd->hist[to] == 1)
1318 mask |= 1 << to;
1319#endif
1320
1321 pd->mask = mask;
1322}
1323
1324/*---------------------------------------------------------------------------
1325 * Change the priority and rtr entry for a running thread
1326 *---------------------------------------------------------------------------
1327 */
1328static inline void set_running_thread_priority(
1329 struct thread_entry *thread, int priority)
1330{
1331 const unsigned int core = IF_COP_CORE(thread->core);
1332 RTR_LOCK(core);
1333 rtr_move_entry(core, thread->priority, priority);
1334 thread->priority = priority;
1335 RTR_UNLOCK(core);
1336}
1337
1338/*---------------------------------------------------------------------------
1339 * Finds the highest priority thread in a list of threads. If the list is
1340 * empty, PRIORITY_IDLE is returned.
1341 *
1342 * It is possible to use the struct priority_distribution within an object
1343 * instead of scanning the remaining threads in the list but as a compromise,
1344 * the resulting per-object memory overhead is saved at a slight speed
1345 * penalty under high contention.
1346 *---------------------------------------------------------------------------
1347 */
1348static int find_highest_priority_in_list_l(
1349 struct thread_entry * const thread)
1350{
1351 if (thread != NULL)
1240 { 1352 {
1241 /* Next item becomes list head */ 1353 /* Go though list until the ending up at the initial thread */
1242 cores[IF_COP_CORE(thread->core)].timeout = next; 1354 int highest_priority = thread->priority;
1355 struct thread_entry *curr = thread;
1243 1356
1244 if (next != NULL) 1357 do
1245 { 1358 {
1246 /* Fix new list head's prev to point to itself. */ 1359 int priority = curr->priority;
1247 next->tmo.prev = next; 1360
1361 if (priority < highest_priority)
1362 highest_priority = priority;
1363
1364 curr = curr->l.next;
1248 } 1365 }
1366 while (curr != thread);
1249 1367
1250 thread->tmo.prev = NULL; 1368 return highest_priority;
1251 return;
1252 } 1369 }
1253 1370
1254 prev = thread->tmo.prev; 1371 return PRIORITY_IDLE;
1372}
1255 1373
1256 if (next != NULL) 1374/*---------------------------------------------------------------------------
1375 * Register priority with blocking system and bubble it down the chain if
1376 * any until we reach the end or something is already equal or higher.
1377 *
1378 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
1379 * targets but that same action also guarantees a circular block anyway and
1380 * those are prevented, right? :-)
1381 *---------------------------------------------------------------------------
1382 */
1383static struct thread_entry *
1384 blocker_inherit_priority(struct thread_entry *current)
1385{
1386 const int priority = current->priority;
1387 struct blocker *bl = current->blocker;
1388 struct thread_entry * const tstart = current;
1389 struct thread_entry *bl_t = bl->thread;
1390
1391 /* Blocker cannot change since the object protection is held */
1392 LOCK_THREAD(bl_t);
1393
1394 for (;;)
1257 { 1395 {
1258 next->tmo.prev = prev; 1396 struct thread_entry *next;
1397 int bl_pr = bl->priority;
1398
1399 if (priority >= bl_pr)
1400 break; /* Object priority already high enough */
1401
1402 bl->priority = priority;
1403
1404 /* Add this one */
1405 prio_add_entry(&bl_t->pdist, priority);
1406
1407 if (bl_pr < PRIORITY_IDLE)
1408 {
1409 /* Not first waiter - subtract old one */
1410 prio_subtract_entry(&bl_t->pdist, bl_pr);
1411 }
1412
1413 if (priority >= bl_t->priority)
1414 break; /* Thread priority high enough */
1415
1416 if (bl_t->state == STATE_RUNNING)
1417 {
1418 /* Blocking thread is a running thread therefore there are no
1419 * further blockers. Change the "run queue" on which it
1420 * resides. */
1421 set_running_thread_priority(bl_t, priority);
1422 break;
1423 }
1424
1425 bl_t->priority = priority;
1426
1427 /* If blocking thread has a blocker, apply transitive inheritance */
1428 bl = bl_t->blocker;
1429
1430 if (bl == NULL)
1431 break; /* End of chain or object doesn't support inheritance */
1432
1433 next = bl->thread;
1434
1435 if (next == tstart)
1436 break; /* Full-circle - deadlock! */
1437
1438 UNLOCK_THREAD(current);
1439
1440#if NUM_CORES > 1
1441 for (;;)
1442 {
1443 LOCK_THREAD(next);
1444
1445 /* Blocker could change - retest condition */
1446 if (bl->thread == next)
1447 break;
1448
1449 UNLOCK_THREAD(next);
1450 next = bl->thread;
1451 }
1452#endif
1453 current = bl_t;
1454 bl_t = next;
1259 } 1455 }
1260 1456
1261 prev->tmo.next = next; 1457 UNLOCK_THREAD(bl_t);
1262 thread->tmo.prev = NULL; 1458
1459 return current;
1263} 1460}
1264 1461
1265/*--------------------------------------------------------------------------- 1462/*---------------------------------------------------------------------------
1266 * Schedules a thread wakeup on the specified core. Threads will be made 1463 * Readjust priorities when waking a thread blocked waiting for another
1267 * ready to run when the next task switch occurs. Note that this does not 1464 * in essence "releasing" the thread's effect on the object owner. Can be
1268 * introduce an on-core delay since the soonest the next thread may run is 1465 * performed from any context.
1269 * no sooner than that. Other cores and on-core interrupts may only ever
1270 * add to the list.
1271 *--------------------------------------------------------------------------- 1466 *---------------------------------------------------------------------------
1272 */ 1467 */
1273static void core_schedule_wakeup(struct thread_entry *thread) 1468struct thread_entry *
1469 wakeup_priority_protocol_release(struct thread_entry *thread)
1274{ 1470{
1275 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 1471 const int priority = thread->priority;
1276 const unsigned int core = IF_COP_CORE(thread->core); 1472 struct blocker *bl = thread->blocker;
1277 add_to_list_l_locked(&cores[core].waking, thread); 1473 struct thread_entry * const tstart = thread;
1474 struct thread_entry *bl_t = bl->thread;
1475
1476 /* Blocker cannot change since object will be locked */
1477 LOCK_THREAD(bl_t);
1478
1479 thread->blocker = NULL; /* Thread not blocked */
1480
1481 for (;;)
1482 {
1483 struct thread_entry *next;
1484 int bl_pr = bl->priority;
1485
1486 if (priority > bl_pr)
1487 break; /* Object priority higher */
1488
1489 next = *thread->bqp;
1490
1491 if (next == NULL)
1492 {
1493 /* No more threads in queue */
1494 prio_subtract_entry(&bl_t->pdist, bl_pr);
1495 bl->priority = PRIORITY_IDLE;
1496 }
1497 else
1498 {
1499 /* Check list for highest remaining priority */
1500 int queue_pr = find_highest_priority_in_list_l(next);
1501
1502 if (queue_pr == bl_pr)
1503 break; /* Object priority not changing */
1504
1505 /* Change queue priority */
1506 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
1507 bl->priority = queue_pr;
1508 }
1509
1510 if (bl_pr > bl_t->priority)
1511 break; /* thread priority is higher */
1512
1513 bl_pr = find_first_set_bit(bl_t->pdist.mask);
1514
1515 if (bl_pr == bl_t->priority)
1516 break; /* Thread priority not changing */
1517
1518 if (bl_t->state == STATE_RUNNING)
1519 {
1520 /* No further blockers */
1521 set_running_thread_priority(bl_t, bl_pr);
1522 break;
1523 }
1524
1525 bl_t->priority = bl_pr;
1526
1527 /* If blocking thread has a blocker, apply transitive inheritance */
1528 bl = bl_t->blocker;
1529
1530 if (bl == NULL)
1531 break; /* End of chain or object doesn't support inheritance */
1532
1533 next = bl->thread;
1534
1535 if (next == tstart)
1536 break; /* Full-circle - deadlock! */
1537
1538 UNLOCK_THREAD(thread);
1539
1278#if NUM_CORES > 1 1540#if NUM_CORES > 1
1279 if (core != CURRENT_CORE) 1541 for (;;)
1542 {
1543 LOCK_THREAD(next);
1544
1545 /* Blocker could change - retest condition */
1546 if (bl->thread == next)
1547 break;
1548
1549 UNLOCK_THREAD(next);
1550 next = bl->thread;
1551 }
1552#endif
1553 thread = bl_t;
1554 bl_t = next;
1555 }
1556
1557 UNLOCK_THREAD(bl_t);
1558
1559#if NUM_CORES > 1
1560 if (thread != tstart)
1280 { 1561 {
1281 core_wake(core); 1562 /* Relock original if it changed */
1563 LOCK_THREAD(tstart);
1282 } 1564 }
1283#endif 1565#endif
1284 set_irq_level(oldlevel); 1566
1567 return cores[CURRENT_CORE].running;
1285} 1568}
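The release protocol above leans entirely on the per-thread priority distribution: prio_add_entry()/prio_subtract_entry() maintain a count per priority level plus a summary bitmask, and find_first_set_bit() on that mask yields the highest (numerically lowest) priority still contributed. A minimal, self-contained sketch of such a structure follows; the field and function names mirror the diff, but the bodies, the 32-level assumption and the use of __builtin_ctz are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    struct priority_distribution
    {
        uint32_t mask;      /* bit n set => hist[n] != 0            */
        uint8_t  hist[32];  /* number of contributors at each level */
    };

    static unsigned int prio_add_entry(struct priority_distribution *pd, int pr)
    {
        if (++pd->hist[pr] == 1)
            pd->mask |= 1u << pr;           /* first contributor at this level */
        return pd->hist[pr];
    }

    static unsigned int prio_subtract_entry(struct priority_distribution *pd, int pr)
    {
        if (--pd->hist[pr] == 0)
            pd->mask &= ~(1u << pr);        /* last contributor removed */
        return pd->hist[pr];
    }

    static int find_first_set_bit(uint32_t mask)
    {
        return mask ? __builtin_ctz(mask) : 32; /* lowest set bit = best priority */
    }

    int main(void)
    {
        struct priority_distribution pd = { 0, { 0 } };
        prio_add_entry(&pd, 16);                 /* thread's own base priority    */
        prio_add_entry(&pd, 5);                  /* boost inherited from a waiter */
        printf("effective: %d\n", find_first_set_bit(pd.mask)); /* 5  */
        prio_subtract_entry(&pd, 5);             /* waiter released, as above     */
        printf("effective: %d\n", find_first_set_bit(pd.mask)); /* 16 */
        return 0;
    }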
1286 1569
1287/*--------------------------------------------------------------------------- 1570/*---------------------------------------------------------------------------
1288 * If the waking list was populated, move all threads on it onto the running 1571 * Transfer ownership to a thread waiting for an objects and transfer
1289 * list so they may be run ASAP. 1572 * inherited priority boost from other waiters. This algorithm knows that
1573 * blocking chains may only unblock from the very end.
1574 *
1575 * Only the owning thread itself may call this, so it is assumed to be
1576 * the running thread.
1290 *--------------------------------------------------------------------------- 1577 *---------------------------------------------------------------------------
1291 */ 1578 */
1292static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core)) 1579struct thread_entry *
1580 wakeup_priority_protocol_transfer(struct thread_entry *thread)
1293{ 1581{
1294 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking); 1582 /* Waking thread inherits priority boost from object owner */
1295 struct thread_entry *r = cores[IF_COP_CORE(core)].running; 1583 struct blocker *bl = thread->blocker;
1584 struct thread_entry *bl_t = bl->thread;
1585 struct thread_entry *next;
1586 int bl_pr;
1296 1587
1297 /* Transfer all threads on waking list to running list in one 1588 THREAD_ASSERT(thread_get_current() == bl_t,
1298 swoop */ 1589 "UPPT->wrong thread", thread_get_current());
1299 if (r != NULL) 1590
1591 LOCK_THREAD(bl_t);
1592
1593 bl_pr = bl->priority;
1594
1595 /* Remove the object's boost from the owning thread */
1596 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
1597 bl_pr <= bl_t->priority)
1300 { 1598 {
1301 /* Place waking threads at the end of the running list. */ 1599 /* No more threads at this priority are waiting and the old level is
1302 struct thread_entry *tmp; 1600 * at least the thread level */
1303 w->l.prev->l.next = r; 1601 int priority = find_first_set_bit(bl_t->pdist.mask);
1304 r->l.prev->l.next = w; 1602
1305 tmp = r->l.prev; 1603 if (priority != bl_t->priority)
1306 r->l.prev = w->l.prev; 1604 {
1307 w->l.prev = tmp; 1605 /* Adjust this thread's priority */
1606 set_running_thread_priority(bl_t, priority);
1607 }
1608 }
1609
1610 next = *thread->bqp;
1611
1612 if (next == NULL)
1613 {
1614 /* Expected shortcut - no more waiters */
1615 bl_pr = PRIORITY_IDLE;
1308 } 1616 }
1309 else 1617 else
1310 { 1618 {
1311 /* Just transfer the list as-is */ 1619 if (thread->priority <= bl_pr)
1312 cores[IF_COP_CORE(core)].running = w; 1620 {
1621 /* Need to scan threads remaining in queue */
1622 bl_pr = find_highest_priority_in_list_l(next);
1623 }
1624
1625 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
1626 bl_pr < thread->priority)
1627 {
1628 /* Thread priority must be raised */
1629 thread->priority = bl_pr;
1630 }
1631 }
1632
1633 bl->thread = thread; /* This thread pwns */
1634 bl->priority = bl_pr; /* Save highest blocked priority */
1635 thread->blocker = NULL; /* Thread not blocked */
1636
1637 UNLOCK_THREAD(bl_t);
1638
1639 return bl_t;
1640}
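For context, roughly how a kernel object could hook the transfer protocol up: the blocker's wakeup_protocol pointer selects which of the routines above wakeup_thread() will call. The struct and release routine below are assumptions for illustration (the real kernel.c wiring is not part of this file); only the blocker fields, PRIORITY_IDLE, THREAD_SWITCH, wakeup_thread() and switch_thread() come from the diff.

    /* Hypothetical ownership-transferring lock - a sketch, not kernel.c code.
     * Corelock/IRQ handling and the acquire path (which would set
     * blocker.thread to the new owner) are omitted for brevity. */
    struct xfer_lock
    {
        struct thread_entry *queue;   /* threads blocked on the lock       */
        struct blocker blocker;       /* owner + highest blocked priority  */
    };

    static void xfer_lock_init(struct xfer_lock *l)
    {
        l->queue = NULL;
        l->blocker.thread = NULL;
        l->blocker.priority = PRIORITY_IDLE;
        l->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
    }

    static void xfer_lock_release(struct xfer_lock *l)
    {
        if (l->queue == NULL)
        {
            l->blocker.thread = NULL; /* no waiters - just drop ownership */
            return;
        }

        /* wakeup_thread() invokes the transfer protocol above, which moves
         * ownership and any remaining inherited boost to the first waiter. */
        if (wakeup_thread(&l->queue) & THREAD_SWITCH)
            switch_thread();          /* the new owner outranks us */
    }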
1641
1642/*---------------------------------------------------------------------------
1643 * No threads may be blocked on this thread except those waiting for it to exit.
1644 * The alternative is more elaborate cleanup and object registration code.
1645 * Check this for risk of silent data corruption when objects with
1646 * inheritable blocking are abandoned by the owner - not precise but may
1647 * catch something.
1648 *---------------------------------------------------------------------------
1649 */
1650void check_for_obj_waiters(const char *function, struct thread_entry *thread)
1651{
1652 /* Only one bit in the mask should be set with a frequency of 1 which
1653 * represents the thread's own base priority */
1654 uint32_t mask = thread->pdist.mask;
1655 if ((mask & (mask - 1)) != 0 ||
1656 thread->pdist.hist[find_first_set_bit(mask)] > 1)
1657 {
1658 unsigned char name[32];
1659 thread_get_name(name, 32, thread);
1660 panicf("%s->%s with obj. waiters", function, name);
1313 } 1661 }
1314 /* Just leave any timeout threads on the timeout list. If a timeout check 1662}
1315 * is due, they will be removed there. If they do a timeout again before 1663#endif /* HAVE_PRIORITY_SCHEDULING */
1316 * being removed, they will just stay on the list with a new expiration 1664
1317 * tick. */ 1665/*---------------------------------------------------------------------------
1666 * Move a thread back to a running state on its core.
1667 *---------------------------------------------------------------------------
1668 */
1669static void core_schedule_wakeup(struct thread_entry *thread)
1670{
1671 const unsigned int core = IF_COP_CORE(thread->core);
1672
1673 RTR_LOCK(core);
1674
1675 thread->state = STATE_RUNNING;
1676
1677 add_to_list_l(&cores[core].running, thread);
1678 rtr_add_entry(core, thread->priority);
1679
1680 RTR_UNLOCK(core);
1318 1681
1319 /* Waking list is clear - NULL and unlock it */ 1682#if NUM_CORES > 1
1320 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL); 1683 if (core != CURRENT_CORE)
1684 core_wake(core);
1685#endif
1321} 1686}
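core_schedule_wakeup() and its counterparts keep a per-core ready-to-run distribution (cores[core].rtr, consulted later by switch_thread()) in step with the running list. The rtr helpers are not shown in this hunk; a plausible sketch, reusing the prio_* operations, would be along these lines - an assumption, not the committed definitions:

    /* Sketch only - the real rtr_add_entry()/rtr_subtract_entry() are defined
     * elsewhere in thread.c and may differ. */
    static inline void rtr_add_entry(unsigned int core, int priority)
    {
    #ifdef HAVE_PRIORITY_SCHEDULING
        prio_add_entry(&cores[core].rtr, priority);
    #else
        (void)core; (void)priority;
    #endif
    }

    static inline void rtr_subtract_entry(unsigned int core, int priority)
    {
    #ifdef HAVE_PRIORITY_SCHEDULING
        prio_subtract_entry(&cores[core].rtr, priority);
    #else
        (void)core; (void)priority;
    #endif
    }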
1322 1687
1323/*--------------------------------------------------------------------------- 1688/*---------------------------------------------------------------------------
@@ -1326,7 +1691,7 @@ static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1326 * tick when the next check will occur. 1691 * tick when the next check will occur.
1327 *--------------------------------------------------------------------------- 1692 *---------------------------------------------------------------------------
1328 */ 1693 */
1329static void check_tmo_threads(void) 1694void check_tmo_threads(void)
1330{ 1695{
1331 const unsigned int core = CURRENT_CORE; 1696 const unsigned int core = CURRENT_CORE;
1332 const long tick = current_tick; /* snapshot the current tick */ 1697 const long tick = current_tick; /* snapshot the current tick */
@@ -1335,54 +1700,98 @@ static void check_tmo_threads(void)
1335 1700
1336 /* If there are no processes waiting for a timeout, just keep the check 1701 /* If there are no processes waiting for a timeout, just keep the check
1337 tick from falling into the past. */ 1702 tick from falling into the past. */
1338 if (next != NULL) 1703
1704 /* Break the loop once we have walked through the list of all
1705 * sleeping processes or have removed them all. */
1706 while (next != NULL)
1339 { 1707 {
1340 /* Check sleeping threads. */ 1708 /* Check sleeping threads. Allow interrupts between checks. */
1341 do 1709 set_irq_level(0);
1342 {
1343 /* Must make sure noone else is examining the state, wait until
1344 slot is no longer busy */
1345 struct thread_entry *curr = next;
1346 next = curr->tmo.next;
1347 1710
1348 unsigned state = GET_THREAD_STATE(curr); 1711 struct thread_entry *curr = next;
1349 1712
1350 if (state < TIMEOUT_STATE_FIRST) 1713 next = curr->tmo.next;
1351 { 1714
1352 /* Cleanup threads no longer on a timeout but still on the 1715 /* Lock thread slot against explicit wakeup */
1353 * list. */ 1716 set_irq_level(HIGHEST_IRQ_LEVEL);
1354 remove_from_list_tmo(curr); 1717 LOCK_THREAD(curr);
1355 UNLOCK_THREAD(curr, state); /* Unlock thread slot */ 1718
1356 } 1719 unsigned state = curr->state;
1357 else if (TIME_BEFORE(tick, curr->tmo_tick)) 1720
1721 if (state < TIMEOUT_STATE_FIRST)
1722 {
1723 /* Cleanup threads no longer on a timeout but still on the
1724 * list. */
1725 remove_from_list_tmo(curr);
1726 }
1727 else if (TIME_BEFORE(tick, curr->tmo_tick))
1728 {
1729 /* Timeout still pending - this will be the usual case */
1730 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1358 { 1731 {
1359 /* Timeout still pending - this will be the usual case */ 1732 /* Earliest timeout found so far - move the next check up
1360 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) 1733 to its time */
1361 { 1734 next_tmo_check = curr->tmo_tick;
1362 /* Earliest timeout found so far - move the next check up
1363 to its time */
1364 next_tmo_check = curr->tmo_tick;
1365 }
1366 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1367 } 1735 }
1368 else 1736 }
1737 else
1738 {
1739 /* Sleep timeout has been reached so bring the thread back to
1740 * life again. */
1741 if (state == STATE_BLOCKED_W_TMO)
1369 { 1742 {
1370 /* Sleep timeout has been reached so bring the thread back to 1743#if NUM_CORES > 1
1371 * life again. */ 1744 /* Lock the waiting thread's kernel object */
1372 if (state == STATE_BLOCKED_W_TMO) 1745 struct corelock *ocl = curr->obj_cl;
1746
1747 if (corelock_try_lock(ocl) == 0)
1373 { 1748 {
1374 remove_from_list_l_locked(curr->bqp, curr); 1749 /* Need to retry in the correct order though the need is
1750 * unlikely */
1751 UNLOCK_THREAD(curr);
1752 corelock_lock(ocl);
1753 LOCK_THREAD(curr);
1754
1755 if (curr->state != STATE_BLOCKED_W_TMO)
1756 {
1757 /* Thread was woken or removed explicitly while slot
1758 * was unlocked */
1759 corelock_unlock(ocl);
1760 remove_from_list_tmo(curr);
1761 UNLOCK_THREAD(curr);
1762 continue;
1763 }
1375 } 1764 }
1765#endif /* NUM_CORES */
1766
1767 remove_from_list_l(curr->bqp, curr);
1768
1769#ifdef HAVE_WAKEUP_EXT_CB
1770 if (curr->wakeup_ext_cb != NULL)
1771 curr->wakeup_ext_cb(curr);
1772#endif
1376 1773
1377 remove_from_list_tmo(curr); 1774#ifdef HAVE_PRIORITY_SCHEDULING
1378 add_to_list_l(&cores[core].running, curr); 1775 if (curr->blocker != NULL)
1379 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING); 1776 wakeup_priority_protocol_release(curr);
1777#endif
1778 corelock_unlock(ocl);
1380 } 1779 }
1780 /* else state == STATE_SLEEPING */
1781
1782 remove_from_list_tmo(curr);
1783
1784 RTR_LOCK(core);
1381 1785
1382 /* Break the loop once we have walked through the list of all 1786 curr->state = STATE_RUNNING;
1383 * sleeping processes or have removed them all. */ 1787
1788 add_to_list_l(&cores[core].running, curr);
1789 rtr_add_entry(core, curr->priority);
1790
1791 RTR_UNLOCK(core);
1384 } 1792 }
1385 while (next != NULL); 1793
1794 UNLOCK_THREAD(curr);
1386 } 1795 }
1387 1796
1388 cores[core].next_tmo_check = next_tmo_check; 1797 cores[core].next_tmo_check = next_tmo_check;
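The corelock_try_lock() dance inside the STATE_BLOCKED_W_TMO branch above is the standard recovery for a lock-order inversion: the timeout check already holds the thread slot lock, but the canonical order is object-then-slot, so on contention it backs out, relocks in order and re-validates the state. The same pattern in generic, self-contained form (pthreads used purely for illustration):

    #include <pthread.h>
    #include <stdbool.h>

    /* We hold 'slot' but need 'obj', and the global order is obj-then-slot.
     * Returns true if the condition protected by 'slot' still holds once
     * both locks are held in the correct order. */
    static bool lock_obj_while_holding_slot(pthread_mutex_t *obj,
                                            pthread_mutex_t *slot,
                                            bool (*still_valid)(void))
    {
        if (pthread_mutex_trylock(obj) == 0)
            return true;                  /* no inversion - fast path */

        pthread_mutex_unlock(slot);       /* back out ...                   */
        pthread_mutex_lock(obj);          /* ... take locks in order ...    */
        pthread_mutex_lock(slot);
        return still_valid();             /* ... and re-check the state     */
    }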
@@ -1390,109 +1799,33 @@ static void check_tmo_threads(void)
1390 1799
1391/*--------------------------------------------------------------------------- 1800/*---------------------------------------------------------------------------
1392 * Performs operations that must be done before blocking a thread but after 1801 * Performs operations that must be done before blocking a thread but after
1393 * the state is saved - follows reverse of locking order. blk_ops.flags is 1802 * the state is saved.
1394 * assumed to be nonzero.
1395 *--------------------------------------------------------------------------- 1803 *---------------------------------------------------------------------------
1396 */ 1804 */
1397#if NUM_CORES > 1 1805#if NUM_CORES > 1
1398static inline void run_blocking_ops( 1806static inline void run_blocking_ops(
1399 unsigned int core, struct thread_entry *thread) 1807 unsigned int core, struct thread_entry *thread)
1400{ 1808{
1401 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops; 1809 struct thread_blk_ops *ops = &cores[core].blk_ops;
1402 const unsigned flags = ops->flags; 1810 const unsigned flags = ops->flags;
1403 1811
1404 if (flags == 0) 1812 if (flags == TBOP_CLEAR)
1405 return; 1813 return;
1406 1814
1407 if (flags & TBOP_SWITCH_CORE) 1815 switch (flags)
1408 { 1816 {
1817 case TBOP_SWITCH_CORE:
1409 core_switch_blk_op(core, thread); 1818 core_switch_blk_op(core, thread);
1410 } 1819 /* Fall-through */
1411 1820 case TBOP_UNLOCK_CORELOCK:
1412#if CONFIG_CORELOCK == SW_CORELOCK
1413 if (flags & TBOP_UNLOCK_LIST)
1414 {
1415 UNLOCK_LIST(ops->list_p, NULL);
1416 }
1417
1418 if (flags & TBOP_UNLOCK_CORELOCK)
1419 {
1420 corelock_unlock(ops->cl_p); 1821 corelock_unlock(ops->cl_p);
1421 }
1422
1423 if (flags & TBOP_UNLOCK_THREAD)
1424 {
1425 UNLOCK_THREAD(ops->thread, 0);
1426 }
1427#elif CONFIG_CORELOCK == CORELOCK_SWAP
1428 /* Write updated variable value into memory location */
1429 switch (flags & TBOP_VAR_TYPE_MASK)
1430 {
1431 case TBOP_UNLOCK_LIST:
1432 UNLOCK_LIST(ops->list_p, ops->list_v);
1433 break;
1434 case TBOP_SET_VARi:
1435 *ops->var_ip = ops->var_iv;
1436 break;
1437 case TBOP_SET_VARu8:
1438 *ops->var_u8p = ops->var_u8v;
1439 break; 1822 break;
1440 } 1823 }
1441#endif /* CONFIG_CORELOCK == */
1442 1824
1443 /* Unlock thread's slot */ 1825 ops->flags = TBOP_CLEAR;
1444 if (flags & TBOP_UNLOCK_CURRENT)
1445 {
1446 UNLOCK_THREAD(thread, ops->state);
1447 }
1448
1449 ops->flags = 0;
1450} 1826}
1451#endif /* NUM_CORES > 1 */ 1827#endif /* NUM_CORES > 1 */
1452 1828
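run_blocking_ops() exists so a blocker on multicore can keep its object's corelock held across the context save and have the scheduler release it afterwards. A sketch of a caller that requests this, with the object type, its fields and the exact call sequence assumed rather than taken from this diff:

    #if NUM_CORES > 1
    /* Illustrative fragment only - not code from this commit. */
    static void block_on_object(struct my_object *obj)   /* hypothetical type */
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        corelock_lock(&obj->cl);              /* protects obj->queue */
        set_irq_level(HIGHEST_IRQ_LEVEL);

        IF_COP( current->obj_cl = &obj->cl; )
        current->bqp = &obj->queue;
        block_thread(current);

        /* The corelock must outlive our context save, so ask the scheduler
         * to drop it during the switch (see run_blocking_ops() above). */
        cores[CURRENT_CORE].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[CURRENT_CORE].blk_ops.cl_p  = &obj->cl;

        switch_thread();                      /* returns once woken */
    }
    #endif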
1453
1454/*---------------------------------------------------------------------------
1455 * Runs any operations that may cause threads to be ready to run and then
1456 * sleeps the processor core until the next interrupt if none are.
1457 *---------------------------------------------------------------------------
1458 */
1459static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1460{
1461 for (;;)
1462 {
1463 set_irq_level(HIGHEST_IRQ_LEVEL);
1464 /* We want to do these ASAP as it may change the decision to sleep
1465 * the core or a core has woken because an interrupt occurred
1466 * and posted a message to a queue. */
1467 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1468 {
1469 core_perform_wakeup(IF_COP(core));
1470 }
1471
1472 /* If there are threads on a timeout and the earliest wakeup is due,
1473 * check the list and wake any threads that need to start running
1474 * again. */
1475 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1476 {
1477 check_tmo_threads();
1478 }
1479
1480 /* If there is a ready to run task, return its ID and keep core
1481 * awake. */
1482 if (cores[IF_COP_CORE(core)].running == NULL)
1483 {
1484 /* Enter sleep mode to reduce power usage - woken up on interrupt
1485 * or wakeup request from another core - expected to enable all
1486 * interrupts. */
1487 core_sleep(IF_COP(core));
1488 continue;
1489 }
1490
1491 set_irq_level(0);
1492 return cores[IF_COP_CORE(core)].running;
1493 }
1494}
1495
1496#ifdef RB_PROFILE 1829#ifdef RB_PROFILE
1497void profile_thread(void) 1830void profile_thread(void)
1498{ 1831{
@@ -1502,55 +1835,34 @@ void profile_thread(void)
1502 1835
1503/*--------------------------------------------------------------------------- 1836/*---------------------------------------------------------------------------
1504 * Prepares a thread to block on an object's list and/or for a specified 1837 * Prepares a thread to block on an object's list and/or for a specified
1505 * duration - expects object and slot to be appropriately locked if needed. 1838 * duration - expects object and slot to be appropriately locked if needed
1839 * and interrupts to be masked.
1506 *--------------------------------------------------------------------------- 1840 *---------------------------------------------------------------------------
1507 */ 1841 */
1508static inline void _block_thread_on_l(struct thread_queue *list, 1842static inline void block_thread_on_l(struct thread_entry *thread,
1509 struct thread_entry *thread, 1843 unsigned state)
1510 unsigned state
1511 IF_SWCL(, const bool nolock))
1512{ 1844{
1513 /* If inlined, unreachable branches will be pruned with no size penalty 1845 /* If inlined, unreachable branches will be pruned with no size penalty
1514 because constant params are used for state and nolock. */ 1846 because state is passed as a constant parameter. */
1515 const unsigned int core = IF_COP_CORE(thread->core); 1847 const unsigned int core = IF_COP_CORE(thread->core);
1516 1848
1517 /* Remove the thread from the list of running threads. */ 1849 /* Remove the thread from the list of running threads. */
1850 RTR_LOCK(core);
1518 remove_from_list_l(&cores[core].running, thread); 1851 remove_from_list_l(&cores[core].running, thread);
1852 rtr_subtract_entry(core, thread->priority);
1853 RTR_UNLOCK(core);
1519 1854
1520 /* Add a timeout to the block if not infinite */ 1855 /* Add a timeout to the block if not infinite */
1521 switch (state) 1856 switch (state)
1522 { 1857 {
1523 case STATE_BLOCKED: 1858 case STATE_BLOCKED:
1524 /* Put the thread into a new list of inactive threads. */
1525#if CONFIG_CORELOCK == SW_CORELOCK
1526 if (nolock)
1527 {
1528 thread->bqp = NULL; /* Indicate nolock list */
1529 thread->bqnlp = (struct thread_entry **)list;
1530 add_to_list_l((struct thread_entry **)list, thread);
1531 }
1532 else
1533#endif
1534 {
1535 thread->bqp = list;
1536 add_to_list_l_locked(list, thread);
1537 }
1538 break;
1539 case STATE_BLOCKED_W_TMO: 1859 case STATE_BLOCKED_W_TMO:
1540 /* Put the thread into a new list of inactive threads. */ 1860 /* Put the thread into a new list of inactive threads. */
1541#if CONFIG_CORELOCK == SW_CORELOCK 1861 add_to_list_l(thread->bqp, thread);
1542 if (nolock) 1862
1543 { 1863 if (state == STATE_BLOCKED)
1544 thread->bqp = NULL; /* Indicate nolock list */ 1864 break;
1545 thread->bqnlp = (struct thread_entry **)list; 1865
1546 add_to_list_l((struct thread_entry **)list, thread);
1547 }
1548 else
1549#endif
1550 {
1551 thread->bqp = list;
1552 add_to_list_l_locked(list, thread);
1553 }
1554 /* Fall-through */ 1866 /* Fall-through */
1555 case STATE_SLEEPING: 1867 case STATE_SLEEPING:
1556 /* If this thread times out sooner than any other thread, update 1868 /* If this thread times out sooner than any other thread, update
@@ -1568,35 +1880,11 @@ static inline void _block_thread_on_l(struct thread_queue *list,
1568 break; 1880 break;
1569 } 1881 }
1570 1882
1571#ifdef HAVE_PRIORITY_SCHEDULING 1883 /* Remember the next thread about to block. */
1572 /* Reset priorities */ 1884 cores[core].block_task = thread;
1573 if (thread->priority == cores[core].highest_priority)
1574 cores[core].highest_priority = LOWEST_PRIORITY;
1575#endif
1576 1885
1577#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK 1886 /* Report new state. */
1578 /* Safe to set state now */
1579 thread->state = state; 1887 thread->state = state;
1580#elif CONFIG_CORELOCK == CORELOCK_SWAP
1581 cores[core].blk_ops.state = state;
1582#endif
1583
1584#if NUM_CORES > 1
1585 /* Delay slot unlock until task switch */
1586 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1587#endif
1588}
1589
1590static inline void block_thread_on_l(
1591 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1592{
1593 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1594}
1595
1596static inline void block_thread_on_l_no_listlock(
1597 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1598{
1599 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1600} 1888}
1601 1889
1602/*--------------------------------------------------------------------------- 1890/*---------------------------------------------------------------------------
@@ -1607,72 +1895,134 @@ static inline void block_thread_on_l_no_listlock(
1607 * INTERNAL: Intended for use by kernel and not for programs. 1895 * INTERNAL: Intended for use by kernel and not for programs.
1608 *--------------------------------------------------------------------------- 1896 *---------------------------------------------------------------------------
1609 */ 1897 */
1610void switch_thread(struct thread_entry *old) 1898void switch_thread(void)
1611{ 1899{
1612 const unsigned int core = CURRENT_CORE; 1900 const unsigned int core = CURRENT_CORE;
1901 struct thread_entry *block = cores[core].block_task;
1613 struct thread_entry *thread = cores[core].running; 1902 struct thread_entry *thread = cores[core].running;
1614 struct thread_entry *block = old;
1615 1903
1616 if (block == NULL) 1904 /* Get context to save - next thread to run is unknown until all wakeups
1617 old = thread; 1905 * are evaluated */
1906 if (block != NULL)
1907 {
1908 cores[core].block_task = NULL;
1909
1910#if NUM_CORES > 1
1911 if (thread == block)
1912 {
1913 /* This was the last thread running and another core woke us before
1914 * reaching here. Force next thread selection to give tmo threads or
1915 * other threads woken before this block a first chance. */
1916 block = NULL;
1917 }
1918 else
1919#endif
1920 {
1921 /* Blocking task is the old one */
1922 thread = block;
1923 }
1924 }
1618 1925
1619#ifdef RB_PROFILE 1926#ifdef RB_PROFILE
1620 profile_thread_stopped(old - threads); 1927 profile_thread_stopped(thread - threads);
1621#endif 1928#endif
1622 1929
1623 /* Begin task switching by saving our current context so that we can 1930 /* Begin task switching by saving our current context so that we can
1624 * restore the state of the current thread later to the point prior 1931 * restore the state of the current thread later to the point prior
1625 * to this call. */ 1932 * to this call. */
1626 store_context(&old->context); 1933 store_context(&thread->context);
1627 1934
1628 /* Check if the current thread stack is overflown */ 1935 /* Check if the current thread stack is overflown */
1629 if(((unsigned int *)old->stack)[0] != DEADBEEF) 1936 if (thread->stack[0] != DEADBEEF)
1630 thread_stkov(old); 1937 thread_stkov(thread);
1631 1938
1632#if NUM_CORES > 1 1939#if NUM_CORES > 1
1633 /* Run any blocking operations requested before switching/sleeping */ 1940 /* Run any blocking operations requested before switching/sleeping */
1634 run_blocking_ops(core, old); 1941 run_blocking_ops(core, thread);
1635#endif 1942#endif
1636 1943
1637 /* Go through the list of sleeping task to check if we need to wake up
1638 * any of them due to timeout. Also puts core into sleep state until
1639 * there is at least one running process again. */
1640 thread = sleep_core(IF_COP(core));
1641
1642#ifdef HAVE_PRIORITY_SCHEDULING 1944#ifdef HAVE_PRIORITY_SCHEDULING
1643 /* Select the new task based on priorities and the last time a process 1945 /* Reset the value of thread's skip count */
1644 * got CPU time. */ 1946 thread->skip_count = 0;
1645 if (block == NULL) 1947#endif
1646 thread = thread->l.next;
1647 1948
1648 for (;;) 1949 for (;;)
1649 { 1950 {
1650 int priority = thread->priority; 1951 /* If there are threads on a timeout and the earliest wakeup is due,
1952 * check the list and wake any threads that need to start running
1953 * again. */
1954 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1955 {
1956 check_tmo_threads();
1957 }
1958
1959 set_irq_level(HIGHEST_IRQ_LEVEL);
1960 RTR_LOCK(core);
1651 1961
1652 if (priority < cores[core].highest_priority) 1962 thread = cores[core].running;
1653 cores[core].highest_priority = priority;
1654 1963
1655 if (priority == cores[core].highest_priority || 1964 if (thread == NULL)
1656 thread->priority_x < cores[core].highest_priority ||
1657 (current_tick - thread->last_run > priority * 8))
1658 { 1965 {
1659 cores[core].running = thread; 1966 /* Enter sleep mode to reduce power usage - woken up on interrupt
1660 break; 1967 * or wakeup request from another core - expected to enable
1968 * interrupts. */
1969 RTR_UNLOCK(core);
1970 core_sleep(IF_COP(core));
1661 } 1971 }
1972 else
1973 {
1974#ifdef HAVE_PRIORITY_SCHEDULING
1975 /* Select the new task based on priorities and the last time a
1976 * process got CPU time relative to the highest priority runnable
1977 * task. */
1978 struct priority_distribution *pd = &cores[core].rtr;
1979 int max = find_first_set_bit(pd->mask);
1662 1980
1663 thread = thread->l.next; 1981 if (block == NULL)
1664 } 1982 {
1665 1983 /* Not switching on a block, tentatively select next thread */
1666 /* Reset the value of thread's last running time to the current time. */ 1984 thread = thread->l.next;
1667 thread->last_run = current_tick; 1985 }
1986
1987 for (;;)
1988 {
1989 int priority = thread->priority;
1990 int diff;
1991
1992 /* This ridiculously simple method of aging seems to work
1993 * suspiciously well. It does tend to reward CPU hogs (under
1994 * yielding) but that's generally not desirable at all. On the
1995 * plus side, it, relatively to other threads, penalizes excess
1996 * yielding which is good if some high priority thread is
1997 * performing no useful work such as polling for a device to be
1998 * ready. Of course, aging is only employed when higher and lower
1999 * priority threads are runnable. The highest priority runnable
2000 * thread(s) are never skipped. */
2001 if (priority <= max ||
2002 (diff = priority - max, ++thread->skip_count > diff*diff))
2003 {
2004 cores[core].running = thread;
2005 break;
2006 }
2007
2008 thread = thread->l.next;
2009 }
1668#else 2010#else
1669 if (block == NULL) 2011 /* Without priority use a simple FCFS algorithm */
1670 { 2012 if (block == NULL)
1671 thread = thread->l.next; 2013 {
1672 cores[core].running = thread; 2014 /* Not switching on a block, select next thread */
1673 } 2015 thread = thread->l.next;
2016 cores[core].running = thread;
2017 }
1674#endif /* HAVE_PRIORITY_SCHEDULING */ 2018#endif /* HAVE_PRIORITY_SCHEDULING */
1675 2019
2020 RTR_UNLOCK(core);
2021 set_irq_level(0);
2022 break;
2023 }
2024 }
2025
1676 /* And finally give control to the next thread. */ 2026 /* And finally give control to the next thread. */
1677 load_context(&thread->context); 2027 load_context(&thread->context);
1678 2028
@@ -1682,314 +2032,210 @@ void switch_thread(struct thread_entry *old)
1682} 2032}
1683 2033
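The quadratic skip rule above bounds how long a lower-priority thread can be passed over: with diff = priority - max, it is skipped at most diff*diff consecutive passes before being selected, and never skipped at all when it is at the top. A stand-alone illustration of that bound:

    #include <stdio.h>

    int main(void)
    {
        int max = 2;              /* best runnable priority on the core      */
        int priority = 5;         /* candidate thread's (inherited) priority */
        int skip_count = 0;       /* reset whenever the thread actually runs */
        int passes = 0;

        for (;;)
        {
            int diff;
            passes++;
            /* Same test switch_thread() applies to each candidate */
            if (priority <= max ||
                (diff = priority - max, ++skip_count > diff * diff))
                break;            /* thread would be chosen on this pass */
        }

        /* diff = 3, so the thread is skipped 9 times and chosen on pass 10 */
        printf("chosen on pass %d\n", passes);
        return 0;
    }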
1684/*--------------------------------------------------------------------------- 2034/*---------------------------------------------------------------------------
1685 * Change the boost state of a thread boosting or unboosting the CPU 2035 * Sleeps a thread for at least a specified number of ticks with zero being
1686 * as required. Require thread slot to be locked first. 2036 * a wait until the next tick.
1687 *---------------------------------------------------------------------------
1688 */
1689static inline void boost_thread(struct thread_entry *thread, bool boost)
1690{
1691#ifdef HAVE_SCHEDULER_BOOSTCTRL
1692 if ((thread->boosted != 0) != boost)
1693 {
1694 thread->boosted = boost;
1695 cpu_boost(boost);
1696 }
1697#endif
1698 (void)thread; (void)boost;
1699}
1700
1701/*---------------------------------------------------------------------------
1702 * Sleeps a thread for a specified number of ticks and unboost the thread if
1703 * if it is boosted. If ticks is zero, it does not delay but instead switches
1704 * tasks.
1705 * 2037 *
1706 * INTERNAL: Intended for use by kernel and not for programs. 2038 * INTERNAL: Intended for use by kernel and not for programs.
1707 *--------------------------------------------------------------------------- 2039 *---------------------------------------------------------------------------
1708 */ 2040 */
1709void sleep_thread(int ticks) 2041void sleep_thread(int ticks)
1710{ 2042{
1711 /* Get the entry for the current running thread. */
1712 struct thread_entry *current = cores[CURRENT_CORE].running; 2043 struct thread_entry *current = cores[CURRENT_CORE].running;
1713 2044
1714#if NUM_CORES > 1 2045 LOCK_THREAD(current);
1715 /* Lock thread slot */
1716 GET_THREAD_STATE(current);
1717#endif
1718 2046
1719 /* Set our timeout, change lists, and finally switch threads. 2047 /* Set our timeout, remove from run list and join timeout list. */
1720 * Unlock during switch on multicore. */ 2048 current->tmo_tick = current_tick + ticks + 1;
1721 current->tmo_tick = current_tick + ticks + 1; 2048 current->tmo_tick = current_tick + ticks + 1;
1722 block_thread_on_l(NULL, current, STATE_SLEEPING); 2049 block_thread_on_l(current, STATE_SLEEPING);
1723 switch_thread(current);
1724 2050
1725 /* Our status should be STATE_RUNNING */ 2051 UNLOCK_THREAD(current);
1726 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1727 "S:R->!*R", current);
1728} 2052}
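With the switch now decoupled from the block, a public sleep() wrapper (in kernel.c, not shown in this diff) would be expected to mask interrupts, queue the block and then yield - roughly:

    /* Sketch of the expected calling pattern; the actual kernel.c wrapper
     * is not part of this file and may differ in detail. */
    void sleep(int ticks)
    {
        set_irq_level(HIGHEST_IRQ_LEVEL); /* block_thread_on_l() wants IRQs masked */
        sleep_thread(ticks);              /* set timeout, leave the run list       */
        switch_thread();                  /* actually give up the CPU              */
    }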
1729 2053
1730/*--------------------------------------------------------------------------- 2054/*---------------------------------------------------------------------------
1731 * Indefinitely block a thread on a blocking queue for explicit wakeup. 2055 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1732 * Caller with interrupt-accessible lists should disable interrupts first
1733 * and request a BOP_IRQ_LEVEL blocking operation to reset it.
1734 * 2056 *
1735 * INTERNAL: Intended for use by kernel objects and not for programs. 2057 * INTERNAL: Intended for use by kernel objects and not for programs.
1736 *--------------------------------------------------------------------------- 2058 *---------------------------------------------------------------------------
1737 */ 2059 */
1738IF_SWCL(static inline) void _block_thread(struct thread_queue *list 2060void block_thread(struct thread_entry *current)
1739 IF_SWCL(, const bool nolock))
1740{ 2061{
1741 /* Get the entry for the current running thread. */ 2062 /* Set the state to blocked and take us off of the run queue until we
1742 struct thread_entry *current = cores[CURRENT_CORE].running; 2063 * are explicitly woken */
1743 2064 LOCK_THREAD(current);
1744 /* Set the state to blocked and ask the scheduler to switch tasks,
1745 * this takes us off of the run queue until we are explicitly woken */
1746 2065
1747#if NUM_CORES > 1 2066 /* Set the list for explicit wakeup */
1748 /* Lock thread slot */ 2067 block_thread_on_l(current, STATE_BLOCKED);
1749 GET_THREAD_STATE(current);
1750#endif
1751 2068
1752#if CONFIG_CORELOCK == SW_CORELOCK 2069#ifdef HAVE_PRIORITY_SCHEDULING
1753 /* One branch optimized away during inlining */ 2070 if (current->blocker != NULL)
1754 if (nolock)
1755 { 2071 {
1756 block_thread_on_l_no_listlock((struct thread_entry **)list, 2072 /* Object supports PIP */
1757 current, STATE_BLOCKED); 2073 current = blocker_inherit_priority(current);
1758 } 2074 }
1759 else
1760#endif 2075#endif
1761 {
1762 block_thread_on_l(list, current, STATE_BLOCKED);
1763 }
1764
1765 switch_thread(current);
1766
1767 /* Our status should be STATE_RUNNING */
1768 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1769 "B:R->!*R", current);
1770}
1771
1772#if CONFIG_CORELOCK == SW_CORELOCK
1773/* Inline lock/nolock version of _block_thread into these functions */
1774void block_thread(struct thread_queue *tq)
1775{
1776 _block_thread(tq, false);
1777}
1778 2076
1779void block_thread_no_listlock(struct thread_entry **list) 2077 UNLOCK_THREAD(current);
1780{
1781 _block_thread((struct thread_queue *)list, true);
1782} 2078}
1783#endif /* CONFIG_CORELOCK */
1784 2079
1785/*--------------------------------------------------------------------------- 2080/*---------------------------------------------------------------------------
1786 * Block a thread on a blocking queue for a specified time interval or until 2081 * Block a thread on a blocking queue for a specified time interval or until
1787 * explicitly woken - whichever happens first. 2082 * explicitly woken - whichever happens first.
1788 * Caller with interrupt-accessible lists should disable interrupts first
1789 * and request that interrupt level be restored after switching out the
1790 * current thread.
1791 * 2083 *
1792 * INTERNAL: Intended for use by kernel objects and not for programs. 2084 * INTERNAL: Intended for use by kernel objects and not for programs.
1793 *--------------------------------------------------------------------------- 2085 *---------------------------------------------------------------------------
1794 */ 2086 */
1795void block_thread_w_tmo(struct thread_queue *list, int timeout) 2087void block_thread_w_tmo(struct thread_entry *current, int timeout)
1796{ 2088{
1797 /* Get the entry for the current running thread. */ 2089 /* Get the entry for the current running thread. */
1798 struct thread_entry *current = cores[CURRENT_CORE].running; 2090 LOCK_THREAD(current);
1799
1800#if NUM_CORES > 1
1801 /* Lock thread slot */
1802 GET_THREAD_STATE(current);
1803#endif
1804 2091
1805 /* Set the state to blocked with the specified timeout */ 2092 /* Set the state to blocked with the specified timeout */
1806 current->tmo_tick = current_tick + timeout; 2093 current->tmo_tick = current_tick + timeout;
2094
1807 /* Set the list for explicit wakeup */ 2095 /* Set the list for explicit wakeup */
1808 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO); 2096 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1809 2097
1810 /* Now force a task switch and block until we have been woken up 2098#ifdef HAVE_PRIORITY_SCHEDULING
1811 * by another thread or timeout is reached - whichever happens first */ 2099 if (current->blocker != NULL)
1812 switch_thread(current); 2100 {
2101 /* Object supports PIP */
2102 current = blocker_inherit_priority(current);
2103 }
2104#endif
1813 2105
1814 /* Our status should be STATE_RUNNING */ 2106 UNLOCK_THREAD(current);
1815 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1816 "T:R->!*R", current);
1817} 2107}
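A timed wait on a kernel object follows the same shape as thread_wait() further down, only with block_thread_w_tmo(); the object type and its fields below are assumptions for illustration:

    /* Hypothetical object with a corelock 'cl' and a waiter list 'queue'. */
    void my_object_wait_w_tmo(struct my_object *obj, int ticks)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        corelock_lock(&obj->cl);

        IF_COP( current->obj_cl = &obj->cl; )
        current->bqp = &obj->queue;

        set_irq_level(HIGHEST_IRQ_LEVEL);
        block_thread_w_tmo(current, ticks);

        corelock_unlock(&obj->cl);
        switch_thread();      /* returns when woken or when the timeout fires */
    }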
1818 2108
1819/*--------------------------------------------------------------------------- 2109/*---------------------------------------------------------------------------
1820 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads 2110 * Explicitly wakeup a thread on a blocking queue. Only affects threads in
1821 * that called sleep(). 2111 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1822 * Caller with interrupt-accessible lists should disable interrupts first. 2112 *
1823 * This code should be considered a critical section by the caller. 2113 * This code should be considered a critical section by the caller meaning
2114 * that the object's corelock should be held.
1824 * 2115 *
1825 * INTERNAL: Intended for use by kernel objects and not for programs. 2116 * INTERNAL: Intended for use by kernel objects and not for programs.
1826 *--------------------------------------------------------------------------- 2117 *---------------------------------------------------------------------------
1827 */ 2118 */
1828IF_SWCL(static inline) struct thread_entry * _wakeup_thread( 2119unsigned int wakeup_thread(struct thread_entry **list)
1829 struct thread_queue *list IF_SWCL(, const bool nolock))
1830{ 2120{
1831 struct thread_entry *t; 2121 struct thread_entry *thread = *list;
1832 struct thread_entry *thread; 2122 unsigned int result = THREAD_NONE;
1833 unsigned state;
1834
1835 /* Wake up the last thread first. */
1836#if CONFIG_CORELOCK == SW_CORELOCK
1837 /* One branch optimized away during inlining */
1838 if (nolock)
1839 {
1840 t = list->queue;
1841 }
1842 else
1843#endif
1844 {
1845 t = LOCK_LIST(list);
1846 }
1847 2123
1848 /* Check if there is a blocked thread at all. */ 2124 /* Check if there is a blocked thread at all. */
1849 if (t == NULL) 2125 if (thread == NULL)
1850 { 2126 return result;
1851#if CONFIG_CORELOCK == SW_CORELOCK
1852 if (!nolock)
1853#endif
1854 {
1855 UNLOCK_LIST(list, NULL);
1856 }
1857 return NULL;
1858 }
1859 2127
1860 thread = t; 2128 LOCK_THREAD(thread);
1861
1862#if NUM_CORES > 1
1863#if CONFIG_CORELOCK == SW_CORELOCK
1864 if (nolock)
1865 {
1866 /* Lock thread only, not list */
1867 state = GET_THREAD_STATE(thread);
1868 }
1869 else
1870#endif
1871 {
1872 /* This locks in reverse order from other routines so a retry in the
1873 correct order may be needed */
1874 state = TRY_GET_THREAD_STATE(thread);
1875 if (state == STATE_BUSY)
1876 {
1877 /* Unlock list and retry slot, then list */
1878 UNLOCK_LIST(list, t);
1879 state = GET_THREAD_STATE(thread);
1880 t = LOCK_LIST(list);
1881 /* Be sure thread still exists here - it couldn't have re-added
1882 itself if it was woken elsewhere because this function is
1883 serialized within the object that owns the list. */
1884 if (thread != t)
1885 {
1886 /* Thread disappeared :( */
1887 UNLOCK_LIST(list, t);
1888 UNLOCK_THREAD(thread, state);
1889 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1890 }
1891 }
1892 }
1893#else /* NUM_CORES == 1 */
1894 state = GET_THREAD_STATE(thread);
1895#endif /* NUM_CORES */
1896 2129
1897 /* Determine thread's current state. */ 2130 /* Determine thread's current state. */
1898 switch (state) 2131 switch (thread->state)
1899 { 2132 {
1900 case STATE_BLOCKED: 2133 case STATE_BLOCKED:
1901 case STATE_BLOCKED_W_TMO: 2134 case STATE_BLOCKED_W_TMO:
1902 /* Remove thread from object's blocked list - select t or list depending 2135 remove_from_list_l(list, thread);
1903 on locking type at compile time */ 2136
1904 REMOVE_FROM_LIST_L_SELECT(t, list, thread); 2137 result = THREAD_OK;
1905#if CONFIG_CORELOCK == SW_CORELOCK 2138
1906 /* Statement optimized away during inlining if nolock != false */ 2139#ifdef HAVE_PRIORITY_SCHEDULING
1907 if (!nolock) 2140 struct thread_entry *current;
1908#endif 2141 struct blocker *bl = thread->blocker;
2142
2143 if (bl == NULL)
1909 { 2144 {
1910 UNLOCK_LIST(list, t); /* Unlock list - removal complete */ 2145 /* No inheritance - just boost the thread by aging */
2146 thread->skip_count = thread->priority;
2147 current = cores[CURRENT_CORE].running;
2148 }
2149 else
2150 {
2151 /* Call the specified unblocking PIP */
2152 current = bl->wakeup_protocol(thread);
1911 } 2153 }
1912 2154
1913#ifdef HAVE_PRIORITY_SCHEDULING 2155 if (current != NULL && thread->priority < current->priority
1914 /* Give the task a kick to avoid a stall after wakeup. 2156 IF_COP( && thread->core == current->core ))
1915 Not really proper treatment - TODO later. */ 2157 {
1916 thread->last_run = current_tick - 8*LOWEST_PRIORITY; 2158 /* Woken thread is higher priority and exists on the same CPU core;
1917#endif 2159 * recommend a task switch. Knowing if this is an interrupt call
2160 * would be helpful here. */
2161 result |= THREAD_SWITCH;
2162 }
2163#endif /* HAVE_PRIORITY_SCHEDULING */
2164
1918 core_schedule_wakeup(thread); 2165 core_schedule_wakeup(thread);
1919 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); 2166 break;
1920 return thread; 2167
1921 default: 2168 /* Nothing to do. State is not blocked. */
1922 /* Nothing to do. State is not blocked. */
1923#if THREAD_EXTRA_CHECKS 2169#if THREAD_EXTRA_CHECKS
2170 default:
1924 THREAD_PANICF("wakeup_thread->block invalid", thread); 2171 THREAD_PANICF("wakeup_thread->block invalid", thread);
1925 case STATE_RUNNING: 2172 case STATE_RUNNING:
1926 case STATE_KILLED: 2173 case STATE_KILLED:
2174 break;
1927#endif 2175#endif
1928#if CONFIG_CORELOCK == SW_CORELOCK
1929 /* Statement optimized away during inlining if nolock != false */
1930 if (!nolock)
1931#endif
1932 {
1933 UNLOCK_LIST(list, t); /* Unlock the object's list */
1934 }
1935 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1936 return NULL;
1937 } 2176 }
1938}
1939 2177
1940#if CONFIG_CORELOCK == SW_CORELOCK 2178 UNLOCK_THREAD(thread);
1941/* Inline lock/nolock version of _wakeup_thread into these functions */ 2179 return result;
1942struct thread_entry * wakeup_thread(struct thread_queue *tq)
1943{
1944 return _wakeup_thread(tq, false);
1945} 2180}
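A caller of wakeup_thread() is expected to hold the object's corelock (and typically have interrupts masked) and can use the THREAD_SWITCH bit to decide whether to yield immediately; a sketch with a hypothetical object type:

    void my_object_signal(struct my_object *obj)      /* hypothetical */
    {
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&obj->cl);                      /* required by wakeup_thread() */

        unsigned int result = wakeup_thread(&obj->queue);

        corelock_unlock(&obj->cl);
        set_irq_level(oldlevel);

        if (result & THREAD_SWITCH)
            switch_thread();   /* a higher-priority thread just became runnable */
    }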
1946 2181
1947struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list) 2182/*---------------------------------------------------------------------------
2183 * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
2184 * from each operation or THREAD_NONE of nothing was awakened. Object owning
2185 * the queue must be locked first.
2186 *
2187 * INTERNAL: Intended for use by kernel objects and not for programs.
2188 *---------------------------------------------------------------------------
2189 */
2190unsigned int thread_queue_wake(struct thread_entry **list)
1948{ 2191{
1949 return _wakeup_thread((struct thread_queue *)list, true); 2192 unsigned result = THREAD_NONE;
2193
2194 for (;;)
2195 {
2196 unsigned int rc = wakeup_thread(list);
2197
2198 if (rc == THREAD_NONE)
2199 break; /* No more threads */
2200
2201 result |= rc;
2202 }
2203
2204 return result;
1950} 2205}
1951#endif /* CONFIG_CORELOCK */
1952 2206
1953/*--------------------------------------------------------------------------- 2207/*---------------------------------------------------------------------------
1954 * Find an empty thread slot or MAXTHREADS if none found. The slot returned 2208 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1955 * will be locked on multicore. 2209 * will be locked on multicore.
1956 *--------------------------------------------------------------------------- 2210 *---------------------------------------------------------------------------
1957 */ 2211 */
1958static int find_empty_thread_slot(void) 2212static struct thread_entry * find_empty_thread_slot(void)
1959{ 2213{
1960#if NUM_CORES > 1 2214 /* Any slot could be on an interrupt-accessible list */
1961 /* Any slot could be on an IRQ-accessible list */ 2215 IF_COP( int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); )
1962 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2216 struct thread_entry *thread = NULL;
1963#endif
1964 /* Thread slots are not locked on single core */
1965
1966 int n; 2217 int n;
1967 2218
1968 for (n = 0; n < MAXTHREADS; n++) 2219 for (n = 0; n < MAXTHREADS; n++)
1969 { 2220 {
1970 /* Obtain current slot state - lock it on multicore */ 2221 /* Obtain current slot state - lock it on multicore */
1971 unsigned state = GET_THREAD_STATE(&threads[n]); 2222 struct thread_entry *t = &threads[n];
2223 LOCK_THREAD(t);
1972 2224
1973 if (state == STATE_KILLED 2225 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1974#if NUM_CORES > 1
1975 && threads[n].name != THREAD_DESTRUCT
1976#endif
1977 )
1978 { 2226 {
1979 /* Slot is empty - leave it locked and caller will unlock */ 2227 /* Slot is empty - leave it locked and caller will unlock */
2228 thread = t;
1980 break; 2229 break;
1981 } 2230 }
1982 2231
1983 /* Finished examining slot - no longer busy - unlock on multicore */ 2232 /* Finished examining slot - no longer busy - unlock on multicore */
1984 UNLOCK_THREAD(&threads[n], state); 2233 UNLOCK_THREAD(t);
1985 } 2234 }
1986 2235
1987#if NUM_CORES > 1 2236 IF_COP( set_irq_level(oldlevel); ) /* Reenable interrupts - this slot is
1988 set_irq_level(oldlevel); /* Reenable interrupts - this slot is 2237 not accessible to them yet */
1989 not accessible to them yet */ 2238 return thread;
1990#endif
1991
1992 return n;
1993} 2239}
1994 2240
1995 2241
@@ -2000,65 +2246,68 @@ static int find_empty_thread_slot(void)
2000 */ 2246 */
2001void core_idle(void) 2247void core_idle(void)
2002{ 2248{
2003#if NUM_CORES > 1 2249 IF_COP( const unsigned int core = CURRENT_CORE; )
2004 const unsigned int core = CURRENT_CORE;
2005#endif
2006 set_irq_level(HIGHEST_IRQ_LEVEL); 2250 set_irq_level(HIGHEST_IRQ_LEVEL);
2007 core_sleep(IF_COP(core)); 2251 core_sleep(IF_COP(core));
2008} 2252}
2009 2253
2010/*--------------------------------------------------------------------------- 2254/*---------------------------------------------------------------------------
2011 * Create a thread 2255 * Create a thread. If using a dual core architecture, specify which core to
2012 * If using a dual core architecture, specify which core to start the thread 2256 * start the thread on.
2013 * on, and whether to fall back to the other core if it can't be created 2257 *
2014 * Return ID if context area could be allocated, else NULL. 2258 * Return ID if context area could be allocated, else NULL.
2015 *--------------------------------------------------------------------------- 2259 *---------------------------------------------------------------------------
2016 */ 2260 */
2017struct thread_entry* 2261struct thread_entry*
2018 create_thread(void (*function)(void), void* stack, int stack_size, 2262 create_thread(void (*function)(void), void* stack, size_t stack_size,
2019 unsigned flags, const char *name 2263 unsigned flags, const char *name
2020 IF_PRIO(, int priority) 2264 IF_PRIO(, int priority)
2021 IF_COP(, unsigned int core)) 2265 IF_COP(, unsigned int core))
2022{ 2266{
2023 unsigned int i; 2267 unsigned int i;
2024 unsigned int stacklen; 2268 unsigned int stack_words;
2025 unsigned int *stackptr; 2269 uintptr_t stackptr, stackend;
2026 int slot;
2027 struct thread_entry *thread; 2270 struct thread_entry *thread;
2028 unsigned state; 2271 unsigned state;
2272 int oldlevel;
2029 2273
2030 slot = find_empty_thread_slot(); 2274 thread = find_empty_thread_slot();
2031 if (slot >= MAXTHREADS) 2275 if (thread == NULL)
2032 { 2276 {
2033 return NULL; 2277 return NULL;
2034 } 2278 }
2035 2279
2280 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2281
2036 /* Munge the stack to make it easy to spot stack overflows */ 2282 /* Munge the stack to make it easy to spot stack overflows */
2037 stacklen = stack_size / sizeof(int); 2283 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
2038 stackptr = stack; 2284 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
2039 for(i = 0;i < stacklen;i++) 2285 stack_size = stackend - stackptr;
2286 stack_words = stack_size / sizeof (uintptr_t);
2287
2288 for (i = 0; i < stack_words; i++)
2040 { 2289 {
2041 stackptr[i] = DEADBEEF; 2290 ((uintptr_t *)stackptr)[i] = DEADBEEF;
2042 } 2291 }
2043 2292
2044 /* Store interesting information */ 2293 /* Store interesting information */
2045 thread = &threads[slot];
2046 thread->name = name; 2294 thread->name = name;
2047 thread->stack = stack; 2295 thread->stack = (uintptr_t *)stackptr;
2048 thread->stack_size = stack_size; 2296 thread->stack_size = stack_size;
2049 thread->bqp = NULL;
2050#if CONFIG_CORELOCK == SW_CORELOCK
2051 thread->bqnlp = NULL;
2052#endif
2053 thread->queue = NULL; 2297 thread->queue = NULL;
2298#ifdef HAVE_WAKEUP_EXT_CB
2299 thread->wakeup_ext_cb = NULL;
2300#endif
2054#ifdef HAVE_SCHEDULER_BOOSTCTRL 2301#ifdef HAVE_SCHEDULER_BOOSTCTRL
2055 thread->boosted = 0; 2302 thread->cpu_boost = 0;
2056#endif 2303#endif
2057#ifdef HAVE_PRIORITY_SCHEDULING 2304#ifdef HAVE_PRIORITY_SCHEDULING
2058 thread->priority_x = LOWEST_PRIORITY; 2305 memset(&thread->pdist, 0, sizeof(thread->pdist));
2306 thread->blocker = NULL;
2307 thread->base_priority = priority;
2059 thread->priority = priority; 2308 thread->priority = priority;
2060 thread->last_run = current_tick - priority * 8; 2309 thread->skip_count = priority;
2061 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY; 2310 prio_add_entry(&thread->pdist, priority);
2062#endif 2311#endif
2063 2312
2064#if NUM_CORES > 1 2313#if NUM_CORES > 1
@@ -2077,70 +2326,160 @@ struct thread_entry*
2077 state = (flags & CREATE_THREAD_FROZEN) ? 2326 state = (flags & CREATE_THREAD_FROZEN) ?
2078 STATE_FROZEN : STATE_RUNNING; 2327 STATE_FROZEN : STATE_RUNNING;
2079 2328
2080 /* Align stack to an even 32 bit boundary */ 2329 thread->context.sp = (typeof (thread->context.sp))stackend;
2081 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
2082 2330
2083 /* Load the thread's context structure with needed startup information */ 2331 /* Load the thread's context structure with needed startup information */
2084 THREAD_STARTUP_INIT(core, thread, function); 2332 THREAD_STARTUP_INIT(core, thread, function);
2085 2333
2334 thread->state = state;
2335
2086 if (state == STATE_RUNNING) 2336 if (state == STATE_RUNNING)
2087 { 2337 core_schedule_wakeup(thread);
2088#if NUM_CORES > 1 2338
2089 if (core != CURRENT_CORE) 2339 UNLOCK_THREAD(thread);
2090 { 2340
2091 /* Next task switch on other core moves thread to running list */ 2341 set_irq_level(oldlevel);
2092 core_schedule_wakeup(thread);
2093 }
2094 else
2095#endif
2096 {
2097 /* Place on running list immediately */
2098 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2099 }
2100 }
2101 2342
2102 /* remove lock and set state */
2103 UNLOCK_THREAD_SET_STATE(thread, state);
2104
2105 return thread; 2343 return thread;
2106} 2344}
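An example call against the updated signature; the stack buffer, the DEFAULT_STACK_SIZE / PRIORITY_BACKGROUND constants and the thread body are the usual Rockbox idioms rather than anything defined in this diff:

    static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void demo_thread(void)
    {
        for (;;)
            sleep(HZ);        /* do periodic work here */
    }

    void demo_init(void)
    {
        struct thread_entry *t =
            create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                          0, "demo"
                          IF_PRIO(, PRIORITY_BACKGROUND)
                          IF_COP(, CPU));
        if (t == NULL)
            panicf("demo: create_thread failed");
    }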
2107 2345
2108#ifdef HAVE_SCHEDULER_BOOSTCTRL 2346#ifdef HAVE_SCHEDULER_BOOSTCTRL
2347/*---------------------------------------------------------------------------
2348 * Change the boost state of a thread boosting or unboosting the CPU
2349 * as required.
2350 *---------------------------------------------------------------------------
2351 */
2352static inline void boost_thread(struct thread_entry *thread, bool boost)
2353{
2354 if ((thread->cpu_boost != 0) != boost)
2355 {
2356 thread->cpu_boost = boost;
2357 cpu_boost(boost);
2358 }
2359}
2360
2109void trigger_cpu_boost(void) 2361void trigger_cpu_boost(void)
2110{ 2362{
2111 /* No IRQ disable necessary since the current thread cannot be blocked
2112 on an IRQ-accessible list */
2113 struct thread_entry *current = cores[CURRENT_CORE].running; 2363 struct thread_entry *current = cores[CURRENT_CORE].running;
2114 unsigned state;
2115
2116 state = GET_THREAD_STATE(current);
2117 boost_thread(current, true); 2364 boost_thread(current, true);
2118 UNLOCK_THREAD(current, state);
2119
2120 (void)state;
2121} 2365}
2122 2366
2123void cancel_cpu_boost(void) 2367void cancel_cpu_boost(void)
2124{ 2368{
2125 struct thread_entry *current = cores[CURRENT_CORE].running; 2369 struct thread_entry *current = cores[CURRENT_CORE].running;
2126 unsigned state;
2127
2128 state = GET_THREAD_STATE(current);
2129 boost_thread(current, false); 2370 boost_thread(current, false);
2130 UNLOCK_THREAD(current, state);
2131
2132 (void)state;
2133} 2371}
2134#endif /* HAVE_SCHEDULER_BOOSTCTRL */ 2372#endif /* HAVE_SCHEDULER_BOOSTCTRL */
2135 2373
2136/*--------------------------------------------------------------------------- 2374/*---------------------------------------------------------------------------
2137 * Remove a thread from the scheduler. 2375 * Block the current thread until another thread terminates. A thread may
2376 * wait on itself to terminate, which prevents it from running again; it
2377 * will then need to be killed externally.
2378 * Parameter is the ID as returned from create_thread().
2379 *---------------------------------------------------------------------------
2380 */
2381void thread_wait(struct thread_entry *thread)
2382{
2383 struct thread_entry *current = cores[CURRENT_CORE].running;
2384
2385 if (thread == NULL)
2386 thread = current;
2387
2388 /* Lock thread-as-waitable-object lock */
2389 corelock_lock(&thread->waiter_cl);
2390
2391 /* Be sure it hasn't been killed yet */
2392 if (thread->state != STATE_KILLED)
2393 {
2394 IF_COP( current->obj_cl = &thread->waiter_cl; )
2395 current->bqp = &thread->queue;
2396
2397 set_irq_level(HIGHEST_IRQ_LEVEL);
2398 block_thread(current);
2399
2400 corelock_unlock(&thread->waiter_cl);
2401
2402 switch_thread();
2403 return;
2404 }
2405
2406 corelock_unlock(&thread->waiter_cl);
2407}
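thread_wait() gives a simple join primitive; for instance (names and worker setup hypothetical):

    /* Spawn a worker and block until it calls thread_exit(). */
    void run_and_join(void (*worker)(void), void *stack, size_t stack_size)
    {
        struct thread_entry *t =
            create_thread(worker, stack, stack_size, 0, "worker"
                          IF_PRIO(, PRIORITY_BACKGROUND)
                          IF_COP(, CPU));
        if (t != NULL)
            thread_wait(t);   /* returns after the worker has exited */
    }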
2408
2409/*---------------------------------------------------------------------------
2410 * Exit the current thread. The Right Way to Do Things (TM).
2411 *---------------------------------------------------------------------------
2412 */
2413void thread_exit(void)
2414{
2415 const unsigned int core = CURRENT_CORE;
2416 struct thread_entry *current = cores[core].running;
2417
2418 /* Cancel CPU boost if any */
2419 cancel_cpu_boost();
2420
2421 set_irq_level(HIGHEST_IRQ_LEVEL);
2422
2423 corelock_lock(&current->waiter_cl);
2424 LOCK_THREAD(current);
2425
2426#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
2427 if (current->name == THREAD_DESTRUCT)
2428 {
2429 /* Thread being killed - become a waiter */
2430 UNLOCK_THREAD(current);
2431 corelock_unlock(&current->waiter_cl);
2432 thread_wait(current);
2433 THREAD_PANICF("thread_exit->WK:*R", current);
2434 }
2435#endif
2436
2437#ifdef HAVE_PRIORITY_SCHEDULING
2438 check_for_obj_waiters("thread_exit", current);
2439#endif
2440
2441 if (current->tmo.prev != NULL)
2442 {
2443 /* Cancel pending timeout list removal */
2444 remove_from_list_tmo(current);
2445 }
2446
2447 /* Switch tasks and never return */
2448 block_thread_on_l(current, STATE_KILLED);
2449
2450#if NUM_CORES > 1
2451 /* Switch to the idle stack if not on the main core (where "main"
2452 * runs) - we can hope gcc doesn't need the old stack beyond this
2453 * point. */
2454 if (core != CPU)
2455 {
2456 switch_to_idle_stack(core);
2457 }
2458
2459 flush_icache();
2460#endif
2461 current->name = NULL;
2462
2463 /* Signal this thread */
2464 thread_queue_wake(&current->queue);
2465 corelock_unlock(&current->waiter_cl);
2466 /* Slot must be unusable until thread is really gone */
2467 UNLOCK_THREAD_AT_TASK_SWITCH(current);
2468 switch_thread();
2469 /* This should never and must never be reached - if it is, the
2470 * state is corrupted */
2471 THREAD_PANICF("thread_exit->K:*R", current);
2472}
2473
2474#ifdef ALLOW_REMOVE_THREAD
2475/*---------------------------------------------------------------------------
2476 * Remove a thread from the scheduler. Not The Right Way to Do Things in
2477 * normal programs.
2478 *
2138 * Parameter is the ID as returned from create_thread(). 2479 * Parameter is the ID as returned from create_thread().
2139 * 2480 *
2140 * Use with care on threads that are not under careful control as this may 2481 * Use with care on threads that are not under careful control as this may
2141 * leave various objects in an undefined state. When trying to kill a thread 2482 * leave various objects in an undefined state.
2142 * on another processor, be sure you know what it's doing and won't be
2143 * switching around itself.
2144 *--------------------------------------------------------------------------- 2483 *---------------------------------------------------------------------------
2145 */ 2484 */
2146void remove_thread(struct thread_entry *thread) 2485void remove_thread(struct thread_entry *thread)
@@ -2149,17 +2488,27 @@ void remove_thread(struct thread_entry *thread)
2149 /* core is not constant here because of core switching */ 2488 /* core is not constant here because of core switching */
2150 unsigned int core = CURRENT_CORE; 2489 unsigned int core = CURRENT_CORE;
2151 unsigned int old_core = NUM_CORES; 2490 unsigned int old_core = NUM_CORES;
2491 struct corelock *ocl = NULL;
2152#else 2492#else
2153 const unsigned int core = CURRENT_CORE; 2493 const unsigned int core = CURRENT_CORE;
2154#endif 2494#endif
2495 struct thread_entry *current = cores[core].running;
2496
2155 unsigned state; 2497 unsigned state;
2156 int oldlevel; 2498 int oldlevel;
2157 2499
2158 if (thread == NULL) 2500 if (thread == NULL)
2159 thread = cores[core].running; 2501 thread = current;
2502
2503 if (thread == current)
2504 thread_exit(); /* Current thread - do normal exit */
2160 2505
2161 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2506 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2162 state = GET_THREAD_STATE(thread); 2507
2508 corelock_lock(&thread->waiter_cl);
2509 LOCK_THREAD(thread);
2510
2511 state = thread->state;
2163 2512
2164 if (state == STATE_KILLED) 2513 if (state == STATE_KILLED)
2165 { 2514 {
@@ -2167,50 +2516,49 @@ void remove_thread(struct thread_entry *thread)
2167 } 2516 }
2168 2517
2169#if NUM_CORES > 1 2518#if NUM_CORES > 1
2519 if (thread->name == THREAD_DESTRUCT)
2520 {
2521 /* Thread being killed - become a waiter */
2522 UNLOCK_THREAD(thread);
2523 corelock_unlock(&thread->waiter_cl);
2524 set_irq_level(oldlevel);
2525 thread_wait(thread);
2526 return;
2527 }
2528
2529 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2530
2531#ifdef HAVE_PRIORITY_SCHEDULING
2532 check_for_obj_waiters("remove_thread", thread);
2533#endif
2534
2170 if (thread->core != core) 2535 if (thread->core != core)
2171 { 2536 {
2172 /* Switch cores and safely extract the thread there */ 2537 /* Switch cores and safely extract the thread there */
2173 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock 2538 /* Slot HAS to be unlocked or a deadlock could occur which means other
2174 condition if the thread runs away to another processor. */ 2539 * threads have to be guided into becoming thread waiters if they
2540 * attempt to remove it. */
2175 unsigned int new_core = thread->core; 2541 unsigned int new_core = thread->core;
2176 const char *old_name = thread->name;
2177 2542
2178 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */ 2543 corelock_unlock(&thread->waiter_cl);
2179 UNLOCK_THREAD(thread, state); 2544
2545 UNLOCK_THREAD(thread);
2180 set_irq_level(oldlevel); 2546 set_irq_level(oldlevel);
2181 2547
2182 old_core = switch_core(new_core); 2548 old_core = switch_core(new_core);
2183 2549
2184 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2550 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2185 state = GET_THREAD_STATE(thread);
2186
2187 core = new_core;
2188
2189 if (state == STATE_KILLED)
2190 {
2191 /* Thread suicided before we could kill it */
2192 goto thread_killed;
2193 }
2194
2195 /* Reopen slot - it's locked again anyway */
2196 thread->name = old_name;
2197 2551
2198 if (thread->core != core) 2552 corelock_lock(&thread->waiter_cl);
2199 { 2553 LOCK_THREAD(thread);
2200 /* We won't play thread tag - just forget it */
2201 UNLOCK_THREAD(thread, state);
2202 set_irq_level(oldlevel);
2203 goto thread_kill_abort;
2204 }
2205 2554
2555 state = thread->state;
2556 core = new_core;
2206 /* Perform the extraction and switch ourselves back to the original 2557 /* Perform the extraction and switch ourselves back to the original
2207 processor */ 2558 processor */
2208 } 2559 }
2209#endif /* NUM_CORES > 1 */ 2560#endif /* NUM_CORES > 1 */
2210 2561
2211#ifdef HAVE_PRIORITY_SCHEDULING
2212 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2213#endif
2214 if (thread->tmo.prev != NULL) 2562 if (thread->tmo.prev != NULL)
2215 { 2563 {
2216 /* Clean thread off the timeout list if a timeout check hasn't 2564 /* Clean thread off the timeout list if a timeout check hasn't
@@ -2218,87 +2566,86 @@ void remove_thread(struct thread_entry *thread)
2218 remove_from_list_tmo(thread); 2566 remove_from_list_tmo(thread);
2219 } 2567 }
2220 2568
2569#ifdef HAVE_SCHEDULER_BOOSTCTRL
2570 /* Cancel CPU boost if any */
2221 boost_thread(thread, false); 2571 boost_thread(thread, false);
2222
2223 if (thread == cores[core].running)
2224 {
2225 /* Suicide - thread has unconditional rights to do this */
2226 /* Maintain locks until switch-out */
2227 block_thread_on_l(NULL, thread, STATE_KILLED);
2228
2229#if NUM_CORES > 1
2230 /* Switch to the idle stack if not on the main core (where "main"
2231 * runs) */
2232 if (core != CPU)
2233 {
2234 switch_to_idle_stack(core);
2235 }
2236
2237 flush_icache();
2238#endif 2572#endif
2239 /* Signal this thread */
2240 thread_queue_wake_no_listlock(&thread->queue);
2241 /* Switch tasks and never return */
2242 switch_thread(thread);
2243 /* This should never and must never be reached - if it is, the
2244 * state is corrupted */
2245 THREAD_PANICF("remove_thread->K:*R", thread);
2246 }
2247 2573
2248#if NUM_CORES > 1 2574IF_COP( retry_state: )
2249 if (thread->name == THREAD_DESTRUCT)
2250 {
2251 /* Another core is doing this operation already */
2252 UNLOCK_THREAD(thread, state);
2253 set_irq_level(oldlevel);
2254 return;
2255 }
2256#endif
2257 if (cores[core].waking.queue != NULL)
2258 {
2259 /* Get any threads off the waking list and onto the running
2260 * list first - waking and running cannot be distinguished by
2261 * state */
2262 core_perform_wakeup(IF_COP(core));
2263 }
2264 2575
2265 switch (state) 2576 switch (state)
2266 { 2577 {
2267 case STATE_RUNNING: 2578 case STATE_RUNNING:
2579 RTR_LOCK(core);
2268 /* Remove thread from ready to run tasks */ 2580 /* Remove thread from ready to run tasks */
2269 remove_from_list_l(&cores[core].running, thread); 2581 remove_from_list_l(&cores[core].running, thread);
2582 rtr_subtract_entry(core, thread->priority);
2583 RTR_UNLOCK(core);
2270 break; 2584 break;
2271 case STATE_BLOCKED: 2585 case STATE_BLOCKED:
2272 case STATE_BLOCKED_W_TMO: 2586 case STATE_BLOCKED_W_TMO:
2273 /* Remove thread from the queue it's blocked on - including its 2587 /* Remove thread from the queue it's blocked on - including its
2274 * own if waiting there */ 2588 * own if waiting there */
2275#if CONFIG_CORELOCK == SW_CORELOCK 2589#if NUM_CORES > 1
2276 /* One or the other will be valid */ 2590 if (&thread->waiter_cl != thread->obj_cl)
2277 if (thread->bqp == NULL)
2278 { 2591 {
2279 remove_from_list_l(thread->bqnlp, thread); 2592 ocl = thread->obj_cl;
2593
2594 if (corelock_try_lock(ocl) == 0)
2595 {
2596 UNLOCK_THREAD(thread);
2597 corelock_lock(ocl);
2598 LOCK_THREAD(thread);
2599
2600 if (thread->state != state)
2601 {
2602 /* Something woke the thread */
2603 state = thread->state;
2604 corelock_unlock(ocl);
2605 goto retry_state;
2606 }
2607 }
2280 } 2608 }
2281 else 2609#endif
2282#endif /* CONFIG_CORELOCK */ 2610 remove_from_list_l(thread->bqp, thread);
2611
2612#ifdef HAVE_WAKEUP_EXT_CB
2613 if (thread->wakeup_ext_cb != NULL)
2614 thread->wakeup_ext_cb(thread);
2615#endif
2616
2617#ifdef HAVE_PRIORITY_SCHEDULING
2618 if (thread->blocker != NULL)
2283 { 2619 {
2284 remove_from_list_l_locked(thread->bqp, thread); 2620 /* Remove thread's priority influence from its chain */
2621 wakeup_priority_protocol_release(thread);
2285 } 2622 }
2623#endif
2624
2625#if NUM_CORES > 1
2626 if (ocl != NULL)
2627 corelock_unlock(ocl);
2628#endif
2286 break; 2629 break;
2287 /* Otherwise thread is killed or is frozen and hasn't run yet */ 2630 /* Otherwise thread is frozen and hasn't run yet */
2288 } 2631 }
2289 2632
2633 thread->state = STATE_KILLED;
2634
2290 /* If thread was waiting on itself, it will have been removed above. 2635 /* If thread was waiting on itself, it will have been removed above.
2291 * The wrong order would result in waking the thread first and deadlocking 2636 * The wrong order would result in waking the thread first and deadlocking
2292 * since the slot is already locked. */ 2637 * since the slot is already locked. */
2293 thread_queue_wake_no_listlock(&thread->queue); 2638 thread_queue_wake(&thread->queue);
2639
2640 thread->name = NULL;
2294 2641
2295thread_killed: /* Thread was already killed */ 2642thread_killed: /* Thread was already killed */
2296 /* Removal complete - safe to unlock state and reenable interrupts */ 2643 /* Removal complete - safe to unlock and reenable interrupts */
2297 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED); 2644 corelock_unlock(&thread->waiter_cl);
2645 UNLOCK_THREAD(thread);
2298 set_irq_level(oldlevel); 2646 set_irq_level(oldlevel);
2299 2647
2300#if NUM_CORES > 1 2648#if NUM_CORES > 1
2301thread_kill_abort: /* Something stopped us from killing the thread */
2302 if (old_core < NUM_CORES) 2649 if (old_core < NUM_CORES)
2303 { 2650 {
2304 /* Did a removal on another processor's thread - switch back to 2651 /* Did a removal on another processor's thread - switch back to
@@ -2307,114 +2654,147 @@ thread_kill_abort: /* Something stopped us from killing the thread */
2307 } 2654 }
2308#endif 2655#endif
2309} 2656}
2657#endif /* ALLOW_REMOVE_THREAD */
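Because remove_thread() can leave any kernel objects the victim still holds in an undefined state, the safer pattern on this scheduler is cooperative: signal the thread to finish and block in thread_wait() until it has called thread_exit(). A minimal sketch of that pattern, assuming the thread API declared in thread.h; the worker names and the quit flag are invented for illustration:

    #include <stdbool.h>
    #include "thread.h"            /* assumed to declare thread_wait(), yield() */

    static volatile bool worker_quit = false;   /* illustrative shutdown flag */
    static struct thread_entry *worker_id;      /* saved from create_thread() */

    static void worker(void)
    {
        while (!worker_quit)
        {
            /* ... one unit of work ... */
            yield();
        }
        thread_exit();             /* wakes anyone blocked in thread_wait() */
    }

    static void stop_worker(void)
    {
        worker_quit = true;        /* request shutdown */
        thread_wait(worker_id);    /* block until the worker has exited */
        /* remove_thread(worker_id) would force the issue immediately, but
         * any objects the worker still owned would be left undefined. */
    }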
2310 2658
2659#ifdef HAVE_PRIORITY_SCHEDULING
2311/*--------------------------------------------------------------------------- 2660/*---------------------------------------------------------------------------
2312 * Block the current thread until another thread terminates. A thread may 2661 * Sets the thread's relative base priority for the core it runs on. Any
2313 * wait on itself to terminate which prevents it from running again and it 2662 * needed inheritance changes may also happen.
2314 * will need to be killed externally.
2315 * Parameter is the ID as returned from create_thread().
2316 *--------------------------------------------------------------------------- 2663 *---------------------------------------------------------------------------
2317 */ 2664 */
2318void thread_wait(struct thread_entry *thread) 2665int thread_set_priority(struct thread_entry *thread, int priority)
2319{ 2666{
2320 const unsigned int core = CURRENT_CORE; 2667 int old_base_priority = -1;
2321 struct thread_entry *current = cores[core].running; 2668
2322 unsigned thread_state; 2669 /* A little safety measure */
2323#if NUM_CORES > 1 2670 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
2324 int oldlevel; 2671 return -1;
2325 unsigned current_state;
2326#endif
2327 2672
2328 if (thread == NULL) 2673 if (thread == NULL)
2329 thread = current; 2674 thread = cores[CURRENT_CORE].running;
2330 2675
2331#if NUM_CORES > 1 2676 /* Thread could be on any list and therefore on an interrupt accessible
2332 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2677 one - disable interrupts */
2333#endif 2678 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2334 2679
2335 thread_state = GET_THREAD_STATE(thread); 2680 LOCK_THREAD(thread);
2336 2681
2337#if NUM_CORES > 1 2682 /* Make sure it's not killed */
2338 /* We can't lock the same slot twice. The waitee will also lock itself 2683 if (thread->state != STATE_KILLED)
2339 first then the thread slots that will be locked and woken in turn.
2340 The same order must be observed here as well. */
2341 if (thread == current)
2342 {
2343 current_state = thread_state;
2344 }
2345 else
2346 { 2684 {
2347 current_state = GET_THREAD_STATE(current); 2685 int old_priority = thread->priority;
2348 }
2349#endif
2350 2686
2351 if (thread_state != STATE_KILLED) 2687 old_base_priority = thread->base_priority;
2352 { 2688 thread->base_priority = priority;
2353 /* Unlock the waitee state at task switch - not done for self-wait 2689
2354 because that would double-unlock the state and potentially 2690 prio_move_entry(&thread->pdist, old_base_priority, priority);
2355 corrupt another's busy assert on the slot */ 2691 priority = find_first_set_bit(thread->pdist.mask);
2356 if (thread != current) 2692
2693 if (old_priority == priority)
2357 { 2694 {
2358#if CONFIG_CORELOCK == SW_CORELOCK 2695 /* No priority change - do nothing */
2359 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2360 cores[core].blk_ops.thread = thread;
2361#elif CONFIG_CORELOCK == CORELOCK_SWAP
2362 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2363 cores[core].blk_ops.var_u8p = &thread->state;
2364 cores[core].blk_ops.var_u8v = thread_state;
2365#endif
2366 } 2696 }
2367 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED); 2697 else if (thread->state == STATE_RUNNING)
2368 switch_thread(current); 2698 {
2369 return; 2699 /* This thread is running - change location on the run
2370 } 2700 * queue. No transitive inheritance needed. */
2701 set_running_thread_priority(thread, priority);
2702 }
2703 else
2704 {
2705 thread->priority = priority;
2706
2707 if (thread->blocker != NULL)
2708 {
2709 /* Bubble new priority down the chain */
2710 struct blocker *bl = thread->blocker; /* Blocker struct */
2711 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2712 struct thread_entry * const tstart = thread; /* Initial thread */
2713 const int highest = MIN(priority, old_priority); /* Higher of new or old */
2371 2714
2372 /* Unlock both slots - obviously the current thread can't have 2715 for (;;)
2373 STATE_KILLED so the above if clause will always catch a thread 2716 {
2374 waiting on itself */ 2717 struct thread_entry *next; /* Next thread to check */
2718 int bl_pr; /* Highest blocked thread */
2719 int queue_pr; /* New highest blocked thread */
2375#if NUM_CORES > 1 2720#if NUM_CORES > 1
2376 UNLOCK_THREAD(current, current_state); 2721 /* Owner can change but thread cannot be dislodged - thread
2377 UNLOCK_THREAD(thread, thread_state); 2722 * may not be the first in the queue which allows other
2378 set_irq_level(oldlevel); 2723 * threads ahead in the list to be given ownership during the
2379#endif 2724 * operation. If thread is next then the waker will have to
2380} 2725 * wait for us and the owner of the object will remain fixed.
2726 * If we successfully grab the owner -- which at some point
2727 * is guaranteed -- then the queue remains fixed until we
2728 * pass by. */
2729 for (;;)
2730 {
2731 LOCK_THREAD(bl_t);
2381 2732
2382#ifdef HAVE_PRIORITY_SCHEDULING 2733 /* Double-check the owner - retry if it changed */
2383/*--------------------------------------------------------------------------- 2734 if (bl->thread == bl_t)
2384 * Sets the thread's relative priority for the core it runs on. 2735 break;
2385 *---------------------------------------------------------------------------
2386 */
2387int thread_set_priority(struct thread_entry *thread, int priority)
2388{
2389 unsigned old_priority = (unsigned)-1;
2390
2391 if (thread == NULL)
2392 thread = cores[CURRENT_CORE].running;
2393 2736
2394#if NUM_CORES > 1 2737 UNLOCK_THREAD(bl_t);
2395 /* Thread could be on any list and therefore on an interrupt accessible 2738 bl_t = bl->thread;
2396 one - disable interrupts */ 2739 }
2397 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2398#endif 2740#endif
2399 unsigned state = GET_THREAD_STATE(thread); 2741 bl_pr = bl->priority;
2400 2742
2401 /* Make sure it's not killed */ 2743 if (highest > bl_pr)
2402 if (state != STATE_KILLED) 2744 break; /* Object priority won't change */
2403 { 2745
2404 old_priority = thread->priority; 2746 /* This will include the thread being set */
2405 thread->priority = priority; 2747 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2406 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY; 2748
2749 if (queue_pr == bl_pr)
2750 break; /* Object priority not changing */
2751
2752 /* Update thread boost for this object */
2753 bl->priority = queue_pr;
2754 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2755 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2756
2757 if (bl_t->priority == bl_pr)
2758 break; /* Blocking thread priority not changing */
2759
2760 if (bl_t->state == STATE_RUNNING)
2761 {
2762 /* Thread not blocked - we're done */
2763 set_running_thread_priority(bl_t, bl_pr);
2764 break;
2765 }
2766
2767 bl_t->priority = bl_pr;
2768 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2769
2770 if (bl == NULL)
2771 break; /* End of chain */
2772
2773 next = bl->thread;
2774
2775 if (next == tstart)
2776 break; /* Full-circle */
2777
2778 UNLOCK_THREAD(thread);
2779
2780 thread = bl_t;
2781 bl_t = next;
2782 } /* for (;;) */
2783
2784 UNLOCK_THREAD(bl_t);
2785 }
2786 }
2407 } 2787 }
2408 2788
2409#if NUM_CORES > 1 2789 UNLOCK_THREAD(thread);
2410 UNLOCK_THREAD(thread, state); 2790
2411 set_irq_level(oldlevel); 2791 set_irq_level(oldlevel);
2412#endif 2792
2413 return old_priority; 2793 return old_base_priority;
2414} 2794}
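The chain walk above never rescans run queues; it only updates each thread's priority distribution, a per-level count plus a bitmask whose first set bit is the effective (possibly inherited) priority, so donating and releasing a priority is a pair of increment/decrement operations. A standalone model of that idea; the real structure and helpers (prio_move_entry, find_first_set_bit) live elsewhere in the kernel, and every name below is invented for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PRIO 32

    struct prio_dist
    {
        uint32_t mask;              /* bit n set  <=>  hist[n] != 0 */
        uint8_t  hist[NUM_PRIO];    /* how many entries at each priority */
    };

    static void pd_add(struct prio_dist *pd, int prio)
    {
        if (pd->hist[prio]++ == 0)
            pd->mask |= 1u << prio;
    }

    static void pd_remove(struct prio_dist *pd, int prio)
    {
        if (--pd->hist[prio] == 0)
            pd->mask &= ~(1u << prio);
    }

    static int pd_effective(const struct prio_dist *pd)
    {
        return __builtin_ctz(pd->mask);   /* lowest number = highest priority */
    }

    int main(void)
    {
        struct prio_dist pd = {0};
        pd_add(&pd, 16);                       /* thread's own base priority */
        printf("%d\n", pd_effective(&pd));     /* 16 */
        pd_add(&pd, 8);                        /* a priority-8 waiter donates */
        printf("%d\n", pd_effective(&pd));     /* 8: inherited */
        pd_remove(&pd, 8);                     /* donation released */
        printf("%d\n", pd_effective(&pd));     /* back to 16 */
        return 0;
    }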
2415 2795
2416/*--------------------------------------------------------------------------- 2796/*---------------------------------------------------------------------------
2417 * Returns the current priority for a thread. 2797 * Returns the current base priority for a thread.
2418 *--------------------------------------------------------------------------- 2798 *---------------------------------------------------------------------------
2419 */ 2799 */
2420int thread_get_priority(struct thread_entry *thread) 2800int thread_get_priority(struct thread_entry *thread)
@@ -2423,64 +2803,26 @@ int thread_get_priority(struct thread_entry *thread)
2423 if (thread == NULL) 2803 if (thread == NULL)
2424 thread = cores[CURRENT_CORE].running; 2804 thread = cores[CURRENT_CORE].running;
2425 2805
2426 return (unsigned)thread->priority; 2806 return thread->base_priority;
2427} 2807}
2808#endif /* HAVE_PRIORITY_SCHEDULING */
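Since thread_set_priority() returns the previous base priority (or -1 for a bad priority or a killed thread), a temporary boost is easy to undo. A hedged usage sketch; the helper handle and the boosted level are placeholders, and lower numbers mean higher priority:

    /* Raise a helper thread for a latency-sensitive stretch, then restore it. */
    static void with_boost(struct thread_entry *helper, int boosted_prio)
    {
        int old = thread_set_priority(helper, boosted_prio);

        /* ... latency-sensitive work that depends on the helper ... */

        if (old >= 0)
            thread_set_priority(helper, old);  /* restore the base priority */
    }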
2428 2809
2429/*--------------------------------------------------------------------------- 2810/*---------------------------------------------------------------------------
2430 * Yield that guarantees thread execution once per round regardless of 2811 * Starts a frozen thread - similar semantics to wakeup_thread except that
2431 * thread's scheduler priority - basically a transient realtime boost 2812 * the thread is on no scheduler or wakeup queue at all. It exists simply by
2432 * without altering the scheduler's thread precedence. 2813 * virtue of the slot having a state of STATE_FROZEN.
2433 *
2434 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2435 *--------------------------------------------------------------------------- 2814 *---------------------------------------------------------------------------
2436 */ 2815 */
2437void priority_yield(void)
2438{
2439 const unsigned int core = CURRENT_CORE;
2440 struct thread_entry *thread = cores[core].running;
2441 thread->priority_x = HIGHEST_PRIORITY;
2442 switch_thread(NULL);
2443 thread->priority_x = LOWEST_PRIORITY;
2444}
2445#endif /* HAVE_PRIORITY_SCHEDULING */
2446
2447/* Resumes a frozen thread - similar logic to wakeup_thread except that
2448 the thread is on no scheduler list at all. It exists simply by virtue of
2449 the slot having a state of STATE_FROZEN. */
2450void thread_thaw(struct thread_entry *thread) 2816void thread_thaw(struct thread_entry *thread)
2451{ 2817{
2452#if NUM_CORES > 1
2453 /* Thread could be on any list and therefore on an interrupt accessible
2454 one - disable interrupts */
2455 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2818 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2456#endif 2819 LOCK_THREAD(thread);
2457 unsigned state = GET_THREAD_STATE(thread);
2458 2820
2459 if (state == STATE_FROZEN) 2821 if (thread->state == STATE_FROZEN)
2460 { 2822 core_schedule_wakeup(thread);
2461 const unsigned int core = CURRENT_CORE;
2462#if NUM_CORES > 1
2463 if (thread->core != core)
2464 {
2465 core_schedule_wakeup(thread);
2466 }
2467 else
2468#endif
2469 {
2470 add_to_list_l(&cores[core].running, thread);
2471 }
2472
2473 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2474#if NUM_CORES > 1
2475 set_irq_level(oldlevel);
2476#endif
2477 return;
2478 }
2479 2823
2480#if NUM_CORES > 1 2824 UNLOCK_THREAD(thread);
2481 UNLOCK_THREAD(thread, state);
2482 set_irq_level(oldlevel); 2825 set_irq_level(oldlevel);
2483#endif
2484} 2826}
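thread_thaw() pairs naturally with creating a thread frozen: the slot exists and is fully set up, but nothing can schedule it until the creator is ready. A hedged sketch; the create_thread() argument list, the CREATE_THREAD_FROZEN flag, DEFAULT_STACK_SIZE, sleep() and the priority constant are assumed from the surrounding kernel headers, and the demo_* names are invented:

    #include "thread.h"                        /* assumed declarations */

    static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];
    static int  demo_shared;                   /* state the thread depends on */

    static void demo_thread(void)
    {
        /* demo_shared is guaranteed valid here: we only run after the thaw */
        for (;;)
            sleep(HZ);
    }

    static void start_demo(void)
    {
        struct thread_entry *t =
            create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                          CREATE_THREAD_FROZEN, "demo"
                          IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));

        demo_shared = 42;                      /* finish setup while frozen */
        thread_thaw(t);                        /* STATE_FROZEN -> runnable */
    }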
2485 2827
2486/*--------------------------------------------------------------------------- 2828/*---------------------------------------------------------------------------
@@ -2501,21 +2843,31 @@ unsigned int switch_core(unsigned int new_core)
2501{ 2843{
2502 const unsigned int core = CURRENT_CORE; 2844 const unsigned int core = CURRENT_CORE;
2503 struct thread_entry *current = cores[core].running; 2845 struct thread_entry *current = cores[core].running;
2504 struct thread_entry *w;
2505 int oldlevel;
2506
2507 /* Interrupts can access the lists that will be used - disable them */
2508 unsigned state = GET_THREAD_STATE(current);
2509 2846
2510 if (core == new_core) 2847 if (core == new_core)
2511 { 2848 {
2512 /* No change - just unlock everything and return same core */ 2849 /* No change - just return same core */
2513 UNLOCK_THREAD(current, state);
2514 return core; 2850 return core;
2515 } 2851 }
2516 2852
2853 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2854 LOCK_THREAD(current);
2855
2856 if (current->name == THREAD_DESTRUCT)
2857 {
2858 /* Thread being killed - deactivate and let process complete */
2859 UNLOCK_THREAD(current);
2860 set_irq_level(oldlevel);
2861 thread_wait(current);
2862 /* Should never be reached */
2863 THREAD_PANICF("switch_core->D:*R", current);
2864 }
2865
2517 /* Get us off the running list for the current core */ 2866 /* Get us off the running list for the current core */
2867 RTR_LOCK(core);
2518 remove_from_list_l(&cores[core].running, current); 2868 remove_from_list_l(&cores[core].running, current);
2869 rtr_subtract_entry(core, current->priority);
2870 RTR_UNLOCK(core);
2519 2871
2520 /* Stash return value (old core) in a safe place */ 2872 /* Stash return value (old core) in a safe place */
2521 current->retval = core; 2873 current->retval = core;
@@ -2532,39 +2884,31 @@ unsigned int switch_core(unsigned int new_core)
2532 2884
2533 /* Do not use core_schedule_wakeup here since this will result in 2885 /* Do not use core_schedule_wakeup here since this will result in
2534 * the thread starting to run on the other core before being finished on 2886 * the thread starting to run on the other core before being finished on
2535 * this one. Delay the wakeup list unlock to keep the other core stuck 2887 * this one. Delay the list unlock to keep the other core stuck
2536 * until this thread is ready. */ 2888 * until this thread is ready. */
2537 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2889 RTR_LOCK(new_core);
2538 w = LOCK_LIST(&cores[new_core].waking); 2890
2539 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current); 2891 rtr_add_entry(new_core, current->priority);
2892 add_to_list_l(&cores[new_core].running, current);
2540 2893
2541 /* Make a callback into device-specific code, unlock the wakeup list so 2894 /* Make a callback into device-specific code, unlock the wakeup list so
2542 * that execution may resume on the new core, unlock our slot and finally 2895 * that execution may resume on the new core, unlock our slot and finally
2543 * restore the interrupt level */ 2896 * restore the interrupt level */
2544 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT | 2897 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2545 TBOP_UNLOCK_LIST; 2898 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2546 cores[core].blk_ops.list_p = &cores[new_core].waking; 2899 cores[core].block_task = current;
2547#if CONFIG_CORELOCK == CORELOCK_SWAP 2900
2548 cores[core].blk_ops.state = STATE_RUNNING; 2901 UNLOCK_THREAD(current);
2549 cores[core].blk_ops.list_v = w; 2902
2550#endif 2903 /* Alert other core to activity */
2904 core_wake(new_core);
2551 2905
2552#ifdef HAVE_PRIORITY_SCHEDULING
2553 current->priority_x = HIGHEST_PRIORITY;
2554 cores[core].highest_priority = LOWEST_PRIORITY;
2555#endif
2556 /* Do the stack switching, cache maintenance and switch_thread call - 2906
2557 requires native code */ 2907 requires native code */
2558 switch_thread_core(core, current); 2908 switch_thread_core(core, current);
2559 2909
2560#ifdef HAVE_PRIORITY_SCHEDULING
2561 current->priority_x = LOWEST_PRIORITY;
2562 cores[current->core].highest_priority = LOWEST_PRIORITY;
2563#endif
2564
2565 /* Finally return the old core to caller */ 2910 /* Finally return the old core to caller */
2566 return current->retval; 2911 return current->retval;
2567 (void)state;
2568} 2912}
2569#endif /* NUM_CORES > 1 */ 2913#endif /* NUM_CORES > 1 */
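switch_core() migrates only the calling thread and returns the core it left, so a round trip is two calls. A hedged sketch for the dual-core targets; run_on_cop and its callback are invented names, CPU/COP are the usual core ids:

    #if NUM_CORES > 1
    /* Run one routine on the coprocessor, then migrate back to wherever the
     * caller came from. */
    static void run_on_cop(void (*fn)(void))
    {
        unsigned int old_core = switch_core(COP);  /* now executing on COP */
        fn();
        switch_core(old_core);                     /* hop back */
    }
    #endif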
2570 2914
@@ -2578,12 +2922,11 @@ void init_threads(void)
2578{ 2922{
2579 const unsigned int core = CURRENT_CORE; 2923 const unsigned int core = CURRENT_CORE;
2580 struct thread_entry *thread; 2924 struct thread_entry *thread;
2581 int slot;
2582 2925
2583 /* CPU will initialize first and then sleep */ 2926 /* CPU will initialize first and then sleep */
2584 slot = find_empty_thread_slot(); 2927 thread = find_empty_thread_slot();
2585 2928
2586 if (slot >= MAXTHREADS) 2929 if (thread == NULL)
2587 { 2930 {
2588 /* WTF? There really must be a slot available at this stage. 2931 /* WTF? There really must be a slot available at this stage.
2589 * This can fail if, for example, .bss isn't zero'ed out by the loader 2932 * This can fail if, for example, .bss isn't zero'ed out by the loader
@@ -2592,33 +2935,29 @@ void init_threads(void)
2592 } 2935 }
2593 2936
2594 /* Initialize initially non-zero members of core */ 2937 /* Initialize initially non-zero members of core */
2595 thread_queue_init(&cores[core].waking);
2596 cores[core].next_tmo_check = current_tick; /* Something not in the past */ 2938 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2597#ifdef HAVE_PRIORITY_SCHEDULING
2598 cores[core].highest_priority = LOWEST_PRIORITY;
2599#endif
2600 2939
2601 /* Initialize initially non-zero members of slot */ 2940 /* Initialize initially non-zero members of slot */
2602 thread = &threads[slot]; 2941 UNLOCK_THREAD(thread); /* No sync worries yet */
2603 thread->name = main_thread_name; 2942 thread->name = main_thread_name;
2604 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */ 2943 thread->state = STATE_RUNNING;
2605#if NUM_CORES > 1 2944 IF_COP( thread->core = core; )
2606 thread->core = core;
2607#endif
2608#ifdef HAVE_PRIORITY_SCHEDULING 2945#ifdef HAVE_PRIORITY_SCHEDULING
2946 corelock_init(&cores[core].rtr_cl);
2947 thread->base_priority = PRIORITY_USER_INTERFACE;
2948 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2609 thread->priority = PRIORITY_USER_INTERFACE; 2949 thread->priority = PRIORITY_USER_INTERFACE;
2610 thread->priority_x = LOWEST_PRIORITY; 2950 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2611#endif
2612#if CONFIG_CORELOCK == SW_CORELOCK
2613 corelock_init(&thread->cl);
2614#endif 2951#endif
2952 corelock_init(&thread->waiter_cl);
2953 corelock_init(&thread->slot_cl);
2615 2954
2616 add_to_list_l(&cores[core].running, thread); 2955 add_to_list_l(&cores[core].running, thread);
2617 2956
2618 if (core == CPU) 2957 if (core == CPU)
2619 { 2958 {
2620 thread->stack = stackbegin; 2959 thread->stack = stackbegin;
2621 thread->stack_size = (int)stackend - (int)stackbegin; 2960 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2622#if NUM_CORES > 1 /* This code path will not be run on single core targets */ 2961#if NUM_CORES > 1 /* This code path will not be run on single core targets */
2623 /* TODO: HAL interface for this */ 2962 /* TODO: HAL interface for this */
2624 /* Wake up coprocessor and let it initialize kernel and threads */ 2963 /* Wake up coprocessor and let it initialize kernel and threads */
@@ -2638,22 +2977,21 @@ void init_threads(void)
2638 /* Get COP safely primed inside switch_thread where it will remain 2977 /* Get COP safely primed inside switch_thread where it will remain
2639 * until a thread actually exists on it */ 2978 * until a thread actually exists on it */
2640 CPU_CTL = PROC_WAKE; 2979 CPU_CTL = PROC_WAKE;
2641 remove_thread(NULL); 2980 thread_exit();
2642#endif /* NUM_CORES */ 2981#endif /* NUM_CORES */
2643 } 2982 }
2644} 2983}
2645 2984
2646/*--------------------------------------------------------------------------- 2985/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2647 * Returns the maximum percentage of stack a thread ever used while running. 2986#if NUM_CORES == 1
2648 * NOTE: Some large buffer allocations that don't use enough of the buffer to 2987static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2649 * overwrite stackptr[0] will not be seen. 2988#else
2650 *--------------------------------------------------------------------------- 2989static int stack_usage(uintptr_t *stackptr, size_t stack_size)
2651 */ 2990#endif
2652int thread_stack_usage(const struct thread_entry *thread)
2653{ 2991{
2654 unsigned int *stackptr = thread->stack; 2992 unsigned int stack_words = stack_size / sizeof (uintptr_t);
2655 int stack_words = thread->stack_size / sizeof (int); 2993 unsigned int i;
2656 int i, usage = 0; 2994 int usage = 0;
2657 2995
2658 for (i = 0; i < stack_words; i++) 2996 for (i = 0; i < stack_words; i++)
2659 { 2997 {
@@ -2667,6 +3005,17 @@ int thread_stack_usage(const struct thread_entry *thread)
2667 return usage; 3005 return usage;
2668} 3006}
2669 3007
3008/*---------------------------------------------------------------------------
3009 * Returns the maximum percentage of stack a thread ever used while running.
3010 * NOTE: Some large buffer allocations that don't use enough of the buffer to
3011 * overwrite stackptr[0] will not be seen.
3012 *---------------------------------------------------------------------------
3013 */
3014int thread_stack_usage(const struct thread_entry *thread)
3015{
3016 return stack_usage(thread->stack, thread->stack_size);
3017}
3018
2670#if NUM_CORES > 1 3019#if NUM_CORES > 1
2671/*--------------------------------------------------------------------------- 3020/*---------------------------------------------------------------------------
2672 * Returns the maximum percentage of the core's idle stack ever used during 3021 * Returns the maximum percentage of the core's idle stack ever used during
@@ -2675,19 +3024,7 @@ int thread_stack_usage(const struct thread_entry *thread)
2675 */ 3024 */
2676int idle_stack_usage(unsigned int core) 3025int idle_stack_usage(unsigned int core)
2677{ 3026{
2678 unsigned int *stackptr = idle_stacks[core]; 3027 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
2679 int i, usage = 0;
2680
2681 for (i = 0; i < IDLE_STACK_WORDS; i++)
2682 {
2683 if (stackptr[i] != DEADBEEF)
2684 {
2685 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2686 break;
2687 }
2688 }
2689
2690 return usage;
2691} 3028}
2692#endif 3029#endif
2693 3030