path: root/firmware/kernel/thread.c
Diffstat (limited to 'firmware/kernel/thread.c')
-rw-r--r--  firmware/kernel/thread.c  1837
1 file changed, 653 insertions, 1184 deletions
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
index c148f6b76e..b916c3b521 100644
--- a/firmware/kernel/thread.c
+++ b/firmware/kernel/thread.c
@@ -37,11 +37,6 @@
37#endif 37#endif
38#include "core_alloc.h" 38#include "core_alloc.h"
39 39
40/****************************************************************************
41 * ATTENTION!! *
42 * See notes below on implementing processor-specific portions! *
43 ***************************************************************************/
44
45/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ 40/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
46#ifdef DEBUG 41#ifdef DEBUG
47#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */ 42#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
@@ -49,7 +44,11 @@
49#define THREAD_EXTRA_CHECKS 0 44#define THREAD_EXTRA_CHECKS 0
50#endif 45#endif
51 46
52/** 47/****************************************************************************
48 * ATTENTION!! *
49 * See notes below on implementing processor-specific portions! *
50 ****************************************************************************
51 *
53 * General locking order to guarantee progress. Order must be observed but 52 * General locking order to guarantee progress. Order must be observed but
54 * all stages are not necessarily obligatory. Going from 1) to 3) is 53 * all stages are not necessarily obligatory. Going from 1) to 3) is
55 * perfectly legal. 54 * perfectly legal.
@@ -66,14 +65,14 @@
66 * unlock and the other processor's handler may proceed at that time. Not 65 * unlock and the other processor's handler may proceed at that time. Not
67 * necessary when the resource in question is definitely not available to 66 * necessary when the resource in question is definitely not available to
68 * interrupt handlers. 67 * interrupt handlers.
69 * 68 *
70 * 2) Kernel Object 69 * 2) Kernel Object
71 * 1) May be needed beforehand if the kernel object allows dual-use such as 70 * 1) May be needed beforehand if the kernel object allows dual-use such as
72 * event queues. The kernel object must have a scheme to protect itself from 71 * event queues. The kernel object must have a scheme to protect itself from
73 * access by another processor and is responsible for serializing the calls 72 * access by another processor and is responsible for serializing the calls
74 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each 73 * to block_thread and wakeup_thread both to themselves and to each other.
75 * other. Objects' queues are also protected here. 74 * Objects' queues are also protected here.
76 * 75 *
77 * 3) Thread Slot 76 * 3) Thread Slot
78 * This locks access to the thread's slot such that its state cannot be 77 * This locks access to the thread's slot such that its state cannot be
79 * altered by another processor when a state change is in progress such as 78 * altered by another processor when a state change is in progress such as
@@ -121,68 +120,62 @@
121 * available then some careful non-blocking synchronization is needed (as on 120 * available then some careful non-blocking synchronization is needed (as on
122 * PP targets at the moment). 121 * PP targets at the moment).
123 *--------------------------------------------------------------------------- 122 *---------------------------------------------------------------------------
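As a concrete illustration of the 1) -> 2) -> 3) ordering above, here is a minimal sketch of a hypothetical SMP wakeup path. The object type and the example_* helpers are assumptions made up for illustration; only disable_irq()/enable_irq(), corelock_lock()/corelock_unlock() and LOCK_THREAD()/UNLOCK_THREAD() correspond to facilities used in this file.

static void example_wakeup_path(struct example_kobj *kobj)
{
    disable_irq();                     /* 1) Interrupt suspension             */
    corelock_lock(&kobj->cl);          /* 2) Kernel object - serialize access */

    struct thread_entry *t = example_first_waiter(kobj); /* assumed helper */
    if (t != NULL)
    {
        LOCK_THREAD(t);                /* 3) Thread slot - freeze its state   */
        /* ...move t onto its core's run queue here... */
        UNLOCK_THREAD(t);
    }

    corelock_unlock(&kobj->cl);        /* release in the reverse order */
    enable_irq();
}
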
123 *
124 *
125 *---------------------------------------------------------------------------
126 * Priority distribution structure (one category for each possible priority):
127 *
128 * +----+----+----+ ... +------+
129 * hist: | F0 | F1 | F2 | | Fn-1 |
130 * +----+----+----+ ... +------+
131 * mask: | b0 | b1 | b2 | | bn-1 |
132 * +----+----+----+ ... +------+
133 *
134 * F = count of threads at priority category n (frequency)
135 * b = bitmask of non-zero priority categories (occupancy)
136 *
137 * / if H[n] != 0 : 1
138 * b[n] = |
139 * \ else : 0
140 *
141 *---------------------------------------------------------------------------
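The hist/mask bookkeeping can also be sketched in isolation. This is a simplified stand-in that assumes a plain 32-bit mask and the GCC __builtin_ctz() intrinsic instead of the real struct priority_distribution and priobit_* helpers:

#include <stdint.h>

#define NUM_PRIO_CATEGORIES 32

struct prio_dist_sketch
{
    unsigned int hist[NUM_PRIO_CATEGORIES]; /* F[n]: thread count per category   */
    uint32_t mask;                          /* b[n]: set iff hist[n] is non-zero */
};

static void sketch_prio_add(struct prio_dist_sketch *pd, unsigned int prio)
{
    if (pd->hist[prio]++ == 0)
        pd->mask |= (uint32_t)1 << prio;    /* category just became occupied */
}

static void sketch_prio_subtract(struct prio_dist_sketch *pd, unsigned int prio)
{
    if (--pd->hist[prio] == 0)
        pd->mask &= ~((uint32_t)1 << prio); /* category just became empty */
}

/* Highest-priority occupied category: lower numbers mean higher priority,
 * so this is the lowest set bit. The caller must ensure mask != 0. */
static unsigned int sketch_prio_highest(const struct prio_dist_sketch *pd)
{
    return (unsigned int)__builtin_ctz(pd->mask);
}
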
142 * Basic priority inheritance protocol (PIP):
143 *
144 * Mn = mutex n, Tn = thread n
145 *
146 * A lower priority thread inherits the priority of the highest priority
147 * thread blocked waiting for it to complete an action (such as release a
148 * mutex or respond to a message via queue_send):
149 *
150 * 1) T2->M1->T1
151 *
152 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
153 * priority than T1 then T1 inherits the priority of T2.
154 *
155 * 2) T3
156 * \/
157 * T2->M1->T1
158 *
159 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
160 * T1 inherits the higher priority of T2 and T3.
161 *
162 * 3) T3->M2->T2->M1->T1
163 *
164 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
165 * then T1 inherits the priority of T3 through T2.
166 *
167 * Blocking chains can grow arbitrarily complex (though it's best that they
168 * not form at all very often :) and build up from these units.
169 *---------------------------------------------------------------------------
124 */ 170 */
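Tracing case 3) above with made-up numbers (lower value means higher priority: T1 = 20, T2 = 15, T3 = 5) using the sketch helpers from the previous block:

static void pip_chain_example(void)
{
    struct prio_dist_sketch t1 = { {0}, 0 }, t2 = { {0}, 0 };

    sketch_prio_add(&t1, 20);   /* T1 starts at its base priority 20 */
    sketch_prio_add(&t2, 15);   /* T2 starts at its base priority 15 */

    /* T3 (priority 5) blocks on M2, which T2 owns: T2's distribution
     * becomes {15, 5} and its effective priority drops to 5. */
    sketch_prio_add(&t2, 5);

    /* T2 is itself blocked on M1, which T1 owns, so the elevation is
     * propagated down the chain: T1 holds {20, 5} and also runs at 5. */
    sketch_prio_add(&t1, 5);

    /* Releasing M1 removes the donated entry from T1 (back to 20);
     * releasing M2 later does the same for T2 (back to 15). */
    sketch_prio_subtract(&t1, 5);
    sketch_prio_subtract(&t2, 5);
}
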
125 171static FORCE_INLINE void core_sleep(IF_COP_VOID(unsigned int core));
126/* Cast to the the machine pointer size, whose size could be < 4 or > 32 172static FORCE_INLINE void store_context(void* addr);
127 * (someday :). */ 173static FORCE_INLINE void load_context(const void* addr);
128static struct core_entry cores[NUM_CORES] IBSS_ATTR;
129struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
130
131static const char main_thread_name[] = "main";
132#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
133extern uintptr_t stackbegin[];
134extern uintptr_t stackend[];
135#else
136extern uintptr_t *stackbegin;
137extern uintptr_t *stackend;
138#endif
139
140static inline void core_sleep(IF_COP_VOID(unsigned int core))
141 __attribute__((always_inline));
142
143void check_tmo_threads(void)
144 __attribute__((noinline));
145
146static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
147 __attribute__((always_inline));
148
149static void add_to_list_tmo(struct thread_entry *thread)
150 __attribute__((noinline));
151
152static void core_schedule_wakeup(struct thread_entry *thread)
153 __attribute__((noinline));
154
155#if NUM_CORES > 1
156static inline void run_blocking_ops(
157 unsigned int core, struct thread_entry *thread)
158 __attribute__((always_inline));
159#endif
160
161static void thread_stkov(struct thread_entry *thread)
162 __attribute__((noinline));
163
164static inline void store_context(void* addr)
165 __attribute__((always_inline));
166
167static inline void load_context(const void* addr)
168 __attribute__((always_inline));
169
170#if NUM_CORES > 1
171static void thread_final_exit_do(struct thread_entry *current)
172 __attribute__((noinline)) NORETURN_ATTR USED_ATTR;
173#else
174static inline void thread_final_exit(struct thread_entry *current)
175 __attribute__((always_inline)) NORETURN_ATTR;
176#endif
177
178void switch_thread(void)
179 __attribute__((noinline));
180 174
181/**************************************************************************** 175/****************************************************************************
182 * Processor/OS-specific section - include necessary core support 176 * Processor/OS-specific section - include necessary core support
183 */ 177 */
184 178
185
186#include "asm/thread.c" 179#include "asm/thread.c"
187 180
188#if defined (CPU_PP) 181#if defined (CPU_PP)
@@ -193,20 +186,17 @@ void switch_thread(void)
193 * End Processor-specific section 186 * End Processor-specific section
194 ***************************************************************************/ 187 ***************************************************************************/
195 188
196static NO_INLINE 189static NO_INLINE NORETURN_ATTR
197 void thread_panicf(const char *msg, struct thread_entry *thread) 190 void thread_panicf(const char *msg, struct thread_entry *thread)
198{ 191{
199 IF_COP( const unsigned int core = thread->core; ) 192 IF_COP( const unsigned int core = thread->core; )
200 static char namebuf[sizeof (((struct thread_debug_info *)0)->name)]; 193 static char name[sizeof (((struct thread_debug_info *)0)->name)];
201 const char *name = thread->name; 194 format_thread_name(name, sizeof (name), thread);
202 if (!name)
203 name = "";
204 snprintf(namebuf, sizeof (namebuf), *name ? "%s" : "%s%08lX",
205 name, (unsigned long)thread->id);
206 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); 195 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
196 while (1);
207} 197}
208 198
209static void thread_stkov(struct thread_entry *thread) 199static NO_INLINE void thread_stkov(struct thread_entry *thread)
210{ 200{
211 thread_panicf("Stkov", thread); 201 thread_panicf("Stkov", thread);
212} 202}
@@ -218,36 +208,51 @@ static void thread_stkov(struct thread_entry *thread)
218 ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) 208 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
219#else 209#else
220#define THREAD_PANICF(msg, thread) \ 210#define THREAD_PANICF(msg, thread) \
221 do {} while (0) 211 do {} while (1)
222#define THREAD_ASSERT(exp, msg, thread) \ 212#define THREAD_ASSERT(exp, msg, thread) \
223 do {} while (0) 213 do {} while (0)
224#endif /* THREAD_EXTRA_CHECKS */ 214#endif /* THREAD_EXTRA_CHECKS */
225 215
216/* Thread locking */
217#if NUM_CORES > 1
218#define LOCK_THREAD(thread) \
219 ({ corelock_lock(&(thread)->slot_cl); })
220#define TRY_LOCK_THREAD(thread) \
221 ({ corelock_try_lock(&(thread)->slot_cl); })
222#define UNLOCK_THREAD(thread) \
223 ({ corelock_unlock(&(thread)->slot_cl); })
224#else /* NUM_CORES == 1*/
225#define LOCK_THREAD(thread) \
226 ({ (void)(thread); })
227#define TRY_LOCK_THREAD(thread) \
228 ({ (void)(thread); })
229#define UNLOCK_THREAD(thread) \
230 ({ (void)(thread); })
231#endif /* NUM_CORES */
232
226/* RTR list */ 233/* RTR list */
227#define RTR_LOCK(core) \ 234#define RTR_LOCK(corep) \
228 ({ corelock_lock(&cores[core].rtr_cl); }) 235 corelock_lock(&(corep)->rtr_cl)
229#define RTR_UNLOCK(core) \ 236#define RTR_UNLOCK(corep) \
230 ({ corelock_unlock(&cores[core].rtr_cl); }) 237 corelock_unlock(&(corep)->rtr_cl)
231 238
232#ifdef HAVE_PRIORITY_SCHEDULING 239#ifdef HAVE_PRIORITY_SCHEDULING
233#define rtr_add_entry(core, priority) \ 240#define rtr_add_entry(corep, priority) \
234 prio_add_entry(&cores[core].rtr, (priority)) 241 prio_add_entry(&(corep)->rtr_dist, (priority))
235 242#define rtr_subtract_entry(corep, priority) \
236#define rtr_subtract_entry(core, priority) \ 243 prio_subtract_entry(&(corep)->rtr_dist, (priority))
237 prio_subtract_entry(&cores[core].rtr, (priority)) 244#define rtr_move_entry(corep, from, to) \
238 245 prio_move_entry(&(corep)->rtr_dist, (from), (to))
239#define rtr_move_entry(core, from, to) \ 246#else /* !HAVE_PRIORITY_SCHEDULING */
240 prio_move_entry(&cores[core].rtr, (from), (to)) 247#define rtr_add_entry(corep, priority) \
241#else 248 do {} while (0)
242#define rtr_add_entry(core, priority) 249#define rtr_subtract_entry(corep, priority) \
243#define rtr_add_entry_inl(core, priority) 250 do {} while (0)
244#define rtr_subtract_entry(core, priority) 251#define rtr_move_entry(corep, from, to) \
245#define rtr_subtract_entry_inl(core, priotity) 252 do {} while (0)
246#define rtr_move_entry(core, from, to) 253#endif /* HAVE_PRIORITY_SCHEDULING */
247#define rtr_move_entry_inl(core, from, to)
248#endif
249 254
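The non-priority stubs above now expand to do {} while (0) rather than to nothing. A small illustration of why that form is preferred for statement-like macros; the stub and caller below are hypothetical, not code from this file:

/* Hypothetical stand-in for the no-op configuration of rtr_add_entry() */
#define rtr_add_entry_stub(corep, priority) do {} while (0)

static void example_caller(struct core_entry *corep, int priority, int use_prio)
{
    /* The do {} while (0) body expands to exactly one statement and still
     * requires the trailing semicolon, so this if/else parses the same way
     * it does when the real (priority-scheduling) body is substituted. An
     * empty expansion would leave a bare ';' here and typically triggers
     * -Wempty-body style warnings. */
    if (use_prio)
        rtr_add_entry_stub(corep, priority);
    else
        (void)priority;

    (void)corep;
}
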
250static inline void thread_store_context(struct thread_entry *thread) 255static FORCE_INLINE void thread_store_context(struct thread_entry *thread)
251{ 256{
252#if (CONFIG_PLATFORM & PLATFORM_HOSTED) 257#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
253 thread->__errno = errno; 258 thread->__errno = errno;
@@ -255,7 +260,7 @@ static inline void thread_store_context(struct thread_entry *thread)
255 store_context(&thread->context); 260 store_context(&thread->context);
256} 261}
257 262
258static inline void thread_load_context(struct thread_entry *thread) 263static FORCE_INLINE void thread_load_context(struct thread_entry *thread)
259{ 264{
260 load_context(&thread->context); 265 load_context(&thread->context);
261#if (CONFIG_PLATFORM & PLATFORM_HOSTED) 266#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
@@ -263,272 +268,31 @@ static inline void thread_load_context(struct thread_entry *thread)
263#endif 268#endif
264} 269}
265 270
266static inline unsigned int should_switch_tasks(void) 271static FORCE_INLINE unsigned int
272should_switch_tasks(struct thread_entry *thread)
267{ 273{
268 unsigned int result = THREAD_OK;
269
270#ifdef HAVE_PRIORITY_SCHEDULING
271 struct thread_entry *current = cores[CURRENT_CORE].running;
272 if (current &&
273 priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask)
274 < current->priority)
275 {
276 /* There is a thread ready to run of higher priority on the same
277 * core as the current one; recommend a task switch. */
278 result |= THREAD_SWITCH;
279 }
280#endif /* HAVE_PRIORITY_SCHEDULING */
281
282 return result;
283}
284
285#ifdef HAVE_PRIORITY_SCHEDULING 274#ifdef HAVE_PRIORITY_SCHEDULING
286/*--------------------------------------------------------------------------- 275 const unsigned int core = CURRENT_CORE;
287 * Locks the thread registered as the owner of the block and makes sure it
288 * didn't change in the meantime
289 *---------------------------------------------------------------------------
290 */
291#if NUM_CORES == 1
292static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
293{
294 return bl->thread;
295}
296#else /* NUM_CORES > 1 */
297static struct thread_entry * lock_blocker_thread(struct blocker *bl)
298{
299 /* The blocker thread may change during the process of trying to
300 capture it */
301 while (1)
302 {
303 struct thread_entry *t = bl->thread;
304
305 /* TRY, or else deadlocks are possible */
306 if (!t)
307 {
308 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
309 if (corelock_try_lock(&blsplay->cl))
310 {
311 if (!bl->thread)
312 return NULL; /* Still multi */
313
314 corelock_unlock(&blsplay->cl);
315 }
316 }
317 else
318 {
319 if (TRY_LOCK_THREAD(t))
320 {
321 if (bl->thread == t)
322 return t;
323
324 UNLOCK_THREAD(t);
325 }
326 }
327 }
328}
329#endif /* NUM_CORES */
330
331static inline void unlock_blocker_thread(struct blocker *bl)
332{
333#if NUM_CORES > 1 276#if NUM_CORES > 1
334 struct thread_entry *blt = bl->thread; 277 /* Forget about it if different CPU */
335 if (blt) 278 if (thread->core != core)
336 UNLOCK_THREAD(blt); 279 return THREAD_OK;
337 else 280#endif
338 corelock_unlock(&((struct blocker_splay *)bl)->cl); 281 /* Just woke something therefore a thread is on the run queue */
339#endif /* NUM_CORES > 1*/ 282 struct thread_entry *current =
340 (void)bl; 283 RTR_THREAD_FIRST(&__core_id_entry(core)->rtr);
341} 284 if (LIKELY(thread->priority >= current->priority))
285 return THREAD_OK;
286
287 /* There is a thread ready to run of higher priority on the same
288 * core as the current one; recommend a task switch. */
289 return THREAD_OK | THREAD_SWITCH;
290#else
291 return THREAD_OK;
342#endif /* HAVE_PRIORITY_SCHEDULING */ 292#endif /* HAVE_PRIORITY_SCHEDULING */
343
344/*---------------------------------------------------------------------------
345 * Thread list structure - circular:
346 * +------------------------------+
347 * | |
348 * +--+---+<-+---+<-+---+<-+---+<-+
349 * Head->| T | | T | | T | | T |
350 * +->+---+->+---+->+---+->+---+--+
351 * | |
352 * +------------------------------+
353 *---------------------------------------------------------------------------
354 */
355
356/*---------------------------------------------------------------------------
357 * Adds a thread to a list of threads using "insert last". Uses the "l"
358 * links.
359 *---------------------------------------------------------------------------
360 */
361static void add_to_list_l(struct thread_entry **list,
362 struct thread_entry *thread)
363{
364 struct thread_entry *l = *list;
365
366 if (l == NULL)
367 {
368 /* Insert into unoccupied list */
369 thread->l.prev = thread;
370 thread->l.next = thread;
371 *list = thread;
372 return;
373 }
374
375 /* Insert last */
376 thread->l.prev = l->l.prev;
377 thread->l.next = l;
378 l->l.prev->l.next = thread;
379 l->l.prev = thread;
380}
381
382/*---------------------------------------------------------------------------
383 * Removes a thread from a list of threads. Uses the "l" links.
384 *---------------------------------------------------------------------------
385 */
386static void remove_from_list_l(struct thread_entry **list,
387 struct thread_entry *thread)
388{
389 struct thread_entry *prev, *next;
390
391 next = thread->l.next;
392
393 if (thread == next)
394 {
395 /* The only item */
396 *list = NULL;
397 return;
398 }
399
400 if (thread == *list)
401 {
402 /* List becomes next item */
403 *list = next;
404 }
405
406 prev = thread->l.prev;
407
408 /* Fix links to jump over the removed entry. */
409 next->l.prev = prev;
410 prev->l.next = next;
411}
412
413/*---------------------------------------------------------------------------
414 * Timeout list structure - circular reverse (to make "remove item" O(1)),
415 * NULL-terminated forward (to ease the far more common forward traversal):
416 * +------------------------------+
417 * | |
418 * +--+---+<-+---+<-+---+<-+---+<-+
419 * Head->| T | | T | | T | | T |
420 * +---+->+---+->+---+->+---+-X
421 *---------------------------------------------------------------------------
422 */
423
424/*---------------------------------------------------------------------------
425 * Add a thread from the core's timout list by linking the pointers in its
426 * tmo structure.
427 *---------------------------------------------------------------------------
428 */
429static void add_to_list_tmo(struct thread_entry *thread)
430{
431 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
432 THREAD_ASSERT(thread->tmo.prev == NULL,
433 "add_to_list_tmo->already listed", thread);
434
435 thread->tmo.next = NULL;
436
437 if (tmo == NULL)
438 {
439 /* Insert into unoccupied list */
440 thread->tmo.prev = thread;
441 cores[IF_COP_CORE(thread->core)].timeout = thread;
442 return;
443 }
444
445 /* Insert Last */
446 thread->tmo.prev = tmo->tmo.prev;
447 tmo->tmo.prev->tmo.next = thread;
448 tmo->tmo.prev = thread;
449}
450
451/*---------------------------------------------------------------------------
452 * Remove a thread from the core's timout list by unlinking the pointers in
453 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
454 * is cancelled.
455 *---------------------------------------------------------------------------
456 */
457static void remove_from_list_tmo(struct thread_entry *thread)
458{
459 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
460 struct thread_entry *prev = thread->tmo.prev;
461 struct thread_entry *next = thread->tmo.next;
462
463 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
464
465 if (next != NULL)
466 next->tmo.prev = prev;
467
468 if (thread == *list)
469 {
470 /* List becomes next item and empty if next == NULL */
471 *list = next;
472 /* Mark as unlisted */
473 thread->tmo.prev = NULL;
474 }
475 else
476 {
477 if (next == NULL)
478 (*list)->tmo.prev = prev;
479 prev->tmo.next = next;
480 /* Mark as unlisted */
481 thread->tmo.prev = NULL;
482 }
483} 293}
484 294
485#ifdef HAVE_PRIORITY_SCHEDULING 295#ifdef HAVE_PRIORITY_SCHEDULING
486/*---------------------------------------------------------------------------
487 * Priority distribution structure (one category for each possible priority):
488 *
489 * +----+----+----+ ... +-----+
490 * hist: | F0 | F1 | F2 | | F31 |
491 * +----+----+----+ ... +-----+
492 * mask: | b0 | b1 | b2 | | b31 |
493 * +----+----+----+ ... +-----+
494 *
495 * F = count of threads at priority category n (frequency)
496 * b = bitmask of non-zero priority categories (occupancy)
497 *
498 * / if H[n] != 0 : 1
499 * b[n] = |
500 * \ else : 0
501 *
502 *---------------------------------------------------------------------------
503 * Basic priority inheritance priotocol (PIP):
504 *
505 * Mn = mutex n, Tn = thread n
506 *
507 * A lower priority thread inherits the priority of the highest priority
508 * thread blocked waiting for it to complete an action (such as release a
509 * mutex or respond to a message via queue_send):
510 *
511 * 1) T2->M1->T1
512 *
513 * T1 owns M1, T2 is waiting for M1 to realease M1. If T2 has a higher
514 * priority than T1 then T1 inherits the priority of T2.
515 *
516 * 2) T3
517 * \/
518 * T2->M1->T1
519 *
520 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
521 * T1 inherits the higher of T2 and T3.
522 *
523 * 3) T3->M2->T2->M1->T1
524 *
525 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
526 * then T1 inherits the priority of T3 through T2.
527 *
528 * Blocking chains can grow arbitrarily complex (though it's best that they
529 * not form at all very often :) and build-up from these units.
530 *---------------------------------------------------------------------------
531 */
532 296
533/*--------------------------------------------------------------------------- 297/*---------------------------------------------------------------------------
534 * Increment frequency at category "priority" 298 * Increment frequency at category "priority"
@@ -569,25 +333,86 @@ static inline void prio_move_entry(
569 if (++pd->hist[to] == 1) 333 if (++pd->hist[to] == 1)
570 priobit_set_bit(&pd->mask, to); 334 priobit_set_bit(&pd->mask, to);
571} 335}
336
572#endif /* HAVE_PRIORITY_SCHEDULING */ 337#endif /* HAVE_PRIORITY_SCHEDULING */
573 338
574/*--------------------------------------------------------------------------- 339/*---------------------------------------------------------------------------
575 * Move a thread back to a running state on its core. 340 * Common init for new thread basic info
576 *--------------------------------------------------------------------------- 341 *---------------------------------------------------------------------------
577 */ 342 */
578static void core_schedule_wakeup(struct thread_entry *thread) 343static void new_thread_base_init(struct thread_entry *thread,
344 void **stackp, size_t *stack_sizep,
345 const char *name IF_PRIO(, int priority)
346 IF_COP(, unsigned int core))
579{ 347{
580 const unsigned int core = IF_COP_CORE(thread->core); 348 ALIGN_BUFFER(*stackp, *stack_sizep, MIN_STACK_ALIGN);
349 thread->stack = *stackp;
350 thread->stack_size = *stack_sizep;
581 351
582 RTR_LOCK(core); 352 thread->name = name;
353 wait_queue_init(&thread->queue);
354 thread->wqp = NULL;
355 tmo_set_dequeued(thread);
356#ifdef HAVE_PRIORITY_SCHEDULING
357 thread->skip_count = 0;
358 thread->blocker = NULL;
359 thread->base_priority = priority;
360 thread->priority = priority;
361 memset(&thread->pdist, 0, sizeof(thread->pdist));
362 prio_add_entry(&thread->pdist, priority);
363#endif
364#if NUM_CORES > 1
365 thread->core = core;
366#endif
367#ifdef HAVE_SCHEDULER_BOOSTCTRL
368 thread->cpu_boost = 0;
369#endif
370#ifdef HAVE_IO_PRIORITY
371 /* Default to high (foreground) priority */
372 thread->io_priority = IO_PRIORITY_IMMEDIATE;
373#endif
374}
583 375
376/*---------------------------------------------------------------------------
377 * Move a thread onto the core's run queue and promote it
378 *---------------------------------------------------------------------------
379 */
380static inline void core_rtr_add(struct core_entry *corep,
381 struct thread_entry *thread)
382{
383 RTR_LOCK(corep);
384 rtr_queue_add(&corep->rtr, thread);
385 rtr_add_entry(corep, thread->priority);
386#ifdef HAVE_PRIORITY_SCHEDULING
387 thread->skip_count = thread->base_priority;
388#endif
584 thread->state = STATE_RUNNING; 389 thread->state = STATE_RUNNING;
390 RTR_UNLOCK(corep);
391}
585 392
586 add_to_list_l(&cores[core].running, thread); 393/*---------------------------------------------------------------------------
587 rtr_add_entry(core, thread->priority); 394 * Remove a thread from the core's run queue
588 395 *---------------------------------------------------------------------------
589 RTR_UNLOCK(core); 396 */
397static inline void core_rtr_remove(struct core_entry *corep,
398 struct thread_entry *thread)
399{
400 RTR_LOCK(corep);
401 rtr_queue_remove(&corep->rtr, thread);
402 rtr_subtract_entry(corep, thread->priority);
403 /* Does not demote state */
404 RTR_UNLOCK(corep);
405}
590 406
407/*---------------------------------------------------------------------------
408 * Move a thread back to a running state on its core
409 *---------------------------------------------------------------------------
410 */
411static NO_INLINE void core_schedule_wakeup(struct thread_entry *thread)
412{
413 const unsigned int core = IF_COP_CORE(thread->core);
414 struct core_entry *corep = __core_id_entry(core);
415 core_rtr_add(corep, thread);
591#if NUM_CORES > 1 416#if NUM_CORES > 1
592 if (core != CURRENT_CORE) 417 if (core != CURRENT_CORE)
593 core_wake(core); 418 core_wake(core);
@@ -596,17 +421,75 @@ static void core_schedule_wakeup(struct thread_entry *thread)
596 421
597#ifdef HAVE_PRIORITY_SCHEDULING 422#ifdef HAVE_PRIORITY_SCHEDULING
598/*--------------------------------------------------------------------------- 423/*---------------------------------------------------------------------------
424 * Locks the thread registered as the owner of the block and makes sure it
425 * didn't change in the meantime
426 *---------------------------------------------------------------------------
427 */
428#if NUM_CORES == 1
429static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
430{
431 return bl->thread;
432}
433#else /* NUM_CORES > 1 */
434static struct thread_entry * lock_blocker_thread(struct blocker *bl)
435{
436 /* The blocker thread may change during the process of trying to
437 capture it */
438 while (1)
439 {
440 struct thread_entry *t = bl->thread;
441
442 /* TRY, or else deadlocks are possible */
443 if (!t)
444 {
445 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
446 if (corelock_try_lock(&blsplay->cl))
447 {
448 if (!bl->thread)
449 return NULL; /* Still multi */
450
451 corelock_unlock(&blsplay->cl);
452 }
453 }
454 else
455 {
456 if (TRY_LOCK_THREAD(t))
457 {
458 if (bl->thread == t)
459 return t;
460
461 UNLOCK_THREAD(t);
462 }
463 }
464 }
465}
466#endif /* NUM_CORES */
467
468static inline void unlock_blocker_thread(struct blocker *bl)
469{
470#if NUM_CORES > 1
471 struct thread_entry *blt = bl->thread;
472 if (blt)
473 UNLOCK_THREAD(blt);
474 else
475 corelock_unlock(&((struct blocker_splay *)bl)->cl);
476#endif /* NUM_CORES > 1*/
477 (void)bl;
478}
479
480/*---------------------------------------------------------------------------
599 * Change the priority and rtr entry for a running thread 481 * Change the priority and rtr entry for a running thread
600 *--------------------------------------------------------------------------- 482 *---------------------------------------------------------------------------
601 */ 483 */
602static inline void set_running_thread_priority( 484static inline void set_rtr_thread_priority(
603 struct thread_entry *thread, int priority) 485 struct thread_entry *thread, int priority)
604{ 486{
605 const unsigned int core = IF_COP_CORE(thread->core); 487 const unsigned int core = IF_COP_CORE(thread->core);
606 RTR_LOCK(core); 488 struct core_entry *corep = __core_id_entry(core);
607 rtr_move_entry(core, thread->priority, priority); 489 RTR_LOCK(corep);
490 rtr_move_entry(corep, thread->priority, priority);
608 thread->priority = priority; 491 thread->priority = priority;
609 RTR_UNLOCK(core); 492 RTR_UNLOCK(corep);
610} 493}
611 494
612/*--------------------------------------------------------------------------- 495/*---------------------------------------------------------------------------
@@ -619,30 +502,21 @@ static inline void set_running_thread_priority(
619 * penalty under high contention. 502 * penalty under high contention.
620 *--------------------------------------------------------------------------- 503 *---------------------------------------------------------------------------
621 */ 504 */
622static int find_highest_priority_in_list_l( 505static int wait_queue_find_priority(struct __wait_queue *wqp)
623 struct thread_entry * const thread)
624{ 506{
625 if (LIKELY(thread != NULL)) 507 int highest_priority = PRIORITY_IDLE;
626 { 508 struct thread_entry *thread = WQ_THREAD_FIRST(wqp);
627 /* Go though list until the ending up at the initial thread */
628 int highest_priority = thread->priority;
629 struct thread_entry *curr = thread;
630 509
631 do 510 while (thread != NULL)
632 { 511 {
633 int priority = curr->priority; 512 int priority = thread->priority;
634 513 if (priority < highest_priority)
635 if (priority < highest_priority) 514 highest_priority = priority;
636 highest_priority = priority;
637
638 curr = curr->l.next;
639 }
640 while (curr != thread);
641 515
642 return highest_priority; 516 thread = WQ_THREAD_NEXT(thread);
643 } 517 }
644 518
645 return PRIORITY_IDLE; 519 return highest_priority;
646} 520}
647 521
648/*--------------------------------------------------------------------------- 522/*---------------------------------------------------------------------------
@@ -666,7 +540,7 @@ static void inherit_priority(
666 { 540 {
667 /* Multiple owners */ 541 /* Multiple owners */
668 struct blocker_splay *blsplay = (struct blocker_splay *)bl; 542 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
669 543
670 /* Recurse down the all the branches of this; it's the only way. 544 /* Recurse down the all the branches of this; it's the only way.
671 We might meet the same queue several times if more than one of 545 We might meet the same queue several times if more than one of
672 these threads is waiting the same queue. That isn't a problem 546 these threads is waiting the same queue. That isn't a problem
@@ -674,7 +548,7 @@ static void inherit_priority(
674 FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum) 548 FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
675 { 549 {
676 bl->priority = oldblpr; /* To see the change each time */ 550 bl->priority = oldblpr; /* To see the change each time */
677 blt = &threads[slotnum]; 551 blt = __thread_slot_entry(slotnum);
678 LOCK_THREAD(blt); 552 LOCK_THREAD(blt);
679 inherit_priority(blocker0, bl, blt, newblpr); 553 inherit_priority(blocker0, bl, blt, newblpr);
680 } 554 }
@@ -699,7 +573,7 @@ static void inherit_priority(
699 573
700 if (blt->state == STATE_RUNNING) 574 if (blt->state == STATE_RUNNING)
701 { 575 {
702 set_running_thread_priority(blt, newpr); 576 set_rtr_thread_priority(blt, newpr);
703 break; /* Running: last in chain */ 577 break; /* Running: last in chain */
704 } 578 }
705 579
@@ -714,7 +588,7 @@ static void inherit_priority(
714 break; /* Full circle - deadlock! */ 588 break; /* Full circle - deadlock! */
715 589
716 /* Blocker becomes current thread and the process repeats */ 590 /* Blocker becomes current thread and the process repeats */
717 struct thread_entry **bqp = blt->bqp; 591 struct __wait_queue *wqp = wait_queue_ptr(blt);
718 struct thread_entry *t = blt; 592 struct thread_entry *t = blt;
719 blt = lock_blocker_thread(bl); 593 blt = lock_blocker_thread(bl);
720 594
@@ -725,7 +599,7 @@ static void inherit_priority(
725 if (newpr <= oldblpr) 599 if (newpr <= oldblpr)
726 newblpr = newpr; 600 newblpr = newpr;
727 else if (oldpr <= oldblpr) 601 else if (oldpr <= oldblpr)
728 newblpr = find_highest_priority_in_list_l(*bqp); 602 newblpr = wait_queue_find_priority(wqp);
729 603
730 if (newblpr == oldblpr) 604 if (newblpr == oldblpr)
731 break; /* Queue priority not changing */ 605 break; /* Queue priority not changing */
@@ -735,22 +609,46 @@ static void inherit_priority(
735} 609}
736 610
737/*--------------------------------------------------------------------------- 611/*---------------------------------------------------------------------------
738 * Quick-disinherit of priority elevation. 'thread' must be a running thread. 612 * Quick-inherit of priority elevation. 'thread' must not be runnable
739 *--------------------------------------------------------------------------- 613 *---------------------------------------------------------------------------
740 */ 614 */
741static void priority_disinherit_internal(struct thread_entry *thread, 615static void priority_inherit_internal_inner(struct thread_entry *thread,
742 int blpr) 616 int blpr)
617{
618 if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < thread->priority)
619 thread->priority = blpr;
620}
621
622static inline void priority_inherit_internal(struct thread_entry *thread,
623 int blpr)
743{ 624{
744 if (blpr < PRIORITY_IDLE && 625 if (blpr < PRIORITY_IDLE)
745 prio_subtract_entry(&thread->pdist, blpr) == 0 && 626 priority_inherit_internal_inner(thread, blpr);
627}
628
629/*---------------------------------------------------------------------------
630 * Quick-disinherit of priority elevation. 'thread' must be the current thread
631 *---------------------------------------------------------------------------
632 */
633static void priority_disinherit_internal_inner(struct thread_entry *thread,
634 int blpr)
635{
636 if (prio_subtract_entry(&thread->pdist, blpr) == 0 &&
746 blpr <= thread->priority) 637 blpr <= thread->priority)
747 { 638 {
748 int priority = priobit_ffs(&thread->pdist.mask); 639 int priority = priobit_ffs(&thread->pdist.mask);
749 if (priority != thread->priority) 640 if (priority != thread->priority)
750 set_running_thread_priority(thread, priority); 641 set_rtr_thread_priority(thread, priority);
751 } 642 }
752} 643}
753 644
645static inline void priority_disinherit_internal(struct thread_entry *thread,
646 int blpr)
647{
648 if (blpr < PRIORITY_IDLE)
649 priority_disinherit_internal_inner(thread, blpr);
650}
651
754void priority_disinherit(struct thread_entry *thread, struct blocker *bl) 652void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
755{ 653{
756 LOCK_THREAD(thread); 654 LOCK_THREAD(thread);
@@ -767,30 +665,32 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
767{ 665{
768 /* All threads will have the same blocker and queue; only we are changing 666 /* All threads will have the same blocker and queue; only we are changing
769 it now */ 667 it now */
770 struct thread_entry **bqp = thread->bqp; 668 struct __wait_queue *wqp = wait_queue_ptr(thread);
771 struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker; 669 struct blocker *bl = thread->blocker;
772 struct thread_entry *blt = blsplay->blocker.thread; 670 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
671 struct thread_entry *blt = bl->thread;
773 672
774 /* The first thread is already locked and is assumed tagged "multi" */ 673 /* The first thread is already locked and is assumed tagged "multi" */
775 int count = 1; 674 int count = 1;
776 struct thread_entry *temp_queue = NULL;
777 675
778 /* 'thread' is locked on entry */ 676 /* Multiple versions of the wait queue may be seen if doing more than
677 one thread; queue removal isn't destructive to the pointers of the node
678 being removed; this may lead to the blocker priority being wrong for a
679 time but it gets fixed up below after getting exclusive access to the
680 queue */
779 while (1) 681 while (1)
780 { 682 {
781 LOCK_THREAD(blt);
782
783 remove_from_list_l(bqp, thread);
784 thread->blocker = NULL; 683 thread->blocker = NULL;
684 wait_queue_remove(thread);
785 685
786 struct thread_entry *tnext = *bqp; 686 unsigned int slotnum = THREAD_ID_SLOT(thread->id);
687 threadbit_set_bit(&blsplay->mask, slotnum);
688
689 struct thread_entry *tnext = WQ_THREAD_NEXT(thread);
787 if (tnext == NULL || tnext->retval == 0) 690 if (tnext == NULL || tnext->retval == 0)
788 break; 691 break;
789 692
790 add_to_list_l(&temp_queue, thread);
791
792 UNLOCK_THREAD(thread); 693 UNLOCK_THREAD(thread);
793 UNLOCK_THREAD(blt);
794 694
795 count++; 695 count++;
796 thread = tnext; 696 thread = tnext;
@@ -798,65 +698,51 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
798 LOCK_THREAD(thread); 698 LOCK_THREAD(thread);
799 } 699 }
800 700
801 int blpr = blsplay->blocker.priority;
802 priority_disinherit_internal(blt, blpr);
803
804 /* Locking order reverses here since the threads are no longer on the 701 /* Locking order reverses here since the threads are no longer on the
805 queue side */ 702 queued side */
806 if (count > 1) 703 if (count > 1)
807 {
808 add_to_list_l(&temp_queue, thread);
809 UNLOCK_THREAD(thread);
810 corelock_lock(&blsplay->cl); 704 corelock_lock(&blsplay->cl);
811 705
812 blpr = find_highest_priority_in_list_l(*bqp); 706 LOCK_THREAD(blt);
707
708 int blpr = bl->priority;
709 priority_disinherit_internal(blt, blpr);
710
711 if (count > 1)
712 {
813 blsplay->blocker.thread = NULL; 713 blsplay->blocker.thread = NULL;
814 714
815 thread = temp_queue; 715 blpr = wait_queue_find_priority(wqp);
816 LOCK_THREAD(thread); 716
717 FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
718 {
719 UNLOCK_THREAD(thread);
720 thread = __thread_slot_entry(slotnum);
721 LOCK_THREAD(thread);
722 priority_inherit_internal(thread, blpr);
723 core_schedule_wakeup(thread);
724 }
817 } 725 }
818 else 726 else
819 { 727 {
820 /* Becomes a simple, direct transfer */ 728 /* Becomes a simple, direct transfer */
821 if (thread->priority <= blpr)
822 blpr = find_highest_priority_in_list_l(*bqp);
823 blsplay->blocker.thread = thread; 729 blsplay->blocker.thread = thread;
824 }
825
826 blsplay->blocker.priority = blpr;
827 730
828 while (1) 731 if (thread->priority <= blpr)
829 { 732 blpr = wait_queue_find_priority(wqp);
830 unsigned int slotnum = THREAD_ID_SLOT(thread->id);
831 threadbit_set_bit(&blsplay->mask, slotnum);
832
833 if (blpr < PRIORITY_IDLE)
834 {
835 prio_add_entry(&thread->pdist, blpr);
836 if (blpr < thread->priority)
837 thread->priority = blpr;
838 }
839
840 if (count > 1)
841 remove_from_list_l(&temp_queue, thread);
842 733
734 priority_inherit_internal(thread, blpr);
843 core_schedule_wakeup(thread); 735 core_schedule_wakeup(thread);
736 }
844 737
845 UNLOCK_THREAD(thread); 738 UNLOCK_THREAD(thread);
846
847 thread = temp_queue;
848 if (thread == NULL)
849 break;
850 739
851 LOCK_THREAD(thread); 740 bl->priority = blpr;
852 }
853 741
854 UNLOCK_THREAD(blt); 742 UNLOCK_THREAD(blt);
855 743
856 if (count > 1) 744 if (count > 1)
857 {
858 corelock_unlock(&blsplay->cl); 745 corelock_unlock(&blsplay->cl);
859 }
860 746
861 blt->retval = count; 747 blt->retval = count;
862} 748}
@@ -876,29 +762,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread)
876 struct blocker *bl = thread->blocker; 762 struct blocker *bl = thread->blocker;
877 struct thread_entry *blt = bl->thread; 763 struct thread_entry *blt = bl->thread;
878 764
879 THREAD_ASSERT(cores[CURRENT_CORE].running == blt, 765 THREAD_ASSERT(__running_self_entry() == blt,
880 "UPPT->wrong thread", cores[CURRENT_CORE].running); 766 "UPPT->wrong thread", __running_self_entry());
881 767
882 LOCK_THREAD(blt); 768 LOCK_THREAD(blt);
883 769
884 struct thread_entry **bqp = thread->bqp;
885 remove_from_list_l(bqp, thread);
886 thread->blocker = NULL; 770 thread->blocker = NULL;
771 struct __wait_queue *wqp = wait_queue_remove(thread);
887 772
888 int blpr = bl->priority; 773 int blpr = bl->priority;
889 774
890 /* Remove the object's boost from the owning thread */ 775 /* Remove the object's boost from the owning thread */
891 if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority) 776 priority_disinherit_internal_inner(blt, blpr);
892 {
893 /* No more threads at this priority are waiting and the old level is
894 * at least the thread level */
895 int priority = priobit_ffs(&blt->pdist.mask);
896 if (priority != blt->priority)
897 set_running_thread_priority(blt, priority);
898 }
899
900 struct thread_entry *tnext = *bqp;
901 777
778 struct thread_entry *tnext = WQ_THREAD_FIRST(wqp);
902 if (LIKELY(tnext == NULL)) 779 if (LIKELY(tnext == NULL))
903 { 780 {
904 /* Expected shortcut - no more waiters */ 781 /* Expected shortcut - no more waiters */
@@ -906,20 +783,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread)
906 } 783 }
907 else 784 else
908 { 785 {
909 /* If lowering, we need to scan threads remaining in queue */ 786 /* If thread is at the blocker priority, its removal may drop it */
910 int priority = thread->priority; 787 if (thread->priority <= blpr)
911 if (priority <= blpr) 788 blpr = wait_queue_find_priority(wqp);
912 blpr = find_highest_priority_in_list_l(tnext);
913 789
914 if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority) 790 priority_inherit_internal_inner(thread, blpr);
915 thread->priority = blpr; /* Raise new owner */
916 } 791 }
917 792
793 bl->thread = thread; /* This thread pwns */
794
918 core_schedule_wakeup(thread); 795 core_schedule_wakeup(thread);
919 UNLOCK_THREAD(thread); 796 UNLOCK_THREAD(thread);
920 797
921 bl->thread = thread; /* This thread pwns */ 798 bl->priority = blpr; /* Save highest blocked priority */
922 bl->priority = blpr; /* Save highest blocked priority */ 799
923 UNLOCK_THREAD(blt); 800 UNLOCK_THREAD(blt);
924} 801}
925 802
@@ -933,9 +810,9 @@ static void wakeup_thread_release(struct thread_entry *thread)
933{ 810{
934 struct blocker *bl = thread->blocker; 811 struct blocker *bl = thread->blocker;
935 struct thread_entry *blt = lock_blocker_thread(bl); 812 struct thread_entry *blt = lock_blocker_thread(bl);
936 struct thread_entry **bqp = thread->bqp; 813
937 remove_from_list_l(bqp, thread);
938 thread->blocker = NULL; 814 thread->blocker = NULL;
815 struct __wait_queue *wqp = wait_queue_remove(thread);
939 816
940 /* Off to see the wizard... */ 817 /* Off to see the wizard... */
941 core_schedule_wakeup(thread); 818 core_schedule_wakeup(thread);
@@ -950,7 +827,7 @@ static void wakeup_thread_release(struct thread_entry *thread)
950 827
951 UNLOCK_THREAD(thread); 828 UNLOCK_THREAD(thread);
952 829
953 int newblpr = find_highest_priority_in_list_l(*bqp); 830 int newblpr = wait_queue_find_priority(wqp);
954 if (newblpr == bl->priority) 831 if (newblpr == bl->priority)
955 { 832 {
956 /* Blocker priority won't change */ 833 /* Blocker priority won't change */
@@ -963,25 +840,17 @@ static void wakeup_thread_release(struct thread_entry *thread)
963 840
964#endif /* HAVE_PRIORITY_SCHEDULING */ 841#endif /* HAVE_PRIORITY_SCHEDULING */
965 842
843
966/*--------------------------------------------------------------------------- 844/*---------------------------------------------------------------------------
967 * Explicitly wake up a thread on a blocking queue. Only affects threads of 845 * Explicitly wake up a thread on a blocking queue. Only affects threads of
968 * STATE_BLOCKED and STATE_BLOCKED_W_TMO. 846 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
969 * 847 *
970 * This code should be considered a critical section by the caller meaning 848 * INTERNAL: Intended for use by kernel and not programs.
971 * that the object's corelock should be held.
972 *
973 * INTERNAL: Intended for use by kernel objects and not for programs.
974 *--------------------------------------------------------------------------- 849 *---------------------------------------------------------------------------
975 */ 850 */
976unsigned int wakeup_thread_(struct thread_entry **list 851unsigned int wakeup_thread_(struct thread_entry *thread
977 IF_PRIO(, enum wakeup_thread_protocol proto)) 852 IF_PRIO(, enum wakeup_thread_protocol proto))
978{ 853{
979 struct thread_entry *thread = *list;
980
981 /* Check if there is a blocked thread at all. */
982 if (*list == NULL)
983 return THREAD_NONE;
984
985 LOCK_THREAD(thread); 854 LOCK_THREAD(thread);
986 855
987 /* Determine thread's current state. */ 856 /* Determine thread's current state. */
@@ -1008,24 +877,21 @@ unsigned int wakeup_thread_(struct thread_entry **list
1008 else 877 else
1009#endif /* HAVE_PRIORITY_SCHEDULING */ 878#endif /* HAVE_PRIORITY_SCHEDULING */
1010 { 879 {
1011 /* No PIP - just boost the thread by aging */ 880 wait_queue_remove(thread);
1012#ifdef HAVE_PRIORITY_SCHEDULING
1013 thread->skip_count = thread->priority;
1014#endif /* HAVE_PRIORITY_SCHEDULING */
1015 remove_from_list_l(list, thread);
1016 core_schedule_wakeup(thread); 881 core_schedule_wakeup(thread);
1017 UNLOCK_THREAD(thread); 882 UNLOCK_THREAD(thread);
1018 } 883 }
1019 884
1020 return should_switch_tasks(); 885 return should_switch_tasks(thread);
1021 886
1022 /* Nothing to do. State is not blocked. */
1023 default:
1024#if THREAD_EXTRA_CHECKS
1025 THREAD_PANICF("wakeup_thread->block invalid", thread);
1026 case STATE_RUNNING: 887 case STATE_RUNNING:
1027 case STATE_KILLED: 888 if (wait_queue_try_remove(thread))
1028#endif 889 {
890 UNLOCK_THREAD(thread);
891 return THREAD_OK; /* timed out */
892 }
893
894 default:
1029 UNLOCK_THREAD(thread); 895 UNLOCK_THREAD(thread);
1030 return THREAD_NONE; 896 return THREAD_NONE;
1031 } 897 }
@@ -1037,201 +903,102 @@ unsigned int wakeup_thread_(struct thread_entry **list
1037 * tick when the next check will occur. 903 * tick when the next check will occur.
1038 *--------------------------------------------------------------------------- 904 *---------------------------------------------------------------------------
1039 */ 905 */
1040void check_tmo_threads(void) 906static NO_INLINE void check_tmo_expired_inner(struct core_entry *corep)
1041{ 907{
1042 const unsigned int core = CURRENT_CORE;
1043 const long tick = current_tick; /* snapshot the current tick */ 908 const long tick = current_tick; /* snapshot the current tick */
1044 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */ 909 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
1045 struct thread_entry *next = cores[core].timeout; 910 struct thread_entry *prev = NULL;
911 struct thread_entry *thread = TMO_THREAD_FIRST(&corep->tmo);
1046 912
1047 /* If there are no processes waiting for a timeout, just keep the check 913 /* If there are no processes waiting for a timeout, just keep the check
1048 tick from falling into the past. */ 914 tick from falling into the past. */
1049 915
1050 /* Break the loop once we have walked through the list of all 916 /* Break the loop once we have walked through the list of all
1051 * sleeping processes or have removed them all. */ 917 * sleeping processes or have removed them all. */
1052 while (next != NULL) 918 while (thread != NULL)
1053 { 919 {
1054 /* Check sleeping threads. Allow interrupts between checks. */ 920 /* Check sleeping threads. Allow interrupts between checks. */
1055 enable_irq(); 921 enable_irq();
1056 922
1057 struct thread_entry *curr = next; 923 struct thread_entry *next = TMO_THREAD_NEXT(thread);
1058
1059 next = curr->tmo.next;
1060 924
1061 /* Lock thread slot against explicit wakeup */ 925 /* Lock thread slot against explicit wakeup */
1062 disable_irq(); 926 disable_irq();
1063 LOCK_THREAD(curr); 927 LOCK_THREAD(thread);
1064 928
1065 unsigned state = curr->state; 929 unsigned int state = thread->state;
1066 930
1067 if (state < TIMEOUT_STATE_FIRST) 931 if (LIKELY(state >= TIMEOUT_STATE_FIRST &&
1068 { 932 TIME_BEFORE(tick, thread->tmo_tick)))
1069 /* Cleanup threads no longer on a timeout but still on the
1070 * list. */
1071 remove_from_list_tmo(curr);
1072 }
1073 else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
1074 { 933 {
1075 /* Timeout still pending - this will be the usual case */ 934 /* Timeout still pending - this will be the usual case */
1076 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) 935 if (TIME_BEFORE(thread->tmo_tick, next_tmo_check))
1077 { 936 {
1078 /* Earliest timeout found so far - move the next check up 937 /* Move the next check up to its time */
1079 to its time */ 938 next_tmo_check = thread->tmo_tick;
1080 next_tmo_check = curr->tmo_tick;
1081 } 939 }
940
941 prev = thread;
1082 } 942 }
1083 else 943 else
1084 { 944 {
1085 /* Sleep timeout has been reached so bring the thread back to 945 /* TODO: there are no priority-inheriting timeout blocks
1086 * life again. */ 946 right now but the procedure should be established */
1087 if (state == STATE_BLOCKED_W_TMO)
1088 {
1089#ifdef HAVE_CORELOCK_OBJECT
1090 /* Lock the waiting thread's kernel object */
1091 struct corelock *ocl = curr->obj_cl;
1092
1093 if (UNLIKELY(corelock_try_lock(ocl) == 0))
1094 {
1095 /* Need to retry in the correct order though the need is
1096 * unlikely */
1097 UNLOCK_THREAD(curr);
1098 corelock_lock(ocl);
1099 LOCK_THREAD(curr);
1100
1101 if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
1102 {
1103 /* Thread was woken or removed explicitely while slot
1104 * was unlocked */
1105 corelock_unlock(ocl);
1106 remove_from_list_tmo(curr);
1107 UNLOCK_THREAD(curr);
1108 continue;
1109 }
1110 }
1111#endif /* NUM_CORES */
1112
1113#ifdef HAVE_WAKEUP_EXT_CB
1114 if (curr->wakeup_ext_cb != NULL)
1115 curr->wakeup_ext_cb(curr);
1116#endif
1117
1118#ifdef HAVE_PRIORITY_SCHEDULING
1119 if (curr->blocker != NULL)
1120 wakeup_thread_release(curr);
1121 else
1122#endif
1123 remove_from_list_l(curr->bqp, curr);
1124
1125 corelock_unlock(ocl);
1126 }
1127 /* else state == STATE_SLEEPING */
1128 947
1129 remove_from_list_tmo(curr); 948 /* Sleep timeout has been reached / garbage collect stale list
949 items */
950 tmo_queue_expire(&corep->tmo, prev, thread);
1130 951
1131 RTR_LOCK(core); 952 if (state >= TIMEOUT_STATE_FIRST)
953 core_rtr_add(corep, thread);
1132 954
1133 curr->state = STATE_RUNNING; 955 /* removed this one - prev doesn't change */
1134
1135 add_to_list_l(&cores[core].running, curr);
1136 rtr_add_entry(core, curr->priority);
1137
1138 RTR_UNLOCK(core);
1139 } 956 }
1140 957
1141 UNLOCK_THREAD(curr); 958 UNLOCK_THREAD(thread);
1142 }
1143
1144 cores[core].next_tmo_check = next_tmo_check;
1145}
1146
1147/*---------------------------------------------------------------------------
1148 * Performs operations that must be done before blocking a thread but after
1149 * the state is saved.
1150 *---------------------------------------------------------------------------
1151 */
1152#if NUM_CORES > 1
1153static inline void run_blocking_ops(
1154 unsigned int core, struct thread_entry *thread)
1155{
1156 struct thread_blk_ops *ops = &cores[core].blk_ops;
1157 const unsigned flags = ops->flags;
1158
1159 if (LIKELY(flags == TBOP_CLEAR))
1160 return;
1161 959
1162 switch (flags) 960 thread = next;
1163 {
1164 case TBOP_SWITCH_CORE:
1165 core_switch_blk_op(core, thread);
1166 /* Fall-through */
1167 case TBOP_UNLOCK_CORELOCK:
1168 corelock_unlock(ops->cl_p);
1169 break;
1170 } 961 }
1171 962
1172 ops->flags = TBOP_CLEAR; 963 corep->next_tmo_check = next_tmo_check;
1173} 964}
1174#endif /* NUM_CORES > 1 */
1175 965
1176#ifdef RB_PROFILE 966static FORCE_INLINE void check_tmo_expired(struct core_entry *corep)
1177void profile_thread(void)
1178{ 967{
1179 profstart(cores[CURRENT_CORE].running - threads); 968 if (!TIME_BEFORE(current_tick, corep->next_tmo_check))
969 check_tmo_expired_inner(corep);
1180} 970}
1181#endif
1182 971
1183/*--------------------------------------------------------------------------- 972/*---------------------------------------------------------------------------
1184 * Prepares a thread to block on an object's list and/or for a specified 973 * Prepares the current thread to sleep forever or for the given duration.
1185 * duration - expects object and slot to be appropriately locked if needed
1186 * and interrupts to be masked.
1187 *--------------------------------------------------------------------------- 974 *---------------------------------------------------------------------------
1188 */ 975 */
1189static inline void block_thread_on_l(struct thread_entry *thread, 976static FORCE_INLINE void prepare_block(struct thread_entry *current,
1190 unsigned state) 977 unsigned int state, int timeout)
1191{ 978{
1192 /* If inlined, unreachable branches will be pruned with no size penalty 979 const unsigned int core = IF_COP_CORE(current->core);
1193 because state is passed as a constant parameter. */
1194 const unsigned int core = IF_COP_CORE(thread->core);
1195 980
1196 /* Remove the thread from the list of running threads. */ 981 /* Remove the thread from the list of running threads. */
1197 RTR_LOCK(core); 982 struct core_entry *corep = __core_id_entry(core);
1198 remove_from_list_l(&cores[core].running, thread); 983 core_rtr_remove(corep, current);
1199 rtr_subtract_entry(core, thread->priority);
1200 RTR_UNLOCK(core);
1201 984
1202 /* Add a timeout to the block if not infinite */ 985 if (timeout >= 0)
1203 switch (state)
1204 { 986 {
1205 case STATE_BLOCKED: 987 /* Sleep may expire. */
1206 case STATE_BLOCKED_W_TMO: 988 long tmo_tick = current_tick + timeout;
1207 /* Put the thread into a new list of inactive threads. */ 989 current->tmo_tick = tmo_tick;
1208 add_to_list_l(thread->bqp, thread);
1209 990
1210 if (state == STATE_BLOCKED) 991 if (TIME_BEFORE(tmo_tick, corep->next_tmo_check))
1211 break; 992 corep->next_tmo_check = tmo_tick;
1212 993
1213 /* Fall-through */ 994 tmo_queue_register(&corep->tmo, current);
1214 case STATE_SLEEPING:
1215 /* If this thread times out sooner than any other thread, update
1216 next_tmo_check to its timeout */
1217 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1218 {
1219 cores[core].next_tmo_check = thread->tmo_tick;
1220 }
1221 995
1222 if (thread->tmo.prev == NULL) 996 if (state == STATE_BLOCKED)
1223 { 997 state = STATE_BLOCKED_W_TMO;
1224 add_to_list_tmo(thread);
1225 }
1226 /* else thread was never removed from list - just keep it there */
1227 break;
1228 } 998 }
1229 999
1230 /* Remember the the next thread about to block. */
1231 cores[core].block_task = thread;
1232
1233 /* Report new state. */ 1000 /* Report new state. */
1234 thread->state = state; 1001 current->state = state;
1235} 1002}
1236 1003
1237/*--------------------------------------------------------------------------- 1004/*---------------------------------------------------------------------------
@@ -1239,178 +1006,120 @@ static inline void block_thread_on_l(struct thread_entry *thread,
1239 * that removed itself from the running list first must specify itself in 1006 * that removed itself from the running list first must specify itself in
1240 * the parameter. 1007 * the parameter.
1241 * 1008 *
1242 * INTERNAL: Intended for use by kernel and not for programs. 1009 * INTERNAL: Intended for use by kernel and not programs.
1243 *--------------------------------------------------------------------------- 1010 *---------------------------------------------------------------------------
1244 */ 1011 */
1245void switch_thread(void) 1012void switch_thread(void)
1246{ 1013{
1247
1248 const unsigned int core = CURRENT_CORE; 1014 const unsigned int core = CURRENT_CORE;
1249 struct thread_entry *block = cores[core].block_task; 1015 struct core_entry *corep = __core_id_entry(core);
1250 struct thread_entry *thread = cores[core].running; 1016 struct thread_entry *thread = corep->running;
1251 1017
1252 /* Get context to save - next thread to run is unknown until all wakeups 1018 if (thread)
1253 * are evaluated */
1254 if (block != NULL)
1255 { 1019 {
1256 cores[core].block_task = NULL;
1257
1258#if NUM_CORES > 1
1259 if (UNLIKELY(thread == block))
1260 {
1261 /* This was the last thread running and another core woke us before
1262 * reaching here. Force next thread selection to give tmo threads or
1263 * other threads woken before this block a first chance. */
1264 block = NULL;
1265 }
1266 else
1267#endif
1268 {
1269 /* Blocking task is the old one */
1270 thread = block;
1271 }
1272 }
1273
1274#ifdef RB_PROFILE 1020#ifdef RB_PROFILE
1275#ifdef CPU_COLDFIRE 1021 profile_thread_stopped(THREAD_ID_SLOT(thread->id));
1276 _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1277#else
1278 profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1279#endif 1022#endif
1280#endif
1281
1282 /* Begin task switching by saving our current context so that we can
1283 * restore the state of the current thread later to the point prior
1284 * to this call. */
1285 thread_store_context(thread);
1286#ifdef DEBUG 1023#ifdef DEBUG
1287 /* Check core_ctx buflib integrity */ 1024 /* Check core_ctx buflib integrity */
1288 core_check_valid(); 1025 core_check_valid();
1289#endif
1290
1291 /* Check if the current thread stack is overflown */
1292 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1293 thread_stkov(thread);
1294
1295#if NUM_CORES > 1
1296 /* Run any blocking operations requested before switching/sleeping */
1297 run_blocking_ops(core, thread);
1298#endif 1026#endif
1027 thread_store_context(thread);
1299 1028
1300#ifdef HAVE_PRIORITY_SCHEDULING 1029 /* Check if the current thread stack is overflown */
1301 /* Reset the value of thread's skip count */ 1030 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1302 thread->skip_count = 0; 1031 thread_stkov(thread);
1303#endif 1032 }
1304 1033
1034 /* TODO: make a real idle task */
1305 for (;;) 1035 for (;;)
1306 { 1036 {
1307 /* If there are threads on a timeout and the earliest wakeup is due,
1308 * check the list and wake any threads that need to start running
1309 * again. */
1310 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1311 {
1312 check_tmo_threads();
1313 }
1314
1315 disable_irq(); 1037 disable_irq();
1316 RTR_LOCK(core);
1317 1038
1318 thread = cores[core].running; 1039 /* Check for expired timeouts */
1040 check_tmo_expired(corep);
1319 1041
1320 if (UNLIKELY(thread == NULL)) 1042 RTR_LOCK(corep);
1321 {
1322 /* Enter sleep mode to reduce power usage - woken up on interrupt
1323 * or wakeup request from another core - expected to enable
1324 * interrupts. */
1325 RTR_UNLOCK(core);
1326 core_sleep(IF_COP(core));
1327 }
1328 else
1329 {
1330#ifdef HAVE_PRIORITY_SCHEDULING
1331 /* Select the new task based on priorities and the last time a
1332 * process got CPU time relative to the highest priority runnable
1333 * task. */
1334 int max = priobit_ffs(&cores[core].rtr.mask);
1335 1043
1336 if (block == NULL) 1044 if (!RTR_EMPTY(&corep->rtr))
1337 { 1045 break;
1338 /* Not switching on a block, tentatively select next thread */
1339 thread = thread->l.next;
1340 }
1341 1046
1342 for (;;) 1047 thread = NULL;
1343 { 1048
1344 int priority = thread->priority; 1049 /* Enter sleep mode to reduce power usage */
1345 int diff; 1050 RTR_UNLOCK(corep);
1346 1051 core_sleep(IF_COP(core));
1347 /* This ridiculously simple method of aging seems to work 1052
1348 * suspiciously well. It does tend to reward CPU hogs (under 1053 /* Awakened by interrupt or other CPU */
1349 * yielding) but that's generally not desirable at all. On 1054 }
 1350 * the plus side, it, relative to other threads, penalizes 1055
1351 * excess yielding which is good if some high priority thread 1056 thread = (thread && thread->state == STATE_RUNNING) ?
1352 * is performing no useful work such as polling for a device 1057 RTR_THREAD_NEXT(thread) : RTR_THREAD_FIRST(&corep->rtr);
1353 * to be ready. Of course, aging is only employed when higher 1058
1354 * and lower priority threads are runnable. The highest 1059#ifdef HAVE_PRIORITY_SCHEDULING
1355 * priority runnable thread(s) are never skipped unless a 1060 /* Select the new task based on priorities and the last time a
1356 * lower-priority process has aged sufficiently. Priorities 1061 * process got CPU time relative to the highest priority runnable
1357 * of REALTIME class are run strictly according to priority 1062 * task. If priority is not a feature, then FCFS is used (above). */
1358 * thus are not subject to switchout due to lower-priority 1063 int max = priobit_ffs(&corep->rtr_dist.mask);
1359 * processes aging; they must give up the processor by going
1360 * off the run list. */
1361 if (LIKELY(priority <= max) ||
1362 (priority > PRIORITY_REALTIME &&
1363 (diff = priority - max,
1364 ++thread->skip_count > diff*diff)))
1365 {
1366 cores[core].running = thread;
1367 break;
1368 }
1369
1370 thread = thread->l.next;
1371 }
1372#else
1373 /* Without priority use a simple FCFS algorithm */
1374 if (block == NULL)
1375 {
1376 /* Not switching on a block, select next thread */
1377 thread = thread->l.next;
1378 cores[core].running = thread;
1379 }
1380#endif /* HAVE_PRIORITY_SCHEDULING */
1381 1064
1382 RTR_UNLOCK(core); 1065 for (;;)
1383 enable_irq(); 1066 {
1067 int priority = thread->priority;
1068 int diff;
1069
1070 /* This ridiculously simple method of aging seems to work
1071 * suspiciously well. It does tend to reward CPU hogs (under
1072 * yielding) but that's generally not desirable at all. On
 1073 * the plus side, it, relative to other threads, penalizes
1074 * excess yielding which is good if some high priority thread
1075 * is performing no useful work such as polling for a device
1076 * to be ready. Of course, aging is only employed when higher
1077 * and lower priority threads are runnable. The highest
1078 * priority runnable thread(s) are never skipped unless a
1079 * lower-priority process has aged sufficiently. Priorities
1080 * of REALTIME class are run strictly according to priority
1081 * thus are not subject to switchout due to lower-priority
1082 * processes aging; they must give up the processor by going
1083 * off the run list. */
1084 if (LIKELY(priority <= max) ||
1085 (priority > PRIORITY_REALTIME &&
1086 (diff = priority - max, ++thread->skip_count > diff*diff)))
1087 {
1384 break; 1088 break;
1385 } 1089 }
1090
1091 thread = RTR_THREAD_NEXT(thread);
1386 } 1092 }
1387 1093
1388 /* And finally give control to the next thread. */ 1094 thread->skip_count = 0; /* Reset aging counter */
1095#endif /* HAVE_PRIORITY_SCHEDULING */
1096
1097 rtr_queue_make_first(&corep->rtr, thread);
1098 corep->running = thread;
1099
1100 RTR_UNLOCK(corep);
1101 enable_irq();
1102
1103 /* And finally, give control to the next thread. */
1389 thread_load_context(thread); 1104 thread_load_context(thread);
1390 1105
1391#ifdef RB_PROFILE 1106#ifdef RB_PROFILE
1392 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); 1107 profile_thread_started(THREAD_ID_SLOT(thread->id));
1393#endif 1108#endif
1394
1395} 1109}
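The aging rule used in the selection loop can be read on its own: a runnable thread whose priority number is worse than the best runnable priority is skipped until its skip count exceeds the square of the gap, and REALTIME-class priorities are never overtaken this way. A standalone sketch of just that decision; the function name and the PRIORITY_REALTIME value are illustrative assumptions:

    #include <stdbool.h>

    #define PRIORITY_REALTIME 4   /* assumed boundary of the REALTIME class */

    /* 'priority' is the candidate's priority (lower number = more urgent),
     * 'max' is the best priority currently runnable, and *skip_count is
     * the candidate's aging counter (cleared once it actually runs). */
    static bool may_run_now(int priority, int max, int *skip_count)
    {
        if (priority <= max)
            return true;                     /* already the best, or tied */

        if (priority <= PRIORITY_REALTIME)
            return false;                    /* REALTIME: strict ordering */

        int diff = priority - max;
        return ++*skip_count > diff * diff;  /* aged enough to jump ahead */
    }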
1396 1110
1397/*--------------------------------------------------------------------------- 1111/*---------------------------------------------------------------------------
1398 * Sleeps a thread for at least a specified number of ticks with zero being 1112 * Sleeps a thread for at least a specified number of ticks with zero being
1399 * a wait until the next tick. 1113 * a wait until the next tick.
1400 * 1114 *
1401 * INTERNAL: Intended for use by kernel and not for programs. 1115 * INTERNAL: Intended for use by kernel and not programs.
1402 *--------------------------------------------------------------------------- 1116 *---------------------------------------------------------------------------
1403 */ 1117 */
1404void sleep_thread(int ticks) 1118void sleep_thread(int ticks)
1405{ 1119{
1406 struct thread_entry *current = cores[CURRENT_CORE].running; 1120 struct thread_entry *current = __running_self_entry();
1407
1408 LOCK_THREAD(current); 1121 LOCK_THREAD(current);
1409 1122 prepare_block(current, STATE_SLEEPING, MAX(ticks, 0) + 1);
1410 /* Set our timeout, remove from run list and join timeout list. */
1411 current->tmo_tick = current_tick + MAX(ticks, 0) + 1;
1412 block_thread_on_l(current, STATE_SLEEPING);
1413
1414 UNLOCK_THREAD(current); 1123 UNLOCK_THREAD(current);
1415} 1124}
1416 1125
@@ -1418,131 +1127,42 @@ void sleep_thread(int ticks)
1418 * Block a thread on a blocking queue for explicit wakeup. If timeout is 1127 * Block a thread on a blocking queue for explicit wakeup. If timeout is
1419 * negative, the block is infinite. 1128 * negative, the block is infinite.
1420 * 1129 *
1421 * INTERNAL: Intended for use by kernel objects and not for programs. 1130 * INTERNAL: Intended for use by kernel and not programs.
1422 *--------------------------------------------------------------------------- 1131 *---------------------------------------------------------------------------
1423 */ 1132 */
1424void block_thread(struct thread_entry *current, int timeout) 1133void block_thread_(struct thread_entry *current, int timeout)
1425{ 1134{
1426 LOCK_THREAD(current); 1135 LOCK_THREAD(current);
1427 1136
1428 struct blocker *bl = NULL;
1429#ifdef HAVE_PRIORITY_SCHEDULING 1137#ifdef HAVE_PRIORITY_SCHEDULING
1430 bl = current->blocker; 1138 struct blocker *bl = current->blocker;
1431 struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL; 1139 struct thread_entry *blt = NULL;
1432#endif /* HAVE_PRIORITY_SCHEDULING */ 1140 if (bl != NULL)
1433
1434 if (LIKELY(timeout < 0))
1435 {
1436 /* Block until explicitly woken */
1437 block_thread_on_l(current, STATE_BLOCKED);
1438 }
1439 else
1440 { 1141 {
1441 /* Set the state to blocked with the specified timeout */ 1142 current->blocker = bl;
1442 current->tmo_tick = current_tick + timeout; 1143 blt = lock_blocker_thread(bl);
1443 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1444 } 1144 }
1145#endif /* HAVE_PRIORITY_SCHEDULING */
1445 1146
1446 if (bl == NULL) 1147 wait_queue_register(current);
1447 { 1148 prepare_block(current, STATE_BLOCKED, timeout);
1448 UNLOCK_THREAD(current);
1449 return;
1450 }
1451 1149
1452#ifdef HAVE_PRIORITY_SCHEDULING 1150#ifdef HAVE_PRIORITY_SCHEDULING
1453 int newblpr = current->priority; 1151 if (bl != NULL)
1454 UNLOCK_THREAD(current);
1455
1456 if (newblpr >= bl->priority)
1457 { 1152 {
1458 unlock_blocker_thread(bl); 1153 int newblpr = current->priority;
1459 return; /* Queue priority won't change */ 1154 UNLOCK_THREAD(current);
1460 }
1461 1155
1462 inherit_priority(bl, bl, blt, newblpr); 1156 if (newblpr < bl->priority)
1157 inherit_priority(bl, bl, blt, newblpr);
1158 else
1159 unlock_blocker_thread(bl); /* Queue priority won't change */
1160 }
1161 else
1463#endif /* HAVE_PRIORITY_SCHEDULING */ 1162#endif /* HAVE_PRIORITY_SCHEDULING */
1464}
1465
1466/*---------------------------------------------------------------------------
1467 * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
1468 *---------------------------------------------------------------------------
1469 */
1470static void new_thread_id(unsigned int slot_num,
1471 struct thread_entry *thread)
1472{
1473 unsigned int version =
1474 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
1475 & THREAD_ID_VERSION_MASK;
1476
1477 /* If wrapped to 0, make it 1 */
1478 if (version == 0)
1479 version = 1u << THREAD_ID_VERSION_SHIFT;
1480
1481 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
1482}
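The removed helper above is also a compact illustration of how thread IDs are protected against slot re-use: the slot index lives in the low bits and a wrapping, never-zero version lives in the high bits, so a stale ID stops matching once its slot is recycled. A standalone sketch using the constants implied by the 0x00000100..0xffffff00 comment (the real masks are defined in the thread headers):

    #include <stdio.h>

    #define THREAD_ID_VERSION_SHIFT 8
    #define THREAD_ID_VERSION_MASK  0xffffff00u
    #define THREAD_ID_SLOT_MASK     0x000000ffu

    /* Bump the version part of an ID while keeping the slot number; a
     * version of zero is skipped so no valid ID is ever slot-only. */
    static unsigned int next_thread_id(unsigned int old_id, unsigned int slot)
    {
        unsigned int version = (old_id + (1u << THREAD_ID_VERSION_SHIFT))
                               & THREAD_ID_VERSION_MASK;
        if (version == 0)
            version = 1u << THREAD_ID_VERSION_SHIFT;
        return version | (slot & THREAD_ID_SLOT_MASK);
    }

    int main(void)
    {
        unsigned int id = 0x00000103u;     /* version 1, slot 3 */
        id = next_thread_id(id, 3);        /* -> 0x00000203 */
        printf("%#010x\n", id);
        return 0;
    }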
1483
1484/*---------------------------------------------------------------------------
1485 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1486 * will be locked on multicore.
1487 *---------------------------------------------------------------------------
1488 */
1489static struct thread_entry * find_empty_thread_slot(void)
1490{
1491 /* Any slot could be on an interrupt-accessible list */
1492 IF_COP( int oldlevel = disable_irq_save(); )
1493 struct thread_entry *thread = NULL;
1494 int n;
1495
1496 for (n = 0; n < MAXTHREADS; n++)
1497 { 1163 {
1498 /* Obtain current slot state - lock it on multicore */ 1164 UNLOCK_THREAD(current);
1499 struct thread_entry *t = &threads[n];
1500 LOCK_THREAD(t);
1501
1502 if (t->state == STATE_KILLED)
1503 {
1504 /* Slot is empty - leave it locked and caller will unlock */
1505 thread = t;
1506 break;
1507 }
1508
1509 /* Finished examining slot - no longer busy - unlock on multicore */
1510 UNLOCK_THREAD(t);
1511 } 1165 }
1512
 1513 IF_COP( restore_irq(oldlevel); ) /* Re-enable interrupts - this slot is
 1514 not accessible to them yet */
1515 return thread;
1516}
1517
1518/*---------------------------------------------------------------------------
1519 * Return the thread_entry pointer for a thread_id. Return the current
1520 * thread if the ID is (unsigned int)-1 (alias for current).
1521 *---------------------------------------------------------------------------
1522 */
1523struct thread_entry * thread_id_entry(unsigned int thread_id)
1524{
1525 return &threads[thread_id & THREAD_ID_SLOT_MASK];
1526}
1527
1528/*---------------------------------------------------------------------------
1529 * Return the thread id of the calling thread
1530 * --------------------------------------------------------------------------
1531 */
1532unsigned int thread_self(void)
1533{
1534 return cores[CURRENT_CORE].running->id;
1535}
1536
1537/*---------------------------------------------------------------------------
1538 * Return the thread entry of the calling thread.
1539 *
1540 * INTERNAL: Intended for use by kernel and not for programs.
1541 *---------------------------------------------------------------------------
1542 */
1543struct thread_entry* thread_self_entry(void)
1544{
1545 return cores[CURRENT_CORE].running;
1546} 1166}
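For callers, the usual shape around block_thread() follows the thread_wait() code further down: take the object's corelock, disable interrupts, register on the object's wait queue, release the corelock and switch away. A hedged sketch for a hypothetical kernel object; struct my_object and my_object_wait() are illustrative, only the kernel calls themselves appear in this file:

    /* Hypothetical object embedding a wait queue and its corelock. */
    struct my_object
    {
        struct corelock cl;          /* serializes access across cores */
        struct __wait_queue queue;   /* threads blocked on this object */
    };

    static void my_object_wait(struct my_object *obj, int timeout_ticks)
    {
        struct thread_entry *current = __running_self_entry();

        corelock_lock(&obj->cl);
        disable_irq();

        /* Negative timeout (TIMEOUT_BLOCK) blocks until explicit wakeup;
         * a non-negative value also arms a timeout of that many ticks. */
        block_thread(current, timeout_ticks, &obj->queue, NULL);

        corelock_unlock(&obj->cl);
        switch_thread();             /* give up the CPU until woken */
    }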
1547 1167
1548/*--------------------------------------------------------------------------- 1168/*---------------------------------------------------------------------------
@@ -1552,9 +1172,8 @@ struct thread_entry* thread_self_entry(void)
1552 */ 1172 */
1553void core_idle(void) 1173void core_idle(void)
1554{ 1174{
1555 IF_COP( const unsigned int core = CURRENT_CORE; )
1556 disable_irq(); 1175 disable_irq();
1557 core_sleep(IF_COP(core)); 1176 core_sleep(IF_COP(CURRENT_CORE));
1558} 1177}
1559 1178
1560/*--------------------------------------------------------------------------- 1179/*---------------------------------------------------------------------------
@@ -1570,141 +1189,64 @@ unsigned int create_thread(void (*function)(void),
1570 IF_PRIO(, int priority) 1189 IF_PRIO(, int priority)
1571 IF_COP(, unsigned int core)) 1190 IF_COP(, unsigned int core))
1572{ 1191{
1573 unsigned int i; 1192 struct thread_entry *thread = thread_alloc();
1574 unsigned int stack_words;
1575 uintptr_t stackptr, stackend;
1576 struct thread_entry *thread;
1577 unsigned state;
1578 int oldlevel;
1579
1580 thread = find_empty_thread_slot();
1581 if (thread == NULL) 1193 if (thread == NULL)
1582 {
1583 return 0; 1194 return 0;
1584 }
1585
1586 oldlevel = disable_irq_save();
1587
1588 /* Munge the stack to make it easy to spot stack overflows */
1589 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
1590 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
1591 stack_size = stackend - stackptr;
1592 stack_words = stack_size / sizeof (uintptr_t);
1593 1195
1594 for (i = 0; i < stack_words; i++) 1196 new_thread_base_init(thread, &stack, &stack_size, name
1595 { 1197 IF_PRIO(, priority) IF_COP(, core));
1596 ((uintptr_t *)stackptr)[i] = DEADBEEF;
1597 }
1598 1198
1599 /* Store interesting information */ 1199 unsigned int stack_words = stack_size / sizeof (uintptr_t);
1600 thread->name = name; 1200 if (stack_words == 0)
1601 thread->stack = (uintptr_t *)stackptr; 1201 return 0;
1602 thread->stack_size = stack_size;
1603 thread->queue = NULL;
1604#ifdef HAVE_WAKEUP_EXT_CB
1605 thread->wakeup_ext_cb = NULL;
1606#endif
1607#ifdef HAVE_SCHEDULER_BOOSTCTRL
1608 thread->cpu_boost = 0;
1609#endif
1610#ifdef HAVE_PRIORITY_SCHEDULING
1611 memset(&thread->pdist, 0, sizeof(thread->pdist));
1612 thread->blocker = NULL;
1613 thread->base_priority = priority;
1614 thread->priority = priority;
1615 thread->skip_count = priority;
1616 prio_add_entry(&thread->pdist, priority);
1617#endif
1618 1202
1619#ifdef HAVE_IO_PRIORITY 1203 /* Munge the stack to make it easy to spot stack overflows */
1620 /* Default to high (foreground) priority */ 1204 for (unsigned int i = 0; i < stack_words; i++)
1621 thread->io_priority = IO_PRIORITY_IMMEDIATE; 1205 ((uintptr_t *)stack)[i] = DEADBEEF;
1622#endif
1623 1206
1624#if NUM_CORES > 1 1207#if NUM_CORES > 1
1625 thread->core = core;
1626
1627 /* Writeback stack munging or anything else before starting */ 1208 /* Writeback stack munging or anything else before starting */
1628 if (core != CURRENT_CORE) 1209 if (core != CURRENT_CORE)
1629 {
1630 commit_dcache(); 1210 commit_dcache();
1631 }
1632#endif 1211#endif
1633 1212
1634 /* Thread is not on any timeout list but be a bit paranoid */ 1213 thread->context.sp = (typeof (thread->context.sp))(stack + stack_size);
1635 thread->tmo.prev = NULL;
1636
1637 state = (flags & CREATE_THREAD_FROZEN) ?
1638 STATE_FROZEN : STATE_RUNNING;
1639
1640 thread->context.sp = (typeof (thread->context.sp))stackend;
1641
1642 /* Load the thread's context structure with needed startup information */
1643 THREAD_STARTUP_INIT(core, thread, function); 1214 THREAD_STARTUP_INIT(core, thread, function);
1644 1215
1645 thread->state = state; 1216 int oldlevel = disable_irq_save();
1646 i = thread->id; /* Snapshot while locked */ 1217 LOCK_THREAD(thread);
1218
1219 thread->state = STATE_FROZEN;
1647 1220
1648 if (state == STATE_RUNNING) 1221 if (!(flags & CREATE_THREAD_FROZEN))
1649 core_schedule_wakeup(thread); 1222 core_schedule_wakeup(thread);
1650 1223
1224 unsigned int id = thread->id; /* Snapshot while locked */
1225
1651 UNLOCK_THREAD(thread); 1226 UNLOCK_THREAD(thread);
1652 restore_irq(oldlevel); 1227 restore_irq(oldlevel);
1653 1228
1654 return i; 1229 return id;
1655} 1230}
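The DEADBEEF fill written during creation is what the stack check in switch_thread() relies on: the word at the very bottom of the stack is the last one a downward-growing stack should ever reach, so if it no longer holds the pattern the thread overflowed at some point. A standalone sketch of the pattern; the DEADBEEF value here is illustrative and may differ in width from the kernel's constant:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define DEADBEEF ((uintptr_t)0xdeadbeefu)

    /* Fill a fresh stack with a known pattern before the thread runs. */
    static void munge_stack(uintptr_t *stack, size_t stack_words)
    {
        for (size_t i = 0; i < stack_words; i++)
            stack[i] = DEADBEEF;
    }

    /* Cheap overflow check: if the lowest word was overwritten, the
     * thread ran past the end of its stack at least once. */
    static bool stack_overflowed(const uintptr_t *stack, size_t stack_words)
    {
        return stack_words > 0 && stack[0] != DEADBEEF;
    }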
1656 1231
1657#ifdef HAVE_SCHEDULER_BOOSTCTRL
1658/*---------------------------------------------------------------------------
1659 * Change the boost state of a thread boosting or unboosting the CPU
1660 * as required.
1661 *---------------------------------------------------------------------------
1662 */
1663static inline void boost_thread(struct thread_entry *thread, bool boost)
1664{
1665 if ((thread->cpu_boost != 0) != boost)
1666 {
1667 thread->cpu_boost = boost;
1668 cpu_boost(boost);
1669 }
1670}
1671
1672void trigger_cpu_boost(void)
1673{
1674 struct thread_entry *current = cores[CURRENT_CORE].running;
1675 boost_thread(current, true);
1676}
1677
1678void cancel_cpu_boost(void)
1679{
1680 struct thread_entry *current = cores[CURRENT_CORE].running;
1681 boost_thread(current, false);
1682}
1683#endif /* HAVE_SCHEDULER_BOOSTCTRL */
1684
1685/*--------------------------------------------------------------------------- 1232/*---------------------------------------------------------------------------
1686 * Block the current thread until another thread terminates. A thread may 1233 * Block the current thread until another thread terminates. A thread may
 1687 * wait on itself to terminate which prevents it from running again and it 1234 * wait on itself to terminate, but that will deadlock.
 1688 * will need to be killed externally. 1235 *
1689 * Parameter is the ID as returned from create_thread(). 1236 * Parameter is the ID as returned from create_thread().
1690 *--------------------------------------------------------------------------- 1237 *---------------------------------------------------------------------------
1691 */ 1238 */
1692void thread_wait(unsigned int thread_id) 1239void thread_wait(unsigned int thread_id)
1693{ 1240{
1694 struct thread_entry *current = cores[CURRENT_CORE].running; 1241 struct thread_entry *current = __running_self_entry();
1695 struct thread_entry *thread = thread_id_entry(thread_id); 1242 struct thread_entry *thread = __thread_id_entry(thread_id);
1696 1243
1697 /* Lock thread-as-waitable-object lock */
1698 corelock_lock(&thread->waiter_cl); 1244 corelock_lock(&thread->waiter_cl);
1699 1245
1700 /* Be sure it hasn't been killed yet */
1701 if (thread->id == thread_id && thread->state != STATE_KILLED) 1246 if (thread->id == thread_id && thread->state != STATE_KILLED)
1702 { 1247 {
1703 IF_COP( current->obj_cl = &thread->waiter_cl; )
1704 current->bqp = &thread->queue;
1705
1706 disable_irq(); 1248 disable_irq();
1707 block_thread(current, TIMEOUT_BLOCK); 1249 block_thread(current, TIMEOUT_BLOCK, &thread->queue, NULL);
1708 1250
1709 corelock_unlock(&thread->waiter_cl); 1251 corelock_unlock(&thread->waiter_cl);
1710 1252
@@ -1716,36 +1258,35 @@ void thread_wait(unsigned int thread_id)
1716} 1258}
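Putting create_thread() and thread_wait() together: a parent spawns a one-shot worker and then blocks on the worker's slot queue until it terminates. A hedged usage sketch; the middle create_thread() parameters (stack, stack size, flags, name) follow the usual prototype rather than anything shown in this hunk, and DEFAULT_STACK_SIZE is assumed from the kernel headers:

    static long worker_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void worker(void)
    {
        /* ... do the one-shot job ... */
        thread_exit();              /* terminate; wakes any thread_wait()er */
    }

    static void run_job_and_wait(void)
    {
        unsigned int id = create_thread(worker, worker_stack,
                                        sizeof(worker_stack), 0, "worker"
                                        IF_PRIO(, PRIORITY_USER_INTERFACE)
                                        IF_COP(, CPU));
        if (id == 0)
            return;                 /* no free thread slot */

        thread_wait(id);            /* block until the worker has exited */
    }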
1717 1259
1718/*--------------------------------------------------------------------------- 1260/*---------------------------------------------------------------------------
1719 * Exit the current thread. The Right Way to Do Things (TM). 1261 * Exit the current thread
1720 *--------------------------------------------------------------------------- 1262 *---------------------------------------------------------------------------
1721 */ 1263 */
1722/* This is done to foil optimizations that may require the current stack, 1264static USED_ATTR NORETURN_ATTR
1723 * such as optimizing subexpressions that put variables on the stack that 1265void thread_exit_final(struct thread_entry *current)
1724 * get used after switching stacks. */
1725#if NUM_CORES > 1
1726/* Called by ASM stub */
1727static void thread_final_exit_do(struct thread_entry *current)
1728#else
1729/* No special procedure is required before calling */
1730static inline void thread_final_exit(struct thread_entry *current)
1731#endif
1732{ 1266{
1733 /* At this point, this thread isn't using resources allocated for 1267 /* Slot is no longer this thread */
1734 * execution except the slot itself. */ 1268 new_thread_id(current);
1269 current->name = NULL;
1735 1270
1736 /* Signal this thread */ 1271 /* No longer using resources from creator */
1737 thread_queue_wake(&current->queue); 1272 wait_queue_wake(&current->queue);
1273
1274 UNLOCK_THREAD(current);
1738 corelock_unlock(&current->waiter_cl); 1275 corelock_unlock(&current->waiter_cl);
1276
1277 thread_free(current);
1278
1739 switch_thread(); 1279 switch_thread();
1280
1740 /* This should never and must never be reached - if it is, the 1281 /* This should never and must never be reached - if it is, the
1741 * state is corrupted */ 1282 * state is corrupted */
1742 THREAD_PANICF("thread_exit->K:*R", current); 1283 THREAD_PANICF("thread_exit->K:*R", current);
1743 while (1);
1744} 1284}
1745 1285
1746void thread_exit(void) 1286void thread_exit(void)
1747{ 1287{
1748 register struct thread_entry * current = cores[CURRENT_CORE].running; 1288 struct core_entry *corep = __core_id_entry(CURRENT_CORE);
1289 register struct thread_entry *current = corep->running;
1749 1290
1750 /* Cancel CPU boost if any */ 1291 /* Cancel CPU boost if any */
1751 cancel_cpu_boost(); 1292 cancel_cpu_boost();
@@ -1764,24 +1305,21 @@ void thread_exit(void)
1764 thread_panicf("abandon ship!", current); 1305 thread_panicf("abandon ship!", current);
1765#endif /* HAVE_PRIORITY_SCHEDULING */ 1306#endif /* HAVE_PRIORITY_SCHEDULING */
1766 1307
1767 if (current->tmo.prev != NULL) 1308 /* Remove from scheduler lists */
1768 { 1309 tmo_queue_remove(&corep->tmo, current);
1769 /* Cancel pending timeout list removal */ 1310 prepare_block(current, STATE_KILLED, -1);
1770 remove_from_list_tmo(current); 1311 corep->running = NULL; /* No switch_thread context save */
1771 }
1772
1773 /* Switch tasks and never return */
1774 block_thread_on_l(current, STATE_KILLED);
1775
1776 /* Slot must be unusable until thread is really gone */
1777 UNLOCK_THREAD_AT_TASK_SWITCH(current);
1778 1312
1779 /* Update ID for this slot */ 1313#ifdef RB_PROFILE
1780 new_thread_id(current->id, current); 1314 profile_thread_stopped(THREAD_ID_SLOT(current->id));
1781 current->name = NULL; 1315#endif
1782 1316
1783 /* Do final cleanup and remove the thread */ 1317 /* Do final release of resources and remove the thread */
1784 thread_final_exit(current); 1318#if NUM_CORES > 1
1319 thread_exit_finalize(current->core, current);
1320#else
1321 thread_exit_final(current);
1322#endif
1785} 1323}
1786 1324
1787#ifdef HAVE_PRIORITY_SCHEDULING 1325#ifdef HAVE_PRIORITY_SCHEDULING
@@ -1796,10 +1334,8 @@ int thread_set_priority(unsigned int thread_id, int priority)
1796 return -1; /* Invalid priority argument */ 1334 return -1; /* Invalid priority argument */
1797 1335
1798 int old_base_priority = -1; 1336 int old_base_priority = -1;
1799 struct thread_entry *thread = thread_id_entry(thread_id); 1337 struct thread_entry *thread = __thread_id_entry(thread_id);
1800 1338
1801 /* Thread could be on any list and therefore on an interrupt accessible
1802 one - disable interrupts */
1803 const int oldlevel = disable_irq_save(); 1339 const int oldlevel = disable_irq_save();
1804 LOCK_THREAD(thread); 1340 LOCK_THREAD(thread);
1805 1341
@@ -1825,7 +1361,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
1825 { 1361 {
1826 /* This thread is running - just change location on the run queue. 1362 /* This thread is running - just change location on the run queue.
1827 Also sets thread->priority. */ 1363 Also sets thread->priority. */
1828 set_running_thread_priority(thread, new_priority); 1364 set_rtr_thread_priority(thread, new_priority);
1829 goto done; 1365 goto done;
1830 } 1366 }
1831 1367
@@ -1838,7 +1374,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
1838 } 1374 }
1839 1375
1840 struct thread_entry *blt = lock_blocker_thread(bl); 1376 struct thread_entry *blt = lock_blocker_thread(bl);
1841 struct thread_entry **bqp = thread->bqp; 1377 struct __wait_queue *wqp = wait_queue_ptr(thread);
1842 1378
1843 thread->priority = new_priority; 1379 thread->priority = new_priority;
1844 1380
@@ -1850,7 +1386,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
1850 if (new_priority < oldblpr) 1386 if (new_priority < oldblpr)
1851 newblpr = new_priority; 1387 newblpr = new_priority;
1852 else if (old_priority <= oldblpr) 1388 else if (old_priority <= oldblpr)
1853 newblpr = find_highest_priority_in_list_l(*bqp); 1389 newblpr = wait_queue_find_priority(wqp);
1854 1390
1855 if (newblpr == oldblpr) 1391 if (newblpr == oldblpr)
1856 { 1392 {
@@ -1872,7 +1408,7 @@ done:
1872 */ 1408 */
1873int thread_get_priority(unsigned int thread_id) 1409int thread_get_priority(unsigned int thread_id)
1874{ 1410{
1875 struct thread_entry *thread = thread_id_entry(thread_id); 1411 struct thread_entry *thread = __thread_id_entry(thread_id);
1876 int base_priority = thread->base_priority; 1412 int base_priority = thread->base_priority;
1877 1413
1878 /* Simply check without locking slot. It may or may not be valid by the 1414 /* Simply check without locking slot. It may or may not be valid by the
@@ -1888,13 +1424,13 @@ int thread_get_priority(unsigned int thread_id)
1888#ifdef HAVE_IO_PRIORITY 1424#ifdef HAVE_IO_PRIORITY
1889int thread_get_io_priority(unsigned int thread_id) 1425int thread_get_io_priority(unsigned int thread_id)
1890{ 1426{
1891 struct thread_entry *thread = thread_id_entry(thread_id); 1427 struct thread_entry *thread = __thread_id_entry(thread_id);
1892 return thread->io_priority; 1428 return thread->io_priority;
1893} 1429}
1894 1430
1895void thread_set_io_priority(unsigned int thread_id,int io_priority) 1431void thread_set_io_priority(unsigned int thread_id,int io_priority)
1896{ 1432{
1897 struct thread_entry *thread = thread_id_entry(thread_id); 1433 struct thread_entry *thread = __thread_id_entry(thread_id);
1898 thread->io_priority = io_priority; 1434 thread->io_priority = io_priority;
1899} 1435}
1900#endif 1436#endif
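Since thread_set_priority() returns the previous base priority (or -1 on a bad argument), a thread can raise itself temporarily and restore the old value afterwards. A hedged sketch; whether PRIORITY_REALTIME is an acceptable value to set is an assumption, any valid higher (numerically lower) priority works the same way:

    static void do_urgent_section(void)
    {
        unsigned int self = thread_self();

        int old = thread_set_priority(self, PRIORITY_REALTIME);
        if (old < 0)
            return;                     /* invalid priority or thread id */

        /* ... time-critical work ... */

        thread_set_priority(self, old); /* restore the original priority */
    }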
@@ -1907,7 +1443,7 @@ void thread_set_io_priority(unsigned int thread_id,int io_priority)
1907 */ 1443 */
1908void thread_thaw(unsigned int thread_id) 1444void thread_thaw(unsigned int thread_id)
1909{ 1445{
1910 struct thread_entry *thread = thread_id_entry(thread_id); 1446 struct thread_entry *thread = __thread_id_entry(thread_id);
1911 int oldlevel = disable_irq_save(); 1447 int oldlevel = disable_irq_save();
1912 1448
1913 LOCK_THREAD(thread); 1449 LOCK_THREAD(thread);
@@ -1926,68 +1462,72 @@ void thread_thaw(unsigned int thread_id)
1926 * Switch the processor that the currently executing thread runs on. 1462 * Switch the processor that the currently executing thread runs on.
1927 *--------------------------------------------------------------------------- 1463 *---------------------------------------------------------------------------
1928 */ 1464 */
1465static USED_ATTR NORETURN_ATTR
1466void switch_core_final(unsigned int old_core, struct thread_entry *current)
1467{
1468 /* Old core won't be using slot resources at this point */
1469 core_schedule_wakeup(current);
1470 UNLOCK_THREAD(current);
1471#ifdef RB_PROFILE
1472 profile_thread_stopped(THREAD_ID_SLOT(current->id));
1473#endif
1474 switch_thread();
1475 /* not reached */
1476 THREAD_PANICF("switch_core_final->same core!", current);
1477 (void)old_core;
1478}
1479
1929unsigned int switch_core(unsigned int new_core) 1480unsigned int switch_core(unsigned int new_core)
1930{ 1481{
1931 const unsigned int core = CURRENT_CORE; 1482 const unsigned int old_core = CURRENT_CORE;
1932 struct thread_entry *current = cores[core].running; 1483 if (old_core == new_core)
1484 return old_core; /* No change */
1933 1485
1934 if (core == new_core) 1486 struct core_entry *corep = __core_id_entry(old_core);
1935 { 1487 struct thread_entry *current = corep->running;
1936 /* No change - just return same core */
1937 return core;
1938 }
1939 1488
1940 disable_irq(); 1489 disable_irq();
1941 LOCK_THREAD(current); 1490 LOCK_THREAD(current);
1942 1491
1943 /* Get us off the running list for the current core */ 1492 /* Remove us from old core lists */
1944 RTR_LOCK(core); 1493 tmo_queue_remove(&corep->tmo, current);
1945 remove_from_list_l(&cores[core].running, current); 1494 core_rtr_remove(corep, current);
1946 rtr_subtract_entry(core, current->priority); 1495 corep->running = NULL; /* No switch_thread context save */
1947 RTR_UNLOCK(core);
1948
1949 /* Stash return value (old core) in a safe place */
1950 current->retval = core;
1951
1952 /* If a timeout hadn't yet been cleaned-up it must be removed now or
1953 * the other core will likely attempt a removal from the wrong list! */
1954 if (current->tmo.prev != NULL)
1955 {
1956 remove_from_list_tmo(current);
1957 }
1958 1496
1959 /* Change the core number for this thread slot */ 1497 /* Do the actual migration */
1960 current->core = new_core; 1498 current->core = new_core;
1499 switch_thread_core(old_core, current);
1961 1500
1962 /* Do not use core_schedule_wakeup here since this will result in 1501 /* Executing on new core */
1963 * the thread starting to run on the other core before being finished on 1502 return old_core;
1964 * this one. Delay the list unlock to keep the other core stuck 1503}
1965 * until this thread is ready. */ 1504#endif /* NUM_CORES > 1 */
1966 RTR_LOCK(new_core);
1967
1968 rtr_add_entry(new_core, current->priority);
1969 add_to_list_l(&cores[new_core].running, current);
1970
1971 /* Make a callback into device-specific code, unlock the wakeup list so
1972 * that execution may resume on the new core, unlock our slot and finally
1973 * restore the interrupt level */
1974 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
1975 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
1976 cores[core].block_task = current;
1977
1978 UNLOCK_THREAD(current);
1979 1505
1980 /* Alert other core to activity */ 1506#ifdef HAVE_SCHEDULER_BOOSTCTRL
1981 core_wake(new_core); 1507/*---------------------------------------------------------------------------
1508 * Change the boost state of a thread boosting or unboosting the CPU
1509 * as required.
1510 *---------------------------------------------------------------------------
1511 */
1512static inline void boost_thread(struct thread_entry *thread, bool boost)
1513{
1514 if ((thread->cpu_boost != 0) != boost)
1515 {
1516 thread->cpu_boost = boost;
1517 cpu_boost(boost);
1518 }
1519}
1982 1520
1983 /* Do the stack switching, cache_maintenence and switch_thread call - 1521void trigger_cpu_boost(void)
1984 requires native code */ 1522{
1985 switch_thread_core(core, current); 1523 boost_thread(__running_self_entry(), true);
1524}
1986 1525
1987 /* Finally return the old core to caller */ 1526void cancel_cpu_boost(void)
1988 return current->retval; 1527{
1528 boost_thread(__running_self_entry(), false);
1989} 1529}
1990#endif /* NUM_CORES > 1 */ 1530#endif /* HAVE_SCHEDULER_BOOSTCTRL */
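boost_thread() keeps a per-thread flag, so repeated requests from the same thread collapse into a single cpu_boost() call. The usual caller pattern is simply to boost around a CPU-heavy burst, sketched below:

    static void crunch_buffer(void)
    {
    #ifdef HAVE_SCHEDULER_BOOSTCTRL
        trigger_cpu_boost();     /* ask for the full clock while working */
    #endif

        /* ... CPU-intensive processing ... */

    #ifdef HAVE_SCHEDULER_BOOSTCTRL
        cancel_cpu_boost();      /* drop back to the normal clock */
    #endif
    }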
1991 1531
1992/*--------------------------------------------------------------------------- 1532/*---------------------------------------------------------------------------
1993 * Initialize threading API. This assumes interrupts are not yet enabled. On 1533 * Initialize threading API. This assumes interrupts are not yet enabled. On
@@ -1998,127 +1538,56 @@ unsigned int switch_core(unsigned int new_core)
1998void INIT_ATTR init_threads(void) 1538void INIT_ATTR init_threads(void)
1999{ 1539{
2000 const unsigned int core = CURRENT_CORE; 1540 const unsigned int core = CURRENT_CORE;
2001 struct thread_entry *thread;
2002 1541
2003 if (core == CPU) 1542 if (core == CPU)
2004 { 1543 {
2005 /* Initialize core locks and IDs in all slots */ 1544 thread_alloc_init(); /* before using cores! */
2006 int n; 1545
2007 for (n = 0; n < MAXTHREADS; n++) 1546 /* Create main thread */
1547 struct thread_entry *thread = thread_alloc();
1548 if (thread == NULL)
2008 { 1549 {
2009 thread = &threads[n]; 1550 /* WTF? There really must be a slot available at this stage.
2010 corelock_init(&thread->waiter_cl); 1551 * This can fail if, for example, .bss isn't zero'ed out by the
2011 corelock_init(&thread->slot_cl); 1552 * loader or threads is in the wrong section. */
2012 thread->id = THREAD_ID_INIT(n); 1553 THREAD_PANICF("init_threads->no slot", NULL);
2013 } 1554 }
2014 }
2015
2016 /* CPU will initialize first and then sleep */
2017 thread = find_empty_thread_slot();
2018 1555
2019 if (thread == NULL) 1556 size_t stack_size;
2020 { 1557 void *stack = __get_main_stack(&stack_size);
2021 /* WTF? There really must be a slot available at this stage. 1558 new_thread_base_init(thread, &stack, &stack_size, __main_thread_name
2022 * This can fail if, for example, .bss isn't zero'ed out by the loader 1559 IF_PRIO(, PRIORITY_MAIN_THREAD) IF_COP(, core));
2023 * or threads is in the wrong section. */
2024 THREAD_PANICF("init_threads->no slot", NULL);
2025 }
2026 1560
2027 /* Initialize initially non-zero members of core */ 1561 struct core_entry *corep = __core_id_entry(core);
2028 cores[core].next_tmo_check = current_tick; /* Something not in the past */ 1562 core_rtr_add(corep, thread);
1563 corep->running = thread;
2029 1564
2030 /* Initialize initially non-zero members of slot */ 1565#ifdef INIT_MAIN_THREAD
2031 UNLOCK_THREAD(thread); /* No sync worries yet */ 1566 init_main_thread(&thread->context);
2032 thread->name = main_thread_name;
2033 thread->state = STATE_RUNNING;
2034 IF_COP( thread->core = core; )
2035#ifdef HAVE_PRIORITY_SCHEDULING
2036 corelock_init(&cores[core].rtr_cl);
2037 thread->base_priority = PRIORITY_USER_INTERFACE;
2038 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2039 thread->priority = PRIORITY_USER_INTERFACE;
2040 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2041#endif 1567#endif
1568 }
2042 1569
2043 add_to_list_l(&cores[core].running, thread); 1570#if NUM_CORES > 1
2044 1571 /* Boot CPU:
2045 if (core == CPU) 1572 * Wait for other processors to finish their inits since create_thread
2046 { 1573 * isn't safe to call until the kernel inits are done. The first
2047 thread->stack = stackbegin; 1574 * threads created in the system must of course be created by CPU.
2048 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; 1575 * Another possible approach is to initialize all cores and slots
2049#if NUM_CORES > 1 /* This code path will not be run on single core targets */ 1576 * for each core by CPU, let the remainder proceed in parallel and
2050 /* Wait for other processors to finish their inits since create_thread 1577 * signal CPU when all are finished.
2051 * isn't safe to call until the kernel inits are done. The first 1578 *
2052 * threads created in the system must of course be created by CPU. 1579 * Other:
2053 * Another possible approach is to initialize all cores and slots 1580 * After last processor completes, it should signal all others to
2054 * for each core by CPU, let the remainder proceed in parallel and 1581 * proceed or may signal the next and call thread_exit(). The last one
2055 * signal CPU when all are finished. */ 1582 * to finish will signal CPU.
2056 core_thread_init(CPU); 1583 */
2057 } 1584 core_thread_init(core);
2058 else 1585
1586 if (core != CPU)
2059 { 1587 {
2060 /* Initial stack is the idle stack */ 1588 /* No main thread on coprocessors - go idle and wait */
2061 thread->stack = idle_stacks[core]; 1589 switch_thread();
2062 thread->stack_size = IDLE_STACK_SIZE; 1590 THREAD_PANICF("init_threads() - coprocessor returned", NULL);
2063 /* After last processor completes, it should signal all others to
2064 * proceed or may signal the next and call thread_exit(). The last one
2065 * to finish will signal CPU. */
2066 core_thread_init(core);
2067 /* Other cores do not have a main thread - go idle inside switch_thread
2068 * until a thread can run on the core. */
2069 thread_exit();
2070#endif /* NUM_CORES */
2071 } 1591 }
2072#ifdef INIT_MAIN_THREAD 1592#endif /* NUM_CORES */
2073 init_main_thread(&thread->context);
2074#endif
2075}
2076
2077/* Unless otherwise defined, do nothing */
2078#ifndef YIELD_KERNEL_HOOK
2079#define YIELD_KERNEL_HOOK() false
2080#endif
2081#ifndef SLEEP_KERNEL_HOOK
2082#define SLEEP_KERNEL_HOOK(ticks) false
2083#endif
2084
2085/*---------------------------------------------------------------------------
2086 * Suspends a thread's execution for at least the specified number of ticks.
2087 *
2088 * May result in CPU core entering wait-for-interrupt mode if no other thread
2089 * may be scheduled.
2090 *
2091 * NOTE: sleep(0) sleeps until the end of the current tick
2092 * sleep(n) that doesn't result in rescheduling:
2093 * n <= ticks suspended < n + 1
2094 * n to n+1 is a lower bound. Other factors may affect the actual time
2095 * a thread is suspended before it runs again.
2096 *---------------------------------------------------------------------------
2097 */
2098unsigned sleep(unsigned ticks)
2099{
2100 /* In certain situations, certain bootloaders in particular, a normal
2101 * threading call is inappropriate. */
2102 if (SLEEP_KERNEL_HOOK(ticks))
2103 return 0; /* Handled */
2104
2105 disable_irq();
2106 sleep_thread(ticks);
2107 switch_thread();
2108 return 0;
2109}
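Given the documented bound, sleep(n) suspends for at least n ticks and normally less than n + 1 before the thread is runnable again, and sleep(0) simply waits out the current tick. A small usage sketch; HZ being 100 ticks per second and device_ready() are assumptions for illustration:

    static void poll_device_slowly(void)
    {
        while (!device_ready())      /* hypothetical readiness check */
        {
            /* Roughly 100 ms per iteration: at least HZ/10 ticks,
             * normally less than HZ/10 + 1. */
            sleep(HZ / 10);
        }
    }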
2110
2111/*---------------------------------------------------------------------------
2112 * Elects another thread to run or, if no other thread may be made ready to
2113 * run, immediately returns control back to the calling thread.
2114 *---------------------------------------------------------------------------
2115 */
2116void yield(void)
2117{
2118 /* In certain situations, certain bootloaders in particular, a normal
2119 * threading call is inappropriate. */
2120 if (YIELD_KERNEL_HOOK())
2121 return; /* handled */
2122
2123 switch_thread();
2124} 1593}
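Because yield() returns immediately when nothing else is runnable, it suits tight polling loops on conditions that another thread will satisfy soon. A minimal sketch; the flag is illustrative:

    #include <stdbool.h>

    static volatile bool data_available; /* set by another thread */

    static void wait_for_data(void)
    {
        while (!data_available)
            yield();    /* let other runnable threads make progress */
    }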