path: root/firmware/export/thread.h
Diffstat (limited to 'firmware/export/thread.h')
-rw-r--r--    firmware/export/thread.h    371
1 files changed, 167 insertions, 204 deletions
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index dd97ab1e83..bb1cb7cd17 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -26,21 +26,35 @@
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than less priority threads
- * when they need it.
+ * when they need it. Priority is differential such that the priority
+ * difference between a lower priority runnable thread and the highest priority
+ * runnable thread determines the amount of aging necessary for the lower
+ * priority thread to be scheduled in order to prevent starvation.
  *
  * If software playback codec pcm buffer is going down to critical, codec
- * can change it own priority to REALTIME to override user interface and
+ * can gradually raise its own priority to override user interface and
  * prevent playback skipping.
  */
+#define PRIORITY_RESERVED_HIGH  0   /* Reserved */
+#define PRIORITY_RESERVED_LOW   32  /* Reserved */
 #define HIGHEST_PRIORITY        1   /* The highest possible thread priority */
-#define LOWEST_PRIORITY       100   /* The lowest possible thread priority */
-#define PRIORITY_REALTIME       1
-#define PRIORITY_USER_INTERFACE 4   /* The main thread */
-#define PRIORITY_RECORDING      4   /* Recording thread */
-#define PRIORITY_PLAYBACK       4   /* or REALTIME when needed */
-#define PRIORITY_BUFFERING      4   /* Codec buffering thread */
-#define PRIORITY_SYSTEM         6   /* All other firmware threads */
-#define PRIORITY_BACKGROUND     8   /* Normal application threads */
+#define LOWEST_PRIORITY         31  /* The lowest possible thread priority */
+/* Realtime range reserved for threads that will not allow threads of lower
+ * priority to age and run (future expansion) */
+#define PRIORITY_REALTIME_1     1
+#define PRIORITY_REALTIME_2     2
+#define PRIORITY_REALTIME_3     3
+#define PRIORITY_REALTIME_4     4
+#define PRIORITY_REALTIME       4   /* Lowest realtime range */
+#define PRIORITY_USER_INTERFACE 16  /* The main thread */
+#define PRIORITY_RECORDING      16  /* Recording thread */
+#define PRIORITY_PLAYBACK       16  /* Variable between this and MAX */
+#define PRIORITY_PLAYBACK_MAX   5   /* Maximum allowable playback priority */
+#define PRIORITY_BUFFERING      16  /* Codec buffering thread */
+#define PRIORITY_SYSTEM         18  /* All other firmware threads */
+#define PRIORITY_BACKGROUND     20  /* Normal application threads */
+#define NUM_PRIORITIES          32
+#define PRIORITY_IDLE           32  /* Priority representative of no tasks */
 
 /* TODO: Only a minor tweak to create_thread would be needed to let
  * thread slots be caller allocated - no essential threading functionality
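
With the new 1..31 scale (plus the reserved endpoints 0 and 32), a numerically smaller value still means a higher priority. As an illustration only, not code from this header, clamping a caller-supplied priority into the valid range would look like this:

/* Illustrative helper: keep a requested priority inside the valid range.
 * Numerically smaller means higher priority. */
static inline int clamp_priority_example(int priority)
{
    if (priority < HIGHEST_PRIORITY)
        priority = HIGHEST_PRIORITY;
    if (priority > LOWEST_PRIORITY)
        priority = LOWEST_PRIORITY;
    return priority;
}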
@@ -59,80 +73,40 @@
 
 #define DEFAULT_STACK_SIZE 0x400 /* Bytes */
 
-/**
- * "Busy" values that can be swapped into a variable to indicate
- * that the variable or object pointed to is in use by another processor
- * core. When accessed, the busy value is swapped-in while the current
- * value is atomically returned. If the swap returns the busy value,
- * the processor should retry the operation until some other value is
- * returned. When modification is finished, the new value should be
- * written which unlocks it and updates it atomically.
- *
- * Procedure:
- * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
- *
- * Modify/examine object at mem location or variable. Create "new_value"
- * as suitable.
- *
- * variable = new_value or curr_value;
- *
- * To check a value for busy and perform an operation if not:
- * curr_value = swap(&variable, BUSY_VALUE);
- *
- * if (curr_value != BUSY_VALUE)
- * {
- *     Modify/examine object at mem location or variable. Create "new_value"
- *     as suitable.
- *     variable = new_value or curr_value;
- * }
- * else
- * {
- *     Do nothing - already busy
- * }
- *
- * Only ever restore when an actual value is returned or else it could leave
- * the variable locked permanently if another processor unlocked in the
- * meantime. The next access attempt would deadlock for all processors since
- * an abandoned busy status would be left behind.
- */
-#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
-#define STATE_BUSYu8   UINT8_MAX
-#define STATE_BUSYi    INT_MIN
-
 #ifndef SIMULATOR
 /* Need to keep structures inside the header file because debug_menu
  * needs them. */
 #ifdef CPU_COLDFIRE
 struct regs
 {
-    unsigned int macsr;  /*     0 - EMAC status register */
-    unsigned int d[6];   /*  4-24 - d2-d7 */
-    unsigned int a[5];   /* 28-44 - a2-a6 */
-    void         *sp;    /*    48 - Stack pointer (a7) */
-    void         *start; /*    52 - Thread start address, or NULL when started */
+    uint32_t macsr; /*     0 - EMAC status register */
+    uint32_t d[6];  /*  4-24 - d2-d7 */
+    uint32_t a[5];  /* 28-44 - a2-a6 */
+    uint32_t sp;    /*    48 - Stack pointer (a7) */
+    uint32_t start; /*    52 - Thread start address, or NULL when started */
 };
 #elif CONFIG_CPU == SH7034
 struct regs
 {
-    unsigned int r[7];   /*  0-24 - Registers r8 thru r14 */
-    void         *sp;    /*    28 - Stack pointer (r15) */
-    void         *pr;    /*    32 - Procedure register */
-    void         *start; /*    36 - Thread start address, or NULL when started */
+    uint32_t r[7];  /*  0-24 - Registers r8 thru r14 */
+    uint32_t sp;    /*    28 - Stack pointer (r15) */
+    uint32_t pr;    /*    32 - Procedure register */
+    uint32_t start; /*    36 - Thread start address, or NULL when started */
 };
 #elif defined(CPU_ARM)
 struct regs
 {
-    unsigned int r[8];   /*  0-28 - Registers r4-r11 */
-    void         *sp;    /*    32 - Stack pointer (r13) */
-    unsigned int lr;     /*    36 - r14 (lr) */
-    void         *start; /*    40 - Thread start address, or NULL when started */
+    uint32_t r[8];  /*  0-28 - Registers r4-r11 */
+    uint32_t sp;    /*    32 - Stack pointer (r13) */
+    uint32_t lr;    /*    36 - r14 (lr) */
+    uint32_t start; /*    40 - Thread start address, or NULL when started */
 };
 #endif /* CONFIG_CPU */
 #else
 struct regs
 {
     void *t;             /* Simulator OS thread */
-    void *c;             /* Condition for blocking and sync */
+    void *s;             /* Semaphore for blocking and wakeup */
     void (*start)(void); /* Start function */
 };
 #endif /* !SIMULATOR */
@@ -154,13 +128,13 @@ enum
                                    thread_thaw is called with its ID */
     THREAD_NUM_STATES,
     TIMEOUT_STATE_FIRST = STATE_SLEEPING,
-#if NUM_CORES > 1
-    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
-#endif
 };
 
 #if NUM_CORES > 1
-#define THREAD_DESTRUCT ((const char *)0x84905617)
+/* Pointer value for name field to indicate thread is being killed. Using
+ * an alternate STATE_* won't work since that would interfere with operation
+ * while the thread is still running. */
+#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
 #endif
 
 /* Link information for lists thread is in */
@@ -188,7 +162,7 @@ void corelock_unlock(struct corelock *cl);
 /* Use native atomic swap/exchange instruction */
 struct corelock
 {
-    unsigned char locked;
+    volatile unsigned char locked;
 } __attribute__((packed));
 
 #define corelock_init(cl) \
@@ -207,15 +181,36 @@ struct corelock
 #define corelock_unlock(cl)
 #endif /* core locking selection */
 
-struct thread_queue
-{
-    struct thread_entry *queue; /* list of threads waiting -
-                                   _must_ be first member */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;         /* lock for atomic list operations */
-#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+struct blocker
+{
+    struct thread_entry *thread;  /* thread blocking other threads
+                                     (aka. object owner) */
+    int priority;                 /* highest priority waiter */
+    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
 };
 
+/* Choices of wakeup protocol */
+
+/* For transfer of object ownership by one thread to another thread by
+ * the owning thread itself (mutexes) */
+struct thread_entry *
+    wakeup_priority_protocol_transfer(struct thread_entry *thread);
+
+/* For release by owner where ownership doesn't change - other threads,
+ * interrupts, timeouts, etc. (mutex timeout, queues) */
+struct thread_entry *
+    wakeup_priority_protocol_release(struct thread_entry *thread);
+
+
+struct priority_distribution
+{
+    uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
+};
+
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
 /* Information kept in each thread slot
  * members are arranged according to size - largest first - in order
  * to ensure both alignment and packing at the same time.
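
The hist/mask pair added above is what makes finding the highest pending priority cheap: hist counts how many entries exist at each priority and mask keeps one bit per non-empty bucket, so a single find-first-set over mask gives the answer. A rough sketch of the bookkeeping follows; the helper names are illustrative and the GCC builtin merely stands in for whatever find-first-set helper the firmware actually uses:

/* Illustrative maintenance of a priority_distribution. */
static inline void prio_hist_add(struct priority_distribution *pd, int priority)
{
    if (pd->hist[priority]++ == 0)        /* bucket became non-empty */
        pd->mask |= 1ul << priority;
}

static inline void prio_hist_remove(struct priority_distribution *pd, int priority)
{
    if (--pd->hist[priority] == 0)        /* bucket became empty */
        pd->mask &= ~(1ul << priority);
}

static inline int prio_hist_highest(const struct priority_distribution *pd)
{
    /* Lowest set bit is the numerically smallest, i.e. highest, priority.
     * Assumes at least one entry is present (mask != 0). */
    return __builtin_ctzl(pd->mask);
}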
@@ -224,88 +219,83 @@ struct thread_entry
 {
     struct regs context;        /* Register context at switch -
                                    _must_ be first member */
-    void *stack;                /* Pointer to top of stack */
+    uintptr_t *stack;           /* Pointer to top of stack */
     const char *name;           /* Thread name */
     long tmo_tick;              /* Tick when thread should be woken from
-                                   timeout */
+                                   timeout -
+                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
     struct thread_list l;       /* Links for blocked/waking/running -
                                    circular linkage in both directions */
     struct thread_list tmo;     /* Links for timeout list -
-                                   Self-pointer-terminated in reverse direction,
-                                   NULL-terminated in forward direction */
-    struct thread_queue *bqp;   /* Pointer to list variable in kernel
+                                   Circular in reverse direction, NULL-terminated in
+                                   forward direction -
+                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+    struct thread_entry **bqp;  /* Pointer to list variable in kernel
                                    object where thread is blocked - used
-                                   for implicit unblock and explicit wake */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
-                                   object where thread is blocked - non-locked
-                                   operations will be used */
+                                   for implicit unblock and explicit wake
+                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+#if NUM_CORES > 1
+    struct corelock *obj_cl;    /* Object corelock where thread is blocked -
+                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
 #endif
     struct thread_entry *queue; /* List of threads waiting for thread to be
                                    removed */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    intptr_t retval;            /* Return value from a blocked operation */
+    #define HAVE_WAKEUP_EXT_CB
+    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
+                                   performs special steps needed when being
+                                   forced off of an object's wait queue that
+                                   go beyond the standard wait queue removal
+                                   and priority disinheritance */
+    /* Only enabled when using queue_send for now */
+#endif
+#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
+    intptr_t retval;            /* Return value from a blocked operation/
+                                   misc. use */
 #endif
 #ifdef HAVE_PRIORITY_SCHEDULING
-    long last_run;              /* Last tick when started */
+    /* Priority summary of owned objects that support inheritance */
+    struct blocker *blocker;    /* Pointer to blocker when this thread is blocked
+                                   on an object that supports PIP -
+                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+    struct priority_distribution pdist; /* Priority summary of owned objects
+                                   that have blocked threads and thread's own
+                                   base priority */
+    int skip_count;             /* Number of times skipped if higher priority
+                                   thread was running */
 #endif
     unsigned short stack_size;  /* Size of stack in bytes */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    unsigned char priority;     /* Current priority */
-    unsigned char priority_x;   /* Inherited priority - right now just a
-                                   runtime guarantee flag */
+    unsigned char base_priority; /* Base priority (set explicitly during
+                                    creation or thread_set_priority) */
+    unsigned char priority;     /* Scheduled priority (higher of base or
+                                   all threads blocked by this one) */
 #endif
     unsigned char state;        /* Thread slot state (STATE_*) */
-#if NUM_CORES > 1
-    unsigned char core;         /* The core to which thread belongs */
-#endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-    unsigned char boosted;      /* CPU frequency boost flag */
+    unsigned char cpu_boost;    /* CPU frequency boost flag */
 #endif
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;         /* Corelock to lock thread slot */
+#if NUM_CORES > 1
+    unsigned char core;         /* The core to which thread belongs */
+    struct corelock waiter_cl;  /* Corelock for thread_wait */
+    struct corelock slot_cl;    /* Corelock to lock thread slot */
 #endif
 };
 
 #if NUM_CORES > 1
 /* Operations to be performed just before stopping a thread and starting
    a new one if specified before calling switch_thread */
-#define TBOP_UNLOCK_LIST     0x01 /* Set a pointer variable address var_ptrp */
-#if CONFIG_CORELOCK == CORELOCK_SWAP
-#define TBOP_SET_VARi        0x02 /* Set an int at address var_ip */
-#define TBOP_SET_VARu8       0x03 /* Set an unsigned char at address var_u8p */
-#define TBOP_VAR_TYPE_MASK   0x03 /* Mask for variable type */
-#endif /* CONFIG_CORELOCK */
-#define TBOP_UNLOCK_CORELOCK 0x04
-#define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
-#define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
-#define TBOP_SWITCH_CORE     0x20 /* Call the core switch preparation routine */
+enum
+{
+    TBOP_CLEAR = 0,       /* No operation to do */
+    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
+    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
+};
 
 struct thread_blk_ops
 {
-#if CONFIG_CORELOCK != SW_CORELOCK
-    union
-    {
-        int var_iv;                  /* int variable value to set */
-        uint8_t var_u8v;             /* unsigned char value to set */
-        struct thread_entry *list_v; /* list pointer queue value to set */
-    };
-#endif
-    union
-    {
-#if CONFIG_CORELOCK != SW_CORELOCK
-        int *var_ip;                 /* pointer to int variable */
-        uint8_t *var_u8p;            /* pointer to unsigned char variable */
-#endif
-        struct thread_queue *list_p; /* pointer to list variable */
-    };
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock *cl_p;           /* corelock to unlock */
-    struct thread_entry *thread;     /* thread to unlock */
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
-    unsigned char state;             /* new thread state (performs unlock) */
-#endif /* SOFTWARE_CORELOCK */
-    unsigned char flags;             /* TBOP_* flags */
+    struct corelock *cl_p; /* pointer to corelock */
+    unsigned char flags;   /* TBOP_* flags */
 };
 #endif /* NUM_CORES > 1 */
 
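
The simplified block operation is essentially a deferred unlock: a corelock that must stay held until the blocked thread is safely off its run structures can be handed to the scheduler to release as part of the switch. Conceptually the consuming side might look like the sketch below; this is illustrative only and not the actual switch_thread() internals:

/* Illustrative: consume a pending block operation after switching away. */
static inline void run_blocking_ops_example(struct thread_blk_ops *ops)
{
    if (ops->flags == TBOP_UNLOCK_CORELOCK)
        corelock_unlock(ops->cl_p);   /* release the object's lock late */

    ops->flags = TBOP_CLEAR;          /* nothing pending any more */
}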
@@ -316,28 +306,30 @@ struct core_entry
 {
     /* "Active" lists - core is constantly active on these and are never
        locked and interrupts do not access them */
-    struct thread_entry *running;  /* threads that are running */
+    struct thread_entry *running;  /* threads that are running (RTR) */
     struct thread_entry *timeout;  /* threads that are on a timeout before
                                       running again */
-    /* "Shared" lists - cores interact in a synchronized manner - access
-       is locked between cores and interrupts */
-    struct thread_queue  waking;   /* intermediate locked list that
-                                      hold threads other core should wake up
-                                      on next task switch */
+    struct thread_entry *block_task; /* Task going off running list */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct priority_distribution rtr; /* Summary of running and ready-to-run
+                                         threads */
+#endif
     long next_tmo_check;           /* soonest time to check tmo threads */
 #if NUM_CORES > 1
     struct thread_blk_ops blk_ops; /* operations to perform when
                                       blocking a thread */
-#endif /* NUM_CORES */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    unsigned char highest_priority;
+    struct corelock rtr_cl;        /* Lock for rtr list */
 #endif
+#endif /* NUM_CORES */
 };
 
 #ifdef HAVE_PRIORITY_SCHEDULING
 #define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
 #else
 #define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
 #endif
 
 /* Macros generate better code than an inline function in this case */
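
The IF_PRIO()/IFN_PRIO() pair above lets one prototype or call site serve both scheduler builds: the variadic arguments are emitted only for the matching configuration. As a small illustration (example() is just a placeholder name), with HAVE_PRIORITY_SCHEDULING defined,

    void example(int a IF_PRIO(, int priority));

expands to void example(int a, int priority); and without it to void example(int a); The create_thread() prototype further down uses exactly this trick for its priority and core parameters.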
@@ -464,13 +456,18 @@ struct core_entry
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));
 
+/* Initialize the scheduler */
+void init_threads(void);
+
+/* Allocate a thread in the scheduler */
 #define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
 struct thread_entry*
-    create_thread(void (*function)(void), void* stack, int stack_size,
+    create_thread(void (*function)(void), void* stack, size_t stack_size,
                   unsigned flags, const char *name
                   IF_PRIO(, int priority)
                   IF_COP(, unsigned int core));
 
+/* Set and clear the CPU frequency boost flag for the calling thread */
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void);
 void cancel_cpu_boost(void);
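
Combined with thread_thaw() declared below, CREATE_THREAD_FROZEN lets a thread be allocated early and released only once everything it depends on is ready. A hedged caller sketch; the thread body, stack, priority and core argument are placeholders/assumptions, not part of this header:

static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)]; /* placeholder stack */
static void demo_thread(void);                             /* placeholder body */

void demo_start(void)
{
    /* The slot is allocated but the thread does not run yet... */
    struct thread_entry *t =
        create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                      CREATE_THREAD_FROZEN, "demo"
                      IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));

    /* ...until it is explicitly thawed. */
    thread_thaw(t);
}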
@@ -478,86 +475,52 @@ void cancel_cpu_boost(void);
 #define trigger_cpu_boost()
 #define cancel_cpu_boost()
 #endif
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
+ * Has no effect on a thread not frozen. */
 void thread_thaw(struct thread_entry *thread);
+/* Wait for a thread to exit */
 void thread_wait(struct thread_entry *thread);
+/* Exit the current thread */
+void thread_exit(void);
+#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
+#define ALLOW_REMOVE_THREAD
+/* Remove a thread from the scheduler */
 void remove_thread(struct thread_entry *thread);
-void switch_thread(struct thread_entry *old);
-void sleep_thread(int ticks);
-
-/**
- * Setup to allow using thread queues as locked or non-locked without speed
- * sacrifices in both core locking types.
- *
- * The blocking/waking function inline two different version of the real
- * function into the stubs when a software or other separate core locking
- * mechanism is employed.
- *
- * When a simple test-and-set or similar instruction is available, locking
- * has no cost and so one version is used and the internal worker is called
- * directly.
- *
- * CORELOCK_NONE is treated the same as when an atomic instruction can be
- * used.
- */
+#endif
 
-/* Blocks the current thread on a thread queue */
-#if CONFIG_CORELOCK == SW_CORELOCK
-void block_thread(struct thread_queue *tq);
-void block_thread_no_listlock(struct thread_entry **list);
-#else
-void _block_thread(struct thread_queue *tq);
-static inline void block_thread(struct thread_queue *tq)
-    { _block_thread(tq); }
-static inline void block_thread_no_listlock(struct thread_entry **list)
-    { _block_thread((struct thread_queue *)list); }
-#endif /* CONFIG_CORELOCK */
-
-/* Blocks the current thread on a thread queue for a max amount of time
- * There is no "_no_listlock" version because timeout blocks without sync on
- * the blocking queues is not permitted since either core could access the
- * list at any time to do an implicit wake. In other words, objects with
- * timeout support require lockable queues. */
-void block_thread_w_tmo(struct thread_queue *tq, int timeout);
-
-/* Wakes up the thread at the head of the queue */
-#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
-#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
-#if CONFIG_CORELOCK == SW_CORELOCK
-struct thread_entry * wakeup_thread(struct thread_queue *tq);
-struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
-#else
-struct thread_entry * _wakeup_thread(struct thread_queue *list);
-static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
-    { return _wakeup_thread(tq); }
-static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
-    { return _wakeup_thread((struct thread_queue *)list); }
-#endif /* CONFIG_CORELOCK */
-
-/* Initialize a thread_queue object. */
-static inline void thread_queue_init(struct thread_queue *tq)
-    { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
+/* Switch to next runnable thread */
+void switch_thread(void);
+/* Blocks a thread for at least the specified number of ticks (0 = wait until
+ * next tick) */
+void sleep_thread(int ticks);
+/* Indefinitely blocks the current thread on a thread queue */
+void block_thread(struct thread_entry *current);
+/* Blocks the current thread on a thread queue until explicitly woken or
+ * the timeout is reached */
+void block_thread_w_tmo(struct thread_entry *current, int timeout);
+
+/* Return bit flags for thread wakeup */
+#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
+#define THREAD_OK     0x1 /* A thread was woken up */
+#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
+                             higher priority than current were woken) */
+
 /* A convenience function for waking an entire queue of threads. */
-static inline void thread_queue_wake(struct thread_queue *tq)
-    { while (wakeup_thread(tq) != NULL); }
-/* The no-listlock version of thread_queue_wake() */
-static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
-    { while (wakeup_thread_no_listlock(list) != NULL); }
+unsigned int thread_queue_wake(struct thread_entry **list);
+
+/* Wakeup a thread at the head of a list */
+unsigned int wakeup_thread(struct thread_entry **list);
 
 #ifdef HAVE_PRIORITY_SCHEDULING
 int thread_set_priority(struct thread_entry *thread, int priority);
 int thread_get_priority(struct thread_entry *thread);
-/* Yield that guarantees thread execution once per round regardless of
-   thread's scheduler priority - basically a transient realtime boost
-   without altering the scheduler's thread precedence. */
-void priority_yield(void);
-#else
-#define priority_yield  yield
 #endif /* HAVE_PRIORITY_SCHEDULING */
 #if NUM_CORES > 1
 unsigned int switch_core(unsigned int new_core);
 #endif
 struct thread_entry * thread_get_current(void);
-void init_threads(void);
+
+/* Debugging info - only! */
 int thread_stack_usage(const struct thread_entry *thread);
 #if NUM_CORES > 1
 int idle_stack_usage(unsigned int core);
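
Since wakeup_thread() and thread_queue_wake() now report what happened through the THREAD_* flags above instead of returning a thread pointer, a kernel object can decide whether an immediate reschedule is worthwhile. A hedged caller sketch:

/* Illustrative: wake all waiters and yield only if a thread of higher
 * priority than the current one was made runnable. */
void signal_waiters_example(struct thread_entry **waiters)
{
    if (thread_queue_wake(waiters) & THREAD_SWITCH)
        switch_thread();
}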