path: root/firmware/export/thread.h
author    Michael Sevakis <jethead71@rockbox.org>    2007-10-16 01:25:17 +0000
committer Michael Sevakis <jethead71@rockbox.org>    2007-10-16 01:25:17 +0000
commit    a9b2fb5ee3114fe835f6515b6aeae7454f66d821 (patch)
tree      fc4e96d0c1f215565918406c8827b16b806c1345 /firmware/export/thread.h
parent    a3fbbc9fa7e12fd3fce122bbd235dc362050e024 (diff)
download  rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.tar.gz
          rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.zip
Finally, full multicore support for PortalPlayer 502x targets, with an eye towards the possibility of other types. All SVN targets use the low-lag code to speed up blocking operations. Most files are modified here simply due to a name change needed to support a real event object and a parameter change to create_thread. Add some use of the new features but just sit on things for a bit and leave full integration for later. Work will continue to address size on sensitive targets and to simplify things where possible. Any PP target having problems with SWP can easily be changed to software corelocks with one #define change in config.h, though only PP5020 has shown an issue and it seems to work without any difficulties that way.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
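The one-#define switch to software corelocks mentioned in the message refers to the CONFIG_CORELOCK selection that this header tests (the SW_CORELOCK and CORELOCK_SWAP branches in the diff below). As a rough, hypothetical illustration only — the file and its surrounding lines are not part of this commit — a PP target's config header could make that change like this:

/* Hypothetical excerpt from a target config header (not from this commit).
 * SW_CORELOCK selects the Peterson's-algorithm locks used when the SWP
 * instruction is unreliable; CORELOCK_SWAP uses the native atomic swap.
 * Only this one line would need to change for an affected PP target. */
#if 1   /* e.g. PP5020, where SWP has shown problems */
#define CONFIG_CORELOCK SW_CORELOCK
#else
#define CONFIG_CORELOCK CORELOCK_SWAP
#endif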
Diffstat (limited to 'firmware/export/thread.h')
-rw-r--r--  firmware/export/thread.h  552
1 file changed, 437 insertions(+), 115 deletions(-)
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 7c683ddde5..20cde1a8e3 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -21,6 +21,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include <stddef.h>
 #include <stdbool.h>
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
@@ -31,13 +32,15 @@
  * can change it own priority to REALTIME to override user interface and
  * prevent playback skipping.
  */
+#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
+#define LOWEST_PRIORITY 100 /* The lowest possible thread priority */
 #define PRIORITY_REALTIME 1
 #define PRIORITY_USER_INTERFACE 4 /* The main thread */
 #define PRIORITY_RECORDING 4 /* Recording thread */
 #define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
 #define PRIORITY_BUFFERING 4 /* Codec buffering thread */
 #define PRIORITY_SYSTEM 6 /* All other firmware threads */
 #define PRIORITY_BACKGROUND 8 /* Normal application threads */
 
 #if CONFIG_CODEC == SWCODEC
 #define MAXTHREADS 16
@@ -47,6 +50,46 @@
 
 #define DEFAULT_STACK_SIZE 0x400 /* Bytes */
 
+/**
+ * "Busy" values that can be swapped into a variable to indicate
+ * that the variable or object pointed to is in use by another processor
+ * core. When accessed, the busy value is swapped-in while the current
+ * value is atomically returned. If the swap returns the busy value,
+ * the processor should retry the operation until some other value is
+ * returned. When modification is finished, the new value should be
+ * written which unlocks it and updates it atomically.
+ *
+ * Procedure:
+ * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
+ *
+ * Modify/examine object at mem location or variable. Create "new_value"
+ * as suitable.
+ *
+ * variable = new_value or curr_value;
+ *
+ * To check a value for busy and perform an operation if not:
+ * curr_value = swap(&variable, BUSY_VALUE);
+ *
+ * if (curr_value != BUSY_VALUE)
+ * {
+ *     Modify/examine object at mem location or variable. Create "new_value"
+ *     as suitable.
+ *     variable = new_value or curr_value;
+ * }
+ * else
+ * {
+ *     Do nothing - already busy
+ * }
+ *
+ * Only ever restore when an actual value is returned or else it could leave
+ * the variable locked permanently if another processor unlocked in the
+ * meantime. The next access attempt would deadlock for all processors since
+ * an abandoned busy status would be left behind.
+ */
+#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
+#define STATE_BUSYu8   UINT8_MAX
+#define STATE_BUSYi    INT_MIN
+
 #ifndef SIMULATOR
 /* Need to keep structures inside the header file because debug_menu
  * needs them. */
@@ -58,7 +101,7 @@ struct regs
     unsigned int a[5]; /* 28-44 - a2-a6 */
     void *sp;          /* 48 - Stack pointer (a7) */
     void *start;       /* 52 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #elif CONFIG_CPU == SH7034
 struct regs
 {
@@ -66,7 +109,7 @@ struct regs
     void *sp;    /* 28 - Stack pointer (r15) */
     void *pr;    /* 32 - Procedure register */
     void *start; /* 36 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #elif defined(CPU_ARM)
 struct regs
 {
@@ -74,7 +117,7 @@ struct regs
     void *sp;        /* 32 - Stack pointer (r13) */
     unsigned int lr; /* 36 - r14 (lr) */
     void *start;     /* 40 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #endif /* CONFIG_CPU */
 #else
 struct regs
@@ -85,58 +128,206 @@ struct regs
 };
 #endif /* !SIMULATOR */
 
-#define STATE_RUNNING 0x00000000
-#define STATE_BLOCKED 0x20000000
-#define STATE_SLEEPING 0x40000000
-#define STATE_BLOCKED_W_TMO 0x60000000
-
-#define THREAD_STATE_MASK 0x60000000
-#define STATE_ARG_MASK 0x1FFFFFFF
-
-#define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
-#define GET_STATE(state) (state & THREAD_STATE_MASK)
-#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
-#define CLEAR_STATE_ARG(var) (var &= ~STATE_ARG_MASK)
-
-#define STATE_BOOSTED 0x80000000
-#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
-#define SET_BOOST_STATE(var) (var |= STATE_BOOSTED)
-
-struct thread_entry {
-    struct regs context;
-    const char *name;
-    void *stack;
-    unsigned long statearg;
-    unsigned short stack_size;
-# if NUM_CORES > 1
-    unsigned char core; /* To which core threads belongs to. */
-# endif
-#ifdef HAVE_PRIORITY_SCHEDULING
-    unsigned char priority;
-    unsigned char priority_x;
-    long last_run;
-#endif
-    struct thread_entry *next, *prev;
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    intptr_t retval;
-#endif
+/* NOTE: The use of the word "queue" may also refer to a linked list of
+   threads being maintainted that are normally dealt with in FIFO order
+   and not nescessarily kernel event_queue */
+enum
+{
+    /* States without a timeout must be first */
+    STATE_KILLED = 0,    /* Thread is killed (default) */
+    STATE_RUNNING,       /* Thread is currently running */
+    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
+    /* These states involve adding the thread to the tmo list */
+    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
+    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
+    /* Miscellaneous states */
+    STATE_FROZEN,        /* Thread is suspended and will not run until
+                            thread_thaw is called with its ID */
+    THREAD_NUM_STATES,
+    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
+#if NUM_CORES > 1
+    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
+#endif
+};
+
+#if NUM_CORES > 1
+#define THREAD_DESTRUCT ((const char *)0x84905617)
+#endif
+
+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+    struct thread_entry *prev; /* Previous thread in a list */
+    struct thread_entry *next; /* Next thread in a list */
 };
 
-struct core_entry {
-    struct thread_entry *running;
-    struct thread_entry *sleeping;
-    struct thread_entry *waking;
-    struct thread_entry **wakeup_list;
+/* Small objects for core-wise mutual exclusion */
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+    volatile unsigned char myl[NUM_CORES];
+    volatile unsigned char turn;
+} __attribute__((packed));
+
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+/* Use native atomic swap/exchange instruction */
+struct corelock
+{
+    unsigned char locked;
+} __attribute__((packed));
+
+#define corelock_init(cl) \
+    ({ (cl)->locked = 0; })
+#define corelock_lock(cl) \
+    ({ while (test_and_set(&(cl)->locked, 1)); })
+#define corelock_try_lock(cl) \
+    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
+#define corelock_unlock(cl) \
+    ({ (cl)->locked = 0; })
+#else
+/* No atomic corelock op needed or just none defined */
+#define corelock_init(cl)
+#define corelock_lock(cl)
+#define corelock_try_lock(cl)
+#define corelock_unlock(cl)
+#endif /* core locking selection */
+
+struct thread_queue
+{
+    struct thread_entry *queue; /* list of threads waiting -
+                                   _must_ be first member */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;         /* lock for atomic list operations */
+#endif
+};
+
+/* Information kept in each thread slot
+ * members are arranged according to size - largest first - in order
+ * to ensure both alignment and packing at the same time.
+ */
+struct thread_entry
+{
+    struct regs context;        /* Register context at switch -
+                                   _must_ be first member */
+    void *stack;                /* Pointer to top of stack */
+    const char *name;           /* Thread name */
+    long tmo_tick;              /* Tick when thread should be woken from
+                                   timeout */
+    struct thread_list l;       /* Links for blocked/waking/running -
+                                   circular linkage in both directions */
+    struct thread_list tmo;     /* Links for timeout list -
+                                   Self-pointer-terminated in reverse direction,
+                                   NULL-terminated in forward direction */
+    struct thread_queue *bqp;   /* Pointer to list variable in kernel
+                                   object where thread is blocked - used
+                                   for implicit unblock and explicit wake */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
+                                    object where thread is blocked - non-locked
+                                    operations will be used */
+#endif
+    struct thread_entry *queue; /* List of threads waiting for thread to be
+                                   removed */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    intptr_t retval;            /* Return value from a blocked operation */
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+    long last_run;              /* Last tick when started */
+#endif
+    unsigned short stack_size;  /* Size of stack in bytes */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    long highest_priority;
+    unsigned char priority;     /* Current priority */
+    unsigned char priority_x;   /* Inherited priority - right now just a
+                                   runtime guarantee flag */
 #endif
+    unsigned char state;        /* Thread slot state (STATE_*) */
 #if NUM_CORES > 1
-    volatile bool lock_issued;
-    volatile bool kernel_running;
+    unsigned char core;         /* The core to which thread belongs */
+#endif
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    unsigned char boosted;      /* CPU frequency boost flag */
+#endif
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;         /* Corelock to lock thread slot */
+#endif
+};
+
+#if NUM_CORES > 1
+/* Operations to be performed just before stopping a thread and starting
+   a new one if specified before calling switch_thread */
+#define TBOP_UNLOCK_LIST     0x01 /* Set a pointer variable address var_ptrp */
+#if CONFIG_CORELOCK == CORELOCK_SWAP
+#define TBOP_SET_VARi        0x02 /* Set an int at address var_ip */
+#define TBOP_SET_VARu8       0x03 /* Set an unsigned char at address var_u8p */
+#define TBOP_VAR_TYPE_MASK   0x03 /* Mask for variable type*/
+#endif /* CONFIG_CORELOCK */
+#define TBOP_UNLOCK_CORELOCK 0x04
+#define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
+#define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
+#define TBOP_IRQ_LEVEL       0x20 /* Set a new irq level */
+#define TBOP_SWITCH_CORE     0x40 /* Call the core switch preparation routine */
+
+struct thread_blk_ops
+{
+    int irq_level;                   /* new IRQ level to set */
+#if CONFIG_CORELOCK != SW_CORELOCK
+    union
+    {
+        int var_iv;                  /* int variable value to set */
+        uint8_t var_u8v;             /* unsigned char valur to set */
+        struct thread_entry *list_v; /* list pointer queue value to set */
+    };
+#endif
+    union
+    {
+#if CONFIG_CORELOCK != SW_CORELOCK
+        int *var_ip;                 /* pointer to int variable */
+        uint8_t *var_u8p;            /* pointer to unsigned char varuable */
+#endif
+        struct thread_queue *list_p; /* pointer to list variable */
+    };
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock *cl_p;           /* corelock to unlock */
+    struct thread_entry *thread;     /* thread to unlock */
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+    unsigned char state;             /* new thread state (performs unlock) */
+#endif /* SOFTWARE_CORELOCK */
+    unsigned char flags;             /* TBOP_* flags */
+};
+#endif /* NUM_CORES > 1 */
+
+/* Information kept for each core
+ * Member are arranged for the same reason as in thread_entry
+ */
+struct core_entry
+{
+    /* "Active" lists - core is constantly active on these and are never
+       locked and interrupts do not access them */
+    struct thread_entry *running;  /* threads that are running */
+    struct thread_entry *timeout;  /* threads that are on a timeout before
+                                      running again */
+    /* "Shared" lists - cores interact in a synchronized manner - access
+       is locked between cores and interrupts */
+    struct thread_queue  waking;   /* intermediate locked list that
+                                      hold threads other core should wake up
+                                      on next task switch */
+    long next_tmo_check;           /* soonest time to check tmo threads */
+#if NUM_CORES > 1
+    struct thread_blk_ops blk_ops; /* operations to perform when
+                                      blocking a thread */
+#else
+    #define STAY_IRQ_LEVEL (-1)
+    int irq_level;                 /* sets the irq level to irq_level */
+#endif /* NUM_CORES */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    unsigned char highest_priority;
 #endif
-    long last_tick;
-    int switch_to_irq_level;
-    #define STAY_IRQ_LEVEL -1
 };
 
 #ifdef HAVE_PRIORITY_SCHEDULING
@@ -145,82 +336,210 @@ struct core_entry {
 #define IF_PRIO(...)
 #endif
 
-/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
- * Just use it for ARM, Coldfire and whatever else well...why not?
- */
-
 /* Macros generate better code than an inline function is this case */
-#if (defined (CPU_PP) || defined (CPU_ARM)) && CONFIG_CPU != PP5020
-#define test_and_set(x_, v_) \
-({ \
-    uint32_t old; \
-    asm volatile ( \
-        "swpb %[old], %[v], [%[x]] \r\n" \
-        : [old]"=r"(old) \
-        : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
-    ); \
-    old; \
-    })
+#if (defined (CPU_PP) || defined (CPU_ARM))
+/* atomic */
+#ifdef SOFTWARE_CORELOCK
+#define test_and_set(a, v, cl) \
+    xchg8((a), (v), (cl))
+/* atomic */
+#define xchg8(a, v, cl) \
+({ uint32_t o; \
+   corelock_lock(cl); \
+   o = *(uint8_t *)(a); \
+   *(uint8_t *)(a) = (v); \
+   corelock_unlock(cl); \
+   o; })
+#define xchg32(a, v, cl) \
+({ uint32_t o; \
+   corelock_lock(cl); \
+   o = *(uint32_t *)(a); \
+   *(uint32_t *)(a) = (v); \
+   corelock_unlock(cl); \
+   o; })
+#define xchgptr(a, v, cl) \
+({ typeof (*(a)) o; \
+   corelock_lock(cl); \
+   o = *(a); \
+   *(a) = (v); \
+   corelock_unlock(cl); \
+   o; })
+#else
+/* atomic */
+#define test_and_set(a, v, ...) \
+    xchg8((a), (v))
+#define xchg8(a, v, ...) \
+({ uint32_t o; \
+   asm volatile( \
+       "swpb %0, %1, [%2]" \
+       : "=r"(o) \
+       : "r"(v), \
+         "r"((uint8_t*)(a))); \
+   o; })
+/* atomic */
+#define xchg32(a, v, ...) \
+({ uint32_t o; \
+   asm volatile( \
+       "swp %0, %1, [%2]" \
+       : "=r"(o) \
+       : "r"((uint32_t)(v)), \
+         "r"((uint32_t*)(a))); \
+   o; })
+/* atomic */
+#define xchgptr(a, v, ...) \
+({ typeof (*(a)) o; \
+   asm volatile( \
+       "swp %0, %1, [%2]" \
+       : "=r"(o) \
+       : "r"(v), "r"(a)); \
+   o; })
+#endif /* SOFTWARE_CORELOCK */
 #elif defined (CPU_COLDFIRE)
-#define test_and_set(x_, v_) \
-({ \
-    uint8_t old; \
-    asm volatile ( \
-        "bset.l %[v], (%[x]) \r\n" \
-        "sne.b %[old] \r\n" \
-        : [old]"=d,d"(old) \
-        : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
-    ); \
-    old; \
-    })
+/* atomic */
+/* one branch will be optimized away if v is a constant expression */
+#define test_and_set(a, v, ...) \
+({ uint32_t o = 0; \
+   if (v) { \
+       asm volatile ( \
+           "bset.b #0, (%0)" \
+           : : "a"((uint8_t*)(a)) \
+           : "cc"); \
+   } else { \
+       asm volatile ( \
+           "bclr.b #0, (%0)" \
+           : : "a"((uint8_t*)(a)) \
+           : "cc"); \
+   } \
+   asm volatile ("sne.b %0" \
+                 : "+d"(o)); \
+   o; })
 #elif CONFIG_CPU == SH7034
-#define test_and_set(x_, v_) \
-({ \
-    uint32_t old; \
-    asm volatile ( \
-        "tas.b @%[x] \r\n" \
-        "mov #-1, %[old] \r\n" \
-        "negc %[old], %[old] \r\n" \
-        : [old]"=r"(old) \
-        : [v]"M"((uint32_t)v_), /* Value of v_ must be 1 */ \
-          [x]"r"((uint8_t *)x_) \
-    ); \
-    old; \
-    })
-#else
-/* default for no asm version */
-#define test_and_set(x_, v_) \
-({ \
-    uint32_t old = *(uint32_t *)x_; \
-    *(uint32_t *)x_ = v_; \
-    old; \
-    })
-#endif
+/* atomic */
+#define test_and_set(a, v, ...) \
+({ uint32_t o; \
+   asm volatile ( \
+       "tas.b @%2 \n" \
+       "mov #-1, %0 \n" \
+       "negc %0, %0 \n" \
+       : "=r"(o) \
+       : "M"((uint32_t)(v)), /* Value of_v must be 1 */ \
+         "r"((uint8_t *)(a))); \
+   o; })
+#endif /* CONFIG_CPU == */
+
+/* defaults for no asm version */
+#ifndef test_and_set
+/* not atomic */
+#define test_and_set(a, v, ...) \
+({ uint32_t o = *(uint8_t *)(a); \
+   *(uint8_t *)(a) = (v); \
+   o; })
+#endif /* test_and_set */
+#ifndef xchg8
+/* not atomic */
+#define xchg8(a, v, ...) \
+({ uint32_t o = *(uint8_t *)(a); \
+   *(uint8_t *)(a) = (v); \
+   o; })
+#endif /* xchg8 */
+#ifndef xchg32
+/* not atomic */
+#define xchg32(a, v, ...) \
+({ uint32_t o = *(uint32_t *)(a); \
+   *(uint32_t *)(a) = (v); \
+   o; })
+#endif /* xchg32 */
+#ifndef xchgptr
+/* not atomic */
+#define xchgptr(a, v, ...) \
+({ typeof (*(a)) o = *(a); \
+   *(a) = (v); \
+   o; })
+#endif /* xchgptr */
 
+void core_idle(void);
+void core_wake(IF_COP_VOID(unsigned int core));
+
+#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
 struct thread_entry*
     create_thread(void (*function)(void), void* stack, int stack_size,
-                  const char *name IF_PRIO(, int priority)
-                  IF_COP(, unsigned int core, bool fallback));
+                  unsigned flags, const char *name
+                  IF_PRIO(, int priority)
+                  IF_COP(, unsigned int core));
 
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
 void trigger_cpu_boost(void);
 #else
 #define trigger_cpu_boost()
 #endif
-
+void thread_thaw(struct thread_entry *thread);
+void thread_wait(struct thread_entry *thread);
 void remove_thread(struct thread_entry *thread);
-void switch_thread(bool save_context, struct thread_entry **blocked_list);
+void switch_thread(struct thread_entry *old);
 void sleep_thread(int ticks);
-void block_thread(struct thread_entry **thread);
-void block_thread_w_tmo(struct thread_entry **thread, int timeout);
-void set_irq_level_and_block_thread(struct thread_entry **thread, int level);
-void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
-                                           int timeout, int level);
-void wakeup_thread(struct thread_entry **thread);
-void wakeup_thread_irq_safe(struct thread_entry **thread);
+
+/**
+ * Setup to allow using thread queues as locked or non-locked without speed
+ * sacrifices in both core locking types.
+ *
+ * The blocking/waking function inline two different version of the real
+ * function into the stubs when a software or other separate core locking
+ * mechanism is employed.
+ *
+ * When a simple test-and-set or similar instruction is available, locking
+ * has no cost and so one version is used and the internal worker is called
+ * directly.
+ *
+ * CORELOCK_NONE is treated the same as when an atomic instruction can be
+ * used.
+ */
+
+/* Blocks the current thread on a thread queue */
+#if CONFIG_CORELOCK == SW_CORELOCK
+void block_thread(struct thread_queue *tq);
+void block_thread_no_listlock(struct thread_entry **list);
+#else
+void _block_thread(struct thread_queue *tq);
+static inline void block_thread(struct thread_queue *tq)
+    { _block_thread(tq); }
+static inline void block_thread_no_listlock(struct thread_entry **list)
+    { _block_thread((struct thread_queue *)list); }
+#endif /* CONFIG_CORELOCK */
+
+/* Blocks the current thread on a thread queue for a max amount of time
+ * There is no "_no_listlock" version because timeout blocks without sync on
+ * the blocking queues is not permitted since either core could access the
+ * list at any time to do an implicit wake. In other words, objects with
+ * timeout support require lockable queues. */
+void block_thread_w_tmo(struct thread_queue *tq, int timeout);
+
+/* Wakes up the thread at the head of the queue */
+#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
+#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
+#if CONFIG_CORELOCK == SW_CORELOCK
+struct thread_entry * wakeup_thread(struct thread_queue *tq);
+struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
+#else
+struct thread_entry * _wakeup_thread(struct thread_queue *list);
+static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
+    { return _wakeup_thread(tq); }
+static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
+    { return _wakeup_thread((struct thread_queue *)list); }
+#endif /* CONFIG_CORELOCK */
+
+/* Initialize a thread_queue object. */
+static inline void thread_queue_init(struct thread_queue *tq)
+    { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
+/* A convenience function for waking an entire queue of threads. */
+static inline void thread_queue_wake(struct thread_queue *tq)
+    { while (wakeup_thread(tq) != NULL); }
+/* The no-listlock version of thread_queue_wake() */
+static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
+    { while (wakeup_thread_no_listlock(list) != NULL); }
+
 #ifdef HAVE_PRIORITY_SCHEDULING
 int thread_set_priority(struct thread_entry *thread, int priority);
 int thread_get_priority(struct thread_entry *thread);
 /* Yield that guarantees thread execution once per round regardless of
    thread's scheduler priority - basically a transient realtime boost
    without altering the scheduler's thread precedence. */
@@ -228,17 +547,20 @@ void priority_yield(void);
 #else
 #define priority_yield yield
 #endif /* HAVE_PRIORITY_SCHEDULING */
+#if NUM_CORES > 1
+unsigned int switch_core(unsigned int new_core);
+#endif
 struct thread_entry * thread_get_current(void);
 void init_threads(void);
 int thread_stack_usage(const struct thread_entry *thread);
 #if NUM_CORES > 1
 int idle_stack_usage(unsigned int core);
 #endif
-int thread_get_status(const struct thread_entry *thread);
+unsigned thread_get_status(const struct thread_entry *thread);
 void thread_get_name(char *buffer, int size,
                      struct thread_entry *thread);
 #ifdef RB_PROFILE
 void profile_thread(void);
 #endif
 
-#endif
+#endif /* THREAD_H */
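The "Busy" values comment added near the top of the header describes a swap-based claim/restore protocol. Below is a minimal sketch of that procedure, assuming a build where xchg32 needs no corelock argument; the shared variable and function names are invented for illustration and are not part of the header:

/* Illustrative only: the claim/restore protocol from the "Busy" values
 * comment.  slot_owner and claim_slot are made-up names; xchg32 and
 * STATE_BUSYi come from thread.h (SW_CORELOCK builds pass a corelock
 * as a third argument to xchg32). */
static int slot_owner = 0;              /* example shared variable */

static void claim_slot(int new_owner)
{
    int curr;

    /* Swap in the busy marker; spin while another core holds it. */
    while ((curr = xchg32(&slot_owner, STATE_BUSYi)) == STATE_BUSYi);

    /* Examine/modify while other cores only ever see the busy value. */
    if (curr == 0)
        curr = new_owner;

    /* Writing back a real value unlocks the variable atomically. */
    slot_owner = curr;
}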
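The block_thread/wakeup_thread declarations operate on struct thread_queue objects whichever corelock type is configured. A rough sketch of how a kernel object might pair them, assuming callers run in thread context; the struct and function names are invented, and the IRQ-level handling the real kernel objects perform around blocking is omitted:

/* Illustrative only: a toy event object built on the thread_queue API.
 * demo_event and its functions are made-up names; thread_queue_init,
 * block_thread and thread_queue_wake are declared in thread.h. */
struct demo_event
{
    struct thread_queue queue;   /* threads blocked on this event */
    volatile bool signalled;
};

static inline void demo_event_init(struct demo_event *e)
{
    e->signalled = false;
    thread_queue_init(&e->queue);
}

static inline void demo_event_wait(struct demo_event *e)
{
    if (!e->signalled)
        block_thread(&e->queue);    /* sleep until a wake arrives */
}

static inline void demo_event_signal(struct demo_event *e)
{
    e->signalled = true;
    thread_queue_wake(&e->queue);   /* wake every waiter, if any */
}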
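create_thread now takes a flags parameter, and CREATE_THREAD_FROZEN together with thread_thaw lets a thread be fully set up before it is allowed to run. A sketch under the assumption that the CPU core identifier comes from the usual target headers; the stack, thread function and wrapper names are invented:

/* Illustrative only: create a thread frozen, finish setup, then thaw it.
 * demo_stack, demo_thread and start_demo are made-up names; the rest
 * (DEFAULT_STACK_SIZE, CREATE_THREAD_FROZEN, PRIORITY_BACKGROUND,
 * IF_PRIO/IF_COP, sleep_thread, thread_thaw) is declared in or via this
 * header.  CPU as a core id is assumed to come from the target headers. */
static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void demo_thread(void)
{
    for (;;)
    {
        /* ... thread work ... */
        sleep_thread(1);            /* give up the processor for a tick */
    }
}

static void start_demo(void)
{
    struct thread_entry *t =
        create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                      CREATE_THREAD_FROZEN, "demo"
                      IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));

    /* ... other setup while the thread cannot run yet ... */

    if (t != NULL)
        thread_thaw(t);             /* the thread becomes runnable here */
}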