Diffstat (limited to 'firmware/kernel/thread-internal.h')
 -rw-r--r--  firmware/kernel/thread-internal.h  | 407
 1 file changed, 270 insertions(+), 137 deletions(-)
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h
index 894bd1fe7c..10606a54a6 100644
--- a/firmware/kernel/thread-internal.h
+++ b/firmware/kernel/thread-internal.h
@@ -78,30 +78,11 @@ struct priority_distribution
 
 #endif /* HAVE_PRIORITY_SCHEDULING */
 
-#ifdef HAVE_CORELOCK_OBJECT
-/* Operations to be performed just before stopping a thread and starting
-   a new one if specified before calling switch_thread */
-enum
-{
-    TBOP_CLEAR = 0,       /* No operation to do */
-    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
-    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
-};
+#define __rtr_queue      lldc_head
+#define __rtr_queue_node lldc_node
 
-struct thread_blk_ops
-{
-    struct corelock *cl_p; /* pointer to corelock */
-    unsigned char flags;   /* TBOP_* flags */
-};
-#endif /* NUM_CORES > 1 */
-
-/* Link information for lists thread is in */
-struct thread_entry; /* forward */
-struct thread_list
-{
-    struct thread_entry *prev; /* Previous thread in a list */
-    struct thread_entry *next; /* Next thread in a list */
-};
+#define __tmo_queue      ll_head
+#define __tmo_queue_node ll_node
 
 /* Information kept in each thread slot
  * members are arranged according to size - largest first - in order
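
The defines above replace the old hand-rolled struct thread_list with aliases onto the generic intrusive list types (lldc_* for the run queue, ll_* for the timeout queue). As a rough illustration of what that buys - the node now lives inside struct thread_entry and the head lives in whatever owns the queue - a sketch follows; the exact definitions of lldc_head/ll_head come from the generic linked-list header and are only named, not reproduced, here.

    /* Illustration only -- the real lldc_head/lldc_node and ll_head/ll_node
     * types come from the generic linked-list header.  The point of the
     * aliases is that scheduler queues are ordinary intrusive lists. */
    struct __rtr_queue run_queue;   /* alias of struct lldc_head: run queue head    */
    struct __tmo_queue tmo_queue;   /* alias of struct ll_head:   timeout list head */

    /* struct thread_entry (below) embeds one node per queue it can sit on:
     * 'rtr' for the run queue, 'tmo' for the timeout list and 'wq' for a
     * kernel object's wait queue. */
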
@@ -109,73 +90,64 @@ struct thread_list
  */
 struct thread_entry
 {
     struct regs context;            /* Register context at switch -
                                        _must_ be first member */
-    uintptr_t *stack;               /* Pointer to top of stack */
-    const char *name;               /* Thread name */
-    long tmo_tick;                  /* Tick when thread should be woken from
-                                       timeout -
-                                       states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_list l;           /* Links for blocked/waking/running -
-                                       circular linkage in both directions */
-    struct thread_list tmo;         /* Links for timeout list -
-                                       Circular in reverse direction, NULL-terminated in
-                                       forward direction -
-                                       states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_entry **bqp;      /* Pointer to list variable in kernel
-                                       object where thread is blocked - used
-                                       for implicit unblock and explicit wake
-                                       states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct corelock *obj_cl;        /* Object corelock where thead is blocked -
-                                       states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-    struct corelock waiter_cl;      /* Corelock for thread_wait */
-    struct corelock slot_cl;        /* Corelock to lock thread slot */
-    unsigned char core;             /* The core to which thread belongs */
-#endif
-    struct thread_entry *queue;     /* List of threads waiting for thread to be
-                                       removed */
-#ifdef HAVE_WAKEUP_EXT_CB
-    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
-                                       performs special steps needed when being
-                                       forced off of an object's wait queue that
-                                       go beyond the standard wait queue removal
-                                       and priority disinheritance */
-    /* Only enabled when using queue_send for now */
+#ifndef HAVE_SDL_THREADS
+    uintptr_t *stack;               /* Pointer to top of stack */
 #endif
-#if defined(HAVE_SEMAPHORE_OBJECTS) || \
-    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
-    NUM_CORES > 1
-    volatile intptr_t retval;       /* Return value from a blocked operation/
-                                       misc. use */
+    const char *name;               /* Thread name */
+    long tmo_tick;                  /* Tick when thread should be woken */
+    struct __rtr_queue_node rtr;    /* Node for run queue */
+    struct __tmo_queue_node tmo;    /* Links for timeout list */
+    struct __wait_queue_node wq;    /* Node for wait queue */
+    struct __wait_queue *volatile wqp; /* Pointer to registered wait queue */
+#if NUM_CORES > 1
+    struct corelock waiter_cl;      /* Corelock for thread_wait */
+    struct corelock slot_cl;        /* Corelock to lock thread slot */
+    unsigned char core;             /* The core to which thread belongs */
 #endif
-    uint32_t id;                    /* Current slot id */
-    int __errno;                    /* Thread error number (errno tls) */
+    struct __wait_queue queue;      /* List of threads waiting for thread to be
+                                       removed */
+    volatile intptr_t retval;       /* Return value from a blocked operation/
+                                       misc. use */
+    uint32_t id;                    /* Current slot id */
+    int __errno;                    /* Thread error number (errno tls) */
 #ifdef HAVE_PRIORITY_SCHEDULING
     /* Priority summary of owned objects that support inheritance */
     struct blocker *blocker;        /* Pointer to blocker when this thread is blocked
                                        on an object that supports PIP -
                                        states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
     struct priority_distribution pdist; /* Priority summary of owned objects
                                        that have blocked threads and thread's own
                                        base priority */
     int skip_count;                 /* Number of times skipped if higher priority
                                        thread was running */
     unsigned char base_priority;    /* Base priority (set explicitly during
                                        creation or thread_set_priority) */
     unsigned char priority;         /* Scheduled priority (higher of base or
                                        all threads blocked by this one) */
 #endif
-    unsigned short stack_size;      /* Size of stack in bytes */
-    unsigned char state;            /* Thread slot state (STATE_*) */
+#ifndef HAVE_SDL_THREADS
+    unsigned short stack_size;      /* Size of stack in bytes */
+#endif
+    unsigned char state;            /* Thread slot state (STATE_*) */
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
     unsigned char cpu_boost;        /* CPU frequency boost flag */
 #endif
 #ifdef HAVE_IO_PRIORITY
     unsigned char io_priority;
 #endif
 };
 
+/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
+#define THREAD_ID_VERSION_SHIFT 8
+#define THREAD_ID_VERSION_MASK  0xffffff00
+#define THREAD_ID_SLOT_MASK     0x000000ff
+#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
+#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)
+
+#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
+
 /* Information kept for each core
  * Members are arranged for the same reason as in thread_entry
  */
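
The thread ID now lives in this header: the low byte is the slot index into the thread table and the upper 24 bits are a generation count, presumably bumped by new_thread_id() (declared further down) each time a slot is recycled so that stale handles stop matching. A small worked example of the layout; the version-bump line is only a sketch of that recycling step, the real logic lives in thread.c.

    /* Worked example of the ID layout -- only the macros come from this
     * header; the recycling step is an assumption about new_thread_id(). */
    uint32_t id = THREAD_ID_INIT(5);          /* 0x00000105: slot 5, version 1 */
    unsigned int slot = THREAD_ID_SLOT(id);   /* 5 */

    /* Recycling the slot would advance the version so old handles no
     * longer compare equal: */
    id = ((id + (1u << THREAD_ID_VERSION_SHIFT)) & THREAD_ID_VERSION_MASK)
             | THREAD_ID_SLOT(id);            /* 0x00000205: slot 5, version 2 */
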
@@ -183,53 +155,97 @@ struct core_entry
 {
     /* "Active" lists - core is constantly active on these and are never
        locked and interrupts do not access them */
-    struct thread_entry *running;  /* threads that are running (RTR) */
-    struct thread_entry *timeout;  /* threads that are on a timeout before
-                                      running again */
-    struct thread_entry *block_task; /* Task going off running list */
+    struct __rtr_queue rtr;        /* Threads that are runnable */
+    struct __tmo_queue tmo;        /* Threads on a bounded wait */
+    struct thread_entry *running;  /* Currently running thread */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    struct priority_distribution rtr; /* Summary of running and ready-to-run
-                                         threads */
+    struct priority_distribution rtr_dist; /* Summary of runnables */
 #endif
-    long next_tmo_check;           /* soonest time to check tmo threads */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct thread_blk_ops blk_ops; /* operations to perform when
-                                      blocking a thread */
-    struct corelock rtr_cl;        /* Lock for rtr list */
+    long next_tmo_check;           /* Next due timeout check */
+#if NUM_CORES > 1
+    struct corelock rtr_cl;        /* Lock for rtr list */
 #endif /* NUM_CORES */
 };
 
-/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
-#define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK  0xffffff00
-#define THREAD_ID_SLOT_MASK     0x000000ff
-#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
-#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)
+/* Hide a few scheduler details from itself to make allocation more flexible */
+#define __main_thread_name \
+    ({ extern const char __main_thread_name_str[]; \
+       __main_thread_name_str; })
+
+static FORCE_INLINE
+    void * __get_main_stack(size_t *stacksize)
+{
+#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
+    extern uintptr_t stackbegin[];
+    extern uintptr_t stackend[];
+#else
+    extern uintptr_t *stackbegin;
+    extern uintptr_t *stackend;
+#endif
+    *stacksize = (uintptr_t)stackend - (uintptr_t)stackbegin;
+    return stackbegin;
+}
 
-/* Thread locking */
+void format_thread_name(char *buf, size_t bufsize,
+                        const struct thread_entry *thread);
+
+static FORCE_INLINE
+    struct core_entry * __core_id_entry(unsigned int core)
+{
 #if NUM_CORES > 1
-#define LOCK_THREAD(thread) \
-    ({ corelock_lock(&(thread)->slot_cl); })
-#define TRY_LOCK_THREAD(thread) \
-    ({ corelock_try_lock(&(thread)->slot_cl); })
-#define UNLOCK_THREAD(thread) \
-    ({ corelock_unlock(&(thread)->slot_cl); })
-#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
-    ({ unsigned int _core = (thread)->core; \
-       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
-       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
-#else /* NUM_CORES == 1*/
-#define LOCK_THREAD(thread) \
-    ({ (void)(thread); })
-#define TRY_LOCK_THREAD(thread) \
-    ({ (void)(thread); })
-#define UNLOCK_THREAD(thread) \
-    ({ (void)(thread); })
-#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
-    ({ (void)(thread); })
-#endif /* NUM_CORES */
+    extern struct core_entry * __cores[NUM_CORES];
+    return __cores[core];
+#else
+    extern struct core_entry __cores[NUM_CORES];
+    return &__cores[core];
+#endif
+}
 
-#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
+#define __running_self_entry() \
+    __core_id_entry(CURRENT_CORE)->running
+
+static FORCE_INLINE
+    struct thread_entry * __thread_slot_entry(unsigned int slotnum)
+{
+    extern struct thread_entry * __threads[MAXTHREADS];
+    return __threads[slotnum];
+}
+
+#define __thread_id_entry(id) \
+    __thread_slot_entry(THREAD_ID_SLOT(id))
+
+#define THREAD_FROM(p, member) \
+    container_of(p, struct thread_entry, member)
+
+#define RTR_EMPTY(rtrp) \
+    ({ (rtrp)->head == NULL; })
+
+#define RTR_THREAD_FIRST(rtrp) \
+    ({ THREAD_FROM((rtrp)->head, rtr); })
+
+#define RTR_THREAD_NEXT(thread) \
+    ({ THREAD_FROM((thread)->rtr.next, rtr); })
+
+#define TMO_THREAD_FIRST(tmop) \
+    ({ struct __tmo_queue *__tmop = (tmop); \
+       __tmop->head ? THREAD_FROM(__tmop->head, tmo) : NULL; })
+
+#define TMO_THREAD_NEXT(thread) \
+    ({ struct __tmo_queue_node *__next = (thread)->tmo.next; \
+       __next ? THREAD_FROM(__next, tmo) : NULL; })
+
+#define WQ_THREAD_FIRST(wqp) \
+    ({ struct __wait_queue *__wqp = (wqp); \
+       __wqp->head ? THREAD_FROM(__wqp->head, wq) : NULL; })
+
+#define WQ_THREAD_NEXT(thread) \
+    ({ struct __wait_queue_node *__next = (thread)->wq.next; \
+       __next ? THREAD_FROM(__next, wq) : NULL; })
+
+void thread_alloc_init(void) INIT_ATTR;
+struct thread_entry * thread_alloc(void);
+void thread_free(struct thread_entry *thread);
+void new_thread_id(struct thread_entry *thread);
 
 /* Switch to next runnable thread */
 void switch_thread(void);
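
Because every queue links generic nodes embedded in struct thread_entry, getting from a node back to its thread is a container_of() step, wrapped by THREAD_FROM and the *_THREAD_FIRST/*_THREAD_NEXT macros above. A hedged example of walking the current core's run queue with these accessors; it assumes the lldc run queue is circular (as the naming and the lack of a NULL check in RTR_THREAD_NEXT suggest), so the walk stops when it wraps back to the first entry. The real traversals live in thread.c.

    /* Illustration of the accessor macros, not scheduler code. */
    struct core_entry *corep = __core_id_entry(CURRENT_CORE);

    if (!RTR_EMPTY(&corep->rtr))
    {
        struct thread_entry *first = RTR_THREAD_FIRST(&corep->rtr);
        struct thread_entry *t = first;
        do
        {
            /* e.g. format_thread_name(buf, sizeof(buf), t); */
            t = RTR_THREAD_NEXT(t);     /* circular: eventually wraps */
        }
        while (t != first);
    }
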
@@ -237,7 +253,21 @@ void switch_thread(void);
  * next tick) */
 void sleep_thread(int ticks);
 /* Blocks the current thread on a thread queue (< 0 == infinite) */
-void block_thread(struct thread_entry *current, int timeout);
+void block_thread_(struct thread_entry *current, int timeout);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define block_thread(thread, timeout, __wqp, bl) \
+    ({ struct thread_entry *__t = (thread); \
+       __t->wqp = (__wqp); \
+       if (!__builtin_constant_p(bl) || (bl)) \
+           __t->blocker = (bl); \
+       block_thread_(__t, (timeout)); })
+#else
+#define block_thread(thread, timeout, __wqp, bl...) \
+    ({ struct thread_entry *__t = (thread); \
+       __t->wqp = (__wqp); \
+       block_thread_(__t, (timeout)); })
+#endif
 
 /* Return bit flags for thread wakeup */
 #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
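
block_thread() now names the wait queue and (under priority scheduling) the blocker explicitly instead of going through the old bqp pointer; the __builtin_constant_p(bl) test lets a literal NULL blocker argument drop the store entirely. A sketch of the call shape from a hypothetical kernel object follows: my_semaphore and its count handling are invented for illustration, and the corelocks, retval handling and wakeup side that the real objects in firmware/kernel/ need are left out.

    /* Hypothetical object -- call shape only, not a working semaphore. */
    struct my_semaphore
    {
        struct __wait_queue queue;  /* waiters */
        int count;
    };

    static void my_semaphore_wait(struct my_semaphore *s, int timeout)
    {
        if (--s->count >= 0)
            return;                                      /* no contention */

        struct thread_entry *current = __running_self_entry();
        block_thread(current, timeout, &s->queue, NULL); /* park on s->queue */
        switch_thread();                                 /* run someone else */
    }
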
@@ -246,7 +276,7 @@ void block_thread(struct thread_entry *current, int timeout);
                             higher priority than current were woken) */
 
 /* A convenience function for waking an entire queue of threads. */
-unsigned int thread_queue_wake(struct thread_entry **list);
+unsigned int wait_queue_wake(struct __wait_queue *wqp);
 
 /* Wakeup a thread at the head of a list */
 enum wakeup_thread_protocol
@@ -257,36 +287,139 @@ enum wakeup_thread_protocol
     WAKEUP_TRANSFER_MULTI,
 };
 
-unsigned int wakeup_thread_(struct thread_entry **list
-                            IF_PRIO(, enum wakeup_thread_protocol proto));
+unsigned int wakeup_thread_(struct thread_entry *thread
+                            IF_PRIO(, enum wakeup_thread_protocol proto));
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-#define wakeup_thread(list, proto) \
-    wakeup_thread_((list), (proto))
-#else /* !HAVE_PRIORITY_SCHEDULING */
-#define wakeup_thread(list, proto...) \
-    wakeup_thread_((list));
-#endif /* HAVE_PRIORITY_SCHEDULING */
+#define wakeup_thread(thread, proto) \
+    wakeup_thread_((thread), (proto))
+#else
+#define wakeup_thread(thread, proto...) \
+    wakeup_thread_((thread));
+#endif
 
-#ifdef HAVE_IO_PRIORITY
-void thread_set_io_priority(unsigned int thread_id, int io_priority);
-int thread_get_io_priority(unsigned int thread_id);
-#endif /* HAVE_IO_PRIORITY */
-#if NUM_CORES > 1
-unsigned int switch_core(unsigned int new_core);
+#ifdef RB_PROFILE
+void profile_thread(void);
 #endif
 
-/* Return the id of the calling thread. */
-unsigned int thread_self(void);
+static inline void rtr_queue_init(struct __rtr_queue *rtrp)
+{
+    lldc_init(rtrp);
+}
+
+static inline void rtr_queue_make_first(struct __rtr_queue *rtrp,
+                                        struct thread_entry *thread)
+{
+    rtrp->head = &thread->rtr;
+}
 
-/* Return the thread_entry for the calling thread */
-struct thread_entry* thread_self_entry(void);
+static inline void rtr_queue_add(struct __rtr_queue *rtrp,
+                                 struct thread_entry *thread)
+{
+    lldc_insert_last(rtrp, &thread->rtr);
+}
 
-/* Return thread entry from id */
-struct thread_entry *thread_id_entry(unsigned int thread_id);
+static inline void rtr_queue_remove(struct __rtr_queue *rtrp,
+                                    struct thread_entry *thread)
+{
+    lldc_remove(rtrp, &thread->rtr);
+}
 
-#ifdef RB_PROFILE
-void profile_thread(void);
+#define TMO_NOT_QUEUED (NULL + 1)
+
+static inline bool tmo_is_queued(struct thread_entry *thread)
+{
+    return thread->tmo.next != TMO_NOT_QUEUED;
+}
+
+static inline void tmo_set_dequeued(struct thread_entry *thread)
+{
+    thread->tmo.next = TMO_NOT_QUEUED;
+}
+
+static inline void tmo_queue_init(struct __tmo_queue *tmop)
+{
+    ll_init(tmop);
+}
+
+static inline void tmo_queue_expire(struct __tmo_queue *tmop,
+                                    struct thread_entry *prev,
+                                    struct thread_entry *thread)
+{
+    ll_remove_next(tmop, prev ? &prev->tmo : NULL);
+    tmo_set_dequeued(thread);
+}
+
+static inline void tmo_queue_remove(struct __tmo_queue *tmop,
+                                    struct thread_entry *thread)
+{
+    if (tmo_is_queued(thread))
+    {
+        ll_remove(tmop, &thread->tmo);
+        tmo_set_dequeued(thread);
+    }
+}
+
+static inline void tmo_queue_register(struct __tmo_queue *tmop,
+                                      struct thread_entry *thread)
+{
+    if (!tmo_is_queued(thread))
+        ll_insert_last(tmop, &thread->tmo);
+}
+
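
TMO_NOT_QUEUED overloads the node's next pointer as an "off list" marker (NULL is a legitimate next value for the tail of the singly-linked list, hence NULL + 1), and the register/expire/remove helpers above keep that marker consistent. The sketch below shows roughly the kind of tick-time scan over core->tmo these helpers are shaped for; the real check in thread.c also updates next_tmo_check and treats sleeping threads differently from blocked-with-timeout ones, and WAKEUP_DEFAULT, TIME_BEFORE and current_tick are assumed from the rest of the kernel.

    /* Sketch of a timeout scan -- approximation only, not the code in
     * thread.c.  State handling and locking are omitted. */
    static void check_tmo_expired_sketch(struct core_entry *corep)
    {
        long tick = current_tick;
        struct thread_entry *prev = NULL;
        struct thread_entry *t = TMO_THREAD_FIRST(&corep->tmo);

        while (t != NULL)
        {
            struct thread_entry *next = TMO_THREAD_NEXT(t);

            if (!TIME_BEFORE(tick, t->tmo_tick))     /* due now or in the past */
            {
                tmo_queue_expire(&corep->tmo, prev, t); /* unlink node after prev */
                wakeup_thread(t, WAKEUP_DEFAULT);
                /* 'prev' is unchanged: 't' is no longer in the list */
            }
            else
                prev = t;

            t = next;
        }
    }
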
+static inline void wait_queue_init(struct __wait_queue *wqp)
+{
+    lld_init(wqp);
+}
+
+static inline void wait_queue_register(struct thread_entry *thread)
+{
+    lld_insert_last(thread->wqp, &thread->wq);
+}
+
+static inline struct __wait_queue *
+    wait_queue_ptr(struct thread_entry *thread)
+{
+    return thread->wqp;
+}
+
+static inline struct __wait_queue *
+    wait_queue_remove(struct thread_entry *thread)
+{
+    struct __wait_queue *wqp = thread->wqp;
+    thread->wqp = NULL;
+    lld_remove(wqp, &thread->wq);
+    return wqp;
+}
+
+static inline struct __wait_queue *
+    wait_queue_try_remove(struct thread_entry *thread)
+{
+    struct __wait_queue *wqp = thread->wqp;
+    if (wqp)
+    {
+        thread->wqp = NULL;
+        lld_remove(wqp, &thread->wq);
+    }
+
+    return wqp;
+}
+
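
A blocking thread records its queue in thread->wqp (see the block_thread() macro earlier) and the waker detaches it again; wait_queue_try_remove() exists because a timeout and an explicit wake can race for the same thread. Continuing the hypothetical semaphore from above, the release side would look roughly like this - again only the call shape, without the corelock and retval handling the real objects do, and assuming WAKEUP_DEFAULT is the plain entry of the wakeup protocol enum and that wakeup_thread_() dequeues the thread itself.

    /* Waker side of the earlier hypothetical semaphore -- sketch only. */
    static void my_semaphore_release(struct my_semaphore *s)
    {
        if (++s->count > 0)
            return;                                /* nobody was waiting */

        struct thread_entry *waiter = WQ_THREAD_FIRST(&s->queue);
        if (waiter != NULL)
            wakeup_thread(waiter, WAKEUP_DEFAULT); /* presumably dequeues and
                                                      makes it runnable */
    }
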
+static inline void blocker_init(struct blocker *bl)
+{
+    bl->thread = NULL;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    bl->priority = PRIORITY_IDLE;
+#endif
+}
+
+static inline void blocker_splay_init(struct blocker_splay *blsplay)
+{
+    blocker_init(&blsplay->blocker);
+#ifdef HAVE_PRIORITY_SCHEDULING
+    threadbit_clear(&blsplay->mask);
 #endif
+    corelock_init(&blsplay->cl);
+}
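
struct blocker is the hook priority inheritance hangs off: an object that can block threads embeds one, blocker_init() clears the owner and parks the inherited priority at PRIORITY_IDLE, and blocker_splay adds the per-core ownership mask and corelock for objects that can be owned from multiple cores. A hypothetical lock type showing where the blocker sits and how a waiter would hand it to block_thread(); the real mutex and queue types elsewhere in firmware/kernel/ are more involved.

    /* Hypothetical PIP-aware object -- placement and init only. */
    struct my_lock
    {
        struct blocker blocker;     /* owner + inherited priority live here */
        struct __wait_queue queue;  /* threads waiting for the lock */
    };

    static void my_lock_init(struct my_lock *lk)
    {
        blocker_init(&lk->blocker);
        wait_queue_init(&lk->queue);
    }

    /* A waiter would then pass the blocker to block_thread() so the owner
     * can inherit its priority (TIMEOUT_BLOCK being the usual "wait
     * forever" constant):
     *     block_thread(__running_self_entry(), TIMEOUT_BLOCK,
     *                  &lk->queue, &lk->blocker);
     */
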
 
 #endif /* THREAD_INTERNAL_H */