Diffstat (limited to 'firmware/kernel/include/thread.h')
-rw-r--r--  firmware/kernel/include/thread.h  387
1 files changed, 387 insertions, 0 deletions
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
new file mode 100644
index 0000000000..9cc33b23ae
--- /dev/null
+++ b/firmware/kernel/include/thread.h
@@ -0,0 +1,387 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/           \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
#include "corelock.h"

/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high-priority threads more CPU time than lower-priority threads
 * when they need it. Priorities are relative: the difference between a
 * lower-priority runnable thread and the highest-priority runnable thread
 * determines how much aging the lower-priority thread needs before it gets
 * scheduled, which prevents starvation.
 *
 * For example, if the software playback codec's PCM buffer is draining to a
 * critical level, the codec can gradually raise its own priority above the
 * user interface to prevent playback skipping (see the illustrative sketch
 * after the priority defines below).
 */
#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
#define PRIORITY_RESERVED_LOW    32  /* Reserved */
#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4   /* Lowest realtime range */
#define PRIORITY_BUFFERING       15  /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16  /* The main thread */
#define PRIORITY_RECORDING       16  /* Recording thread */
#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18  /* All other firmware threads */
#define PRIORITY_BACKGROUND      20  /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32  /* Priority representative of no tasks */

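/* Illustrative sketch (not part of the original header), assuming
 * HAVE_PRIORITY_SCHEDULING: a playback/codec thread can nudge its own
 * priority from PRIORITY_PLAYBACK toward PRIORITY_PLAYBACK_MAX as its PCM
 * buffer drains, using thread_self() and thread_set_priority() declared
 * later in this header. The 25% threshold is made up for the example. */
#if 0 /* example only - not compiled */
static void example_codec_adjust_priority(int pcm_fill_percent)
{
    int prio = PRIORITY_PLAYBACK;

    if (pcm_fill_percent < 25)
    {
        /* Scale linearly: empty buffer => PRIORITY_PLAYBACK_MAX (the highest
           allowed), nearly 25% full => just below PRIORITY_PLAYBACK */
        prio = PRIORITY_PLAYBACK_MAX +
               (pcm_fill_percent * (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)) / 25;
    }

    thread_set_priority(thread_self(), prio);
}
#endif
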
#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32


#if CONFIG_CODEC == SWCODEC
# ifdef HAVE_HARDWARE_CLICK
#  define BASETHREADS  17
# else
#  define BASETHREADS  16
# endif
#else
# define BASETHREADS   11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
/*
 * We need more stack when running under a host OS, possibly because the
 * C library functions are more expensive there.
 *
 * (The simulator may not model stack usage anyway, but better safe.) */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS || __PCTOOL__ */

/* NOTE: The word "queue" below may also refer to a linked list of threads
   that is normally handled in FIFO order, and not necessarily to a kernel
   event_queue. */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);


struct priority_distribution
{
    uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
};
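
/* Illustrative sketch (not part of the original header): how a
 * priority_distribution is kept consistent. Adding a thread of priority p
 * bumps hist[p] and sets bit p of the mask; removing it reverses that. The
 * highest effective priority is then the lowest set bit of the mask. Helper
 * names are made up, and __builtin_ctz is used here purely for brevity. */
#if 0 /* example only - not compiled */
static inline void example_prio_add(struct priority_distribution *pd, int p)
{
    if (pd->hist[p]++ == 0)
        pd->mask |= 1u << p;        /* first thread at this priority */
}

static inline void example_prio_remove(struct priority_distribution *pd, int p)
{
    if (--pd->hist[p] == 0)
        pd->mask &= ~(1u << p);     /* last thread at this priority left */
}

static inline int example_prio_highest(const struct priority_distribution *pd)
{
    /* lowest numbered set bit == highest priority */
    return pd->mask != 0 ? (int)__builtin_ctz(pd->mask) : PRIORITY_IDLE;
}
#endif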

#endif /* HAVE_PRIORITY_SCHEDULING */

/* Information kept in each thread slot
 * Members are arranged by size - largest first - so that the structure is
 * both well aligned and tightly packed.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                   performs special steps needed when being
                                   forced off of an object's wait queue that
                                   go beyond the standard wait queue removal
                                   and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                  creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};

/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))

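/* Illustrative sketch (not part of the original header): decomposing the
 * 16-bit thread ID into its slot (low byte) and version (high byte) fields.
 * Helper names are made up for the example. */
#if 0 /* example only - not compiled */
static inline unsigned int example_thread_id_slot(unsigned int id)
{
    return id & THREAD_ID_SLOT_MASK;    /* index into the thread slot array */
}

static inline unsigned int example_thread_id_version(unsigned int id)
{
    /* bumped when a slot is reused so stale IDs can be detected */
    return (id & THREAD_ID_VERSION_MASK) >> THREAD_ID_VERSION_SHIFT;
}

/* e.g. THREAD_ID_INIT(3) == 0x0103: version 1, slot 3 */
#endif
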
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core works on these constantly; they are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};

extern void yield(void);
extern unsigned sleep(unsigned ticks);

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));

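/* Illustrative sketch (not part of the original header): creating a frozen
 * thread and releasing it later with thread_thaw(), declared further down.
 * The stack size, thread function, priority and choice of core are
 * assumptions made up for the example; IF_PRIO()/IF_COP() expand to nothing
 * on builds without priority scheduling or a second core. */
#if 0 /* example only - not compiled */
static long example_stack[0x400 / sizeof(long)];

static void example_thread_func(void)
{
    /* ... do the thread's work ... */
    thread_exit();
}

static void example_start(void)
{
    unsigned int id = create_thread(example_thread_func,
                                    example_stack, sizeof(example_stack),
                                    CREATE_THREAD_FROZEN, "example"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    /* ... once the rest of the system is ready ... */
    thread_thaw(id);
}
#endif
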
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost()  do { } while(0)
#endif
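/* Illustrative sketch (not part of the original header): bracketing a burst
 * of heavy work with the boost calls above; on targets without
 * HAVE_SCHEDULER_BOOSTCTRL they compile to nothing. The worker body is made
 * up for the example. */
#if 0 /* example only - not compiled */
static void example_heavy_work(void)
{
    trigger_cpu_boost();   /* ask for the boosted CPU frequency */
    /* ... CPU-intensive processing ... */
    cancel_cpu_boost();    /* withdraw this thread's boost request */
}
#endif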
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

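/* Illustrative sketch (not part of the original header): the basic shape of
 * an indefinite block on a kernel object's waiter list, ignoring the IRQ
 * masking and corelocking a real kernel object must also perform. The
 * 'example_waiters' list is made up for the example. */
#if 0 /* example only - not compiled */
static struct thread_entry *example_waiters = NULL;

static void example_wait(void)
{
    struct thread_entry *current = thread_self_entry();

    current->bqp = &example_waiters; /* where a wakeup will look for us */
    block_thread(current);           /* state becomes STATE_BLOCKED */
    switch_thread();                 /* yield the CPU until woken */
}
#endif
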
/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);

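/* Illustrative sketch (not part of the original header): waking the head of
 * the same 'example_waiters' list as in the sketch above and yielding if a
 * higher-priority thread became runnable. */
#if 0 /* example only - not compiled */
static void example_signal(void)
{
    unsigned int result = wakeup_thread(&example_waiters);

    if (result & THREAD_SWITCH)
        switch_thread(); /* a woken thread outranks the current one */
}
#endif
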
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif

/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
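
/* Illustrative sketch (not part of the original header): dumping debug info
 * for the calling thread. The 32-byte name buffer, the DEBUGF() output macro
 * and treating thread_stack_usage() as a percentage are assumptions for the
 * example. */
#if 0 /* example only - not compiled */
static void example_dump_current_thread(void)
{
    struct thread_entry *t = thread_self_entry();
    char name[32];

    thread_get_name(name, sizeof(name), t);
    DEBUGF("%s: stack usage %d\n", name, thread_stack_usage(t));
}
#endif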
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */