Diffstat (limited to 'firmware/kernel/thread-internal.h')
-rw-r--r--  firmware/kernel/thread-internal.h | 357
1 file changed, 357 insertions(+), 0 deletions(-)
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h
new file mode 100644
index 0000000000..c2acdfbaa9
--- /dev/null
+++ b/firmware/kernel/thread-internal.h
@@ -0,0 +1,357 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef THREAD_H
#define THREAD_H

#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"

/*
 * More stack is needed when running under a host OS, perhaps because the
 * host's C library functions are more expensive. The simulator most likely
 * does not model stack usage anyway, but allow for it regardless.
 */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */

#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
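
/* A minimal sketch (not the real implementation - see thread.c) of the
 * classic two-core Peterson lock that the corelock_* routines implement.
 * CURRENT_CORE and the flag values here are assumptions for illustration. */
#if 0
static inline void corelock_lock_sketch(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;  /* assumed: 0 or 1 */
    const unsigned int other = 1 - core;

    cl->myl[core] = 1;         /* announce interest in the lock */
    cl->turn = other;          /* give the tie-break to the other core */

    /* Wait while the other core is interested and holds the tie-break */
    while (cl->myl[other] != 0 && cl->turn == other);
}

static inline void corelock_unlock_sketch(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0; /* drop interest; releases any waiter */
}
#endif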

/* NOTE: In this file, "queue" may also refer to a linked list of threads
   that is normally serviced in FIFO order, not necessarily a kernel
   event_queue. */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
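
/* Grouping the timeout states together lets code test "is this thread on
 * the tmo list?" with simple compares. A hypothetical helper: */
#if 0
static inline bool state_has_timeout(unsigned char state)
{
    /* Same as: state == STATE_SLEEPING || state == STATE_BLOCKED_W_TMO */
    return state >= TIMEOUT_STATE_FIRST && state <= STATE_BLOCKED_W_TMO;
}
#endif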

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
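
/* Threads link into circular doubly-linked lists through these members.
 * A sketch of inserting `thread` before `head` (hypothetical helper; the
 * real list primitives live in thread.c): */
#if 0
static inline void thread_list_insert_sketch(struct thread_entry *head,
                                             struct thread_entry *thread)
{
    thread->l.next = head;
    thread->l.prev = head->l.prev;
    head->l.prev->l.next = thread;
    head->l.prev = thread;
}
#endif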

#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */

#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);
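
/* A sketch of how a priority-inheriting object might wire up its blocker;
 * struct example_mutex, its fields and the PRIORITY_IDLE usage are
 * assumptions for illustration, not declarations from this header. */
#if 0
struct example_mutex
{
    struct thread_entry *queue; /* waiters blocked on the mutex */
    struct blocker blocker;     /* owner and inherited-priority tracking */
};

static void example_mutex_init(struct example_mutex *mtx)
{
    mtx->queue = NULL;
    mtx->blocker.thread = NULL; /* no owner yet */
    mtx->blocker.priority = PRIORITY_IDLE; /* assumed "no waiter" level */
    /* Mutexes hand ownership directly to the thread they wake */
    mtx->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
}
#endif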


struct priority_distribution
{
    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;                /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */
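
/* A sketch of the usual maintenance of a priority_distribution: count the
 * entries per priority and mirror "bucket is non-empty" in the mask so the
 * highest occupied priority can be found with one bit scan. Assumes
 * NUM_PRIORITIES <= 32; the helper names are hypothetical. */
#if 0
static inline void prio_add_entry_sketch(struct priority_distribution *pd,
                                         int priority)
{
    if (pd->hist[priority]++ == 0)      /* first entry at this level? */
        pd->mask |= 1ul << priority;    /* mark the level occupied */
}

static inline void prio_remove_entry_sketch(struct priority_distribution *pd,
                                            int priority)
{
    if (--pd->hist[priority] == 0)      /* last entry at this level? */
        pd->mask &= ~(1ul << priority); /* mark the level empty */
}
#endif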

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                   performs special steps needed when being
                                   forced off of an object's wait queue that
                                   go beyond the standard wait queue removal
                                   and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};

/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
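
/* A sketch of how the packed ID detects stale handles: the slot index stays
 * fixed while the version byte is bumped on each slot reuse, so an old ID
 * for a recycled slot no longer compares equal. Helper names are
 * hypothetical. */
#if 0
static inline unsigned int thread_id_slot(unsigned int id)
{
    return id & THREAD_ID_SLOT_MASK; /* low byte selects the slot */
}

static inline uint16_t thread_id_bump_version(uint16_t id)
{
    /* Advance the version byte (wrapping within 8 bits), keep the slot */
    return ((id + (1u << THREAD_ID_VERSION_SHIFT)) & THREAD_ID_VERSION_MASK)
           | (id & THREAD_ID_SLOT_MASK);
}
#endif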

#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */
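
/* A sketch of the intended use: record the deferred operation, then call
 * switch_thread, which performs it once the outgoing context is safely
 * saved. The cores[] array and the object layout are assumptions here. */
#if 0
    struct thread_blk_ops *ops = &cores[CURRENT_CORE].blk_ops;
    ops->flags = TBOP_UNLOCK_CORELOCK;
    ops->cl_p  = &object->cl; /* corelock to drop after the switch */
    switch_thread();
#endif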

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core operates on these constantly; they are
       never locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
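
/* For example, with HAVE_PRIORITY_SCHEDULING defined,
 *     unsigned int f(int a IF_PRIO(, int priority));
 * declares f(int, int); without it, the priority parameter vanishes and the
 * same line declares f(int). */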

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
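
/* A usage sketch: create a thread frozen, finish any setup it depends on,
 * then thaw it. The stack, name, function and priority below are
 * illustrative assumptions, not values from this header. */
#if 0
static long demo_stack[0x400 / sizeof(long)];
static void demo_thread_func(void);

void demo_start(void)
{
    unsigned int id = create_thread(demo_thread_func,
                                    demo_stack, sizeof(demo_stack),
                                    CREATE_THREAD_FROZEN, "demo"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    /* ... initialise state the thread reads at startup ... */
    thread_thaw(id); /* now make it runnable */
}
#endif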

/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost()  do { } while(0)
#endif
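
/* A usage sketch: bracket a CPU-bound stretch with the boost flag; on
 * targets without HAVE_SCHEDULER_BOOSTCTRL both calls compile to nothing.
 * heavy_work() is a hypothetical stand-in. */
#if 0
    trigger_cpu_boost(); /* request full CPU frequency */
    heavy_work();
    cancel_cpu_boost();  /* let the core scale back down */
#endif
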
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
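
/* A sketch of the usual blocking sequence inside a kernel object: point the
 * current thread's bqp at the object's wait list, block (with timeout), then
 * switch away. The object layout is an assumption; see the real call sites
 * in the kernel sources. */
#if 0
    struct thread_entry *current = thread_self_entry();

    IF_COP( current->obj_cl = &object->cl; )
    current->bqp = &object->queue;     /* list to block on */
    block_thread_w_tmo(current, HZ/2); /* wait at most half a second */
    switch_thread();                   /* yield the CPU until woken */
#endif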

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wake up a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
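
/* A sketch of honouring the returned flags: THREAD_SWITCH hints that a
 * higher-priority thread became runnable, so yield promptly. The object
 * here is hypothetical. */
#if 0
    unsigned int result = thread_queue_wake(&object->queue);
    if (result & THREAD_SWITCH)
        switch_thread();
#endif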

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif

/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif /* THREAD_H */