author:    Michael Sevakis <jethead71@rockbox.org>    2014-08-08 01:39:29 -0400
committer: Michael Sevakis <jethead71@rockbox.org>    2014-08-08 01:59:59 -0400
commit:    981d028c09d10ed867f2f955f58d60b753c64f29 (patch)
tree:      0dab835a14c5cb3e740be4e46be93c42aec76bc5 /firmware/kernel/include
parent:    53d9f2e6a7564e487bdac87f6e28c662e8407458 (diff)
download:  rockbox-981d028c09d10ed867f2f955f58d60b753c64f29.tar.gz
           rockbox-981d028c09d10ed867f2f955f58d60b753c64f29.zip
Do some kernel cleanup
* Seal away private thread and kernel definitions and declarations
into the internal headers in order to better hide internal structure.
* Add a thread-common.c file that keeps shared functions together.
The list functions are left alone for now, since they are about to be
replaced with different ones.
* Some ARM/PP code had to be modified because GCC was complaining about
constant pool distance and I would rather not force a constant-pool dump.
Just bl the cache calls in the startup and exit code and let the linker
insert veneers if it must.
* Clean up redundant #includes in relevant areas and reorganize them.
* Expunge useless and dangerous stuff like remove_thread().
Change-Id: I6e22932fad61a9fac30fd1363c071074ee7ab382
Diffstat (limited to 'firmware/kernel/include')
-rw-r--r-- firmware/kernel/include/corelock.h  |  12
-rw-r--r-- firmware/kernel/include/kernel.h    |  19
-rw-r--r-- firmware/kernel/include/mrsw_lock.h |   2
-rw-r--r-- firmware/kernel/include/mutex.h     |   2
-rw-r--r-- firmware/kernel/include/semaphore.h |   1
-rw-r--r-- firmware/kernel/include/thread.h    | 279
6 files changed, 45 insertions, 270 deletions
diff --git a/firmware/kernel/include/corelock.h b/firmware/kernel/include/corelock.h
index 79302e0e3c..402ae07d19 100644
--- a/firmware/kernel/include/corelock.h
+++ b/firmware/kernel/include/corelock.h
@@ -28,10 +28,14 @@
 #ifndef HAVE_CORELOCK_OBJECT
 
 /* No atomic corelock op needed or just none defined */
-#define corelock_init(cl)
-#define corelock_lock(cl)
-#define corelock_try_lock(cl)
-#define corelock_unlock(cl)
+#define corelock_init(cl) \
+    do {} while (0)
+#define corelock_lock(cl) \
+    do {} while (0)
+#define corelock_try_lock(cl) \
+    do {} while (0)
+#define corelock_unlock(cl) \
+    do {} while (0)
 
 #else
 
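A note on the corelock hunk above: giving the no-op stubs a `do {} while (0)` body keeps call sites shaped exactly like the real locking build (one statement, mandatory semicolon) and avoids empty-body warnings. A minimal standalone sketch, not from the Rockbox tree, with made-up macro names:

```c
/* Build with: gcc -Wall -Wextra noop_macro.c
 * Illustrative only; these macros mimic the corelock.h stubs. */
#include <stdio.h>

/* Old style: expands to nothing, so the guarded call below collapses to
 * "if (...) ;" and -Wextra flags an empty if body. */
#define corelock_lock_old(cl)

/* New style: still a no-op, but it is exactly one statement and demands a
 * trailing semicolon, just like a real implementation would. */
#define corelock_lock_new(cl) \
    do {} while (0)

int main(void)
{
    int cl = 0; /* stand-in for a corelock object */

    if (cl == 0)
        corelock_lock_new(&cl);  /* parses as a single, well-formed statement */

    if (cl == 0)
        corelock_lock_old(&cl);  /* becomes "if (cl == 0) ;" -- warning bait */

    puts("done");
    return 0;
}
```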
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
index d2ffffcda9..fc6dfca8c3 100644
--- a/firmware/kernel/include/kernel.h
+++ b/firmware/kernel/include/kernel.h
@@ -48,23 +48,4 @@
 #define TIMEOUT_BLOCK -1
 #define TIMEOUT_NOBLOCK 0
 
-static inline void kernel_init(void)
-{
-    /* Init the threading API */
-    init_threads();
-
-    /* Other processors will not reach this point in a multicore build.
-     * In a single-core build with multiple cores they fall-through and
-     * sleep in cop_main without returning. */
-    if (CURRENT_CORE == CPU)
-    {
-        init_queues();
-        init_tick();
-#ifdef KDEV_INIT
-        kernel_device_init();
-#endif
-    }
-}
-
-
 #endif /* KERNEL_H */
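The inline kernel_init() removed above is exactly the kind of thing the commit message means by sealing init-time machinery into internal headers: only startup code needs it, so it leaves the public kernel.h. This diffstat is limited to firmware/kernel/include, so its new home is not visible here; the sketch below only illustrates the public/internal split, and the kernel-internal.h name and the stripped-down prototypes are assumptions:

```c
/* kernel.h -- public surface: timeout constants and the object APIs */
#ifndef KERNEL_H
#define KERNEL_H

#define TIMEOUT_BLOCK   -1
#define TIMEOUT_NOBLOCK  0

#endif /* KERNEL_H */

/* kernel-internal.h (hypothetical name) -- bring-up machinery that only the
 * kernel itself and target startup code should ever include. */
#ifndef KERNEL_INTERNAL_H
#define KERNEL_INTERNAL_H

void init_threads(void);
void init_queues(void);
void init_tick(void);

static inline void kernel_init(void)
{
    init_threads();   /* threading first; everything else runs on it */
    init_queues();
    init_tick();
}

#endif /* KERNEL_INTERNAL_H */
```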
diff --git a/firmware/kernel/include/mrsw_lock.h b/firmware/kernel/include/mrsw_lock.h
index fbfe1d405d..d919f7be26 100644
--- a/firmware/kernel/include/mrsw_lock.h
+++ b/firmware/kernel/include/mrsw_lock.h
@@ -21,6 +21,8 @@
 #ifndef MRSW_LOCK_H
 #define MRSW_LOCK_H
 
+#include "thread.h"
+
 /* Multi-reader, single-writer object that allows mutltiple readers or a
  * single writer thread access to a critical section.
  *
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
index 02b85f331f..72736ec8fd 100644
--- a/firmware/kernel/include/mutex.h
+++ b/firmware/kernel/include/mutex.h
@@ -22,8 +22,6 @@
 #ifndef MUTEX_H
 #define MUTEX_H
 
-#include <stdbool.h>
-#include "config.h"
 #include "thread.h"
 
 struct mutex
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h
index 40e60bb88d..16095d9c2d 100644
--- a/firmware/kernel/include/semaphore.h
+++ b/firmware/kernel/include/semaphore.h
@@ -22,7 +22,6 @@
 #ifndef SEMAPHORE_H
 #define SEMAPHORE_H
 
-#include "config.h"
 #include "thread.h"
 
 struct semaphore
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
index e10b4e21b4..5a8bff0107 100644
--- a/firmware/kernel/include/thread.h
+++ b/firmware/kernel/include/thread.h
@@ -18,17 +18,16 @@
  * KIND, either express or implied.
  *
  ****************************************************************************/
-
 #ifndef THREAD_H
 #define THREAD_H
 
-#include "config.h"
 #include <inttypes.h>
 #include <stddef.h>
 #include <stdbool.h>
+#include "config.h"
 #include "gcc_extensions.h"
-#include "corelock.h"
 #include "bitarray.h"
+#include "corelock.h"
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than lower priority threads
@@ -65,7 +64,6 @@
 #define IO_PRIORITY_IMMEDIATE 0
 #define IO_PRIORITY_BACKGROUND 32
 
-
 #if CONFIG_CODEC == SWCODEC
 # ifdef HAVE_HARDWARE_CLICK
 # define BASETHREADS 17
@@ -85,6 +83,8 @@
 BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS)
 BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
 
+struct thread_entry;
+
 /*
  * We need more stack when we run under a host
  * maybe more expensive C lib functions?
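The `struct thread_entry;` forward declaration added in this hunk is what lets the public header keep passing thread pointers around (in struct blocker and the prototypes below) now that the full definition is private. A small standalone illustration of why an incomplete type is enough for that:

```c
#include <stddef.h>
#include <stdio.h>

/* Forward declaration: the type exists, but its layout is hidden from us. */
struct thread_entry;

/* Holding pointers to an incomplete type is fine... */
struct blocker_like
{
    struct thread_entry *thread; /* OK: a pointer's size is always known */
};

int main(void)
{
    struct blocker_like b = { NULL };

    /* ...but sizeof(struct thread_entry) or b.thread->anything would be a
     * compile error here, which is exactly the encapsulation being sought:
     * only the internal headers that define the struct can poke inside it. */
    printf("pointer size: %zu\n", sizeof b.thread);
    return 0;
}
```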
@@ -92,53 +92,22 @@ BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
  * simulator (possibly) doesn't simulate stack usage anyway but well ... */
 
 #if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
-struct regs
-{
-    void *t;             /* OS thread */
-    void *told;          /* Last thread in slot (explained in thead-sdl.c) */
-    void *s;             /* Semaphore for blocking and wakeup */
-    void (*start)(void); /* Start function */
-};
-
 #define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
 #else
 #include "asm/thread.h"
 #endif /* HAVE_SDL_THREADS */
 
-/* NOTE: The use of the word "queue" may also refer to a linked list of
-   threads being maintained that are normally dealt with in FIFO order
-   and not necessarily kernel event_queue */
-enum
-{
-    /* States without a timeout must be first */
-    STATE_KILLED = 0,    /* Thread is killed (default) */
-    STATE_RUNNING,       /* Thread is currently running */
-    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
-    /* These states involve adding the thread to the tmo list */
-    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
-    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
-    /* Miscellaneous states */
-    STATE_FROZEN,        /* Thread is suspended and will not run until
-                            thread_thaw is called with its ID */
-    THREAD_NUM_STATES,
-    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
-};
+extern void yield(void);
+extern unsigned sleep(unsigned ticks);
 
-#if NUM_CORES > 1
-/* Pointer value for name field to indicate thread is being killed. Using
- * an alternate STATE_* won't work since that would interfere with operation
- * while the thread is still running. */
-#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
+#else
+#define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
 #endif
 
-/* Link information for lists thread is in */
-struct thread_entry; /* forward */
-struct thread_list
-{
-    struct thread_entry *prev; /* Previous thread in a list */
-    struct thread_entry *next; /* Next thread in a list */
-};
-
 /* Basic structure describing the owner of an object */
 struct blocker
 {
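The IF_PRIO()/IFN_PRIO() helpers relocated in this hunk splice a parameter or statement in or out depending on HAVE_PRIORITY_SCHEDULING; the old wakeup_thread_() prototype removed further down shows the parameter form. A self-contained sketch, with made-up function names and the config macro force-defined for illustration:

```c
/* Sketch of how IF_PRIO()/IFN_PRIO() get used. Everything below is
 * illustrative; only the two macro pairs come from thread.h. */
#include <stdio.h>

#define HAVE_PRIORITY_SCHEDULING /* pretend this target has priorities */

#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif

/* With priorities this is (const char *name, int prio);
 * without, it is just (const char *name). */
static void start_worker(const char *name IF_PRIO(, int prio))
{
    printf("starting %s", name);
    IF_PRIO(printf(" at priority %d", prio);)
    IFN_PRIO(printf(" (no priority scheduling)");)
    printf("\n");
}

int main(void)
{
    start_worker("codec" IF_PRIO(, 5));
    return 0;
}
```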
@@ -163,157 +132,9 @@ struct blocker_splay
 #endif /* HAVE_PRIORITY_SCHEDULING */
 };
 
-#ifdef HAVE_PRIORITY_SCHEDULING
-
-/* Quick-disinherit of priority elevation. Must be a running thread. */
-void priority_disinherit(struct thread_entry *thread, struct blocker *bl);
-
-struct priority_distribution
-{
-    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
-    priobit_t mask; /* Bitmask of hist entries that are not zero */
-};
-
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
-/* Information kept in each thread slot
- * members are arranged according to size - largest first - in order
- * to ensure both alignment and packing at the same time.
- */
-struct thread_entry
-{
-    struct regs context; /* Register context at switch -
-                            _must_ be first member */
-    uintptr_t *stack; /* Pointer to top of stack */
-    const char *name; /* Thread name */
-    long tmo_tick; /* Tick when thread should be woken from
-                      timeout -
-                      states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_list l; /* Links for blocked/waking/running -
-                             circular linkage in both directions */
-    struct thread_list tmo; /* Links for timeout list -
-                               Circular in reverse direction, NULL-terminated in
-                               forward direction -
-                               states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_entry **bqp; /* Pointer to list variable in kernel
-                                  object where thread is blocked - used
-                                  for implicit unblock and explicit wake
-                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct corelock *obj_cl; /* Object corelock where thead is blocked -
-                                states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-    struct corelock waiter_cl; /* Corelock for thread_wait */
-    struct corelock slot_cl; /* Corelock to lock thread slot */
-    unsigned char core; /* The core to which thread belongs */
-#endif
-    struct thread_entry *queue; /* List of threads waiting for thread to be
-                                   removed */
-#ifdef HAVE_WAKEUP_EXT_CB
-    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
-                           performs special steps needed when being
-                           forced off of an object's wait queue that
-                           go beyond the standard wait queue removal
-                           and priority disinheritance */
-    /* Only enabled when using queue_send for now */
-#endif
-#if defined(HAVE_SEMAPHORE_OBJECTS) || \
-    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
-    NUM_CORES > 1
-    volatile intptr_t retval; /* Return value from a blocked operation/
-                                 misc. use */
-#endif
-    uint32_t id; /* Current slot id */
-    int __errno; /* Thread error number (errno tls) */
-#ifdef HAVE_PRIORITY_SCHEDULING
-    /* Priority summary of owned objects that support inheritance */
-    struct blocker *blocker; /* Pointer to blocker when this thread is blocked
-                                on an object that supports PIP -
-                                states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-    struct priority_distribution pdist; /* Priority summary of owned objects
-                                           that have blocked threads and thread's own
-                                           base priority */
-    int skip_count; /* Number of times skipped if higher priority
-                       thread was running */
-    unsigned char base_priority; /* Base priority (set explicitly during
-                                    creation or thread_set_priority) */
-    unsigned char priority; /* Scheduled priority (higher of base or
-                               all threads blocked by this one) */
-#endif
-    unsigned short stack_size; /* Size of stack in bytes */
-    unsigned char state; /* Thread slot state (STATE_*) */
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-    unsigned char cpu_boost; /* CPU frequency boost flag */
-#endif
-#ifdef HAVE_IO_PRIORITY
-    unsigned char io_priority;
-#endif
-};
-
-/*** Macros for internal use ***/
-/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
-#define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK 0xffffff00
-#define THREAD_ID_SLOT_MASK 0x000000ff
-#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
-#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK)
-
-#ifdef HAVE_CORELOCK_OBJECT
-/* Operations to be performed just before stopping a thread and starting
-   a new one if specified before calling switch_thread */
-enum
-{
-    TBOP_CLEAR = 0, /* No operation to do */
-    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
-    TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
-};
-
-struct thread_blk_ops
-{
-    struct corelock *cl_p; /* pointer to corelock */
-    unsigned char flags; /* TBOP_* flags */
-};
-#endif /* NUM_CORES > 1 */
-
-/* Information kept for each core
- * Members are arranged for the same reason as in thread_entry
- */
-struct core_entry
-{
-    /* "Active" lists - core is constantly active on these and are never
-       locked and interrupts do not access them */
-    struct thread_entry *running; /* threads that are running (RTR) */
-    struct thread_entry *timeout; /* threads that are on a timeout before
-                                     running again */
-    struct thread_entry *block_task; /* Task going off running list */
-#ifdef HAVE_PRIORITY_SCHEDULING
-    struct priority_distribution rtr; /* Summary of running and ready-to-run
-                                         threads */
-#endif
-    long next_tmo_check; /* soonest time to check tmo threads */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct thread_blk_ops blk_ops; /* operations to perform when
-                                      blocking a thread */
-    struct corelock rtr_cl; /* Lock for rtr list */
-#endif /* NUM_CORES */
-};
-
-extern void yield(void);
-extern unsigned sleep(unsigned ticks);
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-#define IF_PRIO(...) __VA_ARGS__
-#define IFN_PRIO(...)
-#else
-#define IF_PRIO(...)
-#define IFN_PRIO(...) __VA_ARGS__
-#endif
-
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));
 
-/* Initialize the scheduler */
-void init_threads(void) INIT_ATTR;
-
 /* Allocate a thread in the scheduler */
 #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
 unsigned int create_thread(void (*function)(void),
@@ -330,59 +151,17 @@ void cancel_cpu_boost(void);
 #define trigger_cpu_boost() do { } while(0)
 #define cancel_cpu_boost() do { } while(0)
 #endif
-/* Return thread entry from id */
-struct thread_entry *thread_id_entry(unsigned int thread_id);
-/* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN).
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
  * Has no effect on a thread not frozen. */
 void thread_thaw(unsigned int thread_id);
 /* Wait for a thread to exit */
 void thread_wait(unsigned int thread_id);
 /* Exit the current thread */
 void thread_exit(void) NORETURN_ATTR;
-#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
-#define ALLOW_REMOVE_THREAD
-/* Remove a thread from the scheduler */
-void remove_thread(unsigned int thread_id);
-#endif
-
-/* Switch to next runnable thread */
-void switch_thread(void);
-/* Blocks a thread for at least the specified number of ticks (0 = wait until
- * next tick) */
-void sleep_thread(int ticks);
-/* Blocks the current thread on a thread queue (< 0 == infinite) */
-void block_thread(struct thread_entry *current, int timeout);
-
-/* Return bit flags for thread wakeup */
-#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
-#define THREAD_OK 0x1 /* A thread was woken up */
-#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
-                             higher priority than current were woken) */
-
-/* A convenience function for waking an entire queue of threads. */
-unsigned int thread_queue_wake(struct thread_entry **list);
-
-/* Wakeup a thread at the head of a list */
-enum wakeup_thread_protocol
-{
-    WAKEUP_DEFAULT,
-    WAKEUP_TRANSFER,
-    WAKEUP_RELEASE,
-    WAKEUP_TRANSFER_MULTI,
-};
-
-unsigned int wakeup_thread_(struct thread_entry **list
-                            IF_PRIO(, enum wakeup_thread_protocol proto));
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-#define wakeup_thread(list, proto) \
-    wakeup_thread_((list), (proto))
-
 int thread_set_priority(unsigned int thread_id, int priority);
 int thread_get_priority(unsigned int thread_id);
-#else /* !HAVE_PRIORITY_SCHEDULING */
-#define wakeup_thread(list, proto...) \
-    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */
 
 #ifdef HAVE_IO_PRIORITY
@@ -396,19 +175,31 @@ unsigned int switch_core(unsigned int new_core);
 /* Return the id of the calling thread. */
 unsigned int thread_self(void);
 
-/* Return the thread_entry for the calling thread.
- * INTERNAL: Intended for use by kernel and not for programs. */
-struct thread_entry* thread_self_entry(void);
-
 /* Debugging info - only! */
-int thread_stack_usage(const struct thread_entry *thread);
 #if NUM_CORES > 1
-int idle_stack_usage(unsigned int core);
+struct core_debug_info
+{
+    unsigned int idle_stack_usage;
+};
+
+int core_get_debug_info(unsigned int core, struct core_debug_info *infop);
+
+#endif /* NUM_CORES */
+
+struct thread_debug_info
+{
+    char statusstr[4];
+    char name[32];
+    unsigned int stack_usage;
+#if NUM_CORES > 1
+    unsigned int core;
 #endif
-void thread_get_name(char *buffer, int size,
-                     struct thread_entry *thread);
-#ifdef RB_PROFILE
-void profile_thread(void);
+#ifdef HAVE_PRIORITY_SCHEDULING
+    int base_priority;
+    int current_priority;
 #endif
+};
+int thread_get_debug_info(unsigned int thread_id,
+                          struct thread_debug_info *infop);
 
 #endif /* THREAD_H */
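The new debugging interface at the end replaces the removed thread_stack_usage()/idle_stack_usage()/thread_get_name() calls with a single snapshot per thread. A hedged usage sketch, assuming the Rockbox build environment and assuming (the header only gives the prototype) that a positive return from thread_get_debug_info() means the id refers to a live slot:

```c
/* Usage sketch for the new snapshot API; the >0 "live thread" return
 * convention and the formatting are assumptions, not from this diff. */
#include <stdio.h>
#include "thread.h"

void print_own_thread_info(void)
{
    struct thread_debug_info info;
    unsigned int id = thread_self();

    if (thread_get_debug_info(id, &info) <= 0)
        return; /* empty slot or stale id */

    printf("%s [%s] stack: %u\n", info.name, info.statusstr, info.stack_usage);
#if NUM_CORES > 1
    printf("  core: %u\n", info.core);
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    printf("  prio: %d (base %d)\n", info.current_priority, info.base_priority);
#endif
}
```

core_get_debug_info() follows the same pattern for the per-core idle-stack usage on multi-core targets.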