Diffstat (limited to 'firmware')
 firmware/SOURCES                        |   1
 firmware/asm/arm/thread.c               |   4
 firmware/kernel/include/corelock.h      |  12
 firmware/kernel/include/kernel.h        |  19
 firmware/kernel/include/mrsw_lock.h     |   2
 firmware/kernel/include/mutex.h         |   2
 firmware/kernel/include/semaphore.h     |   1
 firmware/kernel/include/thread.h        | 279
 firmware/kernel/kernel-internal.h       |  22
 firmware/kernel/mrsw_lock.c             |   8
 firmware/kernel/mutex.c                 |   7
 firmware/kernel/pthread/thread.c        |  17
 firmware/kernel/queue.c                 |   8
 firmware/kernel/semaphore.c             |  12
 firmware/kernel/thread-common.c         | 152
 firmware/kernel/thread-internal.h       | 237
 firmware/kernel/thread.c                | 400
 firmware/libc/errno.c                   |   2
 firmware/target/arm/pp/thread-pp.c      |  10
 firmware/target/hosted/sdl/thread-sdl.c |  80
 20 files changed, 347 insertions(+), 928 deletions(-)
diff --git a/firmware/SOURCES b/firmware/SOURCES
index 584254a666..a9f9ce5780 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -1838,6 +1838,7 @@ target/hosted/sdl/thread-sdl.c
 #else
 kernel/thread.c
 #endif
+kernel/thread-common.c
 kernel/tick.c
 #ifdef INCLUDE_TIMEOUT_API
 kernel/timeout.c
diff --git a/firmware/asm/arm/thread.c b/firmware/asm/arm/thread.c
index fd443f2873..cf685526e3 100644
--- a/firmware/asm/arm/thread.c
+++ b/firmware/asm/arm/thread.c
@@ -34,9 +34,7 @@ static void __attribute__((naked)) USED_ATTR start_thread(void)
     "mov r1, #0 \n" /* Mark thread as running */
     "str r1, [r0, #40] \n"
 #if NUM_CORES > 1
-    "ldr r0, =commit_discard_idcache \n" /* Invalidate this core's cache. */
-    "mov lr, pc \n" /* This could be the first entry into */
-    "bx r0 \n" /* plugin or codec code for this core. */
+    "bl commit_discard_idcache \n" /* Invalidate this core's cache. */
 #endif
     "mov lr, pc \n" /* Call thread function */
     "bx r4 \n"
diff --git a/firmware/kernel/include/corelock.h b/firmware/kernel/include/corelock.h
index 79302e0e3c..402ae07d19 100644
--- a/firmware/kernel/include/corelock.h
+++ b/firmware/kernel/include/corelock.h
@@ -28,10 +28,14 @@
 #ifndef HAVE_CORELOCK_OBJECT

 /* No atomic corelock op needed or just none defined */
-#define corelock_init(cl)
-#define corelock_lock(cl)
-#define corelock_try_lock(cl)
-#define corelock_unlock(cl)
+#define corelock_init(cl) \
+    do {} while (0)
+#define corelock_lock(cl) \
+    do {} while (0)
+#define corelock_try_lock(cl) \
+    do {} while (0)
+#define corelock_unlock(cl) \
+    do {} while (0)

 #else

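The corelock.h change swaps truly empty no-op macros for `do {} while (0)` bodies. With an empty expansion, a forgotten semicolon after a call can silently splice the following statement into an `if` body; the do/while form is a single statement that demands its terminating semicolon in every configuration. A minimal sketch with a hypothetical caller:

/* Hypothetical call site: parses the same whether or not
 * HAVE_CORELOCK_OBJECT is defined. If corelock_lock() expanded to
 * nothing, dropping the ';' after it would still compile and quietly
 * make the assignment below the body of the if. */
static void hypothetical_caller(struct corelock *cl, int contended,
                                volatile int *shared_flag)
{
    if (contended)
        corelock_lock(cl);  /* omitting this ';' is now a compile error */
    *shared_flag = 1;
    corelock_unlock(cl);
}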
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
index d2ffffcda9..fc6dfca8c3 100644
--- a/firmware/kernel/include/kernel.h
+++ b/firmware/kernel/include/kernel.h
@@ -48,23 +48,4 @@
 #define TIMEOUT_BLOCK   -1
 #define TIMEOUT_NOBLOCK  0

-static inline void kernel_init(void)
-{
-    /* Init the threading API */
-    init_threads();
-
-    /* Other processors will not reach this point in a multicore build.
-     * In a single-core build with multiple cores they fall-through and
-     * sleep in cop_main without returning. */
-    if (CURRENT_CORE == CPU)
-    {
-        init_queues();
-        init_tick();
-#ifdef KDEV_INIT
-        kernel_device_init();
-#endif
-    }
-}
-
-
 #endif /* KERNEL_H */
diff --git a/firmware/kernel/include/mrsw_lock.h b/firmware/kernel/include/mrsw_lock.h
index fbfe1d405d..d919f7be26 100644
--- a/firmware/kernel/include/mrsw_lock.h
+++ b/firmware/kernel/include/mrsw_lock.h
@@ -21,6 +21,8 @@
 #ifndef MRSW_LOCK_H
 #define MRSW_LOCK_H

+#include "thread.h"
+
 /* Multi-reader, single-writer object that allows mutltiple readers or a
  * single writer thread access to a critical section.
  *
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
index 02b85f331f..72736ec8fd 100644
--- a/firmware/kernel/include/mutex.h
+++ b/firmware/kernel/include/mutex.h
@@ -22,8 +22,6 @@
 #ifndef MUTEX_H
 #define MUTEX_H

-#include <stdbool.h>
-#include "config.h"
 #include "thread.h"

 struct mutex
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h
index 40e60bb88d..16095d9c2d 100644
--- a/firmware/kernel/include/semaphore.h
+++ b/firmware/kernel/include/semaphore.h
@@ -22,7 +22,6 @@
 #ifndef SEMAPHORE_H
 #define SEMAPHORE_H

-#include "config.h"
 #include "thread.h"

 struct semaphore
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
index e10b4e21b4..5a8bff0107 100644
--- a/firmware/kernel/include/thread.h
+++ b/firmware/kernel/include/thread.h
@@ -18,17 +18,16 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/
-
 #ifndef THREAD_H
 #define THREAD_H

-#include "config.h"
 #include <inttypes.h>
 #include <stddef.h>
 #include <stdbool.h>
+#include "config.h"
 #include "gcc_extensions.h"
-#include "corelock.h"
 #include "bitarray.h"
+#include "corelock.h"

 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than lower priority threads
@@ -65,7 +64,6 @@
 #define IO_PRIORITY_IMMEDIATE  0
 #define IO_PRIORITY_BACKGROUND 32

-
 #if CONFIG_CODEC == SWCODEC
 # ifdef HAVE_HARDWARE_CLICK
 # define BASETHREADS 17
@@ -85,6 +83,8 @@
 BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS)
 BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)

+struct thread_entry;
+
 /*
  * We need more stack when we run under a host
  * maybe more expensive C lib functions?
@@ -92,53 +92,22 @@ BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
  * simulator (possibly) doesn't simulate stack usage anyway but well ... */

 #if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
-struct regs
-{
-    void *t;             /* OS thread */
-    void *told;          /* Last thread in slot (explained in thead-sdl.c) */
-    void *s;             /* Semaphore for blocking and wakeup */
-    void (*start)(void); /* Start function */
-};
-
 #define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
 #else
 #include "asm/thread.h"
 #endif /* HAVE_SDL_THREADS */

-/* NOTE: The use of the word "queue" may also refer to a linked list of
-   threads being maintained that are normally dealt with in FIFO order
-   and not necessarily kernel event_queue */
-enum
-{
-    /* States without a timeout must be first */
-    STATE_KILLED = 0,    /* Thread is killed (default) */
-    STATE_RUNNING,       /* Thread is currently running */
-    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
-    /* These states involve adding the thread to the tmo list */
-    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
-    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
-    /* Miscellaneous states */
-    STATE_FROZEN,        /* Thread is suspended and will not run until
-                            thread_thaw is called with its ID */
-    THREAD_NUM_STATES,
-    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
-};
+extern void yield(void);
+extern unsigned sleep(unsigned ticks);

-#if NUM_CORES > 1
-/* Pointer value for name field to indicate thread is being killed. Using
- * an alternate STATE_* won't work since that would interfere with operation
- * while the thread is still running. */
-#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
+#else
+#define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
 #endif

-/* Link information for lists thread is in */
-struct thread_entry; /* forward */
-struct thread_list
-{
-    struct thread_entry *prev; /* Previous thread in a list */
-    struct thread_entry *next; /* Next thread in a list */
-};
-
 /* Basic structure describing the owner of an object */
 struct blocker
 {
@@ -163,157 +132,9 @@ struct blocker_splay
 #endif /* HAVE_PRIORITY_SCHEDULING */
 };

-#ifdef HAVE_PRIORITY_SCHEDULING
-
-/* Quick-disinherit of priority elevation. Must be a running thread. */
-void priority_disinherit(struct thread_entry *thread, struct blocker *bl);
-
-struct priority_distribution
-{
-    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
-    priobit_t mask;               /* Bitmask of hist entries that are not zero */
-};
-
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
-/* Information kept in each thread slot
- * members are arranged according to size - largest first - in order
- * to ensure both alignment and packing at the same time.
- */
-struct thread_entry
-{
-    struct regs context;        /* Register context at switch -
-                                   _must_ be first member */
-    uintptr_t *stack;           /* Pointer to top of stack */
-    const char *name;           /* Thread name */
-    long tmo_tick;              /* Tick when thread should be woken from
-                                   timeout -
-                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_list l;       /* Links for blocked/waking/running -
-                                   circular linkage in both directions */
-    struct thread_list tmo;     /* Links for timeout list -
-                                   Circular in reverse direction, NULL-terminated in
-                                   forward direction -
-                                   states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
-    struct thread_entry **bqp;  /* Pointer to list variable in kernel
-                                   object where thread is blocked - used
-                                   for implicit unblock and explicit wake
-                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct corelock *obj_cl;    /* Object corelock where thead is blocked -
-                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-    struct corelock waiter_cl;  /* Corelock for thread_wait */
-    struct corelock slot_cl;    /* Corelock to lock thread slot */
-    unsigned char core;         /* The core to which thread belongs */
-#endif
-    struct thread_entry *queue; /* List of threads waiting for thread to be
-                                   removed */
-#ifdef HAVE_WAKEUP_EXT_CB
-    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
-                                   performs special steps needed when being
-                                   forced off of an object's wait queue that
-                                   go beyond the standard wait queue removal
-                                   and priority disinheritance */
-    /* Only enabled when using queue_send for now */
-#endif
-#if defined(HAVE_SEMAPHORE_OBJECTS) || \
-    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
-    NUM_CORES > 1
-    volatile intptr_t retval;   /* Return value from a blocked operation/
-                                   misc. use */
-#endif
-    uint32_t id;                /* Current slot id */
-    int __errno;                /* Thread error number (errno tls) */
-#ifdef HAVE_PRIORITY_SCHEDULING
-    /* Priority summary of owned objects that support inheritance */
-    struct blocker *blocker;    /* Pointer to blocker when this thread is blocked
-                                   on an object that supports PIP -
-                                   states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
-    struct priority_distribution pdist; /* Priority summary of owned objects
-                                   that have blocked threads and thread's own
-                                   base priority */
-    int skip_count;             /* Number of times skipped if higher priority
-                                   thread was running */
-    unsigned char base_priority; /* Base priority (set explicitly during
-                                    creation or thread_set_priority) */
-    unsigned char priority;     /* Scheduled priority (higher of base or
-                                   all threads blocked by this one) */
-#endif
-    unsigned short stack_size;  /* Size of stack in bytes */
-    unsigned char state;        /* Thread slot state (STATE_*) */
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-    unsigned char cpu_boost;    /* CPU frequency boost flag */
-#endif
-#ifdef HAVE_IO_PRIORITY
-    unsigned char io_priority;
-#endif
-};
-
-/*** Macros for internal use ***/
-/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
-#define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK  0xffffff00
-#define THREAD_ID_SLOT_MASK     0x000000ff
-#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
-#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)
-
-#ifdef HAVE_CORELOCK_OBJECT
-/* Operations to be performed just before stopping a thread and starting
-   a new one if specified before calling switch_thread */
-enum
-{
-    TBOP_CLEAR = 0,       /* No operation to do */
-    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
-    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
-};
-
-struct thread_blk_ops
-{
-    struct corelock *cl_p; /* pointer to corelock */
-    unsigned char flags;   /* TBOP_* flags */
-};
-#endif /* NUM_CORES > 1 */
-
-/* Information kept for each core
- * Members are arranged for the same reason as in thread_entry
- */
-struct core_entry
-{
-    /* "Active" lists - core is constantly active on these and are never
-       locked and interrupts do not access them */
-    struct thread_entry *running;  /* threads that are running (RTR) */
-    struct thread_entry *timeout;  /* threads that are on a timeout before
-                                      running again */
-    struct thread_entry *block_task; /* Task going off running list */
-#ifdef HAVE_PRIORITY_SCHEDULING
-    struct priority_distribution rtr; /* Summary of running and ready-to-run
-                                         threads */
-#endif
-    long next_tmo_check;           /* soonest time to check tmo threads */
-#ifdef HAVE_CORELOCK_OBJECT
-    struct thread_blk_ops blk_ops; /* operations to perform when
-                                      blocking a thread */
-    struct corelock rtr_cl;        /* Lock for rtr list */
-#endif /* NUM_CORES */
-};
-
-extern void yield(void);
-extern unsigned sleep(unsigned ticks);
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-#define IF_PRIO(...) __VA_ARGS__
-#define IFN_PRIO(...)
-#else
-#define IF_PRIO(...)
-#define IFN_PRIO(...) __VA_ARGS__
-#endif
-
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));

-/* Initialize the scheduler */
-void init_threads(void) INIT_ATTR;
-
 /* Allocate a thread in the scheduler */
 #define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
 unsigned int create_thread(void (*function)(void),
@@ -330,59 +151,17 @@ void cancel_cpu_boost(void);
 #define trigger_cpu_boost() do { } while(0)
 #define cancel_cpu_boost() do { } while(0)
 #endif
-/* Return thread entry from id */
-struct thread_entry *thread_id_entry(unsigned int thread_id);
-/* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN).
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
  * Has no effect on a thread not frozen. */
 void thread_thaw(unsigned int thread_id);
 /* Wait for a thread to exit */
 void thread_wait(unsigned int thread_id);
 /* Exit the current thread */
 void thread_exit(void) NORETURN_ATTR;
-#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
-#define ALLOW_REMOVE_THREAD
-/* Remove a thread from the scheduler */
-void remove_thread(unsigned int thread_id);
-#endif
-
-/* Switch to next runnable thread */
-void switch_thread(void);
-/* Blocks a thread for at least the specified number of ticks (0 = wait until
- * next tick) */
-void sleep_thread(int ticks);
-/* Blocks the current thread on a thread queue (< 0 == infinite) */
-void block_thread(struct thread_entry *current, int timeout);
-
-/* Return bit flags for thread wakeup */
-#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
-#define THREAD_OK     0x1 /* A thread was woken up */
-#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
-                             higher priority than current were woken) */
-
-/* A convenience function for waking an entire queue of threads. */
-unsigned int thread_queue_wake(struct thread_entry **list);
-
-/* Wakeup a thread at the head of a list */
-enum wakeup_thread_protocol
-{
-    WAKEUP_DEFAULT,
-    WAKEUP_TRANSFER,
-    WAKEUP_RELEASE,
-    WAKEUP_TRANSFER_MULTI,
-};
-
-unsigned int wakeup_thread_(struct thread_entry **list
-                            IF_PRIO(, enum wakeup_thread_protocol proto));

 #ifdef HAVE_PRIORITY_SCHEDULING
-#define wakeup_thread(list, proto) \
-    wakeup_thread_((list), (proto))
-
 int thread_set_priority(unsigned int thread_id, int priority);
 int thread_get_priority(unsigned int thread_id);
-#else /* !HAVE_PRIORITY_SCHEDULING */
-#define wakeup_thread(list, proto...) \
-    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */

 #ifdef HAVE_IO_PRIORITY
@@ -396,19 +175,31 @@ unsigned int switch_core(unsigned int new_core);
 /* Return the id of the calling thread. */
 unsigned int thread_self(void);

-/* Return the thread_entry for the calling thread.
- * INTERNAL: Intended for use by kernel and not for programs. */
-struct thread_entry* thread_self_entry(void);
-
 /* Debugging info - only! */
-int thread_stack_usage(const struct thread_entry *thread);
 #if NUM_CORES > 1
-int idle_stack_usage(unsigned int core);
+struct core_debug_info
+{
+    unsigned int idle_stack_usage;
+};
+
+int core_get_debug_info(unsigned int core, struct core_debug_info *infop);
+
+#endif /* NUM_CORES */
+
+struct thread_debug_info
+{
+    char statusstr[4];
+    char name[32];
+    unsigned int stack_usage;
+#if NUM_CORES > 1
+    unsigned int core;
 #endif
-void thread_get_name(char *buffer, int size,
-                     struct thread_entry *thread);
-#ifdef RB_PROFILE
-void profile_thread(void);
+#ifdef HAVE_PRIORITY_SCHEDULING
+    int base_priority;
+    int current_priority;
 #endif
+};
+int thread_get_debug_info(unsigned int thread_id,
+                          struct thread_debug_info *infop);

 #endif /* THREAD_H */
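thread.h's debugging interface shrinks to two snapshot calls: per-thread state is copied out through struct thread_debug_info instead of the old per-item accessors. A hedged usage sketch for a debug screen (the loop and DEBUGF output are illustrative, not the actual Rockbox debug-menu code):

#include "thread.h"

/* Walk every slot and print one line per live thread. Passing the bare
 * slot index works because thread_get_debug_info() (see thread-common.c
 * below) only decodes the slot byte of the id; it returns <= 0 for an
 * empty/killed slot or an out-of-range id. */
static void illustrative_thread_dump(void)
{
    for (unsigned int slot = 0; slot < MAXTHREADS; slot++)
    {
        struct thread_debug_info info;
        if (thread_get_debug_info(slot, &info) <= 0)
            continue;
        DEBUGF("%s %s stack:%u%%\n",
               info.statusstr, info.name, info.stack_usage);
    }
}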
diff --git a/firmware/kernel/kernel-internal.h b/firmware/kernel/kernel-internal.h
index 51c589ac8f..8f7e3e28cb 100644
--- a/firmware/kernel/kernel-internal.h
+++ b/firmware/kernel/kernel-internal.h
@@ -22,8 +22,8 @@
 #ifndef KERNEL_INTERNAL_H
 #define KERNEL_INTERNAL_H

-#include "config.h"
-#include "debug.h"
+#include "thread-internal.h"
+#include "kernel.h"

 /* Make this nonzero to enable more elaborate checks on objects */
 #if defined(DEBUG) || defined(SIMULATOR)
@@ -45,5 +45,23 @@
 #define KERNEL_ASSERT(exp, msg...) ({})
 #endif

+static inline void kernel_init(void)
+{
+    /* Init the threading API */
+    extern void init_threads(void);
+    init_threads();
+
+    /* Other processors will not reach this point in a multicore build.
+     * In a single-core build with multiple cores they fall-through and
+     * sleep in cop_main without returning. */
+    if (CURRENT_CORE == CPU)
+    {
+        init_queues();
+        init_tick();
+#ifdef KDEV_INIT
+        kernel_device_init();
+#endif
+    }
+}

 #endif /* KERNEL_INTERNAL_H */
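kernel_init() moves essentially verbatim from the public kernel.h into this internal header; the visible change is that init_threads() is now declared at the call site, keeping the scheduler-init symbol out of the public API. A sketch of the boot path it implies (illustrative entry point, not the literal firmware main()):

#include "kernel-internal.h"

/* Every core calls kernel_init() and gets its scheduler set up by
 * init_threads(); only the CPU core continues on to queue, tick and
 * device init - on multicore builds the COP sleeps in cop_main instead
 * of returning this far. */
static void illustrative_boot(void)
{
    kernel_init();
    /* ... CPU-side firmware startup continues here ... */
}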
diff --git a/firmware/kernel/mrsw_lock.c b/firmware/kernel/mrsw_lock.c
index 46ab893622..45c8801b74 100644
--- a/firmware/kernel/mrsw_lock.c
+++ b/firmware/kernel/mrsw_lock.c
@@ -18,12 +18,8 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/
-#include <string.h>
-#include "config.h"
-#include "system.h"
-#include "thread.h"
-#include "kernel.h"
 #include "kernel-internal.h"
+#include "mrsw-lock.h"

 #ifdef HAVE_PRIORITY_SCHEDULING

@@ -45,9 +41,7 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
        Therefore, if the queue has threads, then the next after the
        owning readers is a writer and this is not the last reader. */
     if (mrsw->queue)
-    {
         corelock_lock(&mrsw->splay.cl);
-    }

     threadbit_clear_bit(&mrsw->splay.mask, slotnum);

diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index 2e90b0f4b1..e5729dc893 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -23,13 +23,8 @@
 /****************************************************************************
  * Simple mutex functions ;)
  ****************************************************************************/
-
-#include <stdbool.h>
-#include "config.h"
-#include "system.h"
-#include "kernel.h"
-#include "thread-internal.h"
 #include "kernel-internal.h"
+#include "mutex.h"

 /* Initialize a mutex object - call before any use and do not call again once
  * the object is available to other threads */
diff --git a/firmware/kernel/pthread/thread.c b/firmware/kernel/pthread/thread.c
index a80ce876e8..354a946698 100644
--- a/firmware/kernel/pthread/thread.c
+++ b/firmware/kernel/pthread/thread.c
@@ -194,23 +194,6 @@ static void remove_from_list_l(struct thread_entry **list,
     thread->l.next->l.prev = thread->l.prev;
 }

-unsigned int thread_queue_wake(struct thread_entry **list)
-{
-    unsigned int result = THREAD_NONE;
-
-    for (;;)
-    {
-        unsigned int rc = wakeup_thread(list);
-
-        if (rc == THREAD_NONE)
-            break;
-
-        result |= rc;
-    }
-
-    return result;
-}
-
 /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
  * to a corelock instance, and this corelock must be held by the caller */
 void block_thread_switch(struct thread_entry *t, struct corelock *cl)
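The pthread backend's private thread_queue_wake() goes away in favor of the shared implementation added in thread-common.c below, which wakes with wakeup_thread(list, WAKEUP_DEFAULT). The macro pair in thread-internal.h makes that single spelling valid everywhere; condensed from the header diff further down:

/* With priority scheduling the protocol argument is forwarded to
 * wakeup_thread_(); without it, the GNU named-variadic parameter
 * `proto...` swallows the argument entirely, so shared callers never
 * need an #ifdef. */
#ifdef HAVE_PRIORITY_SCHEDULING
#define wakeup_thread(list, proto) wakeup_thread_((list), (proto))
#else
#define wakeup_thread(list, proto...) wakeup_thread_((list));
#endif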
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
index c8beb908b6..0ba7d7e00b 100644
--- a/firmware/kernel/queue.c
+++ b/firmware/kernel/queue.c
@@ -18,16 +18,10 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/
-
 #include <string.h>
-#include "config.h"
-#include "kernel.h"
-#include "system.h"
-#include "queue.h"
-#include "corelock.h"
 #include "kernel-internal.h"
+#include "queue.h"
 #include "general.h"
-#include "panic.h"

 /* This array holds all queues that are initiated. It is used for broadcast. */
 static struct
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c
index b6ce7fd742..1505038fbc 100644
--- a/firmware/kernel/semaphore.c
+++ b/firmware/kernel/semaphore.c
@@ -18,18 +18,8 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/
-
-
-/****************************************************************************
- * Simple mutex functions ;)
- ****************************************************************************/
-
-#include <stdbool.h>
-#include "config.h"
-#include "kernel.h"
-#include "semaphore.h"
 #include "kernel-internal.h"
-#include "thread-internal.h"
+#include "semaphore.h"

 /****************************************************************************
  * Simple semaphore functions ;)
diff --git a/firmware/kernel/thread-common.c b/firmware/kernel/thread-common.c
new file mode 100644
index 0000000000..b8b8ffbd4c
--- /dev/null
+++ b/firmware/kernel/thread-common.c
@@ -0,0 +1,152 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
21 | #include "thread-internal.h" | ||
22 | #include "system.h" | ||
23 | |||
24 | /*--------------------------------------------------------------------------- | ||
25 | * Wakeup an entire queue of threads - returns bitwise-or of return bitmask | ||
26 | * from each operation or THREAD_NONE of nothing was awakened. Object owning | ||
27 | * the queue must be locked first. | ||
28 | * | ||
29 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
30 | *--------------------------------------------------------------------------- | ||
31 | */ | ||
32 | unsigned int thread_queue_wake(struct thread_entry **list) | ||
33 | { | ||
34 | unsigned result = THREAD_NONE; | ||
35 | |||
36 | for (;;) | ||
37 | { | ||
38 | unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); | ||
39 | |||
40 | if (rc == THREAD_NONE) | ||
41 | break; /* No more threads */ | ||
42 | |||
43 | result |= rc; | ||
44 | } | ||
45 | |||
46 | return result; | ||
47 | } | ||
48 | |||
49 | |||
50 | /** Debug screen stuff **/ | ||
51 | |||
52 | /*--------------------------------------------------------------------------- | ||
53 | * returns the stack space used in bytes | ||
54 | *--------------------------------------------------------------------------- | ||
55 | */ | ||
56 | static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) | ||
57 | { | ||
58 | unsigned int usage = 0; | ||
59 | unsigned int stack_words = stack_size / sizeof (uintptr_t); | ||
60 | |||
61 | for (unsigned int i = 0; i < stack_words; i++) | ||
62 | { | ||
63 | if (stackptr[i] != DEADBEEF) | ||
64 | { | ||
65 | usage = (stack_words - i) * 100 / stack_words; | ||
66 | break; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | return usage; | ||
71 | } | ||
72 | |||
73 | #if NUM_CORES > 1 | ||
74 | /*--------------------------------------------------------------------------- | ||
75 | * Returns the maximum percentage of the core's idle stack ever used during | ||
76 | * runtime. | ||
77 | *--------------------------------------------------------------------------- | ||
78 | */ | ||
79 | int core_get_debug_info(unsigned int core, struct core_debug_info *infop) | ||
80 | { | ||
81 | extern uintptr_t * const idle_stacks[NUM_CORES]; | ||
82 | |||
83 | if (core >= NUM_CORES || !infop) | ||
84 | return -1; | ||
85 | |||
86 | infop->idle_stack_usage = stack_usage(idle_stacks[core], IDLE_STACK_SIZE); | ||
87 | return 1; | ||
88 | } | ||
89 | #endif /* NUM_CORES > 1 */ | ||
90 | |||
91 | int thread_get_debug_info(unsigned int thread_id, | ||
92 | struct thread_debug_info *infop) | ||
93 | { | ||
94 | static const char status_chars[THREAD_NUM_STATES+1] = | ||
95 | { | ||
96 | [0 ... THREAD_NUM_STATES] = '?', | ||
97 | [STATE_RUNNING] = 'R', | ||
98 | [STATE_BLOCKED] = 'B', | ||
99 | [STATE_SLEEPING] = 'S', | ||
100 | [STATE_BLOCKED_W_TMO] = 'T', | ||
101 | [STATE_FROZEN] = 'F', | ||
102 | [STATE_KILLED] = 'K', | ||
103 | }; | ||
104 | |||
105 | if (!infop) | ||
106 | return -1; | ||
107 | |||
108 | unsigned int slot = THREAD_ID_SLOT(thread_id); | ||
109 | if (slot >= MAXTHREADS) | ||
110 | return -1; | ||
111 | |||
112 | extern struct thread_entry threads[MAXTHREADS]; | ||
113 | struct thread_entry *thread = &threads[slot]; | ||
114 | |||
115 | int oldlevel = disable_irq_save(); | ||
116 | LOCK_THREAD(thread); | ||
117 | |||
118 | unsigned int state = thread->state; | ||
119 | |||
120 | if (state != STATE_KILLED) | ||
121 | { | ||
122 | const char *name = thread->name; | ||
123 | if (!name) | ||
124 | name = ""; | ||
125 | |||
126 | bool cpu_boost = false; | ||
127 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
128 | cpu_boost = thread->cpu_boost; | ||
129 | #endif | ||
130 | infop->stack_usage = stack_usage(thread->stack, thread->stack_size); | ||
131 | #if NUM_CORES > 1 | ||
132 | infop->core = thread->core; | ||
133 | #endif | ||
134 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
135 | infop->base_priority = thread->base_priority; | ||
136 | infop->current_priority = thread->priority; | ||
137 | #endif | ||
138 | |||
139 | snprintf(infop->statusstr, sizeof (infop->statusstr), "%c%c", | ||
140 | cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '), | ||
141 | status_chars[state]); | ||
142 | |||
143 | const char *fmt = *name ? "%s" : "%s%08lX"; | ||
144 | snprintf(infop->name, sizeof (infop->name), fmt, name, | ||
145 | thread->id); | ||
146 | } | ||
147 | |||
148 | UNLOCK_THREAD(thread); | ||
149 | restore_irq(oldlevel); | ||
150 | |||
151 | return state == STATE_KILLED ? 0 : 1; | ||
152 | } | ||
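stack_usage() above computes a high-water mark: it relies on each stack having been pre-filled with the DEADBEEF pattern (now defined in thread-internal.h) when the thread was created, then scans from the low end for the first overwritten word. A sketch of the fill side it assumes (the actual fill lives in the thread-creation code, which this commit does not touch):

/* Pre-fill a stack with the watermark. Stacks grow downward, so the
 * first word from the bottom that no longer reads DEADBEEF marks the
 * deepest the stack has ever grown; stack_usage() reports that depth
 * as a percentage of the whole stack. */
static void illustrative_stack_fill(uintptr_t *stackptr, size_t stack_size)
{
    size_t stack_words = stack_size / sizeof (uintptr_t);
    for (size_t i = 0; i < stack_words; i++)
        stackptr[i] = DEADBEEF;
}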
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h
index c2acdfbaa9..894bd1fe7c 100644
--- a/firmware/kernel/thread-internal.h
+++ b/firmware/kernel/thread-internal.h
@@ -18,15 +18,13 @@
 * KIND, either express or implied.
 *
 ****************************************************************************/
+#ifndef THREAD_INTERNAL_H
+#define THREAD_INTERNAL_H

-#ifndef THREAD_H
-#define THREAD_H
-
-#include "config.h"
-#include <inttypes.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include "gcc_extensions.h"
+#include "thread.h"
+#include <stdio.h>
+#include "panic.h"
+#include "debug.h"

 /*
  * We need more stack when we run under a host
@@ -48,23 +46,6 @@ struct regs
 #include "asm/thread.h"
 #endif /* HAVE_SDL_THREADS */

-#ifdef CPU_PP
-#ifdef HAVE_CORELOCK_OBJECT
-/* No reliable atomic instruction available - use Peterson's algorithm */
-struct corelock
-{
-    volatile unsigned char myl[NUM_CORES];
-    volatile unsigned char turn;
-} __attribute__((packed));
-
-/* Too big to inline everywhere */
-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
-#endif /* HAVE_CORELOCK_OBJECT */
-#endif /* CPU_PP */
-
 /* NOTE: The use of the word "queue" may also refer to a linked list of
    threads being maintained that are normally dealt with in FIFO order
    and not necessarily kernel event_queue */
@@ -84,58 +65,43 @@ enum
     TIMEOUT_STATE_FIRST = STATE_SLEEPING,
 };

-#if NUM_CORES > 1
-/* Pointer value for name field to indicate thread is being killed. Using
- * an alternate STATE_* won't work since that would interfere with operation
- * while the thread is still running. */
-#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
-#endif
+#ifdef HAVE_PRIORITY_SCHEDULING

-/* Link information for lists thread is in */
-struct thread_entry; /* forward */
-struct thread_list
-{
-    struct thread_entry *prev; /* Previous thread in a list */
-    struct thread_entry *next; /* Next thread in a list */
-};
+/* Quick-disinherit of priority elevation. Must be a running thread. */
+void priority_disinherit(struct thread_entry *thread, struct blocker *bl);

-#ifndef HAVE_CORELOCK_OBJECT
-/* No atomic corelock op needed or just none defined */
-#define corelock_init(cl)
-#define corelock_lock(cl)
-#define corelock_try_lock(cl)
-#define corelock_unlock(cl)
-#endif /* HAVE_CORELOCK_OBJECT */
-
-#ifdef HAVE_PRIORITY_SCHEDULING
-struct blocker
+struct priority_distribution
 {
-    struct thread_entry * volatile thread; /* thread blocking other threads
-                                              (aka. object owner) */
-    int priority;                          /* highest priority waiter */
-    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+    priobit_t mask;               /* Bitmask of hist entries that are not zero */
 };

-/* Choices of wakeup protocol */
+#endif /* HAVE_PRIORITY_SCHEDULING */

-/* For transfer of object ownership by one thread to another thread by
- * the owning thread itself (mutexes) */
-struct thread_entry *
-    wakeup_priority_protocol_transfer(struct thread_entry *thread);
-
-/* For release by owner where ownership doesn't change - other threads,
- * interrupts, timeouts, etc. (mutex timeout, queues) */
-struct thread_entry *
-    wakeup_priority_protocol_release(struct thread_entry *thread);
-
-
-struct priority_distribution
+#ifdef HAVE_CORELOCK_OBJECT
+/* Operations to be performed just before stopping a thread and starting
+   a new one if specified before calling switch_thread */
+enum
 {
-    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
-    uint32_t mask;                /* Bitmask of hist entries that are not zero */
+    TBOP_CLEAR = 0,       /* No operation to do */
+    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
+    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
 };

-#endif /* HAVE_PRIORITY_SCHEDULING */
+struct thread_blk_ops
+{
+    struct corelock *cl_p; /* pointer to corelock */
+    unsigned char flags;   /* TBOP_* flags */
+};
+#endif /* NUM_CORES > 1 */

+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+    struct thread_entry *prev; /* Previous thread in a list */
+    struct thread_entry *next; /* Next thread in a list */
+};

 /* Information kept in each thread slot
  * members are arranged according to size - largest first - in order
@@ -183,6 +149,8 @@ struct thread_entry
     volatile intptr_t retval;  /* Return value from a blocked operation/
                                   misc. use */
 #endif
+    uint32_t id;               /* Current slot id */
+    int __errno;               /* Thread error number (errno tls) */
 #ifdef HAVE_PRIORITY_SCHEDULING
     /* Priority summary of owned objects that support inheritance */
     struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
@@ -198,7 +166,6 @@ struct thread_entry
     unsigned char priority;    /* Scheduled priority (higher of base or
                                   all threads blocked by this one) */
 #endif
-    uint16_t id;               /* Current slot id */
     unsigned short stack_size; /* Size of stack in bytes */
     unsigned char state;       /* Thread slot state (STATE_*) */
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -209,30 +176,6 @@ struct thread_entry
 #endif
 };

-/*** Macros for internal use ***/
-/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
-#define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK  0xff00
-#define THREAD_ID_SLOT_MASK     0x00ff
-#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
-
-#ifdef HAVE_CORELOCK_OBJECT
-/* Operations to be performed just before stopping a thread and starting
-   a new one if specified before calling switch_thread */
-enum
-{
-    TBOP_CLEAR = 0,       /* No operation to do */
-    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
-    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
-};
-
-struct thread_blk_ops
-{
-    struct corelock *cl_p; /* pointer to corelock */
-    unsigned char flags;   /* TBOP_* flags */
-};
-#endif /* NUM_CORES > 1 */
-
 /* Information kept for each core
  * Members are arranged for the same reason as in thread_entry
  */
@@ -256,61 +199,45 @@ struct core_entry
 #endif /* NUM_CORES */
 };

-#ifdef HAVE_PRIORITY_SCHEDULING
-#define IF_PRIO(...) __VA_ARGS__
-#define IFN_PRIO(...)
-#else
-#define IF_PRIO(...)
-#define IFN_PRIO(...) __VA_ARGS__
-#endif
-
-void core_idle(void);
-void core_wake(IF_COP_VOID(unsigned int core));
-
-/* Initialize the scheduler */
-void init_threads(void) INIT_ATTR;
+/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
+#define THREAD_ID_VERSION_SHIFT 8
+#define THREAD_ID_VERSION_MASK  0xffffff00
+#define THREAD_ID_SLOT_MASK     0x000000ff
+#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
+#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)

-/* Allocate a thread in the scheduler */
-#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
-unsigned int create_thread(void (*function)(void),
-                           void* stack, size_t stack_size,
-                           unsigned flags, const char *name
-                           IF_PRIO(, int priority)
-                           IF_COP(, unsigned int core));
+/* Thread locking */
+#if NUM_CORES > 1
+#define LOCK_THREAD(thread) \
+    ({ corelock_lock(&(thread)->slot_cl); })
+#define TRY_LOCK_THREAD(thread) \
+    ({ corelock_try_lock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD(thread) \
+    ({ corelock_unlock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+    ({ unsigned int _core = (thread)->core; \
+       cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
+       cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
+#else /* NUM_CORES == 1*/
+#define LOCK_THREAD(thread) \
+    ({ (void)(thread); })
+#define TRY_LOCK_THREAD(thread) \
+    ({ (void)(thread); })
+#define UNLOCK_THREAD(thread) \
+    ({ (void)(thread); })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+    ({ (void)(thread); })
+#endif /* NUM_CORES */

-/* Set and clear the CPU frequency boost flag for the calling thread */
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-void trigger_cpu_boost(void);
-void cancel_cpu_boost(void);
-#else
-#define trigger_cpu_boost() do { } while(0)
-#define cancel_cpu_boost() do { } while(0)
-#endif
-/* Return thread entry from id */
-struct thread_entry *thread_id_entry(unsigned int thread_id);
-/* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN).
- * Has no effect on a thread not frozen. */
-void thread_thaw(unsigned int thread_id);
-/* Wait for a thread to exit */
-void thread_wait(unsigned int thread_id);
-/* Exit the current thread */
-void thread_exit(void) NORETURN_ATTR;
-#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
-#define ALLOW_REMOVE_THREAD
-/* Remove a thread from the scheduler */
-void remove_thread(unsigned int thread_id);
-#endif
+#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)

 /* Switch to next runnable thread */
 void switch_thread(void);
 /* Blocks a thread for at least the specified number of ticks (0 = wait until
  * next tick) */
 void sleep_thread(int ticks);
-/* Indefinitely blocks the current thread on a thread queue */
-void block_thread(struct thread_entry *current);
-/* Blocks the current thread on a thread queue until explicitely woken or
- * the timeout is reached */
-void block_thread_w_tmo(struct thread_entry *current, int timeout);
+/* Blocks the current thread on a thread queue (< 0 == infinite) */
+void block_thread(struct thread_entry *current, int timeout);

 /* Return bit flags for thread wakeup */
 #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@@ -322,12 +249,25 @@ void block_thread_w_tmo(struct thread_entry *current, int timeout);
 unsigned int thread_queue_wake(struct thread_entry **list);

 /* Wakeup a thread at the head of a list */
-unsigned int wakeup_thread(struct thread_entry **list);
+enum wakeup_thread_protocol
+{
+    WAKEUP_DEFAULT,
+    WAKEUP_TRANSFER,
+    WAKEUP_RELEASE,
+    WAKEUP_TRANSFER_MULTI,
+};
+
+unsigned int wakeup_thread_(struct thread_entry **list
+                            IF_PRIO(, enum wakeup_thread_protocol proto));

 #ifdef HAVE_PRIORITY_SCHEDULING
-int thread_set_priority(unsigned int thread_id, int priority);
-int thread_get_priority(unsigned int thread_id);
+#define wakeup_thread(list, proto) \
+    wakeup_thread_((list), (proto))
+#else /* !HAVE_PRIORITY_SCHEDULING */
+#define wakeup_thread(list, proto...) \
+    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */
+
 #ifdef HAVE_IO_PRIORITY
 void thread_set_io_priority(unsigned int thread_id, int io_priority);
 int thread_get_io_priority(unsigned int thread_id);
@@ -339,19 +279,14 @@ unsigned int switch_core(unsigned int new_core);
 /* Return the id of the calling thread. */
 unsigned int thread_self(void);

-/* Return the thread_entry for the calling thread.
- * INTERNAL: Intended for use by kernel and not for programs. */
+/* Return the thread_entry for the calling thread */
 struct thread_entry* thread_self_entry(void);

-/* Debugging info - only! */
-int thread_stack_usage(const struct thread_entry *thread);
-#if NUM_CORES > 1
-int idle_stack_usage(unsigned int core);
-#endif
-void thread_get_name(char *buffer, int size,
-                     struct thread_entry *thread);
+/* Return thread entry from id */
+struct thread_entry *thread_id_entry(unsigned int thread_id);
+
 #ifdef RB_PROFILE
 void profile_thread(void);
 #endif

-#endif /* THREAD_H */
+#endif /* THREAD_INTERNAL_H */
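The thread-id macros arrive here widened from 16 to 32 bits: the slot byte stays 8 bits wide while the version field grows to 24, so roughly 16 million reuses of a slot must occur before the version wraps and a stale handle could alias a live thread. A short worked example of the packing (values illustrative):

/* Layout: |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| - 24-bit version over an
 * 8-bit slot. Reusing a slot bumps the version, so an id held across a
 * thread's death stops matching even though it decodes to the same slot. */
unsigned int id   = THREAD_ID_INIT(5);   /* 0x00000105: version 1, slot 5 */
unsigned int slot = THREAD_ID_SLOT(id);  /* 5 */
unsigned int ver  = (id & THREAD_ID_VERSION_MASK) >> THREAD_ID_VERSION_SHIFT; /* 1 */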
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
index 9855cc3c84..5bb6eb5522 100644
--- a/firmware/kernel/thread.c
+++ b/firmware/kernel/thread.c
@@ -28,11 +28,7 @@
 #undef _FORTIFY_SOURCE
 #endif

-#include <stdbool.h>
-#include <stdio.h>
-#include "thread.h"
-#include "panic.h"
-#include "system.h"
+#include "thread-internal.h"
 #include "kernel.h"
 #include "cpu.h"
 #include "string.h"
@@ -40,8 +36,6 @@
 #include <profile.h>
 #endif
 #include "core_alloc.h"
-#include "gcc_extensions.h"
-#include "corelock.h"

 /****************************************************************************
  * ATTENTION!! *
@@ -131,7 +125,6 @@

 /* Cast to the the machine pointer size, whose size could be < 4 or > 32
  * (someday :). */
-#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
 static struct core_entry cores[NUM_CORES] IBSS_ATTR;
 struct thread_entry threads[MAXTHREADS] IBSS_ATTR;

@@ -204,57 +197,36 @@ void switch_thread(void) | |||
204 | * End Processor-specific section | 197 | * End Processor-specific section |
205 | ***************************************************************************/ | 198 | ***************************************************************************/ |
206 | 199 | ||
207 | #if THREAD_EXTRA_CHECKS | 200 | static NO_INLINE |
208 | static void thread_panicf(const char *msg, struct thread_entry *thread) | 201 | void thread_panicf(const char *msg, struct thread_entry *thread) |
209 | { | 202 | { |
210 | IF_COP( const unsigned int core = thread->core; ) | 203 | IF_COP( const unsigned int core = thread->core; ) |
211 | static char name[32]; | 204 | static char namebuf[sizeof (((struct thread_debug_info *)0)->name)]; |
212 | thread_get_name(name, 32, thread); | 205 | const char *name = thread->name; |
206 | if (!name) | ||
207 | name = ""; | ||
208 | snprintf(namebuf, sizeof (namebuf), *name ? "%s" : "%s%08lX", | ||
209 | name, (unsigned long)thread->id); | ||
213 | panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); | 210 | panicf ("%s %s" IF_COP(" (%d)"), msg, namebuf IF_COP(, core)); |
214 | } | 211 | } |
212 | |||
215 | static void thread_stkov(struct thread_entry *thread) | 213 | static void thread_stkov(struct thread_entry *thread) |
216 | { | 214 | { |
217 | thread_panicf("Stkov", thread); | 215 | thread_panicf("Stkov", thread); |
218 | } | 216 | } |
217 | |||
218 | #if THREAD_EXTRA_CHECKS | ||
219 | #define THREAD_PANICF(msg, thread) \ | 219 | #define THREAD_PANICF(msg, thread) \ |
220 | thread_panicf(msg, thread) | 220 | thread_panicf(msg, thread) |
221 | #define THREAD_ASSERT(exp, msg, thread) \ | 221 | #define THREAD_ASSERT(exp, msg, thread) \ |
222 | ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) | 222 | ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) |
223 | #else | 223 | #else |
224 | static void thread_stkov(struct thread_entry *thread) | 224 | #define THREAD_PANICF(msg, thread) \ |
225 | { | 225 | do {} while (0) |
226 | IF_COP( const unsigned int core = thread->core; ) | 226 | #define THREAD_ASSERT(exp, msg, thread) \ |
227 | static char name[32]; | 227 | do {} while (0) |
228 | thread_get_name(name, 32, thread); | ||
229 | panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core)); | ||
230 | } | ||
231 | #define THREAD_PANICF(msg, thread) | ||
232 | #define THREAD_ASSERT(exp, msg, thread) | ||
233 | #endif /* THREAD_EXTRA_CHECKS */ | 228 | #endif /* THREAD_EXTRA_CHECKS */ |
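
The stub definitions above trade bare-empty expansions for do {} while (0). The empty form is fragile: drop the trailing semicolon and the call disappears while the next statement is silently absorbed into any enclosing if. A minimal sketch of the failure mode the new form turns into a compile error (the CHECK_* names are illustrative, not from the tree):

    /* Illustrative only - not Rockbox code */
    #define CHECK_EMPTY(x)                    /* expands to nothing */
    #define CHECK_SAFE(x)   do {} while (0)   /* demands the ';'    */

    if (cond)
        CHECK_EMPTY(x)    /* missing ';' still compiles...        */
        cleanup();        /* ...and cleanup() becomes the if body */

    if (cond)
        CHECK_SAFE(x)     /* missing ';' is now a syntax error */
        cleanup();
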
234 | 229 | ||
235 | /* Thread locking */ | ||
236 | #if NUM_CORES > 1 | ||
237 | #define LOCK_THREAD(thread) \ | ||
238 | ({ corelock_lock(&(thread)->slot_cl); }) | ||
239 | #define TRY_LOCK_THREAD(thread) \ | ||
240 | ({ corelock_try_lock(&(thread)->slot_cl); }) | ||
241 | #define UNLOCK_THREAD(thread) \ | ||
242 | ({ corelock_unlock(&(thread)->slot_cl); }) | ||
243 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ | ||
244 | ({ unsigned int _core = (thread)->core; \ | ||
245 | cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \ | ||
246 | cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) | ||
247 | #else | ||
248 | #define LOCK_THREAD(thread) \ | ||
249 | ({ (void)(thread); }) | ||
250 | #define TRY_LOCK_THREAD(thread) \ | ||
251 | ({ (void)(thread); }) | ||
252 | #define UNLOCK_THREAD(thread) \ | ||
253 | ({ (void)(thread); }) | ||
254 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ | ||
255 | ({ (void)(thread); }) | ||
256 | #endif | ||
257 | |||
258 | /* RTR list */ | 230 | /* RTR list */ |
259 | #define RTR_LOCK(core) \ | 231 | #define RTR_LOCK(core) \ |
260 | ({ corelock_lock(&cores[core].rtr_cl); }) | 232 | ({ corelock_lock(&cores[core].rtr_cl); }) |
@@ -993,27 +965,6 @@ static void wakeup_thread_release(struct thread_entry *thread) | |||
993 | inherit_priority(bl, bl, blt, newblpr); | 965 | inherit_priority(bl, bl, blt, newblpr); |
994 | } | 966 | } |
995 | 967 | ||
996 | /*--------------------------------------------------------------------------- | ||
997 | * No threads may be blocked waiting on this thread except to await its exit. | ||
998 | * The alternative is more elaborate cleanup and object registration code. | ||
999 | * Check this for risk of silent data corruption when objects with | ||
1000 | * inheritable blocking are abandoned by the owner - not precise but may | ||
1001 | * catch something. | ||
1002 | *--------------------------------------------------------------------------- | ||
1003 | */ | ||
1004 | static void __attribute__((noinline)) check_for_obj_waiters( | ||
1005 | const char *function, struct thread_entry *thread) | ||
1006 | { | ||
1007 | /* Only one bit in the mask should be set with a frequency of 1 which | ||
1008 | * represents the thread's own base priority */ | ||
1009 | if (priobit_popcount(&thread->pdist.mask) != 1 || | ||
1010 | thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1) | ||
1011 | { | ||
1012 | unsigned char name[32]; | ||
1013 | thread_get_name(name, 32, thread); | ||
1014 | panicf("%s->%s with obj. waiters", function, name); | ||
1015 | } | ||
1016 | } | ||
1017 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 968 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
1018 | 969 | ||
1019 | /*--------------------------------------------------------------------------- | 970 | /*--------------------------------------------------------------------------- |
@@ -1520,31 +1471,6 @@ void block_thread(struct thread_entry *current, int timeout) | |||
1520 | } | 1471 | } |
1521 | 1472 | ||
1522 | /*--------------------------------------------------------------------------- | 1473 | /*--------------------------------------------------------------------------- |
1523 | * Wake up an entire queue of threads - returns bitwise-or of return bitmask | ||
1524 | * from each operation, or THREAD_NONE if nothing was awakened. Object owning | ||
1525 | * the queue must be locked first. | ||
1526 | * | ||
1527 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
1528 | *--------------------------------------------------------------------------- | ||
1529 | */ | ||
1530 | unsigned int thread_queue_wake(struct thread_entry **list) | ||
1531 | { | ||
1532 | unsigned result = THREAD_NONE; | ||
1533 | |||
1534 | for (;;) | ||
1535 | { | ||
1536 | unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); | ||
1537 | |||
1538 | if (rc == THREAD_NONE) | ||
1539 | break; /* No more threads */ | ||
1540 | |||
1541 | result |= rc; | ||
1542 | } | ||
1543 | |||
1544 | return result; | ||
1545 | } | ||
1546 | |||
1547 | /*--------------------------------------------------------------------------- | ||
1548 | * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. | 1474 | * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. |
1549 | *--------------------------------------------------------------------------- | 1475 | *--------------------------------------------------------------------------- |
1550 | */ | 1476 | */ |
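
thread_queue_wake() leaves thread.c here (its thread-sdl.c twin goes below), presumably consolidated into the newly added kernel/thread-common.c. The caller-side idiom for its result mask is worth keeping in mind; a minimal sketch, assuming the usual THREAD_SWITCH bit in the returned mask:

    /* Sketch: wake every thread blocked on a kernel object's queue,
     * then reschedule if any wakeup asked for an immediate switch. */
    unsigned int result = thread_queue_wake(&obj->queue);
    if (result & THREAD_SWITCH)
        switch_thread();
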
@@ -1580,7 +1506,7 @@ static struct thread_entry * find_empty_thread_slot(void) | |||
1580 | struct thread_entry *t = &threads[n]; | 1506 | struct thread_entry *t = &threads[n]; |
1581 | LOCK_THREAD(t); | 1507 | LOCK_THREAD(t); |
1582 | 1508 | ||
1583 | if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT )) | 1509 | if (t->state == STATE_KILLED) |
1584 | { | 1510 | { |
1585 | /* Slot is empty - leave it locked and caller will unlock */ | 1511 | /* Slot is empty - leave it locked and caller will unlock */ |
1586 | thread = t; | 1512 | thread = t; |
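
With the THREAD_DESTRUCT marker gone, an empty slot is identified by state alone; the lock discipline is the subtle part, since the function returns the slot still locked. A condensed sketch of that contract, shaped after the loop above:

    /* Sketch: find a reusable slot; returns it LOCKED, or NULL when full.
     * The caller initializes the slot, then calls UNLOCK_THREAD() on it. */
    static struct thread_entry *find_slot_sketch(void)
    {
        for (unsigned int n = 0; n < MAXTHREADS; n++)
        {
            struct thread_entry *t = &threads[n];
            LOCK_THREAD(t);
            if (t->state == STATE_KILLED)
                return t;           /* deliberately left locked */
            UNLOCK_THREAD(t);
        }
        return NULL;
    }
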
@@ -1836,21 +1762,14 @@ void thread_exit(void) | |||
1836 | corelock_lock(¤t->waiter_cl); | 1762 | corelock_lock(¤t->waiter_cl); |
1837 | LOCK_THREAD(current); | 1763 | LOCK_THREAD(current); |
1838 | 1764 | ||
1839 | #if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1 | ||
1840 | if (current->name == THREAD_DESTRUCT) | ||
1841 | { | ||
1842 | /* Thread being killed - become a waiter */ | ||
1843 | unsigned int id = current->id; | ||
1844 | UNLOCK_THREAD(current); | ||
1845 | corelock_unlock(¤t->waiter_cl); | ||
1846 | thread_wait(id); | ||
1847 | THREAD_PANICF("thread_exit->WK:*R", current); | ||
1848 | } | ||
1849 | #endif | ||
1850 | |||
1851 | #ifdef HAVE_PRIORITY_SCHEDULING | 1765 | #ifdef HAVE_PRIORITY_SCHEDULING |
1852 | check_for_obj_waiters("thread_exit", current); | 1766 | /* Only one bit in the mask should be set with a frequency of 1, which |
1853 | #endif | 1767 | * represents the thread's own base priority; otherwise threads are waiting |
1768 | * on an abandoned object */ | ||
1769 | if (priobit_popcount(¤t->pdist.mask) != 1 || | ||
1770 | current->pdist.hist[priobit_ffs(¤t->pdist.mask)] > 1) | ||
1771 | thread_panicf("abandon ship!", current); | ||
1772 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
1854 | 1773 | ||
1855 | if (current->tmo.prev != NULL) | 1774 | if (current->tmo.prev != NULL) |
1856 | { | 1775 | { |
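
The check inlined into thread_exit() above reads the priority-distribution tracker: pdist.mask carries one bit per priority level the thread currently holds, and pdist.hist[] counts how often each level is held (the base priority once, plus one per priority inherited from a blocked waiter). Worked through for an assumed base priority of 16:

    /* Clean exit:
     *   mask == 1 << 16, hist[16] == 1   -> popcount 1, count 1: proceed
     * Abandoned mutex whose waiter donated priority 10:
     *   mask == (1 << 10) | (1 << 16)    -> popcount 2:
     *   thread_panicf("abandon ship!", current) fires */
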
@@ -1872,186 +1791,6 @@ void thread_exit(void) | |||
1872 | thread_final_exit(current); | 1791 | thread_final_exit(current); |
1873 | } | 1792 | } |
1874 | 1793 | ||
1875 | #ifdef ALLOW_REMOVE_THREAD | ||
1876 | /*--------------------------------------------------------------------------- | ||
1877 | * Remove a thread from the scheduler. Not The Right Way to Do Things in | ||
1878 | * normal programs. | ||
1879 | * | ||
1880 | * Parameter is the ID as returned from create_thread(). | ||
1881 | * | ||
1882 | * Use with care on threads that are not under careful control as this may | ||
1883 | * leave various objects in an undefined state. | ||
1884 | *--------------------------------------------------------------------------- | ||
1885 | */ | ||
1886 | void remove_thread(unsigned int thread_id) | ||
1887 | { | ||
1888 | #ifdef HAVE_CORELOCK_OBJECT | ||
1889 | /* core is not constant here because of core switching */ | ||
1890 | unsigned int core = CURRENT_CORE; | ||
1891 | unsigned int old_core = NUM_CORES; | ||
1892 | struct corelock *ocl = NULL; | ||
1893 | #else | ||
1894 | const unsigned int core = CURRENT_CORE; | ||
1895 | #endif | ||
1896 | struct thread_entry *current = cores[core].running; | ||
1897 | struct thread_entry *thread = thread_id_entry(thread_id); | ||
1898 | |||
1899 | unsigned state; | ||
1900 | int oldlevel; | ||
1901 | |||
1902 | if (thread == current) | ||
1903 | thread_exit(); /* Current thread - do normal exit */ | ||
1904 | |||
1905 | oldlevel = disable_irq_save(); | ||
1906 | |||
1907 | corelock_lock(&thread->waiter_cl); | ||
1908 | LOCK_THREAD(thread); | ||
1909 | |||
1910 | state = thread->state; | ||
1911 | |||
1912 | if (thread->id != thread_id || state == STATE_KILLED) | ||
1913 | goto thread_killed; | ||
1914 | |||
1915 | #if NUM_CORES > 1 | ||
1916 | if (thread->name == THREAD_DESTRUCT) | ||
1917 | { | ||
1918 | /* Thread being killed - become a waiter */ | ||
1919 | UNLOCK_THREAD(thread); | ||
1920 | corelock_unlock(&thread->waiter_cl); | ||
1921 | restore_irq(oldlevel); | ||
1922 | thread_wait(thread_id); | ||
1923 | return; | ||
1924 | } | ||
1925 | |||
1926 | thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */ | ||
1927 | |||
1928 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1929 | check_for_obj_waiters("remove_thread", thread); | ||
1930 | #endif | ||
1931 | |||
1932 | if (thread->core != core) | ||
1933 | { | ||
1934 | /* Switch cores and safely extract the thread there */ | ||
1935 | /* Slot HAS to be unlocked or a deadlock could occur which means other | ||
1936 | * threads have to be guided into becoming thread waiters if they | ||
1937 | * attempt to remove it. */ | ||
1938 | unsigned int new_core = thread->core; | ||
1939 | |||
1940 | corelock_unlock(&thread->waiter_cl); | ||
1941 | |||
1942 | UNLOCK_THREAD(thread); | ||
1943 | restore_irq(oldlevel); | ||
1944 | |||
1945 | old_core = switch_core(new_core); | ||
1946 | |||
1947 | oldlevel = disable_irq_save(); | ||
1948 | |||
1949 | corelock_lock(&thread->waiter_cl); | ||
1950 | LOCK_THREAD(thread); | ||
1951 | |||
1952 | state = thread->state; | ||
1953 | core = new_core; | ||
1954 | /* Perform the extraction and switch ourselves back to the original | ||
1955 | processor */ | ||
1956 | } | ||
1957 | #endif /* NUM_CORES > 1 */ | ||
1958 | |||
1959 | if (thread->tmo.prev != NULL) | ||
1960 | { | ||
1961 | /* Clean thread off the timeout list if a timeout check hasn't | ||
1962 | * run yet */ | ||
1963 | remove_from_list_tmo(thread); | ||
1964 | } | ||
1965 | |||
1966 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
1967 | /* Cancel CPU boost if any */ | ||
1968 | boost_thread(thread, false); | ||
1969 | #endif | ||
1970 | |||
1971 | IF_COP( retry_state: ) | ||
1972 | |||
1973 | switch (state) | ||
1974 | { | ||
1975 | case STATE_RUNNING: | ||
1976 | RTR_LOCK(core); | ||
1977 | /* Remove thread from ready to run tasks */ | ||
1978 | remove_from_list_l(&cores[core].running, thread); | ||
1979 | rtr_subtract_entry(core, thread->priority); | ||
1980 | RTR_UNLOCK(core); | ||
1981 | break; | ||
1982 | case STATE_BLOCKED: | ||
1983 | case STATE_BLOCKED_W_TMO: | ||
1984 | /* Remove thread from the queue it's blocked on - including its | ||
1985 | * own if waiting there */ | ||
1986 | #if NUM_CORES > 1 | ||
1987 | if (&thread->waiter_cl != thread->obj_cl) | ||
1988 | { | ||
1989 | ocl = thread->obj_cl; | ||
1990 | |||
1991 | if (UNLIKELY(corelock_try_lock(ocl) == 0)) | ||
1992 | { | ||
1993 | UNLOCK_THREAD(thread); | ||
1994 | corelock_lock(ocl); | ||
1995 | LOCK_THREAD(thread); | ||
1996 | |||
1997 | if (UNLIKELY(thread->state != state)) | ||
1998 | { | ||
1999 | /* Something woke the thread */ | ||
2000 | state = thread->state; | ||
2001 | corelock_unlock(ocl); | ||
2002 | goto retry_state; | ||
2003 | } | ||
2004 | } | ||
2005 | } | ||
2006 | #endif | ||
2007 | #ifdef HAVE_WAKEUP_EXT_CB | ||
2008 | if (thread->wakeup_ext_cb != NULL) | ||
2009 | thread->wakeup_ext_cb(thread); | ||
2010 | #endif | ||
2011 | |||
2012 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
2013 | /* Remove thread's priority influence from its chain if needed */ | ||
2014 | if (thread->blocker != NULL) | ||
2015 | wakeup_priority_protocol_release(thread); | ||
2016 | else | ||
2017 | #endif | ||
2018 | remove_from_list_l(thread->bqp, thread); | ||
2019 | |||
2020 | #if NUM_CORES > 1 | ||
2021 | if (ocl != NULL) | ||
2022 | corelock_unlock(ocl); | ||
2023 | #endif | ||
2024 | break; | ||
2025 | /* Otherwise thread is frozen and hasn't run yet */ | ||
2026 | } | ||
2027 | |||
2028 | new_thread_id(thread_id, thread); | ||
2029 | thread->state = STATE_KILLED; | ||
2030 | |||
2031 | /* If thread was waiting on itself, it will have been removed above. | ||
2032 | * The wrong order would result in waking the thread first and deadlocking | ||
2033 | * since the slot is already locked. */ | ||
2034 | thread_queue_wake(&thread->queue); | ||
2035 | |||
2036 | thread->name = NULL; | ||
2037 | |||
2038 | thread_killed: /* Thread was already killed */ | ||
2039 | /* Removal complete - safe to unlock and reenable interrupts */ | ||
2040 | corelock_unlock(&thread->waiter_cl); | ||
2041 | UNLOCK_THREAD(thread); | ||
2042 | restore_irq(oldlevel); | ||
2043 | |||
2044 | #if NUM_CORES > 1 | ||
2045 | if (old_core < NUM_CORES) | ||
2046 | { | ||
2047 | /* Did a removal on another processor's thread - switch back to | ||
2048 | native core */ | ||
2049 | switch_core(old_core); | ||
2050 | } | ||
2051 | #endif | ||
2052 | } | ||
2053 | #endif /* ALLOW_REMOVE_THREAD */ | ||
2054 | |||
2055 | #ifdef HAVE_PRIORITY_SCHEDULING | 1794 | #ifdef HAVE_PRIORITY_SCHEDULING |
2056 | /*--------------------------------------------------------------------------- | 1795 | /*--------------------------------------------------------------------------- |
2057 | * Sets the thread's relative base priority for the core it runs on. Any | 1796 | * Sets the thread's relative base priority for the core it runs on. Any |
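
remove_thread() and the THREAD_DESTRUCT protocol are gone from the native kernel: reaping another thread could strand held locks and priority donations, which is exactly what the deleted cross-core choreography tried to paper over. The supported teardown is now cooperative - signal the thread, let it exit itself, wait for the slot. A minimal sketch (worker_quit and do_work() are illustrative, not from the tree):

    /* Owner side: ask the worker to stop, then wait for it to vanish. */
    static volatile bool worker_quit = false;   /* illustrative flag */

    static void worker(void)
    {
        while (!worker_quit)
            do_work();              /* illustrative work function */
        thread_exit();              /* the thread removes itself  */
    }

    static void stop_worker(unsigned int id)
    {
        worker_quit = true;
        thread_wait(id);            /* block until the exit completes */
    }
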
@@ -2205,20 +1944,9 @@ unsigned int switch_core(unsigned int new_core) | |||
2205 | return core; | 1944 | return core; |
2206 | } | 1945 | } |
2207 | 1946 | ||
2208 | int oldlevel = disable_irq_save(); | 1947 | disable_irq(); |
2209 | LOCK_THREAD(current); | 1948 | LOCK_THREAD(current); |
2210 | 1949 | ||
2211 | if (current->name == THREAD_DESTRUCT) | ||
2212 | { | ||
2213 | /* Thread being killed - deactivate and let process complete */ | ||
2214 | unsigned int id = current->id; | ||
2215 | UNLOCK_THREAD(current); | ||
2216 | restore_irq(oldlevel); | ||
2217 | thread_wait(id); | ||
2218 | /* Should never be reached */ | ||
2219 | THREAD_PANICF("switch_core->D:*R", current); | ||
2220 | } | ||
2221 | |||
2222 | /* Get us off the running list for the current core */ | 1950 | /* Get us off the running list for the current core */ |
2223 | RTR_LOCK(core); | 1951 | RTR_LOCK(core); |
2224 | remove_from_list_l(&cores[core].running, current); | 1952 | remove_from_list_l(&cores[core].running, current); |
@@ -2274,7 +2002,7 @@ unsigned int switch_core(unsigned int new_core) | |||
2274 | * are safe to perform. | 2002 | * are safe to perform. |
2275 | *--------------------------------------------------------------------------- | 2003 | *--------------------------------------------------------------------------- |
2276 | */ | 2004 | */ |
2277 | void init_threads(void) | 2005 | void INIT_ATTR init_threads(void) |
2278 | { | 2006 | { |
2279 | const unsigned int core = CURRENT_CORE; | 2007 | const unsigned int core = CURRENT_CORE; |
2280 | struct thread_entry *thread; | 2008 | struct thread_entry *thread; |
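
INIT_ATTR marks init_threads() as boot-only so its code can be placed where the system may reclaim it once startup finishes; calling an INIT_ATTR function after init is therefore an error. On targets that support it, the attribute is assumed to be plain section placement:

    /* Assumed shape - the per-target config provides the real definition */
    #define INIT_ATTR __attribute__((section(".init")))
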
@@ -2353,82 +2081,6 @@ void init_threads(void) | |||
2353 | #endif | 2081 | #endif |
2354 | } | 2082 | } |
2355 | 2083 | ||
2356 | /* Shared stack scan helper for thread_stack_usage and idle_stack_usage */ | ||
2357 | #if NUM_CORES == 1 | ||
2358 | static inline int stack_usage(uintptr_t *stackptr, size_t stack_size) | ||
2359 | #else | ||
2360 | static int stack_usage(uintptr_t *stackptr, size_t stack_size) | ||
2361 | #endif | ||
2362 | { | ||
2363 | unsigned int stack_words = stack_size / sizeof (uintptr_t); | ||
2364 | unsigned int i; | ||
2365 | int usage = 0; | ||
2366 | |||
2367 | for (i = 0; i < stack_words; i++) | ||
2368 | { | ||
2369 | if (stackptr[i] != DEADBEEF) | ||
2370 | { | ||
2371 | usage = ((stack_words - i) * 100) / stack_words; | ||
2372 | break; | ||
2373 | } | ||
2374 | } | ||
2375 | |||
2376 | return usage; | ||
2377 | } | ||
2378 | |||
2379 | /*--------------------------------------------------------------------------- | ||
2380 | * Returns the maximum percentage of stack a thread ever used while running. | ||
2381 | * NOTE: Some large buffer allocations that don't use enough of the buffer to | ||
2382 | * overwrite stackptr[0] will not be seen. | ||
2383 | *--------------------------------------------------------------------------- | ||
2384 | */ | ||
2385 | int thread_stack_usage(const struct thread_entry *thread) | ||
2386 | { | ||
2387 | if (LIKELY(thread->stack_size > 0)) | ||
2388 | return stack_usage(thread->stack, thread->stack_size); | ||
2389 | return 0; | ||
2390 | } | ||
2391 | |||
2392 | #if NUM_CORES > 1 | ||
2393 | /*--------------------------------------------------------------------------- | ||
2394 | * Returns the maximum percentage of the core's idle stack ever used during | ||
2395 | * runtime. | ||
2396 | *--------------------------------------------------------------------------- | ||
2397 | */ | ||
2398 | int idle_stack_usage(unsigned int core) | ||
2399 | { | ||
2400 | return stack_usage(idle_stacks[core], IDLE_STACK_SIZE); | ||
2401 | } | ||
2402 | #endif | ||
2403 | |||
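
stack_usage() is a high-water-mark scan: stacks are pre-filled with the DEADBEEF pattern at creation, stacks grow downward, and the walk from index 0 stops at the first overwritten word, i.e. the deepest point execution ever reached. Worked through for a 2 KiB stack on a 32-bit target:

    /* stack_words = 2048 / sizeof (uintptr_t) = 512
     * first non-DEADBEEF word at i == 384 means words [384, 511] were used:
     *   usage = ((512 - 384) * 100) / 512 = 25    -> reported as 25% */
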
2404 | /*--------------------------------------------------------------------------- | ||
2405 | * Fills in the buffer with the specified thread's name. If the name is NULL, | ||
2406 | * empty, or the thread is in destruct state, a formatted ID is written | ||
2407 | * instead. | ||
2408 | *--------------------------------------------------------------------------- | ||
2409 | */ | ||
2410 | void thread_get_name(char *buffer, int size, | ||
2411 | struct thread_entry *thread) | ||
2412 | { | ||
2413 | if (size <= 0) | ||
2414 | return; | ||
2415 | |||
2416 | *buffer = '\0'; | ||
2417 | |||
2418 | if (thread) | ||
2419 | { | ||
2420 | /* Display thread name if one or ID if none */ | ||
2421 | const char *name = thread->name; | ||
2422 | const char *fmt = "%s"; | ||
2423 | if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0') | ||
2424 | { | ||
2425 | name = (const char *)(uintptr_t)thread->id; | ||
2426 | fmt = "%04lX"; | ||
2427 | } | ||
2428 | snprintf(buffer, size, fmt, name); | ||
2429 | } | ||
2430 | } | ||
2431 | |||
2432 | /* Unless otherwise defined, do nothing */ | 2084 | /* Unless otherwise defined, do nothing */ |
2433 | #ifndef YIELD_KERNEL_HOOK | 2085 | #ifndef YIELD_KERNEL_HOOK |
2434 | #define YIELD_KERNEL_HOOK() false | 2086 | #define YIELD_KERNEL_HOOK() false |
diff --git a/firmware/libc/errno.c b/firmware/libc/errno.c index 2e3cd9083e..146d6196ca 100644 --- a/firmware/libc/errno.c +++ b/firmware/libc/errno.c | |||
@@ -1,4 +1,4 @@ | |||
1 | #include "thread.h" | 1 | #include "../thread-internal.h" |
2 | int * __errno(void) | 2 | int * __errno(void) |
3 | { | 3 | { |
4 | return &thread_self_entry()->__errno; | 4 | return &thread_self_entry()->__errno; |
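
The include changes, but the technique is the standard reentrant-libc errno: the public errno is a macro over a per-thread slot, so each thread's failures stay independent. A sketch of the consumer side (the macro spelling is the conventional one, assumed rather than shown in this diff):

    /* Assumed, conventional plumbing in the libc's errno.h: */
    extern int *__errno(void);
    #define errno (*__errno())

    /* A store then lands in the calling thread's own
     * thread_entry::__errno field: */
    errno = EIO;
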
diff --git a/firmware/target/arm/pp/thread-pp.c b/firmware/target/arm/pp/thread-pp.c index ed4bdbeac1..b2e7fb018d 100644 --- a/firmware/target/arm/pp/thread-pp.c +++ b/firmware/target/arm/pp/thread-pp.c | |||
@@ -45,7 +45,7 @@ extern uintptr_t cpu_idlestackbegin[]; | |||
45 | extern uintptr_t cpu_idlestackend[]; | 45 | extern uintptr_t cpu_idlestackend[]; |
46 | extern uintptr_t cop_idlestackbegin[]; | 46 | extern uintptr_t cop_idlestackbegin[]; |
47 | extern uintptr_t cop_idlestackend[]; | 47 | extern uintptr_t cop_idlestackend[]; |
48 | static uintptr_t * const idle_stacks[NUM_CORES] = | 48 | uintptr_t * const idle_stacks[NUM_CORES] = |
49 | { | 49 | { |
50 | [CPU] = cpu_idlestackbegin, | 50 | [CPU] = cpu_idlestackbegin, |
51 | [COP] = cop_idlestackbegin | 51 | [COP] = cop_idlestackbegin |
@@ -92,9 +92,7 @@ static inline void NORETURN_ATTR __attribute__((always_inline)) | |||
92 | { | 92 | { |
93 | asm volatile ( | 93 | asm volatile ( |
94 | "cmp %1, #0 \n" /* CPU? */ | 94 | "cmp %1, #0 \n" /* CPU? */ |
95 | "ldrne r0, =commit_dcache \n" /* No? write back data */ | 95 | "blne commit_dcache \n" |
96 | "movne lr, pc \n" | ||
97 | "bxne r0 \n" | ||
98 | "mov r0, %0 \n" /* copy thread parameter */ | 96 | "mov r0, %0 \n" /* copy thread parameter */ |
99 | "mov sp, %2 \n" /* switch to idle stack */ | 97 | "mov sp, %2 \n" /* switch to idle stack */ |
100 | "bl thread_final_exit_do \n" /* finish removal */ | 98 | "bl thread_final_exit_do \n" /* finish removal */ |
@@ -163,9 +161,7 @@ static void __attribute__((naked)) | |||
163 | "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ | 161 | "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ |
164 | "mov r1, #0 \n" /* Clear start address */ | 162 | "mov r1, #0 \n" /* Clear start address */ |
165 | "str r1, [r0, #40] \n" | 163 | "str r1, [r0, #40] \n" |
166 | "ldr r0, =commit_discard_idcache \n" /* Invalidate new core's cache */ | 164 | "bl commit_discard_idcache \n" /* Invalidate new core's cache */ |
167 | "mov lr, pc \n" | ||
168 | "bx r0 \n" | ||
169 | "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */ | 165 | "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */ |
170 | : : "i"(IDLE_STACK_WORDS) | 166 | : : "i"(IDLE_STACK_WORDS) |
171 | ); | 167 | ); |
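
Both asm hunks in this file, like the one in firmware/asm/arm/thread.c, collapse the same hand-rolled long-call sequence into a plain bl. The old form built the return address manually, relying on ARM-mode pc reading as the current instruction plus 8; bl does the same job in one instruction whenever the callee is ARM code within pc-relative reach. Side by side:

    @ old: long call via literal pool - reaches any 32-bit address
    ldr r0, =callee      @ load the full address
    mov lr, pc           @ lr = this insn + 8 = the insn after the bx
    bx  r0               @ jump; callee returns through lr

    @ new: one instruction, +/-32 MB pc-relative, ARM-state callee
    bl  callee
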
diff --git a/firmware/target/hosted/sdl/thread-sdl.c b/firmware/target/hosted/sdl/thread-sdl.c index e117a4e3b6..fda877e0f5 100644 --- a/firmware/target/hosted/sdl/thread-sdl.c +++ b/firmware/target/hosted/sdl/thread-sdl.c | |||
@@ -28,10 +28,7 @@ | |||
28 | #include <setjmp.h> | 28 | #include <setjmp.h> |
29 | #include "system-sdl.h" | 29 | #include "system-sdl.h" |
30 | #include "thread-sdl.h" | 30 | #include "thread-sdl.h" |
31 | #include "system.h" | 31 | #include "../kernel-internal.h" |
32 | #include "kernel.h" | ||
33 | #include "thread.h" | ||
34 | #include "debug.h" | ||
35 | #include "core_alloc.h" | 32 | #include "core_alloc.h" |
36 | 33 | ||
37 | /* Define this as 1 to show informational messages that are not errors. */ | 34 | /* Define this as 1 to show informational messages that are not errors. */ |
@@ -165,6 +162,7 @@ static struct thread_entry * find_empty_thread_slot(void) | |||
165 | /* Initialize SDL threading */ | 162 | /* Initialize SDL threading */ |
166 | void init_threads(void) | 163 | void init_threads(void) |
167 | { | 164 | { |
165 | static uintptr_t main_stack[] = { DEADBEEF, 0 }; | ||
168 | struct thread_entry *thread; | 166 | struct thread_entry *thread; |
169 | int n; | 167 | int n; |
170 | 168 | ||
@@ -187,8 +185,8 @@ void init_threads(void) | |||
187 | then create the SDL thread - it is possible to have a quick, early | 185 | then create the SDL thread - it is possible to have a quick, early |
188 | shutdown try to access the structure. */ | 186 | shutdown try to access the structure. */ |
189 | thread = &threads[0]; | 187 | thread = &threads[0]; |
190 | thread->stack = (uintptr_t *)" "; | 188 | thread->stack = main_stack; |
191 | thread->stack_size = 8; | 189 | thread->stack_size = sizeof (main_stack); |
192 | thread->name = "main"; | 190 | thread->name = "main"; |
193 | thread->state = STATE_RUNNING; | 191 | thread->state = STATE_RUNNING; |
194 | thread->context.s = SDL_CreateSemaphore(0); | 192 | thread->context.s = SDL_CreateSemaphore(0); |
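
The " " string literal that used to stand in for the main thread's stack is replaced by a real two-word array, so the shared stack scan can walk it safely. The initializer even reproduces the 50% figure that thread-sdl.c's deleted thread_stack_usage() stub hard-coded (removed further below). Worked through, assuming the shared scan matches the stack_usage() removed from thread.c above:

    /* main_stack = { DEADBEEF, 0 }  ->  stack_words == 2
     * word 0 keeps the pattern, word 1 (== 0) does not, so i stops at 1:
     *   usage = ((2 - 1) * 100) / 2 = 50    -> the old stub's 50% */
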
@@ -439,23 +437,6 @@ unsigned int wakeup_thread_(struct thread_entry **list) | |||
439 | return THREAD_NONE; | 437 | return THREAD_NONE; |
440 | } | 438 | } |
441 | 439 | ||
442 | unsigned int thread_queue_wake(struct thread_entry **list) | ||
443 | { | ||
444 | unsigned int result = THREAD_NONE; | ||
445 | |||
446 | for (;;) | ||
447 | { | ||
448 | unsigned int rc = wakeup_thread_(list); | ||
449 | |||
450 | if (rc == THREAD_NONE) | ||
451 | break; | ||
452 | |||
453 | result |= rc; | ||
454 | } | ||
455 | |||
456 | return result; | ||
457 | } | ||
458 | |||
459 | void thread_thaw(unsigned int thread_id) | 440 | void thread_thaw(unsigned int thread_id) |
460 | { | 441 | { |
461 | struct thread_entry *thread = thread_id_entry(thread_id); | 442 | struct thread_entry *thread = thread_id_entry(thread_id); |
@@ -542,6 +523,10 @@ unsigned int create_thread(void (*function)(void), | |||
542 | return 0; | 523 | return 0; |
543 | } | 524 | } |
544 | 525 | ||
526 | unsigned int stack_words = stack_size / sizeof (uintptr_t); | ||
527 | for (unsigned int i = stack_words; i-- > 0;) | ||
528 | ((uintptr_t *)stack)[i] = DEADBEEF; | ||
529 | |||
545 | thread->stack = stack; | 530 | thread->stack = stack; |
546 | thread->stack_size = stack_size; | 531 | thread->stack_size = stack_size; |
547 | thread->name = name; | 532 | thread->name = name; |
@@ -557,11 +542,7 @@ unsigned int create_thread(void (*function)(void), | |||
557 | return thread->id; | 542 | return thread->id; |
558 | } | 543 | } |
559 | 544 | ||
560 | #ifndef ALLOW_REMOVE_THREAD | ||
561 | static void remove_thread(unsigned int thread_id) | 545 | static void remove_thread(unsigned int thread_id) |
562 | #else | ||
563 | void remove_thread(unsigned int thread_id) | ||
564 | #endif | ||
565 | { | 546 | { |
566 | struct thread_entry *current = cores[CURRENT_CORE].running; | 547 | struct thread_entry *current = cores[CURRENT_CORE].running; |
567 | struct thread_entry *thread = thread_id_entry(thread_id); | 548 | struct thread_entry *thread = thread_id_entry(thread_id); |
@@ -657,41 +638,6 @@ void thread_wait(unsigned int thread_id) | |||
657 | } | 638 | } |
658 | } | 639 | } |
659 | 640 | ||
660 | int thread_stack_usage(const struct thread_entry *thread) | ||
661 | { | ||
662 | return 50; | ||
663 | (void)thread; | ||
664 | } | ||
665 | |||
666 | /* Return name if one or ID if none */ | ||
667 | void thread_get_name(char *buffer, int size, | ||
668 | struct thread_entry *thread) | ||
669 | { | ||
670 | if (size <= 0) | ||
671 | return; | ||
672 | |||
673 | *buffer = '\0'; | ||
674 | |||
675 | if (thread) | ||
676 | { | ||
677 | /* Display thread name if one or ID if none */ | ||
678 | bool named = thread->name && *thread->name; | ||
679 | const char *fmt = named ? "%s" : "%04lX"; | ||
680 | intptr_t name = named ? | ||
681 | (intptr_t)thread->name : (intptr_t)thread->id; | ||
682 | snprintf(buffer, size, fmt, name); | ||
683 | } | ||
684 | } | ||
685 | |||
686 | /* Unless otherwise defined, do nothing */ | ||
687 | #ifndef YIELD_KERNEL_HOOK | ||
688 | #define YIELD_KERNEL_HOOK() false | ||
689 | #endif | ||
690 | #ifndef SLEEP_KERNEL_HOOK | ||
691 | #define SLEEP_KERNEL_HOOK(ticks) false | ||
692 | #endif | ||
693 | |||
694 | |||
695 | /*--------------------------------------------------------------------------- | 641 | /*--------------------------------------------------------------------------- |
696 | * Suspends a thread's execution for at least the specified number of ticks. | 642 | * Suspends a thread's execution for at least the specified number of ticks. |
697 | * | 643 | * |
@@ -707,11 +653,6 @@ void thread_get_name(char *buffer, int size, | |||
707 | */ | 653 | */ |
708 | unsigned sleep(unsigned ticks) | 654 | unsigned sleep(unsigned ticks) |
709 | { | 655 | { |
710 | /* In some situations, bootloaders in particular, a normal | ||
711 | * threading call is inappropriate. */ | ||
712 | if (SLEEP_KERNEL_HOOK(ticks)) | ||
713 | return 0; /* Handled */ | ||
714 | |||
715 | disable_irq(); | 656 | disable_irq(); |
716 | sleep_thread(ticks); | 657 | sleep_thread(ticks); |
717 | switch_thread(); | 658 | switch_thread(); |
@@ -725,10 +666,5 @@ unsigned sleep(unsigned ticks) | |||
725 | */ | 666 | */ |
726 | void yield(void) | 667 | void yield(void) |
727 | { | 668 | { |
728 | /* In some situations, bootloaders in particular, a normal | ||
729 | * threading call is inappropriate. */ | ||
730 | if (YIELD_KERNEL_HOOK()) | ||
731 | return; /* handled */ | ||
732 | |||
733 | switch_thread(); | 669 | switch_thread(); |
734 | } | 670 | } |
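
The hosted SDL backend drops the bootloader hooks outright; the YIELD_KERNEL_HOOK/SLEEP_KERNEL_HOOK defaults survive only in the native thread.c. For the shape of the contract, a hypothetical target override - returning true means "handled, skip the scheduler", and both helper names are invented for illustration:

    /* Hypothetical target definition - illustrative only */
    #define SLEEP_KERNEL_HOOK(ticks) \
        ({ bool handled_ = in_bootloader_busy_wait();  \
           if (handled_)                               \
               busy_wait_ticks(ticks);                 \
           handled_; })
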