author    Michael Sevakis <jethead71@rockbox.org>    2014-08-08 06:33:51 -0400
committer Michael Sevakis <jethead71@rockbox.org>    2014-08-16 05:15:37 -0400
commit    6ed00870abd566d7267d2436c2693f5a281cda2f (patch)
tree      6011c73e302254fc73f61a1b8b1f295ded1f5d56 /firmware/kernel/thread-common.c
parent    eb63d8b4a2a7cbe4e98216b48a75391718fcebd7 (diff)
Base scheduler queues off linked lists and do cleanup/consolidation
Abstracts threading from itself a bit, changes the way its queues are handled, and does type hiding for that as well.

A lot is done here because major brain surgery was already required anyway. Threads may now be on a run queue and a wait queue simultaneously, so an expired timer only has to wake the thread rather than also remove it from the wait queue, which simplifies the implicit wake handling. List formats change for wait queues: doubly-linked, not circular. The timeout queue is now singly-linked. The run queue is still circular, as before.

Adds a better thread slot allocator that may keep a slot marked as used regardless of the thread state. This assists in dumping special tasks that switch_thread was tasked to perform (blocking tasks).

Deletes a lot of code, yet surprisingly the result is larger than expected. I'm not minding that for the time being; omelettes, breaking a few eggs, and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
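[Editor's note: for illustration, a minimal sketch of the wait-queue shape the message describes: doubly-linked and NULL-terminated rather than circular, so a node can be unlinked from anywhere in O(1) without walking the list. The names (wq_node, wait_queue, wq_register, wq_remove) are hypothetical and are not the scheduler's actual list primitives, which live in other files of this change.]

/* Hypothetical illustration only -- not the kernel's real list code. */
#include <stddef.h>

struct wq_node
{
    struct wq_node *next, *prev;
};

struct wait_queue
{
    struct wq_node *head, *tail;   /* NULL when empty: linear, not circular */
};

/* Append a waiter at the tail. */
static void wq_register(struct wait_queue *wq, struct wq_node *n)
{
    n->next = NULL;
    n->prev = wq->tail;

    if (wq->tail)
        wq->tail->next = n;
    else
        wq->head = n;

    wq->tail = n;
}

/* Unlink a waiter from anywhere in O(1) -- the property that lets an
   expired timer wake a thread without first finding it in the queue. */
static void wq_remove(struct wait_queue *wq, struct wq_node *n)
{
    if (n->prev)
        n->prev->next = n->next;
    else
        wq->head = n->next;

    if (n->next)
        n->next->prev = n->prev;
    else
        wq->tail = n->prev;
}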
Diffstat (limited to 'firmware/kernel/thread-common.c')
-rw-r--r--  firmware/kernel/thread-common.c | 247
1 file changed, 213 insertions(+), 34 deletions(-)
diff --git a/firmware/kernel/thread-common.c b/firmware/kernel/thread-common.c
index b8b8ffbd4c..aad6610feb 100644
--- a/firmware/kernel/thread-common.c
+++ b/firmware/kernel/thread-common.c
@@ -18,39 +18,222 @@
  * KIND, either express or implied.
  *
  ****************************************************************************/
-#include "thread-internal.h"
+#include "kernel-internal.h"
 #include "system.h"
 
+/* Unless otherwise defined, do nothing */
+#ifndef YIELD_KERNEL_HOOK
+#define YIELD_KERNEL_HOOK() false
+#endif
+#ifndef SLEEP_KERNEL_HOOK
+#define SLEEP_KERNEL_HOOK(ticks) false
+#endif
+
+const char __main_thread_name_str[] = "main";
+
+/* Array indexing is more efficient in inlines if the elements are a native
+   word size (100s of bytes fewer instructions) */
+
+#if NUM_CORES > 1
+static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR;
+struct core_entry *__cores[NUM_CORES] IBSS_ATTR;
+#else
+struct core_entry __cores[NUM_CORES] IBSS_ATTR;
+#endif
+
+static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR;
+struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR;
+
+
+/** Internal functions **/
+
 /*---------------------------------------------------------------------------
- * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
- * from each operation or THREAD_NONE if nothing was awakened. Object owning
- * the queue must be locked first.
- *
- * INTERNAL: Intended for use by kernel objects and not for programs.
+ * Find an empty thread slot or NULL if none found. The slot returned will
+ * be locked on multicore.
  *---------------------------------------------------------------------------
  */
-unsigned int thread_queue_wake(struct thread_entry **list)
+static struct threadalloc
 {
-    unsigned result = THREAD_NONE;
+    threadbit_t avail;
+#if NUM_CORES > 1
+    struct corelock cl;
+#endif
+} threadalloc SHAREDBSS_ATTR;
+
+/*---------------------------------------------------------------------------
+ * Initialize the thread allocator
+ *---------------------------------------------------------------------------
+ */
+void thread_alloc_init(void)
+{
+    corelock_init(&threadalloc.cl);
 
-    for (;;)
+    for (unsigned int core = 0; core < NUM_CORES; core++)
     {
-        unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
+    #if NUM_CORES > 1
+        struct core_entry *c = &__core_entries[core];
+        __cores[core] = c;
+    #else
+        struct core_entry *c = &__cores[core];
+    #endif
+        rtr_queue_init(&c->rtr);
+        corelock_init(&c->rtr_cl);
+        tmo_queue_init(&c->tmo);
+        c->next_tmo_check = current_tick; /* Something not in the past */
+    }
 
-        if (rc == THREAD_NONE)
-            break; /* No more threads */
+    for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++)
+    {
+        struct thread_entry *t = &__thread_entries[slotnum];
+        __threads[slotnum] = t;
+        corelock_init(&t->waiter_cl);
+        corelock_init(&t->slot_cl);
+        t->id = THREAD_ID_INIT(slotnum);
+        threadbit_set_bit(&threadalloc.avail, slotnum);
+    }
+}
+
+/*---------------------------------------------------------------------------
+ * Allocate a thread slot
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry * thread_alloc(void)
+{
+    struct thread_entry *thread = NULL;
 
-        result |= rc;
+    corelock_lock(&threadalloc.cl);
+
+    unsigned int slotnum = threadbit_ffs(&threadalloc.avail);
+    if (slotnum < MAXTHREADS)
+    {
+        threadbit_clear_bit(&threadalloc.avail, slotnum);
+        thread = __threads[slotnum];
     }
 
+    corelock_unlock(&threadalloc.cl);
+
+    return thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Free the thread slot of 'thread'
+ *---------------------------------------------------------------------------
+ */
+void thread_free(struct thread_entry *thread)
+{
+    corelock_lock(&threadalloc.cl);
+    threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id));
+    corelock_unlock(&threadalloc.cl);
+}
+
+/*---------------------------------------------------------------------------
+ * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
+ *---------------------------------------------------------------------------
+ */
+void new_thread_id(struct thread_entry *thread)
+{
+    uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT);
+
+    /* If wrapped to 0, make it 1 */
+    if ((id & THREAD_ID_VERSION_MASK) == 0)
+        id |= (1u << THREAD_ID_VERSION_SHIFT);
+
+    thread->id = id;
+}
+
+/*---------------------------------------------------------------------------
+ * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
+ * from each operation or THREAD_NONE if nothing was awakened.
+ *---------------------------------------------------------------------------
+ */
+unsigned int wait_queue_wake(struct __wait_queue *wqp)
+{
+    unsigned result = THREAD_NONE;
+    struct thread_entry *thread;
+
+    while ((thread = WQ_THREAD_FIRST(wqp)))
+        result |= wakeup_thread(thread, WAKEUP_DEFAULT);
+
     return result;
 }
 
 
+/** Public functions **/
+
+#ifdef RB_PROFILE
+void profile_thread(void)
+{
+    profstart(THREAD_ID_SLOT(__running_self_entry()->id));
+}
+#endif
+
+/*---------------------------------------------------------------------------
+ * Return the thread id of the calling thread
+ * --------------------------------------------------------------------------
+ */
+unsigned int thread_self(void)
+{
+    return __running_self_entry()->id;
+}
+
+/*---------------------------------------------------------------------------
+ * Suspends a thread's execution for at least the specified number of ticks.
+ *
+ * May result in CPU core entering wait-for-interrupt mode if no other thread
+ * may be scheduled.
+ *
+ * NOTE: sleep(0) sleeps until the end of the current tick
+ *       sleep(n) that doesn't result in rescheduling:
+ *                      n <= ticks suspended < n + 1
+ *       n to n+1 is a lower bound. Other factors may affect the actual time
+ *       a thread is suspended before it runs again.
+ *---------------------------------------------------------------------------
+ */
+unsigned sleep(unsigned ticks)
+{
+    /* In certain situations, certain bootloaders in particular, a normal
+     * threading call is inappropriate. */
+    if (SLEEP_KERNEL_HOOK(ticks))
+        return 0; /* Handled */
+
+    disable_irq();
+    sleep_thread(ticks);
+    switch_thread();
+    return 0;
+}
+
+/*---------------------------------------------------------------------------
+ * Elects another thread to run or, if no other thread may be made ready to
+ * run, immediately returns control back to the calling thread.
+ *---------------------------------------------------------------------------
+ */
+void yield(void)
+{
+    /* In certain situations, certain bootloaders in particular, a normal
+     * threading call is inappropriate. */
+    if (YIELD_KERNEL_HOOK())
+        return; /* Handled */
+
+    switch_thread();
+}
+
+
 /** Debug screen stuff **/
 
+void format_thread_name(char *buf, size_t bufsize,
+                        const struct thread_entry *thread)
+{
+    const char *name = thread->name;
+    if (!name)
+        name = "";
+
+    const char *fmt = *name ? "%s" : "%s%08lX";
+    snprintf(buf, bufsize, fmt, name, thread->id);
+}
+
+#ifndef HAVE_SDL_THREADS
 /*---------------------------------------------------------------------------
- * returns the stack space used in bytes
+ * Returns the maximum percentage of the stack ever used during runtime.
  *---------------------------------------------------------------------------
  */
 static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
@@ -69,13 +252,9 @@ static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
 
     return usage;
 }
+#endif /* HAVE_SDL_THREADS */
 
 #if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Returns the maximum percentage of the core's idle stack ever used during
- * runtime.
- *---------------------------------------------------------------------------
- */
 int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
 {
     extern uintptr_t * const idle_stacks[NUM_CORES];
@@ -105,29 +284,29 @@ int thread_get_debug_info(unsigned int thread_id,
     if (!infop)
         return -1;
 
-    unsigned int slot = THREAD_ID_SLOT(thread_id);
-    if (slot >= MAXTHREADS)
+    unsigned int slotnum = THREAD_ID_SLOT(thread_id);
+    if (slotnum >= MAXTHREADS)
         return -1;
 
-    extern struct thread_entry threads[MAXTHREADS];
-    struct thread_entry *thread = &threads[slot];
+    struct thread_entry *thread = __thread_slot_entry(slotnum);
 
     int oldlevel = disable_irq_save();
-    LOCK_THREAD(thread);
+    corelock_lock(&threadalloc.cl);
+    corelock_lock(&thread->slot_cl);
 
     unsigned int state = thread->state;
 
-    if (state != STATE_KILLED)
-    {
-        const char *name = thread->name;
-        if (!name)
-            name = "";
+    int ret = 0;
 
+    if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0)
+    {
         bool cpu_boost = false;
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
         cpu_boost = thread->cpu_boost;
 #endif
+#ifndef HAVE_SDL_THREADS
         infop->stack_usage = stack_usage(thread->stack, thread->stack_size);
+#endif
 #if NUM_CORES > 1
         infop->core = thread->core;
 #endif
@@ -140,13 +319,13 @@ int thread_get_debug_info(unsigned int thread_id,
              cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
              status_chars[state]);
 
-        const char *fmt = *name ? "%s" : "%s%08lX";
-        snprintf(infop->name, sizeof (infop->name), fmt, name,
-                 thread->id);
+        format_thread_name(infop->name, sizeof (infop->name), thread);
+        ret = 1;
     }
 
-    UNLOCK_THREAD(thread);
+    corelock_unlock(&thread->slot_cl);
+    corelock_unlock(&threadalloc.cl);
     restore_irq(oldlevel);
 
-    return state == STATE_KILLED ? 0 : 1;
+    return ret;
 }
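
[Editor's note: as an aside on the ID scheme maintained by new_thread_id() above: a thread ID packs a slot number and a version, so a stale handle to a recycled slot stops comparing equal. Below is a self-contained sketch; the shift/mask constants and THREAD_ID_* macro definitions are assumptions for illustration (the real definitions live in thread-internal.h, which is not part of this file's diff).]

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: low byte = slot number, upper 24 bits = version
 * (matching the "0x00000100..0xffffff00" range in the comment above). */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xffffff00u
#define THREAD_ID_SLOT_MASK     0x000000ffu
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)

int main(void)
{
    uint32_t id = THREAD_ID_INIT(5);              /* slot 5, version 1 */

    /* Recycle the slot the way new_thread_id() does: bump the version,
     * skipping version 0 on wraparound. */
    uint32_t recycled = id + (1u << THREAD_ID_VERSION_SHIFT);
    if ((recycled & THREAD_ID_VERSION_MASK) == 0)
        recycled |= 1u << THREAD_ID_VERSION_SHIFT;

    /* The old handle no longer identifies the slot's new occupant. */
    printf("slot %u: old id %08x, new id %08x, stale match: %s\n",
           (unsigned)THREAD_ID_SLOT(id), (unsigned)id, (unsigned)recycled,
           id == recycled ? "yes" : "no");
    return 0;
}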