Diffstat (limited to 'firmware/target/hosted/sdl/thread-sdl.c')
-rw-r--r--  firmware/target/hosted/sdl/thread-sdl.c  429
1 file changed, 120 insertions(+), 309 deletions(-)
diff --git a/firmware/target/hosted/sdl/thread-sdl.c b/firmware/target/hosted/sdl/thread-sdl.c
index fda877e0f5..a76941f103 100644
--- a/firmware/target/hosted/sdl/thread-sdl.c
+++ b/firmware/target/hosted/sdl/thread-sdl.c
@@ -32,13 +32,13 @@
 #include "core_alloc.h"
 
 /* Define this as 1 to show informational messages that are not errors. */
-#define THREAD_SDL_DEBUGF_ENABLED 0
+#define THREAD_SDL_DEBUGF_ENABLED 1
 
 #if THREAD_SDL_DEBUGF_ENABLED
 #define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
-static char __name[32];
+static char __name[sizeof (((struct thread_debug_info *)0)->name)];
 #define THREAD_SDL_GET_NAME(thread) \
-    ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
+    ({ format_thread_name(__name, sizeof (__name), thread); __name; })
 #else
 #define THREAD_SDL_DEBUGF(...)
 #define THREAD_SDL_GET_NAME(thread)
@@ -47,9 +47,6 @@ static char __name[32];
 #define THREAD_PANICF(str...) \
     ({ fprintf(stderr, str); exit(-1); })
 
-/* Thread/core entries as in rockbox core */
-static struct core_entry cores[NUM_CORES];
-struct thread_entry threads[MAXTHREADS];
 /* Jump buffers for graceful exit - kernel threads don't stay neatly
  * in their start routines responding to messages so this is the only
  * way to get them back in there so they may exit */
@@ -74,7 +71,7 @@ void sim_thread_shutdown(void)
 
     /* Tell all threads jump back to their start routines, unlock and exit
        gracefully - we'll check each one in turn for it's status. Threads
-       _could_ terminate via remove_thread or multiple threads could exit
+       _could_ terminate via thread_exit or multiple threads could exit
        on each unlock but that is safe. */
 
     /* Do this before trying to acquire lock */
@@ -86,7 +83,7 @@ void sim_thread_shutdown(void)
     /* Signal all threads on delay or block */
     for (i = 0; i < MAXTHREADS; i++)
     {
-        struct thread_entry *thread = &threads[i];
+        struct thread_entry *thread = __thread_slot_entry(i);
         if (thread->context.s == NULL)
             continue;
         SDL_SemPost(thread->context.s);
@@ -95,7 +92,7 @@ void sim_thread_shutdown(void)
     /* Wait for all threads to finish and cleanup old ones. */
     for (i = 0; i < MAXTHREADS; i++)
     {
-        struct thread_entry *thread = &threads[i];
+        struct thread_entry *thread = __thread_slot_entry(i);
         SDL_Thread *t = thread->context.t;
 
         if (t != NULL)
@@ -111,11 +108,11 @@ void sim_thread_shutdown(void)
         }
         else
         {
-            /* Wait on any previous thread in this location-- could be one not quite
-             * finished exiting but has just unlocked the mutex. If it's NULL, the
-             * call returns immediately.
+            /* Wait on any previous thread in this location-- could be one not
+             * quite finished exiting but has just unlocked the mutex. If it's
+             * NULL, the call returns immediately.
              *
-             * See remove_thread below for more information. */
+             * See thread_exit below for more information. */
             SDL_WaitThread(thread->context.told, NULL);
         }
     }
@@ -126,103 +123,6 @@ void sim_thread_shutdown(void)
     threads_status = THREADS_EXIT_COMMAND_DONE;
 }
 
-static void new_thread_id(unsigned int slot_num,
-                          struct thread_entry *thread)
-{
-    unsigned int version =
-        (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
-        & THREAD_ID_VERSION_MASK;
-
-    if (version == 0)
-        version = 1u << THREAD_ID_VERSION_SHIFT;
-
-    thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
-}
-
-static struct thread_entry * find_empty_thread_slot(void)
-{
-    struct thread_entry *thread = NULL;
-    int n;
-
-    for (n = 0; n < MAXTHREADS; n++)
-    {
-        int state = threads[n].state;
-
-        if (state == STATE_KILLED)
-        {
-            thread = &threads[n];
-            break;
-        }
-    }
-
-    return thread;
-}
-
-
-/* Initialize SDL threading */
-void init_threads(void)
-{
-    static uintptr_t main_stack[] = { DEADBEEF, 0 };
-    struct thread_entry *thread;
-    int n;
-
-    memset(cores, 0, sizeof(cores));
-    memset(threads, 0, sizeof(threads));
-
-    m = SDL_CreateMutex();
-
-    if (SDL_LockMutex(m) == -1)
-    {
-        fprintf(stderr, "Couldn't lock mutex\n");
-        return;
-    }
-
-    /* Initialize all IDs */
-    for (n = 0; n < MAXTHREADS; n++)
-        threads[n].id = THREAD_ID_INIT(n);
-
-    /* Slot 0 is reserved for the main thread - initialize it here and
-       then create the SDL thread - it is possible to have a quick, early
-       shutdown try to access the structure. */
-    thread = &threads[0];
-    thread->stack = main_stack;
-    thread->stack_size = sizeof (main_stack);
-    thread->name = "main";
-    thread->state = STATE_RUNNING;
-    thread->context.s = SDL_CreateSemaphore(0);
-    thread->context.t = NULL; /* NULL for the implicit main thread */
-    cores[CURRENT_CORE].running = thread;
-
-    if (thread->context.s == NULL)
-    {
-        fprintf(stderr, "Failed to create main semaphore\n");
-        return;
-    }
-
-    /* Tell all threads jump back to their start routines, unlock and exit
-       gracefully - we'll check each one in turn for it's status. Threads
-       _could_ terminate via remove_thread or multiple threads could exit
-       on each unlock but that is safe. */
-
-    /* Setup jump for exit */
-    if (setjmp(thread_jmpbufs[0]) == 0)
-    {
-        THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
-        return;
-    }
-
-    SDL_UnlockMutex(m);
-
-    /* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
-    while (threads_status < THREADS_EXIT_COMMAND_DONE)
-        SDL_Delay(10);
-
-    SDL_DestroyMutex(m);
-
-    /* We're the main thead - perform exit - doesn't return. */
-    sim_do_exit();
-}
-
 void sim_thread_exception_wait(void)
 {
     while (1)
@@ -237,7 +137,7 @@ void sim_thread_exception_wait(void)
 void sim_thread_lock(void *me)
 {
     SDL_LockMutex(m);
-    cores[CURRENT_CORE].running = (struct thread_entry *)me;
+    __running_self_entry() = (struct thread_entry *)me;
 
     if (threads_status != THREADS_RUN)
         thread_exit();
@@ -245,70 +145,14 @@ void sim_thread_lock(void *me)
 
 void * sim_thread_unlock(void)
 {
-    struct thread_entry *current = cores[CURRENT_CORE].running;
+    struct thread_entry *current = __running_self_entry();
     SDL_UnlockMutex(m);
     return current;
 }
 
-struct thread_entry * thread_id_entry(unsigned int thread_id)
-{
-    return &threads[thread_id & THREAD_ID_SLOT_MASK];
-}
-
-static void add_to_list_l(struct thread_entry **list,
-                          struct thread_entry *thread)
-{
-    if (*list == NULL)
-    {
-        /* Insert into unoccupied list */
-        thread->l.next = thread;
-        thread->l.prev = thread;
-        *list = thread;
-    }
-    else
-    {
-        /* Insert last */
-        thread->l.next = *list;
-        thread->l.prev = (*list)->l.prev;
-        thread->l.prev->l.next = thread;
-        (*list)->l.prev = thread;
-    }
-}
-
-static void remove_from_list_l(struct thread_entry **list,
-                               struct thread_entry *thread)
-{
-    if (thread == thread->l.next)
-    {
-        /* The only item */
-        *list = NULL;
-        return;
-    }
-
-    if (thread == *list)
-    {
-        /* List becomes next item */
-        *list = thread->l.next;
-    }
-
-    /* Fix links to jump over the removed entry. */
-    thread->l.prev->l.next = thread->l.next;
-    thread->l.next->l.prev = thread->l.prev;
-}
-
-unsigned int thread_self(void)
-{
-    return cores[CURRENT_CORE].running->id;
-}
-
-struct thread_entry* thread_self_entry(void)
-{
-    return cores[CURRENT_CORE].running;
-}
-
 void switch_thread(void)
 {
-    struct thread_entry *current = cores[CURRENT_CORE].running;
+    struct thread_entry *current = __running_self_entry();
 
     enable_irq();
 
@@ -346,17 +190,7 @@ void switch_thread(void)
 
     oldlevel = disable_irq_save();
 
-    if (current->state == STATE_BLOCKED_W_TMO)
-    {
-        /* Timed out */
-        remove_from_list_l(current->bqp, current);
-
-#ifdef HAVE_WAKEUP_EXT_CB
-        if (current->wakeup_ext_cb != NULL)
-            current->wakeup_ext_cb(current);
-#endif
-        current->state = STATE_RUNNING;
-    }
+    current->state = STATE_RUNNING;
 
     if (result == SDL_MUTEX_TIMEDOUT)
     {
@@ -384,7 +218,7 @@ void switch_thread(void)
 #ifdef DEBUG
     core_check_valid();
 #endif
-    cores[CURRENT_CORE].running = current;
+    __running_self_entry() = current;
 
     if (threads_status != THREADS_RUN)
         thread_exit();
@@ -392,7 +226,7 @@ void switch_thread(void)
 
 void sleep_thread(int ticks)
 {
-    struct thread_entry *current = cores[CURRENT_CORE].running;
+    struct thread_entry *current = __running_self_entry();
     int rem;
 
     current->state = STATE_SLEEPING;
@@ -404,7 +238,7 @@ void sleep_thread(int ticks)
     current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
 }
 
-void block_thread(struct thread_entry *current, int ticks)
+void block_thread_(struct thread_entry *current, int ticks)
 {
     if (ticks < 0)
         current->state = STATE_BLOCKED;
@@ -414,24 +248,19 @@ void block_thread(struct thread_entry *current, int ticks)
         current->tmo_tick = (1000/HZ)*ticks;
     }
 
-    add_to_list_l(current->bqp, current);
+    wait_queue_register(current);
 }
 
-unsigned int wakeup_thread_(struct thread_entry **list)
+unsigned int wakeup_thread_(struct thread_entry *thread)
 {
-    struct thread_entry *thread = *list;
-
-    if (thread != NULL)
+    switch (thread->state)
     {
-        switch (thread->state)
-        {
-        case STATE_BLOCKED:
-        case STATE_BLOCKED_W_TMO:
-            remove_from_list_l(list, thread);
-            thread->state = STATE_RUNNING;
-            SDL_SemPost(thread->context.s);
-            return THREAD_OK;
-        }
+    case STATE_BLOCKED:
+    case STATE_BLOCKED_W_TMO:
+        wait_queue_remove(thread);
+        thread->state = STATE_RUNNING;
+        SDL_SemPost(thread->context.s);
+        return THREAD_OK;
     }
 
     return THREAD_NONE;
@@ -439,7 +268,7 @@ unsigned int wakeup_thread_(struct thread_entry **list)
 
 void thread_thaw(unsigned int thread_id)
 {
-    struct thread_entry *thread = thread_id_entry(thread_id);
+    struct thread_entry *thread = __thread_id_entry(thread_id);
 
     if (thread->id == thread_id && thread->state == STATE_FROZEN)
     {
@@ -450,15 +279,14 @@ void thread_thaw(unsigned int thread_id)
 
 int runthread(void *data)
 {
-    struct thread_entry *current;
-    jmp_buf *current_jmpbuf;
-
     /* Cannot access thread variables before locking the mutex as the
        data structures may not be filled-in yet. */
     SDL_LockMutex(m);
-    cores[CURRENT_CORE].running = (struct thread_entry *)data;
-    current = cores[CURRENT_CORE].running;
-    current_jmpbuf = &thread_jmpbufs[current - threads];
+
+    struct thread_entry *current = (struct thread_entry *)data;
+    __running_self_entry() = current;
+
+    jmp_buf *current_jmpbuf = &thread_jmpbufs[THREAD_ID_SLOT(current->id)];
 
     /* Setup jump for exit */
     if (setjmp(*current_jmpbuf) == 0)
@@ -469,14 +297,15 @@ int runthread(void *data)
         SDL_UnlockMutex(m);
         SDL_SemWait(current->context.s);
         SDL_LockMutex(m);
-        cores[CURRENT_CORE].running = current;
+        __running_self_entry() = current;
     }
 
     if (threads_status == THREADS_RUN)
     {
         current->context.start();
         THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
-                          current - threads, THREAD_SDL_GET_NAME(current));
+                          THREAD_ID_SLOT(current->id),
+                          THREAD_SDL_GET_NAME(current));
         /* Thread routine returned - suicide */
     }
 
@@ -495,27 +324,23 @@ unsigned int create_thread(void (*function)(void),
                            void* stack, size_t stack_size,
                            unsigned flags, const char *name)
 {
-    struct thread_entry *thread;
-    SDL_Thread* t;
-    SDL_sem *s;
-
     THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");
 
-    thread = find_empty_thread_slot();
+    struct thread_entry *thread = thread_alloc();
     if (thread == NULL)
     {
         DEBUGF("Failed to find thread slot\n");
         return 0;
     }
 
-    s = SDL_CreateSemaphore(0);
+    SDL_sem *s = SDL_CreateSemaphore(0);
     if (s == NULL)
     {
         DEBUGF("Failed to create semaphore\n");
         return 0;
     }
 
-    t = SDL_CreateThread(runthread, thread);
+    SDL_Thread *t = SDL_CreateThread(runthread, thread);
     if (t == NULL)
     {
         DEBUGF("Failed to create SDL thread\n");
@@ -523,12 +348,6 @@ unsigned int create_thread(void (*function)(void),
         return 0;
     }
 
-    unsigned int stack_words = stack_size / sizeof (uintptr_t);
-    for (unsigned int i = stack_words; i-- > 0;)
-        ((uintptr_t *)stack)[i] = DEADBEEF;
-
-    thread->stack = stack;
-    thread->stack_size = stack_size;
     thread->name = name;
     thread->state = (flags & CREATE_THREAD_FROZEN) ?
                     STATE_FROZEN : STATE_RUNNING;
@@ -536,27 +355,22 @@ unsigned int create_thread(void (*function)(void),
     thread->context.t = t;
     thread->context.s = s;
 
-    THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
-                      thread - threads, THREAD_SDL_GET_NAME(thread));
+    THREAD_SDL_DEBUGF("New Thread: %lu (%s)\n",
+                      (unsigned long)thread->id,
+                      THREAD_SDL_GET_NAME(thread));
 
     return thread->id;
+    (void)stack; (void)stack_size;
 }
 
-static void remove_thread(unsigned int thread_id)
+void thread_exit(void)
 {
-    struct thread_entry *current = cores[CURRENT_CORE].running;
-    struct thread_entry *thread = thread_id_entry(thread_id);
-
-    SDL_Thread *t;
-    SDL_sem *s;
-
-    if (thread->id != thread_id)
-        return;
+    struct thread_entry *current = __running_self_entry();
 
     int oldlevel = disable_irq_save();
 
-    t = thread->context.t;
-    s = thread->context.s;
+    SDL_Thread *t = current->context.t;
+    SDL_sem *s = current->context.s;
 
     /* Wait the last thread here and keep this one or SDL will leak it since
      * it doesn't free its own library allocations unless a wait is performed.
@@ -566,59 +380,27 @@ static void remove_thread(unsigned int thread_id)
      *
      * However, see more below about SDL_KillThread.
      */
-    SDL_WaitThread(thread->context.told, NULL);
+    SDL_WaitThread(current->context.told, NULL);
 
-    thread->context.t = NULL;
-    thread->context.s = NULL;
-    thread->context.told = t;
+    current->context.t = NULL;
+    current->context.s = NULL;
+    current->context.told = t;
 
-    if (thread != current)
-    {
-        switch (thread->state)
-        {
-        case STATE_BLOCKED:
-        case STATE_BLOCKED_W_TMO:
-            /* Remove thread from object it's waiting on */
-            remove_from_list_l(thread->bqp, thread);
-
-#ifdef HAVE_WAKEUP_EXT_CB
-            if (thread->wakeup_ext_cb != NULL)
-                thread->wakeup_ext_cb(thread);
-#endif
-            break;
-        }
-
-        SDL_SemPost(s);
-    }
-
-    THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
-                      thread - threads, THREAD_SDL_GET_NAME(thread));
-
-    new_thread_id(thread->id, thread);
-    thread->state = STATE_KILLED;
-    thread_queue_wake(&thread->queue);
+    unsigned int id = current->id;
+    new_thread_id(current);
+    current->state = STATE_KILLED;
+    wait_queue_wake(&current->queue);
 
     SDL_DestroySemaphore(s);
 
-    if (thread == current)
-    {
-        /* Do a graceful exit - perform the longjmp back into the thread
-           function to return */
-        restore_irq(oldlevel);
-        longjmp(thread_jmpbufs[current - threads], 1);
-    }
-
-    /* SDL_KillThread frees the old pointer too because it uses SDL_WaitThread
-     * to wait for the host to remove it. */
-    thread->context.told = NULL;
-    SDL_KillThread(t);
+    /* Do a graceful exit - perform the longjmp back into the thread
+       function to return */
     restore_irq(oldlevel);
-}
 
-void thread_exit(void)
-{
-    unsigned int id = thread_self();
-    remove_thread(id);
+    thread_free(current);
+
+    longjmp(thread_jmpbufs[THREAD_ID_SLOT(id)], 1);
+
     /* This should never and must never be reached - if it is, the
      * state is corrupted */
     THREAD_PANICF("thread_exit->K:*R (ID: %d)", id);
@@ -627,44 +409,73 @@ void thread_exit(void)
 
 void thread_wait(unsigned int thread_id)
 {
-    struct thread_entry *current = cores[CURRENT_CORE].running;
-    struct thread_entry *thread = thread_id_entry(thread_id);
+    struct thread_entry *current = __running_self_entry();
+    struct thread_entry *thread = __thread_id_entry(thread_id);
 
     if (thread->id == thread_id && thread->state != STATE_KILLED)
     {
-        current->bqp = &thread->queue;
-        block_thread(current, TIMEOUT_BLOCK);
+        block_thread(current, TIMEOUT_BLOCK, &thread->queue);
         switch_thread();
     }
 }
 
-/*---------------------------------------------------------------------------
- * Suspends a thread's execution for at least the specified number of ticks.
- *
- * May result in CPU core entering wait-for-interrupt mode if no other thread
- * may be scheduled.
- *
- * NOTE: sleep(0) sleeps until the end of the current tick
- *       sleep(n) that doesn't result in rescheduling:
- *                n <= ticks suspended < n + 1
- *       n to n+1 is a lower bound. Other factors may affect the actual time
- *       a thread is suspended before it runs again.
- *---------------------------------------------------------------------------
- */
-unsigned sleep(unsigned ticks)
+/* Initialize SDL threading */
+void init_threads(void)
 {
-    disable_irq();
-    sleep_thread(ticks);
-    switch_thread();
-    return 0;
-}
+    m = SDL_CreateMutex();
 
-/*---------------------------------------------------------------------------
- * Elects another thread to run or, if no other thread may be made ready to
- * run, immediately returns control back to the calling thread.
- *---------------------------------------------------------------------------
- */
-void yield(void)
-{
-    switch_thread();
+    if (SDL_LockMutex(m) == -1)
+    {
+        fprintf(stderr, "Couldn't lock mutex\n");
+        return;
+    }
+
+    thread_alloc_init();
+
+    struct thread_entry *thread = thread_alloc();
+    if (thread == NULL)
+    {
+        fprintf(stderr, "Main thread alloc failed\n");
+        return;
+    }
+
+    /* Slot 0 is reserved for the main thread - initialize it here and
+       then create the SDL thread - it is possible to have a quick, early
+       shutdown try to access the structure. */
+    thread->name = __main_thread_name;
+    thread->state = STATE_RUNNING;
+    thread->context.s = SDL_CreateSemaphore(0);
+    thread->context.t = NULL; /* NULL for the implicit main thread */
+    __running_self_entry() = thread;
+
+    if (thread->context.s == NULL)
+    {
+        fprintf(stderr, "Failed to create main semaphore\n");
+        return;
+    }
+
+    /* Tell all threads jump back to their start routines, unlock and exit
+       gracefully - we'll check each one in turn for it's status. Threads
+       _could_ terminate via thread_exit or multiple threads could exit
+       on each unlock but that is safe. */
+
+    /* Setup jump for exit */
+    if (setjmp(thread_jmpbufs[THREAD_ID_SLOT(thread->id)]) == 0)
+    {
+        THREAD_SDL_DEBUGF("Main Thread: %lu (%s)\n",
+                          (unsigned long)thread->id,
+                          THREAD_SDL_GET_NAME(thread));
+        return;
+    }
+
+    SDL_UnlockMutex(m);
+
+    /* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
+    while (threads_status < THREADS_EXIT_COMMAND_DONE)
+        SDL_Delay(10);
+
+    SDL_DestroyMutex(m);
+
+    /* We're the main thead - perform exit - doesn't return. */
+    sim_do_exit();
 }