author     Michael Sevakis <jethead71@rockbox.org>    2008-03-25 02:34:12 +0000
committer  Michael Sevakis <jethead71@rockbox.org>    2008-03-25 02:34:12 +0000
commit     27cf67733936abd75fcb1f8da765977cd75906ee (patch)
tree       f894211a8a0c77b402dd3250b2bee2d17dcfe13f /uisimulator
parent     bc2f8fd8f38a3e010cd67bbac358f6e9991153c6 (diff)
download   rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.tar.gz
           rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.zip
Add a complete priority inheritance implementation to the scheduler (all mutex ownership and queue_send calls are inheritable). Priorities are differential, so dispatch depends on the runnable range of priorities; codec priority can therefore be raised in small steps (pcmbuf updated to enable this). Simplify the kernel functions to ease implementation and use the same kernel.c for both sim and target (I'm tired of maintaining two ;_).

1) Not sure whether a minor audio break at the first buffering will still occur on large-sector disks (the main mutex speed issue was genuinely resolved earlier). At this point it is best dealt with at the buffering level; it seems a larger file chunk could be used again.

2) 64-bit sims may have some minor (finicky) issues, but rolling back the code of concern there is a five-minute job.

All kernel objects become incompatible, so a full rebuild and update is needed.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16791 a1c6a512-1295-4272-9138-f99709370657
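
As a concept illustration only (not code from this commit), the sketch below shows the basic priority-inheritance idea the message above describes: while a higher-priority task is blocked on a mutex, the owner temporarily runs at the blocker's priority so it cannot be held up indefinitely by mid-priority work. All names (toy_thread, toy_mutex, the codec/ui threads) are invented for the example, and it assumes the lower-number-means-higher-priority convention.

/* Illustrative sketch only -- NOT the Rockbox scheduler code from this
 * commit.  It shows the bookkeeping behind priority inheritance: while a
 * higher-priority task blocks on a mutex, the owner temporarily runs at
 * the blocker's (numerically lower) priority. */
#include <stdio.h>

struct toy_thread {
    const char *name;
    int base_priority;      /* priority it was created with        */
    int current_priority;   /* may be boosted while owning a mutex */
};

struct toy_mutex {
    struct toy_thread *owner;
};

/* Lower number == higher priority. */
static void toy_mutex_lock(struct toy_mutex *m, struct toy_thread *t)
{
    if (m->owner == NULL) {
        m->owner = t;
        return;
    }
    /* Caller would block here; donate its priority to the owner if that helps. */
    if (t->current_priority < m->owner->current_priority) {
        printf("%s inherits priority %d from %s\n",
               m->owner->name, t->current_priority, t->name);
        m->owner->current_priority = t->current_priority;
    }
}

static void toy_mutex_unlock(struct toy_mutex *m)
{
    /* Drop any inherited boost; the next owner would be chosen here. */
    m->owner->current_priority = m->owner->base_priority;
    m->owner = NULL;
}

int main(void)
{
    struct toy_thread codec = { "codec", 19, 19 };
    struct toy_thread ui    = { "ui",    10, 10 };
    struct toy_mutex mtx    = { NULL };

    toy_mutex_lock(&mtx, &codec);   /* codec owns the mutex           */
    toy_mutex_lock(&mtx, &ui);      /* ui blocks -> codec is boosted  */
    toy_mutex_unlock(&mtx);         /* codec reverts to base priority */
    printf("codec back at priority %d\n", codec.current_priority);
    return 0;
}

Compiled and run, it prints the inheritance hand-off when the ui thread blocks and the owner's restored base priority after unlock.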
Diffstat (limited to 'uisimulator')
-rw-r--r--  uisimulator/sdl/SOURCES         2
-rw-r--r--  uisimulator/sdl/kernel-sdl.c  168
-rw-r--r--  uisimulator/sdl/kernel.c      739
-rw-r--r--  uisimulator/sdl/system-sdl.h    2
-rw-r--r--  uisimulator/sdl/thread-sdl.c  372
-rw-r--r--  uisimulator/sdl/uisdl.c        38
6 files changed, 383 insertions, 938 deletions
diff --git a/uisimulator/sdl/SOURCES b/uisimulator/sdl/SOURCES
index 7971c57163..1d5b498248 100644
--- a/uisimulator/sdl/SOURCES
+++ b/uisimulator/sdl/SOURCES
@@ -1,5 +1,5 @@
 button.c
-kernel.c
+kernel-sdl.c
 #ifdef HAVE_LCD_BITMAP
 lcd-bitmap.c
 #elif defined(HAVE_LCD_CHARCELLS)
diff --git a/uisimulator/sdl/kernel-sdl.c b/uisimulator/sdl/kernel-sdl.c
new file mode 100644
index 0000000000..b6e6a34551
--- /dev/null
+++ b/uisimulator/sdl/kernel-sdl.c
@@ -0,0 +1,168 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Felix Arends
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include <stdlib.h>
+#include <SDL.h>
+#include <SDL_thread.h>
+#include "memory.h"
+#include "system-sdl.h"
+#include "uisdl.h"
+#include "kernel.h"
+#include "thread-sdl.h"
+#include "thread.h"
+#include "debug.h"
+
+static SDL_TimerID tick_timer_id;
+long start_tick;
+
+/* Condition to signal that "interrupts" may proceed */
+static SDL_cond *sim_thread_cond;
+/* Mutex to serialize changing levels and exclude other threads while
+ * inside a handler */
+static SDL_mutex *sim_irq_mtx;
+static int interrupt_level = HIGHEST_IRQ_LEVEL;
+static int handlers_pending = 0;
+static int status_reg = 0;
+
+extern void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
+
+/* Nescessary logic:
+ * 1) All threads must pass unblocked
+ * 2) Current handler must always pass unblocked
+ * 3) Threads must be excluded when irq routine is running
+ * 4) No more than one handler routine should execute at a time
+ */
+int set_irq_level(int level)
+{
+    SDL_LockMutex(sim_irq_mtx);
+
+    int oldlevel = interrupt_level;
+
+    if (status_reg == 0 && level == 0 && oldlevel != 0)
+    {
+        /* Not in a handler and "interrupts" are being reenabled */
+        if (handlers_pending > 0)
+            SDL_CondSignal(sim_thread_cond);
+    }
+
+    interrupt_level = level; /* save new level */
+
+    SDL_UnlockMutex(sim_irq_mtx);
+    return oldlevel;
+}
+
+void sim_enter_irq_handler(void)
+{
+    SDL_LockMutex(sim_irq_mtx);
+    handlers_pending++;
+
+    if(interrupt_level != 0)
+    {
+        /* "Interrupts" are disabled. Wait for reenable */
+        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
+    }
+
+    status_reg = 1;
+}
+
+void sim_exit_irq_handler(void)
+{
+    if (--handlers_pending > 0)
+        SDL_CondSignal(sim_thread_cond);
+
+    status_reg = 0;
+    SDL_UnlockMutex(sim_irq_mtx);
+}
+
+bool sim_kernel_init(void)
+{
+    sim_irq_mtx = SDL_CreateMutex();
+    if (sim_irq_mtx == NULL)
+    {
+        fprintf(stderr, "Cannot create sim_handler_mtx\n");
+        return false;
+    }
+
+    sim_thread_cond = SDL_CreateCond();
+    if (sim_thread_cond == NULL)
+    {
+        fprintf(stderr, "Cannot create sim_thread_cond\n");
+        return false;
+    }
+
+    return true;
+}
+
+void sim_kernel_shutdown(void)
+{
+    SDL_RemoveTimer(tick_timer_id);
+    SDL_DestroyMutex(sim_irq_mtx);
+    SDL_DestroyCond(sim_thread_cond);
+}
+
+Uint32 tick_timer(Uint32 interval, void *param)
+{
+    long new_tick;
+
+    (void) interval;
+    (void) param;
+
+    new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
+
+    if(new_tick != current_tick)
+    {
+        long t;
+        for(t = new_tick - current_tick; t > 0; t--)
+        {
+            int i;
+
+            sim_enter_irq_handler();
+
+            /* Run through the list of tick tasks */
+            for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+            {
+                if(tick_funcs[i])
+                {
+                    tick_funcs[i]();
+                }
+            }
+
+            sim_exit_irq_handler();
+        }
+
+        current_tick = new_tick;
+    }
+
+    return 1;
+}
+
+void tick_start(unsigned int interval_in_ms)
+{
+    if (tick_timer_id != NULL)
+    {
+        SDL_RemoveTimer(tick_timer_id);
+        tick_timer_id = NULL;
+    }
+    else
+    {
+        start_tick = SDL_GetTicks();
+    }
+
+    tick_timer_id = SDL_AddTimer(interval_in_ms, tick_timer, NULL);
+}
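
The "Nescessary logic" block above is easier to follow with a small usage sketch. The snippet below is illustrative only and not part of the commit: set_irq_level() is stubbed so it compiles standalone, and example_tick_task/example_critical_section are invented names. It shows the pattern the simulator code in this diff relies on: tick tasks run between sim_enter_irq_handler() and sim_exit_irq_handler(), while thread-side code masks the simulated interrupt around shared state, as the queue_*() and tick_add_task() functions in the kernel.c removed below do.

/* Illustrative sketch, not part of the commit: shows the locking pattern
 * the simulator code relies on.  set_irq_level() is a trivial stub here so
 * the example builds standalone; in the simulator it is the SDL-mutex-based
 * version defined in kernel-sdl.c above. */
#include <stdio.h>

#define HIGHEST_IRQ_LEVEL 1   /* assumed value for this standalone example */

static int interrupt_level = HIGHEST_IRQ_LEVEL;
static volatile long shared_counter;

/* Stand-in for the simulator's set_irq_level(). */
static int set_irq_level(int level)
{
    int oldlevel = interrupt_level;
    interrupt_level = level;
    return oldlevel;
}

/* Would run from tick_timer() between sim_enter_irq_handler() and
 * sim_exit_irq_handler(), so it must never block. */
static void example_tick_task(void)
{
    shared_counter++;
}

/* Thread-side code excludes the "interrupt" by raising the level for the
 * duration of the critical section. */
static long example_critical_section(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    long snapshot = shared_counter;   /* protected access */
    set_irq_level(oldlevel);
    return snapshot;
}

int main(void)
{
    set_irq_level(0);                 /* "enable interrupts" */
    example_tick_task();              /* pretend a tick fired */
    printf("counter = %ld\n", example_critical_section());
    return 0;
}
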
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
deleted file mode 100644
index d67fb2b9f1..0000000000
--- a/uisimulator/sdl/kernel.c
+++ /dev/null
@@ -1,739 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Felix Arends
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19
20#include <stdlib.h>
21#include <SDL.h>
22#include <SDL_thread.h>
23#include "memory.h"
24#include "system-sdl.h"
25#include "uisdl.h"
26#include "kernel.h"
27#include "thread-sdl.h"
28#include "thread.h"
29#include "debug.h"
30
31/* Condition to signal that "interrupts" may proceed */
32static SDL_cond *sim_thread_cond;
33/* Mutex to serialize changing levels and exclude other threads while
34 * inside a handler */
35static SDL_mutex *sim_irq_mtx;
36static int interrupt_level = HIGHEST_IRQ_LEVEL;
37static int handlers_pending = 0;
38static int status_reg = 0;
39
40extern struct core_entry cores[NUM_CORES];
41
42/* Nescessary logic:
43 * 1) All threads must pass unblocked
44 * 2) Current handler must always pass unblocked
45 * 3) Threads must be excluded when irq routine is running
46 * 4) No more than one handler routine should execute at a time
47 */
48int set_irq_level(int level)
49{
50 SDL_LockMutex(sim_irq_mtx);
51
52 int oldlevel = interrupt_level;
53
54 if (status_reg == 0 && level == 0 && oldlevel != 0)
55 {
56 /* Not in a handler and "interrupts" are being reenabled */
57 if (handlers_pending > 0)
58 SDL_CondSignal(sim_thread_cond);
59 }
60
61 interrupt_level = level; /* save new level */
62
63 SDL_UnlockMutex(sim_irq_mtx);
64 return oldlevel;
65}
66
67void sim_enter_irq_handler(void)
68{
69 SDL_LockMutex(sim_irq_mtx);
70 handlers_pending++;
71
72 if(interrupt_level != 0)
73 {
74 /* "Interrupts" are disabled. Wait for reenable */
75 SDL_CondWait(sim_thread_cond, sim_irq_mtx);
76 }
77
78 status_reg = 1;
79}
80
81void sim_exit_irq_handler(void)
82{
83 if (--handlers_pending > 0)
84 SDL_CondSignal(sim_thread_cond);
85
86 status_reg = 0;
87 SDL_UnlockMutex(sim_irq_mtx);
88}
89
90bool sim_kernel_init(void)
91{
92 sim_irq_mtx = SDL_CreateMutex();
93 if (sim_irq_mtx == NULL)
94 {
95 fprintf(stderr, "Cannot create sim_handler_mtx\n");
96 return false;
97 }
98
99 sim_thread_cond = SDL_CreateCond();
100 if (sim_thread_cond == NULL)
101 {
102 fprintf(stderr, "Cannot create sim_thread_cond\n");
103 return false;
104 }
105
106 return true;
107}
108
109void sim_kernel_shutdown(void)
110{
111 SDL_DestroyMutex(sim_irq_mtx);
112 SDL_DestroyCond(sim_thread_cond);
113}
114
115volatile long current_tick = 0;
116static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
117
118/* This array holds all queues that are initiated. It is used for broadcast. */
119static struct event_queue *all_queues[MAX_NUM_QUEUES];
120static int num_queues = 0;
121
122#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
123/* Moves waiting thread's descriptor to the current sender when a
124 message is dequeued */
125static void queue_fetch_sender(struct queue_sender_list *send,
126 unsigned int i)
127{
128 struct thread_entry **spp = &send->senders[i];
129
130 if(*spp)
131 {
132 send->curr_sender = *spp;
133 *spp = NULL;
134 }
135}
136
137/* Puts the specified return value in the waiting thread's return value
138 and wakes the thread - a sender should be confirmed to exist first */
139static void queue_release_sender(struct thread_entry **sender,
140 intptr_t retval)
141{
142 (*sender)->retval = retval;
143 wakeup_thread_no_listlock(sender);
144 if(*sender != NULL)
145 {
146 fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
147 exit(-1);
148 }
149}
150
151/* Releases any waiting threads that are queued with queue_send -
152 reply with NULL */
153static void queue_release_all_senders(struct event_queue *q)
154{
155 if(q->send)
156 {
157 unsigned int i;
158 for(i = q->read; i != q->write; i++)
159 {
160 struct thread_entry **spp =
161 &q->send->senders[i & QUEUE_LENGTH_MASK];
162 if(*spp)
163 {
164 queue_release_sender(spp, 0);
165 }
166 }
167 }
168}
169
170/* Enables queue_send on the specified queue - caller allocates the extra
171 data structure */
172void queue_enable_queue_send(struct event_queue *q,
173 struct queue_sender_list *send)
174{
175 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
176 q->send = NULL;
177 if(send)
178 {
179 q->send = send;
180 memset(send, 0, sizeof(*send));
181 }
182 set_irq_level(oldlevel);
183}
184#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
185
186void queue_init(struct event_queue *q, bool register_queue)
187{
188 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
189
190 q->read = 0;
191 q->write = 0;
192 thread_queue_init(&q->queue);
193#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
194 q->send = NULL; /* No message sending by default */
195#endif
196
197 if(register_queue)
198 {
199 if(num_queues >= MAX_NUM_QUEUES)
200 {
201 fprintf(stderr, "queue_init->out of queues");
202 exit(-1);
203 }
204 /* Add it to the all_queues array */
205 all_queues[num_queues++] = q;
206 }
207
208 set_irq_level(oldlevel);
209}
210
211void queue_delete(struct event_queue *q)
212{
213 int i;
214 bool found = false;
215
216 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
217
218 /* Find the queue to be deleted */
219 for(i = 0;i < num_queues;i++)
220 {
221 if(all_queues[i] == q)
222 {
223 found = true;
224 break;
225 }
226 }
227
228 if(found)
229 {
230 /* Move the following queues up in the list */
231 for(;i < num_queues-1;i++)
232 {
233 all_queues[i] = all_queues[i+1];
234 }
235
236 num_queues--;
237 }
238
239 /* Release threads waiting on queue head */
240 thread_queue_wake(&q->queue);
241
242#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
243 /* Release waiting threads and reply to any dequeued message
244 waiting for one. */
245 queue_release_all_senders(q);
246 queue_reply(q, 0);
247#endif
248
249 q->read = 0;
250 q->write = 0;
251
252 set_irq_level(oldlevel);
253}
254
255void queue_wait(struct event_queue *q, struct queue_event *ev)
256{
257 unsigned int rd;
258 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
259
260#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
261 if (q->send && q->send->curr_sender)
262 {
263 /* auto-reply */
264 queue_release_sender(&q->send->curr_sender, 0);
265 }
266#endif
267
268 if (q->read == q->write)
269 {
270 do
271 {
272 block_thread(&q->queue);
273 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
274 }
275 while (q->read == q->write);
276 }
277
278 rd = q->read++ & QUEUE_LENGTH_MASK;
279 *ev = q->events[rd];
280
281#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
282 if(q->send && q->send->senders[rd])
283 {
284 /* Get data for a waiting thread if one */
285 queue_fetch_sender(q->send, rd);
286 }
287#endif
288
289 set_irq_level(oldlevel);
290}
291
292void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
293{
294 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
295
296#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
297 if (q->send && q->send->curr_sender)
298 {
299 /* auto-reply */
300 queue_release_sender(&q->send->curr_sender, 0);
301 }
302#endif
303
304 if (q->read == q->write && ticks > 0)
305 {
306 block_thread_w_tmo(&q->queue, ticks);
307 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
308 }
309
310 if(q->read != q->write)
311 {
312 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
313 *ev = q->events[rd];
314
315#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
316 if(q->send && q->send->senders[rd])
317 {
318 /* Get data for a waiting thread if one */
319 queue_fetch_sender(q->send, rd);
320 }
321#endif
322 }
323 else
324 {
325 ev->id = SYS_TIMEOUT;
326 }
327
328 set_irq_level(oldlevel);
329}
330
331void queue_post(struct event_queue *q, long id, intptr_t data)
332{
333 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
334
335 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
336
337 q->events[wr].id = id;
338 q->events[wr].data = data;
339
340#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
341 if(q->send)
342 {
343 struct thread_entry **spp = &q->send->senders[wr];
344
345 if(*spp)
346 {
347 /* overflow protect - unblock any thread waiting at this index */
348 queue_release_sender(spp, 0);
349 }
350 }
351#endif
352
353 wakeup_thread(&q->queue);
354
355 set_irq_level(oldlevel);
356}
357
358#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
359intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
360{
361 int oldlevel = set_irq_level(oldlevel);
362
363 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
364
365 q->events[wr].id = id;
366 q->events[wr].data = data;
367
368 if(q->send)
369 {
370 struct thread_entry **spp = &q->send->senders[wr];
371
372 if(*spp)
373 {
374 /* overflow protect - unblock any thread waiting at this index */
375 queue_release_sender(spp, 0);
376 }
377
378 wakeup_thread(&q->queue);
379
380 block_thread_no_listlock(spp);
381 return thread_get_current()->retval;
382 }
383
384 /* Function as queue_post if sending is not enabled */
385 wakeup_thread(&q->queue);
386 set_irq_level(oldlevel);
387 return 0;
388}
389
390#if 0 /* not used now but probably will be later */
391/* Query if the last message dequeued was added by queue_send or not */
392bool queue_in_queue_send(struct event_queue *q)
393{
394 return q->send && q->send->curr_sender;
395}
396#endif
397
398/* Replies with retval to any dequeued message sent with queue_send */
399void queue_reply(struct event_queue *q, intptr_t retval)
400{
401 if(q->send && q->send->curr_sender)
402 {
403 queue_release_sender(&q->send->curr_sender, retval);
404 }
405}
406#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
407
408bool queue_empty(const struct event_queue* q)
409{
410 return ( q->read == q->write );
411}
412
413bool queue_peek(struct event_queue *q, struct queue_event *ev)
414{
415 if (q->read == q->write)
416 return false;
417
418 bool have_msg = false;
419
420 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
421
422 if (q->read != q->write)
423 {
424 *ev = q->events[q->read & QUEUE_LENGTH_MASK];
425 have_msg = true;
426 }
427
428 set_irq_level(oldlevel);
429
430 return have_msg;
431}
432
433void queue_clear(struct event_queue* q)
434{
435 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
436
437 /* fixme: This is potentially unsafe in case we do interrupt-like processing */
438#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
439 /* Release all thread waiting in the queue for a reply -
440 dequeued sent message will be handled by owning thread */
441 queue_release_all_senders(q);
442#endif
443 q->read = 0;
444 q->write = 0;
445
446 set_irq_level(oldlevel);
447}
448
449void queue_remove_from_head(struct event_queue *q, long id)
450{
451 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
452
453 while(q->read != q->write)
454 {
455 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
456
457 if(q->events[rd].id != id)
458 {
459 break;
460 }
461
462#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
463 if(q->send)
464 {
465 struct thread_entry **spp = &q->send->senders[rd];
466
467 if(*spp)
468 {
469 /* Release any thread waiting on this message */
470 queue_release_sender(spp, 0);
471 }
472 }
473#endif
474 q->read++;
475 }
476
477 set_irq_level(oldlevel);
478}
479
480int queue_count(const struct event_queue *q)
481{
482 return q->write - q->read;
483}
484
485int queue_broadcast(long id, intptr_t data)
486{
487 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
488 int i;
489
490 for(i = 0;i < num_queues;i++)
491 {
492 queue_post(all_queues[i], id, data);
493 }
494
495 set_irq_level(oldlevel);
496 return num_queues;
497}
498
499void yield(void)
500{
501 switch_thread(NULL);
502}
503
504void sleep(int ticks)
505{
506 sleep_thread(ticks);
507}
508
509void sim_tick_tasks(void)
510{
511 int i;
512
513 /* Run through the list of tick tasks */
514 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
515 {
516 if(tick_funcs[i])
517 {
518 tick_funcs[i]();
519 }
520 }
521}
522
523int tick_add_task(void (*f)(void))
524{
525 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
526 int i;
527
528 /* Add a task if there is room */
529 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
530 {
531 if(tick_funcs[i] == NULL)
532 {
533 tick_funcs[i] = f;
534 set_irq_level(oldlevel);
535 return 0;
536 }
537 }
538 fprintf(stderr, "Error! tick_add_task(): out of tasks");
539 exit(-1);
540 return -1;
541}
542
543int tick_remove_task(void (*f)(void))
544{
545 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
546 int i;
547
548 /* Remove a task if it is there */
549 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
550 {
551 if(tick_funcs[i] == f)
552 {
553 tick_funcs[i] = NULL;
554 set_irq_level(oldlevel);
555 return 0;
556 }
557 }
558
559 set_irq_level(oldlevel);
560 return -1;
561}
562
563/* Very simple mutex simulation - won't work with pre-emptive
564 multitasking, but is better than nothing at all */
565void mutex_init(struct mutex *m)
566{
567 m->queue = NULL;
568 m->thread = NULL;
569 m->count = 0;
570 m->locked = 0;
571}
572
573void mutex_lock(struct mutex *m)
574{
575 struct thread_entry *const thread = thread_get_current();
576
577 if(thread == m->thread)
578 {
579 m->count++;
580 return;
581 }
582
583 if (!test_and_set(&m->locked, 1))
584 {
585 m->thread = thread;
586 return;
587 }
588
589 block_thread_no_listlock(&m->queue);
590}
591
592void mutex_unlock(struct mutex *m)
593{
594 /* unlocker not being the owner is an unlocking violation */
595 if(m->thread != thread_get_current())
596 {
597 fprintf(stderr, "mutex_unlock->wrong thread");
598 exit(-1);
599 }
600
601 if (m->count > 0)
602 {
603 /* this thread still owns lock */
604 m->count--;
605 return;
606 }
607
608 m->thread = wakeup_thread_no_listlock(&m->queue);
609
610 if (m->thread == NULL)
611 {
612 /* release lock */
613 m->locked = 0;
614 }
615}
616
617#ifdef HAVE_SEMAPHORE_OBJECTS
618void semaphore_init(struct semaphore *s, int max, int start)
619{
620 if(max <= 0 || start < 0 || start > max)
621 {
622 fprintf(stderr, "semaphore_init->inv arg");
623 exit(-1);
624 }
625 s->queue = NULL;
626 s->max = max;
627 s->count = start;
628}
629
630void semaphore_wait(struct semaphore *s)
631{
632 if(--s->count >= 0)
633 return;
634 block_thread_no_listlock(&s->queue);
635}
636
637void semaphore_release(struct semaphore *s)
638{
639 if(s->count < s->max)
640 {
641 if(++s->count <= 0)
642 {
643 if(s->queue == NULL)
644 {
645 /* there should be threads in this queue */
646 fprintf(stderr, "semaphore->wakeup");
647 exit(-1);
648 }
649 /* a thread was queued - wake it up */
650 wakeup_thread_no_listlock(&s->queue);
651 }
652 }
653}
654#endif /* HAVE_SEMAPHORE_OBJECTS */
655
656#ifdef HAVE_EVENT_OBJECTS
657void event_init(struct event *e, unsigned int flags)
658{
659 e->queues[STATE_NONSIGNALED] = NULL;
660 e->queues[STATE_SIGNALED] = NULL;
661 e->state = flags & STATE_SIGNALED;
662 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
663}
664
665void event_wait(struct event *e, unsigned int for_state)
666{
667 unsigned int last_state = e->state;
668
669 if(e->automatic != 0)
670 {
671 /* wait for false always satisfied by definition
672 or if it just changed to false */
673 if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
674 {
675 /* automatic - unsignal */
676 e->state = STATE_NONSIGNALED;
677 return;
678 }
679 /* block until state matches */
680 }
681 else if(for_state == last_state)
682 {
683 /* the state being waited for is the current state */
684 return;
685 }
686
687 /* current state does not match wait-for state */
688 block_thread_no_listlock(&e->queues[for_state]);
689}
690
691void event_set_state(struct event *e, unsigned int state)
692{
693 unsigned int last_state = e->state;
694
695 if(last_state == state)
696 {
697 /* no change */
698 return;
699 }
700
701 if(state == STATE_SIGNALED)
702 {
703 if(e->automatic != 0)
704 {
705 struct thread_entry *thread;
706
707 if(e->queues[STATE_NONSIGNALED] != NULL)
708 {
709 /* no thread should have ever blocked for nonsignaled */
710 fprintf(stderr, "set_event_state->queue[NS]:S");
711 exit(-1);
712 }
713
714 /* pass to next thread and keep unsignaled - "pulse" */
715 thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
716 e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
717 }
718 else
719 {
720 /* release all threads waiting for signaled */
721 thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
722 e->state = STATE_SIGNALED;
723 }
724 }
725 else
726 {
727 /* release all threads waiting for unsignaled */
728 if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
729 {
730 /* no thread should have ever blocked */
731 fprintf(stderr, "set_event_state->queue[NS]:NS");
732 exit(-1);
733 }
734
735 thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
736 e->state = STATE_NONSIGNALED;
737 }
738}
739#endif /* HAVE_EVENT_OBJECTS */
diff --git a/uisimulator/sdl/system-sdl.h b/uisimulator/sdl/system-sdl.h
index 2197a014c3..c5e7d40560 100644
--- a/uisimulator/sdl/system-sdl.h
+++ b/uisimulator/sdl/system-sdl.h
@@ -29,4 +29,6 @@ void sim_exit_irq_handler(void);
 bool sim_kernel_init(void);
 void sim_kernel_shutdown(void);
 
+extern long start_tick;
+
 #endif /* _SYSTEM_SDL_H_ */
diff --git a/uisimulator/sdl/thread-sdl.c b/uisimulator/sdl/thread-sdl.c
index d1a8e60d01..78a66f72a7 100644
--- a/uisimulator/sdl/thread-sdl.c
+++ b/uisimulator/sdl/thread-sdl.c
@@ -26,6 +26,7 @@
26#include <setjmp.h> 26#include <setjmp.h>
27#include "system-sdl.h" 27#include "system-sdl.h"
28#include "thread-sdl.h" 28#include "thread-sdl.h"
29#include "system.h"
29#include "kernel.h" 30#include "kernel.h"
30#include "thread.h" 31#include "thread.h"
31#include "debug.h" 32#include "debug.h"
@@ -37,7 +38,7 @@
37#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__) 38#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
38static char __name[32]; 39static char __name[32];
39#define THREAD_SDL_GET_NAME(thread) \ 40#define THREAD_SDL_GET_NAME(thread) \
40 ({ thread_get_name(__name, sizeof(__name)/sizeof(__name[0]), thread); __name; }) 41 ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
41#else 42#else
42#define THREAD_SDL_DEBUGF(...) 43#define THREAD_SDL_DEBUGF(...)
43#define THREAD_SDL_GET_NAME(thread) 44#define THREAD_SDL_GET_NAME(thread)
@@ -54,7 +55,6 @@ struct thread_entry threads[MAXTHREADS];
54 * way to get them back in there so they may exit */ 55 * way to get them back in there so they may exit */
55static jmp_buf thread_jmpbufs[MAXTHREADS]; 56static jmp_buf thread_jmpbufs[MAXTHREADS];
56static SDL_mutex *m; 57static SDL_mutex *m;
57static struct thread_entry *running;
58static bool threads_exit = false; 58static bool threads_exit = false;
59 59
60extern long start_tick; 60extern long start_tick;
@@ -78,7 +78,7 @@ void thread_sdl_shutdown(void)
78 { 78 {
79 /* Signal thread on delay or block */ 79 /* Signal thread on delay or block */
80 SDL_Thread *t = thread->context.t; 80 SDL_Thread *t = thread->context.t;
81 SDL_CondSignal(thread->context.c); 81 SDL_SemPost(thread->context.s);
82 SDL_UnlockMutex(m); 82 SDL_UnlockMutex(m);
83 /* Wait for it to finish */ 83 /* Wait for it to finish */
84 SDL_WaitThread(t, NULL); 84 SDL_WaitThread(t, NULL);
@@ -98,7 +98,7 @@ extern void app_main(void *param);
98static int thread_sdl_app_main(void *param) 98static int thread_sdl_app_main(void *param)
99{ 99{
100 SDL_LockMutex(m); 100 SDL_LockMutex(m);
101 running = &threads[0]; 101 cores[CURRENT_CORE].running = &threads[0];
102 102
103 /* Set the jump address for return */ 103 /* Set the jump address for return */
104 if (setjmp(thread_jmpbufs[0]) == 0) 104 if (setjmp(thread_jmpbufs[0]) == 0)
@@ -116,6 +116,8 @@ static int thread_sdl_app_main(void *param)
116/* Initialize SDL threading */ 116/* Initialize SDL threading */
117bool thread_sdl_init(void *param) 117bool thread_sdl_init(void *param)
118{ 118{
119 struct thread_entry *thread;
120 memset(cores, 0, sizeof(cores));
119 memset(threads, 0, sizeof(threads)); 121 memset(threads, 0, sizeof(threads));
120 122
121 m = SDL_CreateMutex(); 123 m = SDL_CreateMutex();
@@ -129,28 +131,30 @@ bool thread_sdl_init(void *param)
129 /* Slot 0 is reserved for the main thread - initialize it here and 131 /* Slot 0 is reserved for the main thread - initialize it here and
130 then create the SDL thread - it is possible to have a quick, early 132 then create the SDL thread - it is possible to have a quick, early
131 shutdown try to access the structure. */ 133 shutdown try to access the structure. */
132 running = &threads[0]; 134 thread = &threads[0];
133 running->stack = " "; 135 thread->stack = (uintptr_t *)" ";
134 running->stack_size = 8; 136 thread->stack_size = 8;
135 running->name = "main"; 137 thread->name = "main";
136 running->state = STATE_RUNNING; 138 thread->state = STATE_RUNNING;
137 running->context.c = SDL_CreateCond(); 139 thread->context.s = SDL_CreateSemaphore(0);
140 cores[CURRENT_CORE].running = thread;
138 141
139 if (running->context.c == NULL) 142 if (thread->context.s == NULL)
140 { 143 {
141 fprintf(stderr, "Failed to create main condition variable\n"); 144 fprintf(stderr, "Failed to create main semaphore\n");
142 return false; 145 return false;
143 } 146 }
144 147
145 running->context.t = SDL_CreateThread(thread_sdl_app_main, param); 148 thread->context.t = SDL_CreateThread(thread_sdl_app_main, param);
146 149
147 if (running->context.t == NULL) 150 if (thread->context.t == NULL)
148 { 151 {
152 SDL_DestroySemaphore(thread->context.s);
149 fprintf(stderr, "Failed to create main thread\n"); 153 fprintf(stderr, "Failed to create main thread\n");
150 return false; 154 return false;
151 } 155 }
152 156
153 THREAD_SDL_DEBUGF("Main thread: %p\n", running); 157 THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
154 158
155 SDL_UnlockMutex(m); 159 SDL_UnlockMutex(m);
156 return true; 160 return true;
@@ -160,21 +164,22 @@ bool thread_sdl_init(void *param)
160void thread_sdl_thread_lock(void *me) 164void thread_sdl_thread_lock(void *me)
161{ 165{
162 SDL_LockMutex(m); 166 SDL_LockMutex(m);
163 running = (struct thread_entry *)me; 167 cores[CURRENT_CORE].running = (struct thread_entry *)me;
164 168
165 if (threads_exit) 169 if (threads_exit)
166 remove_thread(NULL); 170 thread_exit();
167} 171}
168 172
169void * thread_sdl_thread_unlock(void) 173void * thread_sdl_thread_unlock(void)
170{ 174{
171 struct thread_entry *current = running; 175 struct thread_entry *current = cores[CURRENT_CORE].running;
172 SDL_UnlockMutex(m); 176 SDL_UnlockMutex(m);
173 return current; 177 return current;
174} 178}
175 179
176static int find_empty_thread_slot(void) 180static struct thread_entry * find_empty_thread_slot(void)
177{ 181{
182 struct thread_entry *thread = NULL;
178 int n; 183 int n;
179 184
180 for (n = 0; n < MAXTHREADS; n++) 185 for (n = 0; n < MAXTHREADS; n++)
@@ -182,10 +187,13 @@ static int find_empty_thread_slot(void)
182 int state = threads[n].state; 187 int state = threads[n].state;
183 188
184 if (state == STATE_KILLED) 189 if (state == STATE_KILLED)
190 {
191 thread = &threads[n];
185 break; 192 break;
193 }
186 } 194 }
187 195
188 return n; 196 return thread;
189} 197}
190 198
191static void add_to_list_l(struct thread_entry **list, 199static void add_to_list_l(struct thread_entry **list,
@@ -229,64 +237,163 @@ static void remove_from_list_l(struct thread_entry **list,
229 thread->l.next->l.prev = thread->l.prev; 237 thread->l.next->l.prev = thread->l.prev;
230} 238}
231 239
232static inline void run_blocking_ops(void)
233{
234 set_irq_level(0);
235}
236
237struct thread_entry *thread_get_current(void) 240struct thread_entry *thread_get_current(void)
238{ 241{
239 return running; 242 return cores[CURRENT_CORE].running;
240} 243}
241 244
242void switch_thread(struct thread_entry *old) 245void switch_thread(void)
243{ 246{
244 struct thread_entry *current = running; 247 struct thread_entry *current = cores[CURRENT_CORE].running;
245 248
246 SDL_UnlockMutex(m); 249 set_irq_level(0);
247 /* Any other thread waiting already will get it first */
248 SDL_LockMutex(m);
249 running = current;
250 250
251 if (threads_exit) 251 switch (current->state)
252 remove_thread(NULL); 252 {
253 case STATE_RUNNING:
254 {
255 SDL_UnlockMutex(m);
256 /* Any other thread waiting already will get it first */
257 SDL_LockMutex(m);
258 break;
259 } /* STATE_RUNNING: */
260
261 case STATE_BLOCKED:
262 {
263 int oldlevel;
264
265 SDL_UnlockMutex(m);
266 SDL_SemWait(current->context.s);
267 SDL_LockMutex(m);
268
269 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
270 current->state = STATE_RUNNING;
271 set_irq_level(oldlevel);
272 break;
273 } /* STATE_BLOCKED: */
274
275 case STATE_BLOCKED_W_TMO:
276 {
277 int result, oldlevel;
278
279 SDL_UnlockMutex(m);
280 result = SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
281 SDL_LockMutex(m);
282
283 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
284
285 if (current->state == STATE_BLOCKED_W_TMO)
286 {
287 /* Timed out */
288 remove_from_list_l(current->bqp, current);
289
290#ifdef HAVE_WAKEUP_EXT_CB
291 if (current->wakeup_ext_cb != NULL)
292 current->wakeup_ext_cb(current);
293#endif
294 current->state = STATE_RUNNING;
295 }
253 296
254 (void)old; 297 if (result == SDL_MUTEX_TIMEDOUT)
298 {
299 /* Other signals from an explicit wake could have been made before
300 * arriving here if we timed out waiting for the semaphore. Make
301 * sure the count is reset. */
302 while (SDL_SemValue(current->context.s) > 0)
303 SDL_SemTryWait(current->context.s);
304 }
305
306 set_irq_level(oldlevel);
307 break;
308 } /* STATE_BLOCKED_W_TMO: */
309
310 case STATE_SLEEPING:
311 {
312 SDL_UnlockMutex(m);
313 SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
314 SDL_LockMutex(m);
315 current->state = STATE_RUNNING;
316 break;
317 } /* STATE_SLEEPING: */
318 }
319
320 cores[CURRENT_CORE].running = current;
321
322 if (threads_exit)
323 thread_exit();
255} 324}
256 325
257void sleep_thread(int ticks) 326void sleep_thread(int ticks)
258{ 327{
259 struct thread_entry *current; 328 struct thread_entry *current = cores[CURRENT_CORE].running;
260 int rem; 329 int rem;
261 330
262 current = running;
263 current->state = STATE_SLEEPING; 331 current->state = STATE_SLEEPING;
264 332
265 rem = (SDL_GetTicks() - start_tick) % (1000/HZ); 333 rem = (SDL_GetTicks() - start_tick) % (1000/HZ);
266 if (rem < 0) 334 if (rem < 0)
267 rem = 0; 335 rem = 0;
268 336
269 rem = (1000/HZ) * ticks + ((1000/HZ)-1) - rem; 337 current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
338}
339
340void block_thread(struct thread_entry *current)
341{
342 current->state = STATE_BLOCKED;
343 add_to_list_l(current->bqp, current);
344}
345
346void block_thread_w_tmo(struct thread_entry *current, int ticks)
347{
348 current->state = STATE_BLOCKED_W_TMO;
349 current->tmo_tick = (1000/HZ)*ticks;
350 add_to_list_l(current->bqp, current);
351}
352
353unsigned int wakeup_thread(struct thread_entry **list)
354{
355 struct thread_entry *thread = *list;
270 356
271 if (rem == 0) 357 if (thread != NULL)
272 { 358 {
273 /* Unlock and give up rest of quantum */ 359 switch (thread->state)
274 SDL_UnlockMutex(m); 360 {
275 SDL_Delay(0); 361 case STATE_BLOCKED:
276 SDL_LockMutex(m); 362 case STATE_BLOCKED_W_TMO:
363 remove_from_list_l(list, thread);
364 thread->state = STATE_RUNNING;
365 SDL_SemPost(thread->context.s);
366 return THREAD_OK;
367 }
277 } 368 }
278 else 369
370 return THREAD_NONE;
371}
372
373unsigned int thread_queue_wake(struct thread_entry **list)
374{
375 unsigned int result = THREAD_NONE;
376
377 for (;;)
279 { 378 {
280 /* These sleeps must be signalable for thread exit */ 379 unsigned int rc = wakeup_thread(list);
281 SDL_CondWaitTimeout(current->context.c, m, rem);
282 }
283 380
284 running = current; 381 if (rc == THREAD_NONE)
382 break;
285 383
286 current->state = STATE_RUNNING; 384 result |= rc;
385 }
287 386
288 if (threads_exit) 387 return result;
289 remove_thread(NULL); 388}
389
390void thread_thaw(struct thread_entry *thread)
391{
392 if (thread->state == STATE_FROZEN)
393 {
394 thread->state = STATE_RUNNING;
395 SDL_SemPost(thread->context.s);
396 }
290} 397}
291 398
292int runthread(void *data) 399int runthread(void *data)
@@ -297,9 +404,9 @@ int runthread(void *data)
297 /* Cannot access thread variables before locking the mutex as the 404 /* Cannot access thread variables before locking the mutex as the
298 data structures may not be filled-in yet. */ 405 data structures may not be filled-in yet. */
299 SDL_LockMutex(m); 406 SDL_LockMutex(m);
300 running = (struct thread_entry *)data; 407 cores[CURRENT_CORE].running = (struct thread_entry *)data;
301 current = running; 408 current = cores[CURRENT_CORE].running;
302 current_jmpbuf = &thread_jmpbufs[running - threads]; 409 current_jmpbuf = &thread_jmpbufs[current - threads];
303 410
304 /* Setup jump for exit */ 411 /* Setup jump for exit */
305 if (setjmp(*current_jmpbuf) == 0) 412 if (setjmp(*current_jmpbuf) == 0)
@@ -307,9 +414,10 @@ int runthread(void *data)
307 /* Run the thread routine */ 414 /* Run the thread routine */
308 if (current->state == STATE_FROZEN) 415 if (current->state == STATE_FROZEN)
309 { 416 {
310 SDL_CondWait(current->context.c, m); 417 SDL_UnlockMutex(m);
311 running = current; 418 SDL_SemWait(current->context.s);
312 419 SDL_LockMutex(m);
420 cores[CURRENT_CORE].running = current;
313 } 421 }
314 422
315 if (!threads_exit) 423 if (!threads_exit)
@@ -320,7 +428,7 @@ int runthread(void *data)
320 /* Thread routine returned - suicide */ 428 /* Thread routine returned - suicide */
321 } 429 }
322 430
323 remove_thread(NULL); 431 thread_exit();
324 } 432 }
325 else 433 else
326 { 434 {
@@ -332,131 +440,59 @@ int runthread(void *data)
332} 440}
333 441
334struct thread_entry* 442struct thread_entry*
335 create_thread(void (*function)(void), void* stack, int stack_size, 443 create_thread(void (*function)(void), void* stack, size_t stack_size,
336 unsigned flags, const char *name) 444 unsigned flags, const char *name)
337{ 445{
338 /** Avoid compiler warnings */ 446 struct thread_entry *thread;
339 SDL_Thread* t; 447 SDL_Thread* t;
340 SDL_cond *cond; 448 SDL_sem *s;
341 int slot;
342 449
343 THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : ""); 450 THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");
344 451
345 slot = find_empty_thread_slot(); 452 thread = find_empty_thread_slot();
346 if (slot >= MAXTHREADS) 453 if (thread == NULL)
347 { 454 {
348 DEBUGF("Failed to find thread slot\n"); 455 DEBUGF("Failed to find thread slot\n");
349 return NULL; 456 return NULL;
350 } 457 }
351 458
352 cond = SDL_CreateCond(); 459 s = SDL_CreateSemaphore(0);
353 if (cond == NULL) 460 if (s == NULL)
354 { 461 {
355 DEBUGF("Failed to create condition variable\n"); 462 DEBUGF("Failed to create semaphore\n");
356 return NULL; 463 return NULL;
357 } 464 }
358 465
359 t = SDL_CreateThread(runthread, &threads[slot]); 466 t = SDL_CreateThread(runthread, thread);
360 if (t == NULL) 467 if (t == NULL)
361 { 468 {
362 DEBUGF("Failed to create SDL thread\n"); 469 DEBUGF("Failed to create SDL thread\n");
363 SDL_DestroyCond(cond); 470 SDL_DestroySemaphore(s);
364 return NULL; 471 return NULL;
365 } 472 }
366 473
367 threads[slot].stack = stack; 474 thread->stack = stack;
368 threads[slot].stack_size = stack_size; 475 thread->stack_size = stack_size;
369 threads[slot].name = name; 476 thread->name = name;
370 threads[slot].state = (flags & CREATE_THREAD_FROZEN) ? 477 thread->state = (flags & CREATE_THREAD_FROZEN) ?
371 STATE_FROZEN : STATE_RUNNING; 478 STATE_FROZEN : STATE_RUNNING;
372 threads[slot].context.start = function; 479 thread->context.start = function;
373 threads[slot].context.t = t; 480 thread->context.t = t;
374 threads[slot].context.c = cond; 481 thread->context.s = s;
375 482
376 THREAD_SDL_DEBUGF("New Thread: %d (%s)\n", 483 THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
377 slot, THREAD_SDL_GET_NAME(&threads[slot])); 484 thread - threads, THREAD_SDL_GET_NAME(thread));
378 485
379 return &threads[slot]; 486 return thread;
380}
381
382void _block_thread(struct thread_queue *tq)
383{
384 struct thread_entry *thread = running;
385
386 thread->state = STATE_BLOCKED;
387 thread->bqp = tq;
388 add_to_list_l(&tq->queue, thread);
389
390 run_blocking_ops();
391
392 SDL_CondWait(thread->context.c, m);
393 running = thread;
394
395 if (threads_exit)
396 remove_thread(NULL);
397}
398
399void block_thread_w_tmo(struct thread_queue *tq, int ticks)
400{
401 struct thread_entry *thread = running;
402
403 thread->state = STATE_BLOCKED_W_TMO;
404 thread->bqp = tq;
405 add_to_list_l(&tq->queue, thread);
406
407 run_blocking_ops();
408
409 SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks);
410 running = thread;
411
412 if (thread->state == STATE_BLOCKED_W_TMO)
413 {
414 /* Timed out */
415 remove_from_list_l(&tq->queue, thread);
416 thread->state = STATE_RUNNING;
417 }
418
419 if (threads_exit)
420 remove_thread(NULL);
421}
422
423struct thread_entry * _wakeup_thread(struct thread_queue *tq)
424{
425 struct thread_entry *thread = tq->queue;
426
427 if (thread == NULL)
428 {
429 return NULL;
430 }
431
432 switch (thread->state)
433 {
434 case STATE_BLOCKED:
435 case STATE_BLOCKED_W_TMO:
436 remove_from_list_l(&tq->queue, thread);
437 thread->state = STATE_RUNNING;
438 SDL_CondSignal(thread->context.c);
439 return thread;
440 default:
441 return NULL;
442 }
443}
444
445void thread_thaw(struct thread_entry *thread)
446{
447 if (thread->state == STATE_FROZEN)
448 {
449 thread->state = STATE_RUNNING;
450 SDL_CondSignal(thread->context.c);
451 }
452} 487}
453 488
454void init_threads(void) 489void init_threads(void)
455{ 490{
456 /* Main thread is already initialized */ 491 /* Main thread is already initialized */
457 if (running != &threads[0]) 492 if (cores[CURRENT_CORE].running != &threads[0])
458 { 493 {
459 THREAD_PANICF("Wrong main thread in init_threads: %p\n", running); 494 THREAD_PANICF("Wrong main thread in init_threads: %p\n",
495 cores[CURRENT_CORE].running);
460 } 496 }
461 497
462 THREAD_SDL_DEBUGF("First Thread: %d (%s)\n", 498 THREAD_SDL_DEBUGF("First Thread: %d (%s)\n",
@@ -465,9 +501,9 @@ void init_threads(void)
465 501
466void remove_thread(struct thread_entry *thread) 502void remove_thread(struct thread_entry *thread)
467{ 503{
468 struct thread_entry *current = running; 504 struct thread_entry *current = cores[CURRENT_CORE].running;
469 SDL_Thread *t; 505 SDL_Thread *t;
470 SDL_cond *c; 506 SDL_sem *s;
471 507
472 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 508 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
473 509
@@ -477,7 +513,7 @@ void remove_thread(struct thread_entry *thread)
477 } 513 }
478 514
479 t = thread->context.t; 515 t = thread->context.t;
480 c = thread->context.c; 516 s = thread->context.s;
481 thread->context.t = NULL; 517 thread->context.t = NULL;
482 518
483 if (thread != current) 519 if (thread != current)
@@ -487,20 +523,25 @@ void remove_thread(struct thread_entry *thread)
487 case STATE_BLOCKED: 523 case STATE_BLOCKED:
488 case STATE_BLOCKED_W_TMO: 524 case STATE_BLOCKED_W_TMO:
489 /* Remove thread from object it's waiting on */ 525 /* Remove thread from object it's waiting on */
490 remove_from_list_l(&thread->bqp->queue, thread); 526 remove_from_list_l(thread->bqp, thread);
527
528#ifdef HAVE_WAKEUP_EXT_CB
529 if (thread->wakeup_ext_cb != NULL)
530 thread->wakeup_ext_cb(thread);
531#endif
491 break; 532 break;
492 } 533 }
493 534
494 SDL_CondSignal(c); 535 SDL_SemPost(s);
495 } 536 }
496 537
497 THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n", 538 THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
498 thread - threads, THREAD_SDL_GET_NAME(thread)); 539 thread - threads, THREAD_SDL_GET_NAME(thread));
499 540
500 thread_queue_wake_no_listlock(&thread->queue);
501 thread->state = STATE_KILLED; 541 thread->state = STATE_KILLED;
542 thread_queue_wake(&thread->queue);
502 543
503 SDL_DestroyCond(c); 544 SDL_DestroySemaphore(s);
504 545
505 if (thread == current) 546 if (thread == current)
506 { 547 {
@@ -514,14 +555,23 @@ void remove_thread(struct thread_entry *thread)
514 set_irq_level(oldlevel); 555 set_irq_level(oldlevel);
515} 556}
516 557
558void thread_exit(void)
559{
560 remove_thread(NULL);
561}
562
517void thread_wait(struct thread_entry *thread) 563void thread_wait(struct thread_entry *thread)
518{ 564{
565 struct thread_entry *current = cores[CURRENT_CORE].running;
566
519 if (thread == NULL) 567 if (thread == NULL)
520 thread = running; 568 thread = current;
521 569
522 if (thread->state != STATE_KILLED) 570 if (thread->state != STATE_KILLED)
523 { 571 {
524 block_thread_no_listlock(&thread->queue); 572 current->bqp = &thread->queue;
573 block_thread(current);
574 switch_thread();
525 } 575 }
526} 576}
527 577
diff --git a/uisimulator/sdl/uisdl.c b/uisimulator/sdl/uisdl.c
index e0a449ed48..09210926b5 100644
--- a/uisimulator/sdl/uisdl.c
+++ b/uisimulator/sdl/uisdl.c
@@ -40,19 +40,13 @@
 #include "SDL_thread.h"
 
 /* extern functions */
-extern void app_main (void *); /* mod entry point */
-extern void new_key(int key);
-extern void sim_tick_tasks(void);
-extern bool sim_io_init(void);
-extern void sim_io_shutdown(void);
+extern void new_key(int key);
 
 void button_event(int key, bool pressed);
 
 SDL_Surface *gui_surface;
 bool background = false; /* Don't use backgrounds by default */
 
-SDL_TimerID tick_timer_id;
-
 bool lcd_display_redraw = true; /* Used for player simulator */
 char having_new_lcd = true; /* Used for player simulator */
 bool sim_alarm_wakeup = false;
@@ -63,31 +57,6 @@ bool debug_audio = false;
 bool debug_wps = false;
 int wps_verbose_level = 3;
 
-long start_tick;
-
-Uint32 tick_timer(Uint32 interval, void *param)
-{
-    long new_tick;
-
-    (void) interval;
-    (void) param;
-
-    new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
-
-    if (new_tick != current_tick) {
-        long i;
-        for (i = new_tick - current_tick; i > 0; i--)
-        {
-            sim_enter_irq_handler();
-            sim_tick_tasks();
-            sim_exit_irq_handler();
-        }
-        current_tick = new_tick;
-    }
-
-    return 1;
-}
-
 void gui_message_loop(void)
 {
     SDL_Event event;
@@ -181,8 +150,6 @@ bool gui_startup(void)
         SDL_UpdateRect(gui_surface, 0, 0, 0, 0);
     }
 
-    start_tick = SDL_GetTicks();
-
     return true;
 }
 
@@ -191,7 +158,6 @@ bool gui_shutdown(void)
     /* Order here is relevent to prevent deadlocks and use of destroyed
        sync primitives by kernel threads */
     thread_sdl_shutdown();
-    SDL_RemoveTimer(tick_timer_id);
     sim_kernel_shutdown();
     return true;
 }
@@ -287,8 +253,6 @@ int main(int argc, char *argv[])
         return -1;
     }
 
-    tick_timer_id = SDL_AddTimer(10, tick_timer, NULL);
-
     gui_message_loop();
 
     return gui_shutdown();