Diffstat (limited to 'uisimulator/sdl/kernel.c')
-rw-r--r--  uisimulator/sdl/kernel.c  739
1 file changed, 0 insertions, 739 deletions
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
deleted file mode 100644
index d67fb2b9f1..0000000000
--- a/uisimulator/sdl/kernel.c
+++ /dev/null
@@ -1,739 +0,0 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |    _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < |  \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <SDL.h>
#include <SDL_thread.h>
#include "memory.h"
#include "system-sdl.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

/* Condition to signal that "interrupts" may proceed */
static SDL_cond *sim_thread_cond;
/* Mutex to serialize changing levels and exclude other threads while
 * inside a handler */
static SDL_mutex *sim_irq_mtx;
static int interrupt_level = HIGHEST_IRQ_LEVEL;
static int handlers_pending = 0;
static int status_reg = 0;

extern struct core_entry cores[NUM_CORES];

/* Necessary logic:
 * 1) All threads must pass unblocked
 * 2) Current handler must always pass unblocked
 * 3) Threads must be excluded when irq routine is running
 * 4) No more than one handler routine should execute at a time
 */
int set_irq_level(int level)
{
    SDL_LockMutex(sim_irq_mtx);

    int oldlevel = interrupt_level;

    if (status_reg == 0 && level == 0 && oldlevel != 0)
    {
        /* Not in a handler and "interrupts" are being reenabled */
        if (handlers_pending > 0)
            SDL_CondSignal(sim_thread_cond);
    }

    interrupt_level = level; /* save new level */

    SDL_UnlockMutex(sim_irq_mtx);
    return oldlevel;
}
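
/* Editorial sketch, not part of the original file: the save/restore
 * pattern used throughout this file. "Interrupts" are masked around a
 * critical section and the previous level is restored afterwards. */
#if 0
static void example_critical_section(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    /* ... touch state shared with interrupt-like handlers ... */
    set_irq_level(oldlevel);
}
#endif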

void sim_enter_irq_handler(void)
{
    SDL_LockMutex(sim_irq_mtx);
    handlers_pending++;

    if(interrupt_level != 0)
    {
        /* "Interrupts" are disabled. Wait for reenable */
        SDL_CondWait(sim_thread_cond, sim_irq_mtx);
    }

    status_reg = 1;
}

void sim_exit_irq_handler(void)
{
    if (--handlers_pending > 0)
        SDL_CondSignal(sim_thread_cond);

    status_reg = 0;
    SDL_UnlockMutex(sim_irq_mtx);
}
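
/* Editorial sketch, not part of the original file: how an interrupt-like
 * callback in the simulator (e.g. an SDL timer callback that advances the
 * tick - an assumption about the surrounding code) would bracket its work
 * so that threads and other handlers are excluded while it runs. */
#if 0
static void example_timer_interrupt(void)
{
    sim_enter_irq_handler();
    current_tick++;
    sim_tick_tasks();
    sim_exit_irq_handler();
}
#endif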

bool sim_kernel_init(void)
{
    sim_irq_mtx = SDL_CreateMutex();
    if (sim_irq_mtx == NULL)
    {
        fprintf(stderr, "Cannot create sim_irq_mtx\n");
        return false;
    }

    sim_thread_cond = SDL_CreateCond();
    if (sim_thread_cond == NULL)
    {
        fprintf(stderr, "Cannot create sim_thread_cond\n");
        return false;
    }

    return true;
}

void sim_kernel_shutdown(void)
{
    SDL_DestroyMutex(sim_irq_mtx);
    SDL_DestroyCond(sim_thread_cond);
}
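
/* Editorial sketch, not part of the original file: the simulator's
 * startup path is expected to pair these two calls. */
#if 0
static bool example_sim_startup(void)
{
    if (!sim_kernel_init())
        return false;
    /* ... run the simulator ... */
    sim_kernel_shutdown();
    return true;
}
#endif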

volatile long current_tick = 0;
static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

/* This array holds all queues that have been initialized. It is used for broadcast. */
static struct event_queue *all_queues[MAX_NUM_QUEUES];
static int num_queues = 0;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    if(*sender != NULL)
    {
        fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
        exit(-1);
    }
}

/* Releases any waiting threads that are queued with queue_send -
   reply with NULL */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];
            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    q->send = NULL;
    if(send)
    {
        q->send = send;
        memset(send, 0, sizeof(*send));
    }
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    q->read = 0;
    q->write = 0;
    thread_queue_init(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(num_queues >= MAX_NUM_QUEUES)
        {
            fprintf(stderr, "queue_init->out of queues");
            exit(-1);
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
    }

    set_irq_level(oldlevel);
}
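
/* Editorial sketch, not part of the original file: a queue that is
 * registered in all_queues (so queue_broadcast() reaches it) and, when
 * extended messaging is compiled in, enabled for queue_send(). The
 * caller owns the sender-list storage; names here are hypothetical. */
#if 0
static struct event_queue example_q;          /* hypothetical */
static struct queue_sender_list example_send; /* hypothetical */

static void example_queue_setup(void)
{
    queue_init(&example_q, true); /* true = register for broadcast */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    queue_enable_queue_send(&example_q, &example_send);
#endif
}
#endif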

void queue_delete(struct event_queue *q)
{
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    {
        if(all_queues[i] == q)
        {
            found = true;
            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    unsigned int rd;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        }
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread, if any */
        queue_fetch_sender(q->send, rd);
    }
#endif

    set_irq_level(oldlevel);
}
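
/* Editorial sketch, not part of the original file: a typical blocking
 * event loop built on queue_wait(). The event ID is hypothetical. */
#if 0
static void example_event_loop(struct event_queue *q)
{
    struct queue_event ev;
    for (;;)
    {
        queue_wait(q, &ev); /* blocks until a message arrives */
        switch (ev.id)
        {
        case 1: /* EXAMPLE_EVENT_REDRAW - hypothetical */
            break;
        default:
            break;
        }
    }
}
#endif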

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread, if any */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    set_irq_level(oldlevel);
}
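
/* Editorial sketch, not part of the original file: polling with a
 * timeout. SYS_TIMEOUT is reported when nothing arrives in time;
 * HZ (ticks per second) is assumed from kernel.h. */
#if 0
static void example_poll(struct event_queue *q)
{
    struct queue_event ev;
    queue_wait_w_tmo(q, &ev, HZ/10); /* wait up to a tenth of a second */
    if (ev.id == SYS_TIMEOUT)
    {
        /* no message arrived - do background work instead */
    }
}
#endif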

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    wakeup_thread(&q->queue);

    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->queue);

        block_thread_no_listlock(spp);
        set_irq_level(oldlevel);
        return thread_get_current()->retval;
    }

    /* Behave like queue_post if sending is not enabled */
    wakeup_thread(&q->queue);
    set_irq_level(oldlevel);
    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
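
/* Editorial sketch, not part of the original file: the synchronous
 * round trip. queue_send() blocks the sender until the receiver either
 * answers the dequeued message with queue_reply() or lets the auto-reply
 * in queue_wait() release it with 0. Names and IDs are hypothetical. */
#if 0
static intptr_t example_ask(struct event_queue *q)
{
    /* blocks until the receiver replies; returns the reply value */
    return queue_send(q, 42 /* hypothetical id */, 0);
}

static void example_serve(struct event_queue *q)
{
    struct queue_event ev;
    queue_wait(q, &ev); /* dequeues; makes the sender "current" */
    /* ... act on ev ... */
    queue_reply(q, 1);  /* example_ask()'s queue_send() returns 1 */
}
#endif
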
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    set_irq_level(oldlevel);

    return have_msg;
}

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       a dequeued sent message will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;

    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    for(i = 0;i < num_queues;i++)
    {
        queue_post(all_queues[i], id, data);
    }

    set_irq_level(oldlevel);
    return num_queues;
}

void yield(void)
{
    switch_thread(NULL);
}

void sleep(int ticks)
{
    sleep_thread(ticks);
}

void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    fprintf(stderr, "Error! tick_add_task(): out of tasks");
    exit(-1);
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
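
/* Editorial sketch, not part of the original file: registering a
 * periodic callback that runs from the simulated tick "interrupt"
 * via sim_tick_tasks(). */
#if 0
static void example_tick_task(void)
{
    /* runs once per tick; keep it short, it runs in handler context */
}

static void example_register_tick_task(void)
{
    tick_add_task(example_tick_task);
    /* ... later, when no longer needed ... */
    tick_remove_task(example_tick_task);
}
#endif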

/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
}

void mutex_lock(struct mutex *m)
{
    struct thread_entry *const thread = thread_get_current();

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    if (!test_and_set(&m->locked, 1))
    {
        m->thread = thread;
        return;
    }

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    if(m->thread != thread_get_current())
    {
        fprintf(stderr, "mutex_unlock->wrong thread");
        exit(-1);
    }

    if (m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

    m->thread = wakeup_thread_no_listlock(&m->queue);

    if (m->thread == NULL)
    {
        /* release lock */
        m->locked = 0;
    }
}
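
/* Editorial sketch, not part of the original file: standard usage of
 * the recursive-capable mutex above. The name is hypothetical. */
#if 0
static struct mutex example_mtx; /* mutex_init() called once at startup */

static void example_locked_op(void)
{
    mutex_lock(&example_mtx);
    /* ... exclusive access; a nested mutex_lock() by the same thread
       would just bump m->count instead of deadlocking ... */
    mutex_unlock(&example_mtx);
}
#endif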

#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    if(max <= 0 || start < 0 || start > max)
    {
        fprintf(stderr, "semaphore_init->inv arg");
        exit(-1);
    }
    s->queue = NULL;
    s->max = max;
    s->count = start;
}

void semaphore_wait(struct semaphore *s)
{
    if(--s->count >= 0)
        return;
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
    if(s->count < s->max)
    {
        if(++s->count <= 0)
        {
            if(s->queue == NULL)
            {
                /* there should be threads in this queue */
                fprintf(stderr, "semaphore->wakeup");
                exit(-1);
            }
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }
}
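
/* Editorial sketch, not part of the original file: a producer/consumer
 * pair where the semaphore counts queued items (here max 10, starting
 * empty). Names are hypothetical. */
#if 0
static struct semaphore example_sem; /* semaphore_init(&example_sem, 10, 0) */

static void example_producer(void)
{
    /* ... enqueue an item ... */
    semaphore_release(&example_sem); /* wakes a blocked consumer, if any */
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem); /* blocks while the count would go negative */
    /* ... dequeue an item ... */
}
#endif
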
#endif /* HAVE_SEMAPHORE_OBJECTS */

#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state = e->state;

    if(e->automatic != 0)
    {
        /* a wait for nonsignaled is always satisfied by definition, and
           a wait while signaled consumes the signal */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
        return;
    }

    /* current state does not match wait-for state */
    block_thread_no_listlock(&e->queues[for_state]);
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state = e->state;

    if(last_state == state)
    {
        /* no change */
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;

            if(e->queues[STATE_NONSIGNALED] != NULL)
            {
                /* no thread should have ever blocked for nonsignaled */
                fprintf(stderr, "set_event_state->queue[NS]:S");
                exit(-1);
            }

            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */
        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
        {
            /* no thread should have ever blocked */
            fprintf(stderr, "set_event_state->queue[NS]:NS");
            exit(-1);
        }

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }
}
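
/* Editorial sketch, not part of the original file: an automatic event
 * acts as a "pulse" - each signal releases at most one waiter and the
 * event rearms itself. Names are hypothetical. */
#if 0
static struct event example_ev; /* event_init(&example_ev, EVENT_AUTOMATIC) */

static void example_waiter(void)
{
    event_wait(&example_ev, STATE_SIGNALED); /* blocks until signaled */
}

static void example_signaler(void)
{
    event_set_state(&example_ev, STATE_SIGNALED); /* wakes one waiter */
}
#endif
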
#endif /* HAVE_EVENT_OBJECTS */