summaryrefslogtreecommitdiff
path: root/uisimulator/sdl/kernel.c
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2007-09-08 12:20:53 +0000
committerMichael Sevakis <jethead71@rockbox.org>2007-09-08 12:20:53 +0000
commitf64ebb1c1f10e8d15fcc4879d781703c86c5fb8b (patch)
tree065072709c699ac6dc3eb640368bd3f4106144e4 /uisimulator/sdl/kernel.c
parent69b4654ea28049c7e8637d521327ba10ae405f8b (diff)
downloadrockbox-f64ebb1c1f10e8d15fcc4879d781703c86c5fb8b.tar.gz
rockbox-f64ebb1c1f10e8d15fcc4879d781703c86c5fb8b.zip
Sim I/O and threading that runs more like on target. Tweakable if any genuine slowness imitation is required for any one of them. One point of concern is the sim shutdown on an OS other than Linux, just because terminating threads in a manner other than having them do it themselves is kind of dirty IMHO.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@14639 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'uisimulator/sdl/kernel.c')
-rw-r--r--uisimulator/sdl/kernel.c126
1 file changed, 68 insertions, 58 deletions
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
index 2b194a24ae..91d1afa1b9 100644
--- a/uisimulator/sdl/kernel.c
+++ b/uisimulator/sdl/kernel.c
@@ -25,25 +25,19 @@
25#include "thread.h" 25#include "thread.h"
26#include "debug.h" 26#include "debug.h"
27 27
28volatile long current_tick = 0;
28static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); 29static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
29 30
30/* This array holds all queues that are initiated. It is used for broadcast. */ 31/* This array holds all queues that are initiated. It is used for broadcast. */
31static struct event_queue *all_queues[32]; 32static struct event_queue *all_queues[32];
32static int num_queues = 0; 33static int num_queues = 0;
33 34
34int set_irq_level (int level)
35{
36 static int _lv = 0;
37 return (_lv = level);
38}
39
40#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 35#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
41/* Moves waiting thread's descriptor to the current sender when a 36/* Moves waiting thread's descriptor to the current sender when a
42 message is dequeued */ 37 message is dequeued */
43static void queue_fetch_sender(struct queue_sender_list *send, 38static void queue_fetch_sender(struct queue_sender_list *send,
44 unsigned int i) 39 unsigned int i)
45{ 40{
46 int old_level = set_irq_level(15<<4);
47 struct thread_entry **spp = &send->senders[i]; 41 struct thread_entry **spp = &send->senders[i];
48 42
49 if(*spp) 43 if(*spp)
@@ -51,8 +45,6 @@ static void queue_fetch_sender(struct queue_sender_list *send,
51 send->curr_sender = *spp; 45 send->curr_sender = *spp;
52 *spp = NULL; 46 *spp = NULL;
53 } 47 }
54
55 set_irq_level(old_level);
56} 48}
57 49
58/* Puts the specified return value in the waiting thread's return value 50/* Puts the specified return value in the waiting thread's return value
@@ -61,7 +53,12 @@ static void queue_release_sender(struct thread_entry **sender,
61 intptr_t retval) 53 intptr_t retval)
62{ 54{
63 (*sender)->retval = retval; 55 (*sender)->retval = retval;
64 *sender = NULL; 56 wakeup_thread(sender);
57 if(*sender != NULL)
58 {
59 fprintf(stderr, "queue->send slot ovf: %08X\n", (int)*sender);
60 exit(-1);
61 }
65} 62}
66 63
67/* Releases any waiting threads that are queued with queue_send - 64/* Releases any waiting threads that are queued with queue_send -
@@ -88,8 +85,12 @@ static void queue_release_all_senders(struct event_queue *q)
88void queue_enable_queue_send(struct event_queue *q, 85void queue_enable_queue_send(struct event_queue *q,
89 struct queue_sender_list *send) 86 struct queue_sender_list *send)
90{ 87{
91 q->send = send; 88 q->send = NULL;
92 memset(send, 0, sizeof(*send)); 89 if(send)
90 {
91 q->send = send;
92 memset(send, 0, sizeof(*send));
93 }
93} 94}
94#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ 95#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
95 96
@@ -104,6 +105,11 @@ void queue_init(struct event_queue *q, bool register_queue)
104 105
105 if(register_queue) 106 if(register_queue)
106 { 107 {
108 if(num_queues >= 32)
109 {
110 fprintf(stderr, "queue_init->out of queues");
111 exit(-1);
112 }
107 /* Add it to the all_queues array */ 113 /* Add it to the all_queues array */
108 all_queues[num_queues++] = q; 114 all_queues[num_queues++] = q;
109 } 115 }
@@ -114,13 +120,6 @@ void queue_delete(struct event_queue *q)
114 int i; 120 int i;
115 bool found = false; 121 bool found = false;
116 122
117#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
118 /* Release waiting threads and reply to any dequeued message
119 waiting for one. */
120 queue_release_all_senders(q);
121 queue_reply(q, 0);
122#endif
123
124 /* Find the queue to be deleted */ 123 /* Find the queue to be deleted */
125 for(i = 0;i < num_queues;i++) 124 for(i = 0;i < num_queues;i++)
126 { 125 {
@@ -141,15 +140,28 @@ void queue_delete(struct event_queue *q)
141 140
142 num_queues--; 141 num_queues--;
143 } 142 }
143
144 /* Release threads waiting on queue head */
145 wakeup_thread(&q->thread);
146
147#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
148 /* Release waiting threads and reply to any dequeued message
149 waiting for one. */
150 queue_release_all_senders(q);
151 queue_reply(q, 0);
152#endif
153
154 q->read = 0;
155 q->write = 0;
144} 156}
145 157
146void queue_wait(struct event_queue *q, struct event *ev) 158void queue_wait(struct event_queue *q, struct event *ev)
147{ 159{
148 unsigned int rd; 160 unsigned int rd;
149 161
150 while(q->read == q->write) 162 if (q->read == q->write)
151 { 163 {
152 switch_thread(true, NULL); 164 block_thread(&q->thread);
153 } 165 }
154 166
155 rd = q->read++ & QUEUE_LENGTH_MASK; 167 rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -166,11 +178,9 @@ void queue_wait(struct event_queue *q, struct event *ev)
166 178
167void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) 179void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
168{ 180{
169 unsigned int timeout = current_tick + ticks; 181 if (q->read == q->write && ticks > 0)
170
171 while(q->read == q->write && TIME_BEFORE( current_tick, timeout ))
172 { 182 {
173 sim_sleep(1); 183 block_thread_w_tmo(&q->thread, ticks);
174 } 184 }
175 185
176 if(q->read != q->write) 186 if(q->read != q->write)
@@ -194,7 +204,6 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
194 204
195void queue_post(struct event_queue *q, long id, intptr_t data) 205void queue_post(struct event_queue *q, long id, intptr_t data)
196{ 206{
197 int oldlevel = set_irq_level(15<<4);
198 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; 207 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
199 208
200 q->events[wr].id = id; 209 q->events[wr].id = id;
@@ -213,13 +222,12 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
213 } 222 }
214#endif 223#endif
215 224
216 set_irq_level(oldlevel); 225 wakeup_thread(&q->thread);
217} 226}
218 227
219#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 228#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
220intptr_t queue_send(struct event_queue *q, long id, intptr_t data) 229intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
221{ 230{
222 int oldlevel = set_irq_level(15<<4);
223 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; 231 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
224 232
225 q->events[wr].id = id; 233 q->events[wr].id = id;
@@ -228,7 +236,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
228 if(q->send) 236 if(q->send)
229 { 237 {
230 struct thread_entry **spp = &q->send->senders[wr]; 238 struct thread_entry **spp = &q->send->senders[wr];
231 static struct thread_entry sender;
232 239
233 if(*spp) 240 if(*spp)
234 { 241 {
@@ -236,19 +243,13 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
236 queue_release_sender(spp, 0); 243 queue_release_sender(spp, 0);
237 } 244 }
238 245
239 *spp = &sender; 246 wakeup_thread(&q->thread);
240
241 set_irq_level(oldlevel);
242 while (*spp != NULL)
243 {
244 switch_thread(true, NULL);
245 }
246 247
247 return sender.retval; 248 block_thread(spp);
249 return thread_get_current()->retval;
248 } 250 }
249 251
250 /* Function as queue_post if sending is not enabled */ 252 /* Function as queue_post if sending is not enabled */
251 set_irq_level(oldlevel);
252 return 0; 253 return 0;
253} 254}
254 255
@@ -289,8 +290,6 @@ void queue_clear(struct event_queue* q)
289 290
290void queue_remove_from_head(struct event_queue *q, long id) 291void queue_remove_from_head(struct event_queue *q, long id)
291{ 292{
292 int oldlevel = set_irq_level(15<<4);
293
294 while(q->read != q->write) 293 while(q->read != q->write)
295 { 294 {
296 unsigned int rd = q->read & QUEUE_LENGTH_MASK; 295 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
@@ -314,8 +313,6 @@ void queue_remove_from_head(struct event_queue *q, long id)
314#endif 313#endif
315 q->read++; 314 q->read++;
316 } 315 }
317
318 set_irq_level(oldlevel);
319} 316}
320 317
321int queue_count(const struct event_queue *q) 318int queue_count(const struct event_queue *q)
@@ -335,12 +332,14 @@ int queue_broadcast(long id, intptr_t data)
335 return num_queues; 332 return num_queues;
336} 333}
337 334
338void switch_thread(bool save_context, struct thread_entry **blocked_list) 335void yield(void)
339{ 336{
340 (void)save_context; 337 switch_thread(true, NULL);
341 (void)blocked_list; 338}
342 339
343 yield (); 340void sleep(int ticks)
341{
342 sleep_thread(ticks);
344} 343}
345 344
346void sim_tick_tasks(void) 345void sim_tick_tasks(void)
@@ -370,7 +369,8 @@ int tick_add_task(void (*f)(void))
370 return 0; 369 return 0;
371 } 370 }
372 } 371 }
373 DEBUGF("Error! tick_add_task(): out of tasks"); 372 fprintf(stderr, "Error! tick_add_task(): out of tasks");
373 exit(-1);
374 return -1; 374 return -1;
375} 375}
376 376
@@ -395,29 +395,39 @@ int tick_remove_task(void (*f)(void))
395 multitasking, but is better than nothing at all */ 395 multitasking, but is better than nothing at all */
396void mutex_init(struct mutex *m) 396void mutex_init(struct mutex *m)
397{ 397{
398 m->locked = false; 398 m->thread = NULL;
399 m->locked = 0;
399} 400}
400 401
401void mutex_lock(struct mutex *m) 402void mutex_lock(struct mutex *m)
402{ 403{
403 while(m->locked) 404 if (test_and_set(&m->locked, 1))
404 switch_thread(true, NULL); 405 {
405 m->locked = true; 406 block_thread(&m->thread);
407 }
406} 408}
407 409
408void mutex_unlock(struct mutex *m) 410void mutex_unlock(struct mutex *m)
409{ 411{
410 m->locked = false; 412 if (m->thread != NULL)
413 {
414 wakeup_thread(&m->thread);
415 }
416 else
417 {
418 m->locked = 0;
419 }
411} 420}
412 421
413void spinlock_lock(struct mutex *m) 422void spinlock_lock(struct mutex *l)
414{ 423{
415 while(m->locked) 424 while(test_and_set(&l->locked, 1))
425 {
416 switch_thread(true, NULL); 426 switch_thread(true, NULL);
417 m->locked = true; 427 }
418} 428}
419 429
420void spinlock_unlock(struct mutex *m) 430void spinlock_unlock(struct mutex *l)
421{ 431{
422 m->locked = false; 432 l->locked = 0;
423} 433}