diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2007-10-26 23:11:18 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2007-10-26 23:11:18 +0000 |
commit | d6af28739747099f98f541d1b76ba501882e113c (patch) | |
tree | b6ad8d89d0ef96dd4cea37dd89aad8d3fa345179 /uisimulator/sdl/kernel.c | |
parent | f026c0fc826149a3c88d462cca02b69ef5690c30 (diff) | |
download | rockbox-d6af28739747099f98f541d1b76ba501882e113c.tar.gz rockbox-d6af28739747099f98f541d1b76ba501882e113c.zip |
Implement as genuine a set_irq_level function for the sim as possible. The yield added earlier is still necessary since other threads won't run anyway while viewing the database screen on either sim or target.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15321 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'uisimulator/sdl/kernel.c')
-rw-r--r-- | uisimulator/sdl/kernel.c | 147 |
1 files changed, 126 insertions, 21 deletions
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c index 6a8c9e4538..4e0a508f74 100644 --- a/uisimulator/sdl/kernel.c +++ b/uisimulator/sdl/kernel.c | |||
@@ -18,13 +18,93 @@ | |||
18 | ****************************************************************************/ | 18 | ****************************************************************************/ |
19 | 19 | ||
20 | #include <stdlib.h> | 20 | #include <stdlib.h> |
21 | #include <SDL.h> | ||
22 | #include <SDL_thread.h> | ||
21 | #include "memory.h" | 23 | #include "memory.h" |
24 | #include "system-sdl.h" | ||
22 | #include "uisdl.h" | 25 | #include "uisdl.h" |
23 | #include "kernel.h" | 26 | #include "kernel.h" |
24 | #include "thread-sdl.h" | 27 | #include "thread-sdl.h" |
25 | #include "thread.h" | 28 | #include "thread.h" |
26 | #include "debug.h" | 29 | #include "debug.h" |
27 | 30 | ||
31 | /* Prevent "irq handler" from thread concurrent access as well as current | ||
32 | * access on multiple handlers */ | ||
33 | static SDL_cond *sim_thread_cond; | ||
34 | /* Protect sim irq object when it is being changed */ | ||
35 | static SDL_mutex *sim_irq_mtx; | ||
36 | static int interrupt_level = HIGHEST_IRQ_LEVEL; | ||
37 | static int status_reg = 0; | ||
38 | |||
39 | extern struct core_entry cores[NUM_CORES]; | ||
40 | |||
41 | /* Necessary logic: | ||
42 | * 1) All threads must pass unblocked | ||
43 | * 2) Current handler must always pass unblocked | ||
44 | * 3) Threads must be excluded when irq routine is running | ||
45 | * 4) No more than one handler routine should execute at a time | ||
46 | */ | ||
47 | int set_irq_level(int level) | ||
48 | { | ||
49 | SDL_LockMutex(sim_irq_mtx); | ||
50 | |||
51 | int oldlevel = interrupt_level; | ||
52 | |||
53 | if (status_reg == 0 && level == 0 && oldlevel != 0) | ||
54 | { | ||
55 | /* Not in a handler and "interrupts" are being reenabled */ | ||
56 | SDL_CondSignal(sim_thread_cond); | ||
57 | } | ||
58 | |||
59 | interrupt_level = level; /* save new level */ | ||
60 | |||
61 | SDL_UnlockMutex(sim_irq_mtx); | ||
62 | return oldlevel; | ||
63 | } | ||
64 | |||
65 | void sim_enter_irq_handler(void) | ||
66 | { | ||
67 | SDL_LockMutex(sim_irq_mtx); | ||
68 | if(interrupt_level != 0) | ||
69 | { | ||
70 | /* "Interrupts" are disabled. Wait for reenable */ | ||
71 | SDL_CondWait(sim_thread_cond, sim_irq_mtx); | ||
72 | } | ||
73 | status_reg = 1; | ||
74 | } | ||
75 | |||
76 | void sim_exit_irq_handler(void) | ||
77 | { | ||
78 | status_reg = 0; | ||
79 | SDL_UnlockMutex(sim_irq_mtx); | ||
80 | } | ||
81 | |||
82 | bool sim_kernel_init(void) | ||
83 | { | ||
84 | sim_irq_mtx = SDL_CreateMutex(); | ||
85 | if (sim_irq_mtx == NULL) | ||
86 | { | ||
87 | fprintf(stderr, "Cannot create sim_handler_mtx\n"); | ||
88 | return false; | ||
89 | } | ||
90 | |||
91 | /* Create with a count of 0 to have interrupts disabled by default */ | ||
92 | sim_thread_cond = SDL_CreateCond(); | ||
93 | if (sim_thread_cond == NULL) | ||
94 | { | ||
95 | fprintf(stderr, "Cannot create sim_thread_cond\n"); | ||
96 | return false; | ||
97 | } | ||
98 | |||
99 | return true; | ||
100 | } | ||
101 | |||
102 | void sim_kernel_shutdown(void) | ||
103 | { | ||
104 | SDL_DestroyMutex(sim_irq_mtx); | ||
105 | SDL_DestroyCond(sim_thread_cond); | ||
106 | } | ||
107 | |||
28 | volatile long current_tick = 0; | 108 | volatile long current_tick = 0; |
29 | static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); | 109 | static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); |
30 | 110 | ||
@@ -85,17 +165,21 @@ static void queue_release_all_senders(struct event_queue *q) | |||
85 | void queue_enable_queue_send(struct event_queue *q, | 165 | void queue_enable_queue_send(struct event_queue *q, |
86 | struct queue_sender_list *send) | 166 | struct queue_sender_list *send) |
87 | { | 167 | { |
168 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
88 | q->send = NULL; | 169 | q->send = NULL; |
89 | if(send) | 170 | if(send) |
90 | { | 171 | { |
91 | q->send = send; | 172 | q->send = send; |
92 | memset(send, 0, sizeof(*send)); | 173 | memset(send, 0, sizeof(*send)); |
93 | } | 174 | } |
175 | set_irq_level(oldlevel); | ||
94 | } | 176 | } |
95 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ | 177 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ |
96 | 178 | ||
97 | void queue_init(struct event_queue *q, bool register_queue) | 179 | void queue_init(struct event_queue *q, bool register_queue) |
98 | { | 180 | { |
181 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
182 | |||
99 | q->read = 0; | 183 | q->read = 0; |
100 | q->write = 0; | 184 | q->write = 0; |
101 | thread_queue_init(&q->queue); | 185 | thread_queue_init(&q->queue); |
@@ -113,6 +197,8 @@ void queue_init(struct event_queue *q, bool register_queue) | |||
113 | /* Add it to the all_queues array */ | 197 | /* Add it to the all_queues array */ |
114 | all_queues[num_queues++] = q; | 198 | all_queues[num_queues++] = q; |
115 | } | 199 | } |
200 | |||
201 | set_irq_level(oldlevel); | ||
116 | } | 202 | } |
117 | 203 | ||
118 | void queue_delete(struct event_queue *q) | 204 | void queue_delete(struct event_queue *q) |
@@ -120,6 +206,8 @@ void queue_delete(struct event_queue *q) | |||
120 | int i; | 206 | int i; |
121 | bool found = false; | 207 | bool found = false; |
122 | 208 | ||
209 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
210 | |||
123 | /* Find the queue to be deleted */ | 211 | /* Find the queue to be deleted */ |
124 | for(i = 0;i < num_queues;i++) | 212 | for(i = 0;i < num_queues;i++) |
125 | { | 213 | { |
@@ -153,11 +241,14 @@ void queue_delete(struct event_queue *q) | |||
153 | 241 | ||
154 | q->read = 0; | 242 | q->read = 0; |
155 | q->write = 0; | 243 | q->write = 0; |
244 | |||
245 | set_irq_level(oldlevel); | ||
156 | } | 246 | } |
157 | 247 | ||
158 | void queue_wait(struct event_queue *q, struct queue_event *ev) | 248 | void queue_wait(struct event_queue *q, struct queue_event *ev) |
159 | { | 249 | { |
160 | unsigned int rd; | 250 | unsigned int rd; |
251 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
161 | 252 | ||
162 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 253 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
163 | if (q->send && q->send->curr_sender) | 254 | if (q->send && q->send->curr_sender) |
@@ -171,7 +262,9 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
171 | { | 262 | { |
172 | do | 263 | do |
173 | { | 264 | { |
265 | cores[CURRENT_CORE].irq_level = oldlevel; | ||
174 | block_thread(&q->queue); | 266 | block_thread(&q->queue); |
267 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
175 | } | 268 | } |
176 | while (q->read == q->write); | 269 | while (q->read == q->write); |
177 | } | 270 | } |
@@ -186,10 +279,14 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
186 | queue_fetch_sender(q->send, rd); | 279 | queue_fetch_sender(q->send, rd); |
187 | } | 280 | } |
188 | #endif | 281 | #endif |
282 | |||
283 | set_irq_level(oldlevel); | ||
189 | } | 284 | } |
190 | 285 | ||
191 | void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | 286 | void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) |
192 | { | 287 | { |
288 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
289 | |||
193 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 290 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
194 | if (q->send && q->send->curr_sender) | 291 | if (q->send && q->send->curr_sender) |
195 | { | 292 | { |
@@ -200,7 +297,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
200 | 297 | ||
201 | if (q->read == q->write && ticks > 0) | 298 | if (q->read == q->write && ticks > 0) |
202 | { | 299 | { |
300 | cores[CURRENT_CORE].irq_level = oldlevel; | ||
203 | block_thread_w_tmo(&q->queue, ticks); | 301 | block_thread_w_tmo(&q->queue, ticks); |
302 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
204 | } | 303 | } |
205 | 304 | ||
206 | if(q->read != q->write) | 305 | if(q->read != q->write) |
@@ -220,10 +319,14 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
220 | { | 319 | { |
221 | ev->id = SYS_TIMEOUT; | 320 | ev->id = SYS_TIMEOUT; |
222 | } | 321 | } |
322 | |||
323 | set_irq_level(oldlevel); | ||
223 | } | 324 | } |
224 | 325 | ||
225 | void queue_post(struct event_queue *q, long id, intptr_t data) | 326 | void queue_post(struct event_queue *q, long id, intptr_t data) |
226 | { | 327 | { |
328 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
329 | |||
227 | unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; | 330 | unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; |
228 | 331 | ||
229 | q->events[wr].id = id; | 332 | q->events[wr].id = id; |
@@ -243,20 +346,15 @@ void queue_post(struct event_queue *q, long id, intptr_t data) | |||
243 | #endif | 346 | #endif |
244 | 347 | ||
245 | wakeup_thread(&q->queue); | 348 | wakeup_thread(&q->queue); |
246 | } | ||
247 | 349 | ||
248 | /* Special thread-synced queue_post for button driver or any other preemptive sim thread */ | 350 | set_irq_level(oldlevel); |
249 | void queue_syncpost(struct event_queue *q, long id, intptr_t data) | ||
250 | { | ||
251 | thread_sdl_lock(); | ||
252 | /* No rockbox threads can be running here */ | ||
253 | queue_post(q, id, data); | ||
254 | thread_sdl_unlock(); | ||
255 | } | 351 | } |
256 | 352 | ||
257 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 353 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
258 | intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | 354 | intptr_t queue_send(struct event_queue *q, long id, intptr_t data) |
259 | { | 355 | { |
356 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
357 | |||
260 | unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; | 358 | unsigned int wr = q->write++ & QUEUE_LENGTH_MASK; |
261 | 359 | ||
262 | q->events[wr].id = id; | 360 | q->events[wr].id = id; |
@@ -274,11 +372,14 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
274 | 372 | ||
275 | wakeup_thread(&q->queue); | 373 | wakeup_thread(&q->queue); |
276 | 374 | ||
375 | cores[CURRENT_CORE].irq_level = oldlevel; | ||
277 | block_thread_no_listlock(spp); | 376 | block_thread_no_listlock(spp); |
278 | return thread_get_current()->retval; | 377 | return thread_get_current()->retval; |
279 | } | 378 | } |
280 | 379 | ||
281 | /* Function as queue_post if sending is not enabled */ | 380 | /* Function as queue_post if sending is not enabled */ |
381 | wakeup_thread(&q->queue); | ||
382 | set_irq_level(oldlevel); | ||
282 | return 0; | 383 | return 0; |
283 | } | 384 | } |
284 | 385 | ||
@@ -307,6 +408,8 @@ bool queue_empty(const struct event_queue* q) | |||
307 | 408 | ||
308 | void queue_clear(struct event_queue* q) | 409 | void queue_clear(struct event_queue* q) |
309 | { | 410 | { |
411 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
412 | |||
310 | /* fixme: This is potentially unsafe in case we do interrupt-like processing */ | 413 | /* fixme: This is potentially unsafe in case we do interrupt-like processing */ |
311 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 414 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
312 | /* Release all thread waiting in the queue for a reply - | 415 | /* Release all thread waiting in the queue for a reply - |
@@ -315,10 +418,14 @@ void queue_clear(struct event_queue* q) | |||
315 | #endif | 418 | #endif |
316 | q->read = 0; | 419 | q->read = 0; |
317 | q->write = 0; | 420 | q->write = 0; |
421 | |||
422 | set_irq_level(oldlevel); | ||
318 | } | 423 | } |
319 | 424 | ||
320 | void queue_remove_from_head(struct event_queue *q, long id) | 425 | void queue_remove_from_head(struct event_queue *q, long id) |
321 | { | 426 | { |
427 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
428 | |||
322 | while(q->read != q->write) | 429 | while(q->read != q->write) |
323 | { | 430 | { |
324 | unsigned int rd = q->read & QUEUE_LENGTH_MASK; | 431 | unsigned int rd = q->read & QUEUE_LENGTH_MASK; |
@@ -342,6 +449,8 @@ void queue_remove_from_head(struct event_queue *q, long id) | |||
342 | #endif | 449 | #endif |
343 | q->read++; | 450 | q->read++; |
344 | } | 451 | } |
452 | |||
453 | set_irq_level(oldlevel); | ||
345 | } | 454 | } |
346 | 455 | ||
347 | int queue_count(const struct event_queue *q) | 456 | int queue_count(const struct event_queue *q) |
@@ -351,25 +460,16 @@ int queue_count(const struct event_queue *q) | |||
351 | 460 | ||
352 | int queue_broadcast(long id, intptr_t data) | 461 | int queue_broadcast(long id, intptr_t data) |
353 | { | 462 | { |
463 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
354 | int i; | 464 | int i; |
355 | 465 | ||
356 | for(i = 0;i < num_queues;i++) | 466 | for(i = 0;i < num_queues;i++) |
357 | { | 467 | { |
358 | queue_post(all_queues[i], id, data); | 468 | queue_post(all_queues[i], id, data); |
359 | } | 469 | } |
360 | |||
361 | return num_queues; | ||
362 | } | ||
363 | 470 | ||
364 | /* Special thread-synced queue_broadcast for button driver or any other preemptive sim thread */ | 471 | set_irq_level(oldlevel); |
365 | int queue_syncbroadcast(long id, intptr_t data) | 472 | return num_queues; |
366 | { | ||
367 | int i; | ||
368 | thread_sdl_lock(); | ||
369 | /* No rockbox threads can be running here */ | ||
370 | i = queue_broadcast(id, data); | ||
371 | thread_sdl_unlock(); | ||
372 | return i; | ||
373 | } | 473 | } |
374 | 474 | ||
375 | void yield(void) | 475 | void yield(void) |
@@ -398,6 +498,7 @@ void sim_tick_tasks(void) | |||
398 | 498 | ||
399 | int tick_add_task(void (*f)(void)) | 499 | int tick_add_task(void (*f)(void)) |
400 | { | 500 | { |
501 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
401 | int i; | 502 | int i; |
402 | 503 | ||
403 | /* Add a task if there is room */ | 504 | /* Add a task if there is room */ |
@@ -406,6 +507,7 @@ int tick_add_task(void (*f)(void)) | |||
406 | if(tick_funcs[i] == NULL) | 507 | if(tick_funcs[i] == NULL) |
407 | { | 508 | { |
408 | tick_funcs[i] = f; | 509 | tick_funcs[i] = f; |
510 | set_irq_level(oldlevel); | ||
409 | return 0; | 511 | return 0; |
410 | } | 512 | } |
411 | } | 513 | } |
@@ -416,6 +518,7 @@ int tick_add_task(void (*f)(void)) | |||
416 | 518 | ||
417 | int tick_remove_task(void (*f)(void)) | 519 | int tick_remove_task(void (*f)(void)) |
418 | { | 520 | { |
521 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
419 | int i; | 522 | int i; |
420 | 523 | ||
421 | /* Remove a task if it is there */ | 524 | /* Remove a task if it is there */ |
@@ -424,10 +527,12 @@ int tick_remove_task(void (*f)(void)) | |||
424 | if(tick_funcs[i] == f) | 527 | if(tick_funcs[i] == f) |
425 | { | 528 | { |
426 | tick_funcs[i] = NULL; | 529 | tick_funcs[i] = NULL; |
530 | set_irq_level(oldlevel); | ||
427 | return 0; | 531 | return 0; |
428 | } | 532 | } |
429 | } | 533 | } |
430 | 534 | ||
535 | set_irq_level(oldlevel); | ||
431 | return -1; | 536 | return -1; |
432 | } | 537 | } |
433 | 538 | ||