diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2007-10-16 01:25:17 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2007-10-16 01:25:17 +0000 |
commit | a9b2fb5ee3114fe835f6515b6aeae7454f66d821 (patch) | |
tree | fc4e96d0c1f215565918406c8827b16b806c1345 /uisimulator/sdl | |
parent | a3fbbc9fa7e12fd3fce122bbd235dc362050e024 (diff) | |
download | rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.tar.gz rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.zip |
Finally full multicore support for PortalPlayer 502x targets with an eye towards the possibility of other types. All SVN targets get the low-lag code to speed up blocking operations. Most files are modified here simply due to a name change to actually support a real event object and a param change to create_thread. Add some use of new features but just sit on things for a bit and leave full integration for later. Work will continue on to address size on sensitive targets and simplify things if possible. Any PP target having problems with SWP can easily be changed to sw corelocks with one #define change in config.h though only PP5020 has shown an issue and seems to work without any difficulties.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'uisimulator/sdl')
-rw-r--r-- | uisimulator/sdl/button.c | 4 | ||||
-rw-r--r-- | uisimulator/sdl/kernel.c | 225 | ||||
-rw-r--r-- | uisimulator/sdl/thread-sdl.c | 160 |
3 files changed, 311 insertions, 78 deletions
diff --git a/uisimulator/sdl/button.c b/uisimulator/sdl/button.c index a15faf72c0..e50bfea087 100644 --- a/uisimulator/sdl/button.c +++ b/uisimulator/sdl/button.c | |||
@@ -743,7 +743,7 @@ int button_queue_count( void ) | |||
743 | 743 | ||
744 | long button_get(bool block) | 744 | long button_get(bool block) |
745 | { | 745 | { |
746 | struct event ev; | 746 | struct queue_event ev; |
747 | 747 | ||
748 | if ( block || !queue_empty(&button_queue) ) { | 748 | if ( block || !queue_empty(&button_queue) ) { |
749 | queue_wait(&button_queue, &ev); | 749 | queue_wait(&button_queue, &ev); |
@@ -755,7 +755,7 @@ long button_get(bool block) | |||
755 | 755 | ||
756 | long button_get_w_tmo(int ticks) | 756 | long button_get_w_tmo(int ticks) |
757 | { | 757 | { |
758 | struct event ev; | 758 | struct queue_event ev; |
759 | queue_wait_w_tmo(&button_queue, &ev, ticks); | 759 | queue_wait_w_tmo(&button_queue, &ev, ticks); |
760 | if (ev.id == SYS_TIMEOUT) | 760 | if (ev.id == SYS_TIMEOUT) |
761 | ev.id = BUTTON_NONE; | 761 | ev.id = BUTTON_NONE; |
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c index e2aa0d31c4..8a54cd91fc 100644 --- a/uisimulator/sdl/kernel.c +++ b/uisimulator/sdl/kernel.c | |||
@@ -29,7 +29,7 @@ volatile long current_tick = 0; | |||
29 | static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); | 29 | static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); |
30 | 30 | ||
31 | /* This array holds all queues that are initiated. It is used for broadcast. */ | 31 | /* This array holds all queues that are initiated. It is used for broadcast. */ |
32 | static struct event_queue *all_queues[32]; | 32 | static struct event_queue *all_queues[MAX_NUM_QUEUES]; |
33 | static int num_queues = 0; | 33 | static int num_queues = 0; |
34 | 34 | ||
35 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 35 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
@@ -53,7 +53,7 @@ static void queue_release_sender(struct thread_entry **sender, | |||
53 | intptr_t retval) | 53 | intptr_t retval) |
54 | { | 54 | { |
55 | (*sender)->retval = retval; | 55 | (*sender)->retval = retval; |
56 | wakeup_thread(sender); | 56 | wakeup_thread_no_listlock(sender); |
57 | if(*sender != NULL) | 57 | if(*sender != NULL) |
58 | { | 58 | { |
59 | fprintf(stderr, "queue->send slot ovf: %p\n", *sender); | 59 | fprintf(stderr, "queue->send slot ovf: %p\n", *sender); |
@@ -98,14 +98,14 @@ void queue_init(struct event_queue *q, bool register_queue) | |||
98 | { | 98 | { |
99 | q->read = 0; | 99 | q->read = 0; |
100 | q->write = 0; | 100 | q->write = 0; |
101 | q->thread = NULL; | 101 | thread_queue_init(&q->queue); |
102 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 102 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
103 | q->send = NULL; /* No message sending by default */ | 103 | q->send = NULL; /* No message sending by default */ |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | if(register_queue) | 106 | if(register_queue) |
107 | { | 107 | { |
108 | if(num_queues >= 32) | 108 | if(num_queues >= MAX_NUM_QUEUES) |
109 | { | 109 | { |
110 | fprintf(stderr, "queue_init->out of queues"); | 110 | fprintf(stderr, "queue_init->out of queues"); |
111 | exit(-1); | 111 | exit(-1); |
@@ -142,7 +142,7 @@ void queue_delete(struct event_queue *q) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /* Release threads waiting on queue head */ | 144 | /* Release threads waiting on queue head */ |
145 | wakeup_thread(&q->thread); | 145 | thread_queue_wake(&q->queue); |
146 | 146 | ||
147 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 147 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
148 | /* Release waiting threads and reply to any dequeued message | 148 | /* Release waiting threads and reply to any dequeued message |
@@ -155,7 +155,7 @@ void queue_delete(struct event_queue *q) | |||
155 | q->write = 0; | 155 | q->write = 0; |
156 | } | 156 | } |
157 | 157 | ||
158 | void queue_wait(struct event_queue *q, struct event *ev) | 158 | void queue_wait(struct event_queue *q, struct queue_event *ev) |
159 | { | 159 | { |
160 | unsigned int rd; | 160 | unsigned int rd; |
161 | 161 | ||
@@ -169,7 +169,11 @@ void queue_wait(struct event_queue *q, struct event *ev) | |||
169 | 169 | ||
170 | if (q->read == q->write) | 170 | if (q->read == q->write) |
171 | { | 171 | { |
172 | block_thread(&q->thread); | 172 | do |
173 | { | ||
174 | block_thread(&q->queue); | ||
175 | } | ||
176 | while (q->read == q->write); | ||
173 | } | 177 | } |
174 | 178 | ||
175 | rd = q->read++ & QUEUE_LENGTH_MASK; | 179 | rd = q->read++ & QUEUE_LENGTH_MASK; |
@@ -184,7 +188,7 @@ void queue_wait(struct event_queue *q, struct event *ev) | |||
184 | #endif | 188 | #endif |
185 | } | 189 | } |
186 | 190 | ||
187 | void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) | 191 | void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) |
188 | { | 192 | { |
189 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 193 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
190 | if (q->send && q->send->curr_sender) | 194 | if (q->send && q->send->curr_sender) |
@@ -196,7 +200,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) | |||
196 | 200 | ||
197 | if (q->read == q->write && ticks > 0) | 201 | if (q->read == q->write && ticks > 0) |
198 | { | 202 | { |
199 | block_thread_w_tmo(&q->thread, ticks); | 203 | block_thread_w_tmo(&q->queue, ticks); |
200 | } | 204 | } |
201 | 205 | ||
202 | if(q->read != q->write) | 206 | if(q->read != q->write) |
@@ -238,7 +242,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data) | |||
238 | } | 242 | } |
239 | #endif | 243 | #endif |
240 | 244 | ||
241 | wakeup_thread(&q->thread); | 245 | wakeup_thread(&q->queue); |
242 | } | 246 | } |
243 | 247 | ||
244 | /* Special thread-synced queue_post for button driver or any other preemptive sim thread */ | 248 | /* Special thread-synced queue_post for button driver or any other preemptive sim thread */ |
@@ -268,9 +272,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
268 | queue_release_sender(spp, 0); | 272 | queue_release_sender(spp, 0); |
269 | } | 273 | } |
270 | 274 | ||
271 | wakeup_thread(&q->thread); | 275 | wakeup_thread(&q->queue); |
272 | 276 | ||
273 | block_thread(spp); | 277 | block_thread_no_listlock(spp); |
274 | return thread_get_current()->retval; | 278 | return thread_get_current()->retval; |
275 | } | 279 | } |
276 | 280 | ||
@@ -370,7 +374,7 @@ int queue_syncbroadcast(long id, intptr_t data) | |||
370 | 374 | ||
371 | void yield(void) | 375 | void yield(void) |
372 | { | 376 | { |
373 | switch_thread(true, NULL); | 377 | switch_thread(NULL); |
374 | } | 378 | } |
375 | 379 | ||
376 | void sleep(int ticks) | 380 | void sleep(int ticks) |
@@ -431,39 +435,218 @@ int tick_remove_task(void (*f)(void)) | |||
431 | multitasking, but is better than nothing at all */ | 435 | multitasking, but is better than nothing at all */ |
432 | void mutex_init(struct mutex *m) | 436 | void mutex_init(struct mutex *m) |
433 | { | 437 | { |
438 | m->queue = NULL; | ||
434 | m->thread = NULL; | 439 | m->thread = NULL; |
440 | m->count = 0; | ||
435 | m->locked = 0; | 441 | m->locked = 0; |
436 | } | 442 | } |
437 | 443 | ||
438 | void mutex_lock(struct mutex *m) | 444 | void mutex_lock(struct mutex *m) |
439 | { | 445 | { |
440 | if (test_and_set(&m->locked, 1)) | 446 | struct thread_entry *const thread = thread_get_current(); |
447 | |||
448 | if(thread == m->thread) | ||
449 | { | ||
450 | m->count++; | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | if (!test_and_set(&m->locked, 1)) | ||
441 | { | 455 | { |
442 | block_thread(&m->thread); | 456 | m->thread = thread; |
457 | return; | ||
443 | } | 458 | } |
459 | |||
460 | block_thread_no_listlock(&m->queue); | ||
444 | } | 461 | } |
445 | 462 | ||
446 | void mutex_unlock(struct mutex *m) | 463 | void mutex_unlock(struct mutex *m) |
447 | { | 464 | { |
448 | if (m->thread != NULL) | 465 | /* unlocker not being the owner is an unlocking violation */ |
466 | if(m->thread != thread_get_current()) | ||
449 | { | 467 | { |
450 | wakeup_thread(&m->thread); | 468 | fprintf(stderr, "spinlock_unlock->wrong thread"); |
469 | exit(-1); | ||
470 | } | ||
471 | |||
472 | if (m->count > 0) | ||
473 | { | ||
474 | /* this thread still owns lock */ | ||
475 | m->count--; | ||
476 | return; | ||
451 | } | 477 | } |
452 | else | 478 | |
479 | m->thread = wakeup_thread_no_listlock(&m->queue); | ||
480 | |||
481 | if (m->thread == NULL) | ||
453 | { | 482 | { |
483 | /* release lock */ | ||
454 | m->locked = 0; | 484 | m->locked = 0; |
455 | } | 485 | } |
456 | } | 486 | } |
457 | 487 | ||
458 | void spinlock_lock(struct mutex *l) | 488 | void spinlock_init(struct spinlock *l) |
489 | { | ||
490 | l->locked = 0; | ||
491 | l->thread = NULL; | ||
492 | l->count = 0; | ||
493 | } | ||
494 | |||
495 | void spinlock_lock(struct spinlock *l) | ||
459 | { | 496 | { |
497 | struct thread_entry *const thread = thread_get_current(); | ||
498 | |||
499 | if (l->thread == thread) | ||
500 | { | ||
501 | l->count++; | ||
502 | return; | ||
503 | } | ||
504 | |||
460 | while(test_and_set(&l->locked, 1)) | 505 | while(test_and_set(&l->locked, 1)) |
461 | { | 506 | { |
462 | switch_thread(true, NULL); | 507 | switch_thread(NULL); |
463 | } | 508 | } |
509 | |||
510 | l->thread = thread; | ||
464 | } | 511 | } |
465 | 512 | ||
466 | void spinlock_unlock(struct mutex *l) | 513 | void spinlock_unlock(struct spinlock *l) |
467 | { | 514 | { |
515 | /* unlocker not being the owner is an unlocking violation */ | ||
516 | if(l->thread != thread_get_current()) | ||
517 | { | ||
518 | fprintf(stderr, "spinlock_unlock->wrong thread"); | ||
519 | exit(-1); | ||
520 | } | ||
521 | |||
522 | if (l->count > 0) | ||
523 | { | ||
524 | /* this thread still owns lock */ | ||
525 | l->count--; | ||
526 | return; | ||
527 | } | ||
528 | |||
529 | /* clear owner */ | ||
530 | l->thread = NULL; | ||
468 | l->locked = 0; | 531 | l->locked = 0; |
469 | } | 532 | } |
533 | |||
534 | void semaphore_init(struct semaphore *s, int max, int start) | ||
535 | { | ||
536 | if(max <= 0 || start < 0 || start > max) | ||
537 | { | ||
538 | fprintf(stderr, "semaphore_init->inv arg"); | ||
539 | exit(-1); | ||
540 | } | ||
541 | s->queue = NULL; | ||
542 | s->max = max; | ||
543 | s->count = start; | ||
544 | } | ||
545 | |||
546 | void semaphore_wait(struct semaphore *s) | ||
547 | { | ||
548 | if(--s->count >= 0) | ||
549 | return; | ||
550 | block_thread_no_listlock(&s->queue); | ||
551 | } | ||
552 | |||
553 | void semaphore_release(struct semaphore *s) | ||
554 | { | ||
555 | if(s->count < s->max) | ||
556 | { | ||
557 | if(++s->count <= 0) | ||
558 | { | ||
559 | if(s->queue == NULL) | ||
560 | { | ||
561 | /* there should be threads in this queue */ | ||
562 | fprintf(stderr, "semaphore->wakeup"); | ||
563 | exit(-1); | ||
564 | } | ||
565 | /* a thread was queued - wake it up */ | ||
566 | wakeup_thread_no_listlock(&s->queue); | ||
567 | } | ||
568 | } | ||
569 | } | ||
570 | |||
571 | void event_init(struct event *e, unsigned int flags) | ||
572 | { | ||
573 | e->queues[STATE_NONSIGNALED] = NULL; | ||
574 | e->queues[STATE_SIGNALED] = NULL; | ||
575 | e->state = flags & STATE_SIGNALED; | ||
576 | e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0; | ||
577 | } | ||
578 | |||
579 | void event_wait(struct event *e, unsigned int for_state) | ||
580 | { | ||
581 | unsigned int last_state = e->state; | ||
582 | |||
583 | if(e->automatic != 0) | ||
584 | { | ||
585 | /* wait for false always satisfied by definition | ||
586 | or if it just changed to false */ | ||
587 | if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED) | ||
588 | { | ||
589 | /* automatic - unsignal */ | ||
590 | e->state = STATE_NONSIGNALED; | ||
591 | return; | ||
592 | } | ||
593 | /* block until state matches */ | ||
594 | } | ||
595 | else if(for_state == last_state) | ||
596 | { | ||
597 | /* the state being waited for is the current state */ | ||
598 | return; | ||
599 | } | ||
600 | |||
601 | /* current state does not match wait-for state */ | ||
602 | block_thread_no_listlock(&e->queues[for_state]); | ||
603 | } | ||
604 | |||
605 | void event_set_state(struct event *e, unsigned int state) | ||
606 | { | ||
607 | unsigned int last_state = e->state; | ||
608 | |||
609 | if(last_state == state) | ||
610 | { | ||
611 | /* no change */ | ||
612 | return; | ||
613 | } | ||
614 | |||
615 | if(state == STATE_SIGNALED) | ||
616 | { | ||
617 | if(e->automatic != 0) | ||
618 | { | ||
619 | struct thread_entry *thread; | ||
620 | |||
621 | if(e->queues[STATE_NONSIGNALED] != NULL) | ||
622 | { | ||
623 | /* no thread should have ever blocked for nonsignaled */ | ||
624 | fprintf(stderr, "set_event_state->queue[NS]:S"); | ||
625 | exit(-1); | ||
626 | } | ||
627 | |||
628 | /* pass to next thread and keep unsignaled - "pulse" */ | ||
629 | thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]); | ||
630 | e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED; | ||
631 | } | ||
632 | else | ||
633 | { | ||
634 | /* release all threads waiting for signaled */ | ||
635 | thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]); | ||
636 | e->state = STATE_SIGNALED; | ||
637 | } | ||
638 | } | ||
639 | else | ||
640 | { | ||
641 | /* release all threads waiting for unsignaled */ | ||
642 | if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0) | ||
643 | { | ||
644 | /* no thread should have ever blocked */ | ||
645 | fprintf(stderr, "set_event_state->queue[NS]:NS"); | ||
646 | exit(-1); | ||
647 | } | ||
648 | |||
649 | thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]); | ||
650 | e->state = STATE_NONSIGNALED; | ||
651 | } | ||
652 | } | ||
diff --git a/uisimulator/sdl/thread-sdl.c b/uisimulator/sdl/thread-sdl.c index 0bd7d2534e..6a3c4af9eb 100644 --- a/uisimulator/sdl/thread-sdl.c +++ b/uisimulator/sdl/thread-sdl.c | |||
@@ -131,7 +131,7 @@ bool thread_sdl_init(void *param) | |||
131 | running->stack = " "; | 131 | running->stack = " "; |
132 | running->stack_size = 8; | 132 | running->stack_size = 8; |
133 | running->name = "main"; | 133 | running->name = "main"; |
134 | running->statearg = STATE_RUNNING; | 134 | running->state = STATE_RUNNING; |
135 | running->context.c = SDL_CreateCond(); | 135 | running->context.c = SDL_CreateCond(); |
136 | 136 | ||
137 | if (running->context.c == NULL) | 137 | if (running->context.c == NULL) |
@@ -154,43 +154,55 @@ bool thread_sdl_init(void *param) | |||
154 | return true; | 154 | return true; |
155 | } | 155 | } |
156 | 156 | ||
157 | void thread_sdl_lock(void) | ||
158 | { | ||
159 | SDL_LockMutex(m); | ||
160 | } | ||
161 | |||
162 | void thread_sdl_unlock(void) | ||
163 | { | ||
164 | SDL_UnlockMutex(m); | ||
165 | } | ||
166 | |||
157 | static int find_empty_thread_slot(void) | 167 | static int find_empty_thread_slot(void) |
158 | { | 168 | { |
159 | int n; | 169 | int n; |
160 | 170 | ||
161 | for (n = 0; n < MAXTHREADS; n++) | 171 | for (n = 0; n < MAXTHREADS; n++) |
162 | { | 172 | { |
163 | if (threads[n].name == NULL) | 173 | int state = threads[n].state; |
174 | |||
175 | if (state == STATE_KILLED) | ||
164 | break; | 176 | break; |
165 | } | 177 | } |
166 | 178 | ||
167 | return n; | 179 | return n; |
168 | } | 180 | } |
169 | 181 | ||
170 | static void add_to_list(struct thread_entry **list, | 182 | static void add_to_list_l(struct thread_entry **list, |
171 | struct thread_entry *thread) | 183 | struct thread_entry *thread) |
172 | { | 184 | { |
173 | if (*list == NULL) | 185 | if (*list == NULL) |
174 | { | 186 | { |
175 | /* Insert into unoccupied list */ | 187 | /* Insert into unoccupied list */ |
176 | thread->next = thread; | 188 | thread->l.next = thread; |
177 | thread->prev = thread; | 189 | thread->l.prev = thread; |
178 | *list = thread; | 190 | *list = thread; |
179 | } | 191 | } |
180 | else | 192 | else |
181 | { | 193 | { |
182 | /* Insert last */ | 194 | /* Insert last */ |
183 | thread->next = *list; | 195 | thread->l.next = *list; |
184 | thread->prev = (*list)->prev; | 196 | thread->l.prev = (*list)->l.prev; |
185 | thread->prev->next = thread; | 197 | thread->l.prev->l.next = thread; |
186 | (*list)->prev = thread; | 198 | (*list)->l.prev = thread; |
187 | } | 199 | } |
188 | } | 200 | } |
189 | 201 | ||
190 | static void remove_from_list(struct thread_entry **list, | 202 | static void remove_from_list_l(struct thread_entry **list, |
191 | struct thread_entry *thread) | 203 | struct thread_entry *thread) |
192 | { | 204 | { |
193 | if (thread == thread->next) | 205 | if (thread == thread->l.next) |
194 | { | 206 | { |
195 | /* The only item */ | 207 | /* The only item */ |
196 | *list = NULL; | 208 | *list = NULL; |
@@ -200,12 +212,12 @@ static void remove_from_list(struct thread_entry **list, | |||
200 | if (thread == *list) | 212 | if (thread == *list) |
201 | { | 213 | { |
202 | /* List becomes next item */ | 214 | /* List becomes next item */ |
203 | *list = thread->next; | 215 | *list = thread->l.next; |
204 | } | 216 | } |
205 | 217 | ||
206 | /* Fix links to jump over the removed entry. */ | 218 | /* Fix links to jump over the removed entry. */ |
207 | thread->prev->next = thread->next; | 219 | thread->l.prev->l.next = thread->l.next; |
208 | thread->next->prev = thread->prev; | 220 | thread->l.next->l.prev = thread->l.prev; |
209 | } | 221 | } |
210 | 222 | ||
211 | struct thread_entry *thread_get_current(void) | 223 | struct thread_entry *thread_get_current(void) |
@@ -213,17 +225,7 @@ struct thread_entry *thread_get_current(void) | |||
213 | return running; | 225 | return running; |
214 | } | 226 | } |
215 | 227 | ||
216 | void thread_sdl_lock(void) | 228 | void switch_thread(struct thread_entry *old) |
217 | { | ||
218 | SDL_LockMutex(m); | ||
219 | } | ||
220 | |||
221 | void thread_sdl_unlock(void) | ||
222 | { | ||
223 | SDL_UnlockMutex(m); | ||
224 | } | ||
225 | |||
226 | void switch_thread(bool save_context, struct thread_entry **blocked_list) | ||
227 | { | 229 | { |
228 | struct thread_entry *current = running; | 230 | struct thread_entry *current = running; |
229 | 231 | ||
@@ -235,7 +237,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list) | |||
235 | if (threads_exit) | 237 | if (threads_exit) |
236 | remove_thread(NULL); | 238 | remove_thread(NULL); |
237 | 239 | ||
238 | (void)save_context; (void)blocked_list; | 240 | (void)old; |
239 | } | 241 | } |
240 | 242 | ||
241 | void sleep_thread(int ticks) | 243 | void sleep_thread(int ticks) |
@@ -244,7 +246,7 @@ void sleep_thread(int ticks) | |||
244 | int rem; | 246 | int rem; |
245 | 247 | ||
246 | current = running; | 248 | current = running; |
247 | current->statearg = STATE_SLEEPING; | 249 | current->state = STATE_SLEEPING; |
248 | 250 | ||
249 | rem = (SDL_GetTicks() - start_tick) % (1000/HZ); | 251 | rem = (SDL_GetTicks() - start_tick) % (1000/HZ); |
250 | if (rem < 0) | 252 | if (rem < 0) |
@@ -267,7 +269,7 @@ void sleep_thread(int ticks) | |||
267 | 269 | ||
268 | running = current; | 270 | running = current; |
269 | 271 | ||
270 | current->statearg = STATE_RUNNING; | 272 | current->state = STATE_RUNNING; |
271 | 273 | ||
272 | if (threads_exit) | 274 | if (threads_exit) |
273 | remove_thread(NULL); | 275 | remove_thread(NULL); |
@@ -289,10 +291,21 @@ int runthread(void *data) | |||
289 | if (setjmp(*current_jmpbuf) == 0) | 291 | if (setjmp(*current_jmpbuf) == 0) |
290 | { | 292 | { |
291 | /* Run the thread routine */ | 293 | /* Run the thread routine */ |
292 | current->context.start(); | 294 | if (current->state == STATE_FROZEN) |
293 | THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n", | 295 | { |
294 | current - threads, THREAD_SDL_GET_NAME(current)); | 296 | SDL_CondWait(current->context.c, m); |
295 | /* Thread routine returned - suicide */ | 297 | running = current; |
298 | |||
299 | } | ||
300 | |||
301 | if (!threads_exit) | ||
302 | { | ||
303 | current->context.start(); | ||
304 | THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n", | ||
305 | current - threads, THREAD_SDL_GET_NAME(current)); | ||
306 | /* Thread routine returned - suicide */ | ||
307 | } | ||
308 | |||
296 | remove_thread(NULL); | 309 | remove_thread(NULL); |
297 | } | 310 | } |
298 | else | 311 | else |
@@ -306,7 +319,7 @@ int runthread(void *data) | |||
306 | 319 | ||
307 | struct thread_entry* | 320 | struct thread_entry* |
308 | create_thread(void (*function)(void), void* stack, int stack_size, | 321 | create_thread(void (*function)(void), void* stack, int stack_size, |
309 | const char *name) | 322 | unsigned flags, const char *name) |
310 | { | 323 | { |
311 | /** Avoid compiler warnings */ | 324 | /** Avoid compiler warnings */ |
312 | SDL_Thread* t; | 325 | SDL_Thread* t; |
@@ -340,7 +353,8 @@ struct thread_entry* | |||
340 | threads[slot].stack = stack; | 353 | threads[slot].stack = stack; |
341 | threads[slot].stack_size = stack_size; | 354 | threads[slot].stack_size = stack_size; |
342 | threads[slot].name = name; | 355 | threads[slot].name = name; |
343 | threads[slot].statearg = STATE_RUNNING; | 356 | threads[slot].state = (flags & CREATE_THREAD_FROZEN) ? |
357 | STATE_FROZEN : STATE_RUNNING; | ||
344 | threads[slot].context.start = function; | 358 | threads[slot].context.start = function; |
345 | threads[slot].context.t = t; | 359 | threads[slot].context.t = t; |
346 | threads[slot].context.c = cond; | 360 | threads[slot].context.c = cond; |
@@ -351,12 +365,13 @@ struct thread_entry* | |||
351 | return &threads[slot]; | 365 | return &threads[slot]; |
352 | } | 366 | } |
353 | 367 | ||
354 | void block_thread(struct thread_entry **list) | 368 | void _block_thread(struct thread_queue *tq) |
355 | { | 369 | { |
356 | struct thread_entry *thread = running; | 370 | struct thread_entry *thread = running; |
357 | 371 | ||
358 | thread->statearg = STATE_BLOCKED; | 372 | thread->state = STATE_BLOCKED; |
359 | add_to_list(list, thread); | 373 | thread->bqp = tq; |
374 | add_to_list_l(&tq->queue, thread); | ||
360 | 375 | ||
361 | SDL_CondWait(thread->context.c, m); | 376 | SDL_CondWait(thread->context.c, m); |
362 | running = thread; | 377 | running = thread; |
@@ -365,44 +380,56 @@ void block_thread(struct thread_entry **list) | |||
365 | remove_thread(NULL); | 380 | remove_thread(NULL); |
366 | } | 381 | } |
367 | 382 | ||
368 | void block_thread_w_tmo(struct thread_entry **list, int ticks) | 383 | void block_thread_w_tmo(struct thread_queue *tq, int ticks) |
369 | { | 384 | { |
370 | struct thread_entry *thread = running; | 385 | struct thread_entry *thread = running; |
371 | 386 | ||
372 | thread->statearg = STATE_BLOCKED_W_TMO; | 387 | thread->state = STATE_BLOCKED_W_TMO; |
373 | add_to_list(list, thread); | 388 | thread->bqp = tq; |
389 | add_to_list_l(&tq->queue, thread); | ||
374 | 390 | ||
375 | SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks); | 391 | SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks); |
376 | running = thread; | 392 | running = thread; |
377 | 393 | ||
378 | if (thread->statearg == STATE_BLOCKED_W_TMO) | 394 | if (thread->state == STATE_BLOCKED_W_TMO) |
379 | { | 395 | { |
380 | /* Timed out */ | 396 | /* Timed out */ |
381 | remove_from_list(list, thread); | 397 | remove_from_list_l(&tq->queue, thread); |
382 | thread->statearg = STATE_RUNNING; | 398 | thread->state = STATE_RUNNING; |
383 | } | 399 | } |
384 | 400 | ||
385 | if (threads_exit) | 401 | if (threads_exit) |
386 | remove_thread(NULL); | 402 | remove_thread(NULL); |
387 | } | 403 | } |
388 | 404 | ||
389 | void wakeup_thread(struct thread_entry **list) | 405 | struct thread_entry * _wakeup_thread(struct thread_queue *tq) |
390 | { | 406 | { |
391 | struct thread_entry *thread = *list; | 407 | struct thread_entry *thread = tq->queue; |
392 | 408 | ||
393 | if (thread == NULL) | 409 | if (thread == NULL) |
394 | { | 410 | { |
395 | return; | 411 | return NULL; |
396 | } | 412 | } |
397 | 413 | ||
398 | switch (thread->statearg) | 414 | switch (thread->state) |
399 | { | 415 | { |
400 | case STATE_BLOCKED: | 416 | case STATE_BLOCKED: |
401 | case STATE_BLOCKED_W_TMO: | 417 | case STATE_BLOCKED_W_TMO: |
402 | remove_from_list(list, thread); | 418 | remove_from_list_l(&tq->queue, thread); |
403 | thread->statearg = STATE_RUNNING; | 419 | thread->state = STATE_RUNNING; |
420 | SDL_CondSignal(thread->context.c); | ||
421 | return thread; | ||
422 | default: | ||
423 | return NULL; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | void thread_thaw(struct thread_entry *thread) | ||
428 | { | ||
429 | if (thread->state == STATE_FROZEN) | ||
430 | { | ||
431 | thread->state = STATE_RUNNING; | ||
404 | SDL_CondSignal(thread->context.c); | 432 | SDL_CondSignal(thread->context.c); |
405 | break; | ||
406 | } | 433 | } |
407 | } | 434 | } |
408 | 435 | ||
@@ -434,12 +461,24 @@ void remove_thread(struct thread_entry *thread) | |||
434 | thread->context.t = NULL; | 461 | thread->context.t = NULL; |
435 | 462 | ||
436 | if (thread != current) | 463 | if (thread != current) |
464 | { | ||
465 | switch (thread->state) | ||
466 | { | ||
467 | case STATE_BLOCKED: | ||
468 | case STATE_BLOCKED_W_TMO: | ||
469 | /* Remove thread from object it's waiting on */ | ||
470 | remove_from_list_l(&thread->bqp->queue, thread); | ||
471 | break; | ||
472 | } | ||
473 | |||
437 | SDL_CondSignal(c); | 474 | SDL_CondSignal(c); |
475 | } | ||
438 | 476 | ||
439 | THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n", | 477 | THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n", |
440 | thread - threads, THREAD_SDL_GET_NAME(thread)); | 478 | thread - threads, THREAD_SDL_GET_NAME(thread)); |
441 | 479 | ||
442 | thread->name = NULL; | 480 | thread_queue_wake_no_listlock(&thread->queue); |
481 | thread->state = STATE_KILLED; | ||
443 | 482 | ||
444 | SDL_DestroyCond(c); | 483 | SDL_DestroyCond(c); |
445 | 484 | ||
@@ -453,15 +492,26 @@ void remove_thread(struct thread_entry *thread) | |||
453 | SDL_KillThread(t); | 492 | SDL_KillThread(t); |
454 | } | 493 | } |
455 | 494 | ||
495 | void thread_wait(struct thread_entry *thread) | ||
496 | { | ||
497 | if (thread == NULL) | ||
498 | thread = running; | ||
499 | |||
500 | if (thread->state != STATE_KILLED) | ||
501 | { | ||
502 | block_thread_no_listlock(&thread->queue); | ||
503 | } | ||
504 | } | ||
505 | |||
456 | int thread_stack_usage(const struct thread_entry *thread) | 506 | int thread_stack_usage(const struct thread_entry *thread) |
457 | { | 507 | { |
458 | return 50; | 508 | return 50; |
459 | (void)thread; | 509 | (void)thread; |
460 | } | 510 | } |
461 | 511 | ||
462 | int thread_get_status(const struct thread_entry *thread) | 512 | unsigned thread_get_status(const struct thread_entry *thread) |
463 | { | 513 | { |
464 | return thread->statearg; | 514 | return thread->state; |
465 | } | 515 | } |
466 | 516 | ||
467 | /* Return name if one or ID if none */ | 517 | /* Return name if one or ID if none */ |