author     Michael Sevakis <jethead71@rockbox.org>    2007-10-16 01:25:17 +0000
committer  Michael Sevakis <jethead71@rockbox.org>    2007-10-16 01:25:17 +0000
commit     a9b2fb5ee3114fe835f6515b6aeae7454f66d821 (patch)
tree       fc4e96d0c1f215565918406c8827b16b806c1345 /uisimulator/sdl/kernel.c
parent     a3fbbc9fa7e12fd3fce122bbd235dc362050e024 (diff)
Finally, full multicore support for PortalPlayer 502x targets, with an eye towards the possibility of other types. All SVN targets use the low-lag code to speed up blocking operations. Most files are modified here simply due to a name change to actually support a real event object and a param change to create_thread. Add some use of the new features, but just sit on things for a bit and leave full integration for later. Work will continue to address size on sensitive targets and simplify things if possible. Any PP target having problems with SWP can easily be changed to sw corelocks with one #define change in config.h, though only PP5020 has shown an issue and it seems to work without any difficulties after that change.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
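
Note: the "one #define change" mentioned above lives in the target's section of config.h. A minimal sketch of what that switch could look like, assuming the PP port's CONFIG_CORELOCK / SW_CORELOCK / CORELOCK_SWAP naming (verify the exact symbols in firmware/export/config.h before relying on them):

    /* Hypothetical config.h excerpt - the symbol names here are
       assumptions, not copied from the tree */
    #if CONFIG_CPU == PP5020
    /* PP5020's SWP instruction has shown problems; fall back to
       software corelocks for cross-core locking */
    #define CONFIG_CORELOCK SW_CORELOCK
    #else
    /* other PP502x targets can keep SWP-based locking */
    #define CONFIG_CORELOCK CORELOCK_SWAP
    #endif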
Diffstat (limited to 'uisimulator/sdl/kernel.c')
-rw-r--r--  uisimulator/sdl/kernel.c  225
1 files changed, 204 insertions, 21 deletions
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
index e2aa0d31c4..8a54cd91fc 100644
--- a/uisimulator/sdl/kernel.c
+++ b/uisimulator/sdl/kernel.c
@@ -29,7 +29,7 @@ volatile long current_tick = 0;
 static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
 
 /* This array holds all queues that are initiated. It is used for broadcast. */
-static struct event_queue *all_queues[32];
+static struct event_queue *all_queues[MAX_NUM_QUEUES];
 static int num_queues = 0;
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -53,7 +53,7 @@ static void queue_release_sender(struct thread_entry **sender,
                                  intptr_t retval)
 {
     (*sender)->retval = retval;
-    wakeup_thread(sender);
+    wakeup_thread_no_listlock(sender);
     if(*sender != NULL)
     {
         fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
@@ -98,14 +98,14 @@ void queue_init(struct event_queue *q, bool register_queue)
 {
     q->read = 0;
     q->write = 0;
-    q->thread = NULL;
+    thread_queue_init(&q->queue);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
 #endif
 
     if(register_queue)
     {
-        if(num_queues >= 32)
+        if(num_queues >= MAX_NUM_QUEUES)
         {
             fprintf(stderr, "queue_init->out of queues");
             exit(-1);
@@ -142,7 +142,7 @@ void queue_delete(struct event_queue *q)
     }
 
     /* Release threads waiting on queue head */
-    wakeup_thread(&q->thread);
+    thread_queue_wake(&q->queue);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     /* Release waiting threads and reply to any dequeued message
@@ -155,7 +155,7 @@ void queue_delete(struct event_queue *q)
     q->write = 0;
 }
 
-void queue_wait(struct event_queue *q, struct event *ev)
+void queue_wait(struct event_queue *q, struct queue_event *ev)
 {
     unsigned int rd;
 
@@ -169,7 +169,11 @@ void queue_wait(struct event_queue *q, struct event *ev)
 
     if (q->read == q->write)
     {
-        block_thread(&q->thread);
+        do
+        {
+            block_thread(&q->queue);
+        }
+        while (q->read == q->write);
     }
 
     rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -184,7 +188,7 @@ void queue_wait(struct event_queue *q, struct event *ev)
 #endif
 }
 
-void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
+void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 {
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if (q->send && q->send->curr_sender)
@@ -196,7 +200,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 
     if (q->read == q->write && ticks > 0)
     {
-        block_thread_w_tmo(&q->thread, ticks);
+        block_thread_w_tmo(&q->queue, ticks);
     }
 
     if(q->read != q->write)
@@ -238,7 +242,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     }
 #endif
 
-    wakeup_thread(&q->thread);
+    wakeup_thread(&q->queue);
 }
 
 /* Special thread-synced queue_post for button driver or any other preemptive sim thread */
@@ -268,9 +272,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
             queue_release_sender(spp, 0);
         }
 
-        wakeup_thread(&q->thread);
+        wakeup_thread(&q->queue);
 
-        block_thread(spp);
+        block_thread_no_listlock(spp);
         return thread_get_current()->retval;
     }
 
@@ -370,7 +374,7 @@ int queue_syncbroadcast(long id, intptr_t data)
 
 void yield(void)
 {
-    switch_thread(true, NULL);
+    switch_thread(NULL);
 }
 
 void sleep(int ticks)
@@ -431,39 +435,218 @@ int tick_remove_task(void (*f)(void))
    multitasking, but is better than nothing at all */
 void mutex_init(struct mutex *m)
 {
+    m->queue = NULL;
     m->thread = NULL;
+    m->count = 0;
     m->locked = 0;
 }
 
 void mutex_lock(struct mutex *m)
 {
-    if (test_and_set(&m->locked, 1))
+    struct thread_entry *const thread = thread_get_current();
+
+    if(thread == m->thread)
+    {
+        m->count++;
+        return;
+    }
+
+    if (!test_and_set(&m->locked, 1))
     {
-        block_thread(&m->thread);
+        m->thread = thread;
+        return;
     }
+
+    block_thread_no_listlock(&m->queue);
 }
 
 void mutex_unlock(struct mutex *m)
 {
-    if (m->thread != NULL)
+    /* unlocker not being the owner is an unlocking violation */
+    if(m->thread != thread_get_current())
     {
-        wakeup_thread(&m->thread);
+        fprintf(stderr, "spinlock_unlock->wrong thread");
+        exit(-1);
+    }
+
+    if (m->count > 0)
+    {
+        /* this thread still owns lock */
+        m->count--;
+        return;
     }
-    else
+
+    m->thread = wakeup_thread_no_listlock(&m->queue);
+
+    if (m->thread == NULL)
     {
+        /* release lock */
         m->locked = 0;
     }
 }
 
-void spinlock_lock(struct mutex *l)
+void spinlock_init(struct spinlock *l)
+{
+    l->locked = 0;
+    l->thread = NULL;
+    l->count = 0;
+}
+
+void spinlock_lock(struct spinlock *l)
 {
+    struct thread_entry *const thread = thread_get_current();
+
+    if (l->thread == thread)
+    {
+        l->count++;
+        return;
+    }
+
     while(test_and_set(&l->locked, 1))
     {
-        switch_thread(true, NULL);
+        switch_thread(NULL);
     }
+
+    l->thread = thread;
 }
 
-void spinlock_unlock(struct mutex *l)
+void spinlock_unlock(struct spinlock *l)
 {
+    /* unlocker not being the owner is an unlocking violation */
+    if(l->thread != thread_get_current())
+    {
+        fprintf(stderr, "spinlock_unlock->wrong thread");
+        exit(-1);
+    }
+
+    if (l->count > 0)
+    {
+        /* this thread still owns lock */
+        l->count--;
+        return;
+    }
+
+    /* clear owner */
+    l->thread = NULL;
     l->locked = 0;
 }
+
+void semaphore_init(struct semaphore *s, int max, int start)
+{
+    if(max <= 0 || start < 0 || start > max)
+    {
+        fprintf(stderr, "semaphore_init->inv arg");
+        exit(-1);
+    }
+    s->queue = NULL;
+    s->max = max;
+    s->count = start;
+}
+
+void semaphore_wait(struct semaphore *s)
+{
+    if(--s->count >= 0)
+        return;
+    block_thread_no_listlock(&s->queue);
+}
+
+void semaphore_release(struct semaphore *s)
+{
+    if(s->count < s->max)
+    {
+        if(++s->count <= 0)
+        {
+            if(s->queue == NULL)
+            {
+                /* there should be threads in this queue */
+                fprintf(stderr, "semaphore->wakeup");
+                exit(-1);
+            }
+            /* a thread was queued - wake it up */
+            wakeup_thread_no_listlock(&s->queue);
+        }
+    }
+}
+
+void event_init(struct event *e, unsigned int flags)
+{
+    e->queues[STATE_NONSIGNALED] = NULL;
+    e->queues[STATE_SIGNALED] = NULL;
+    e->state = flags & STATE_SIGNALED;
+    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
+}
+
+void event_wait(struct event *e, unsigned int for_state)
+{
+    unsigned int last_state = e->state;
+
+    if(e->automatic != 0)
+    {
+        /* wait for false always satisfied by definition
+           or if it just changed to false */
+        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
+        {
+            /* automatic - unsignal */
+            e->state = STATE_NONSIGNALED;
+            return;
+        }
+        /* block until state matches */
+    }
+    else if(for_state == last_state)
+    {
+        /* the state being waited for is the current state */
+        return;
+    }
+
+    /* current state does not match wait-for state */
+    block_thread_no_listlock(&e->queues[for_state]);
+}
+
+void event_set_state(struct event *e, unsigned int state)
+{
+    unsigned int last_state = e->state;
+
+    if(last_state == state)
+    {
+        /* no change */
+        return;
+    }
+
+    if(state == STATE_SIGNALED)
+    {
+        if(e->automatic != 0)
+        {
+            struct thread_entry *thread;
+
+            if(e->queues[STATE_NONSIGNALED] != NULL)
+            {
+                /* no thread should have ever blocked for nonsignaled */
+                fprintf(stderr, "set_event_state->queue[NS]:S");
+                exit(-1);
+            }
+
+            /* pass to next thread and keep unsignaled - "pulse" */
+            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
+            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
+        }
+        else
+        {
+            /* release all threads waiting for signaled */
+            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
+            e->state = STATE_SIGNALED;
+        }
+    }
+    else
+    {
+        /* release all threads waiting for unsignaled */
+        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
+        {
+            /* no thread should have ever blocked */
+            fprintf(stderr, "set_event_state->queue[NS]:NS");
+            exit(-1);
+        }
+
+        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
+        e->state = STATE_NONSIGNALED;
+    }
+}
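
As a usage illustration only (not part of this commit), the new semaphore and auto-reset event primitives added above could be combined in sim threads roughly as follows. The signatures come straight from the diff; the variable and function names are invented for the example, and kernel.h is assumed to declare the primitives:

    #include "kernel.h" /* assumed to declare struct semaphore/event
                           and the calls used below */

    static struct semaphore slots;  /* counts free buffer slots */
    static struct event data_ready; /* auto-reset: wakes one consumer per set */

    static void sync_example_init(void)
    {
        semaphore_init(&slots, 4, 4);             /* max 4, all initially free */
        event_init(&data_ready, EVENT_AUTOMATIC); /* starts unsignaled */
    }

    static void producer_thread(void)
    {
        semaphore_wait(&slots);  /* claim a slot; blocks when none are free */
        /* ... fill the slot ... */
        event_set_state(&data_ready, STATE_SIGNALED); /* "pulse" one consumer */
    }

    static void consumer_thread(void)
    {
        event_wait(&data_ready, STATE_SIGNALED); /* block until pulsed */
        /* ... drain the slot ... */
        semaphore_release(&slots); /* return the slot, possibly waking producer */
    }

With EVENT_AUTOMATIC set, event_set_state() wakes exactly one waiter and leaves the event unsignaled again, so each produced item releases a single consumer.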